From 41dbac69037ddd770a94cf41f39beff92aec9568 Mon Sep 17 00:00:00 2001 From: Alex Filipchik Date: Tue, 27 Aug 2019 19:19:52 -0700 Subject: [PATCH] Fixed unit test --- .../hudi/common/util/TestCompactionUtils.java | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/hudi-common/src/test/java/org/apache/hudi/common/util/TestCompactionUtils.java b/hudi-common/src/test/java/org/apache/hudi/common/util/TestCompactionUtils.java index 4c37d8a45..a5a139dd6 100644 --- a/hudi-common/src/test/java/org/apache/hudi/common/util/TestCompactionUtils.java +++ b/hudi-common/src/test/java/org/apache/hudi/common/util/TestCompactionUtils.java @@ -162,7 +162,22 @@ public class TestCompactionUtils { HoodieCompactionPlan plan1 = createCompactionPlan(metaClient, "000", "001", 10, true, true); HoodieCompactionPlan plan2 = createCompactionPlan(metaClient, "002", "003", 0, false, false); scheduleCompaction(metaClient, "001", plan1); scheduleCompaction(metaClient, "003", plan2); - // schedule same plan again so that there will be duplicates + // schedule similar plan again so that there will be duplicates + plan1.getOperations().get(0).setDataFilePath("bla"); + scheduleCompaction(metaClient, "005", plan1); + metaClient = new HoodieTableMetaClient(metaClient.getHadoopConf(), basePath, true); + Map<HoodieFileGroupId, Pair<String, HoodieCompactionOperation>> res = + CompactionUtils.getAllPendingCompactionOperations(metaClient); + } + + @Test + public void testGetAllPendingCompactionOperationsWithFullDupFileId() throws IOException { + // Case where there is duplicate fileIds in compaction requests + HoodieCompactionPlan plan1 = createCompactionPlan(metaClient, "000", "001", 10, true, true); + HoodieCompactionPlan plan2 = createCompactionPlan(metaClient, "002", "003", 0, false, false); + scheduleCompaction(metaClient, "001", plan1); + scheduleCompaction(metaClient, "003", plan2); + // schedule same plan again so that there will be duplicates. 
It should not fail as it is a full duplicate scheduleCompaction(metaClient, "005", plan1); metaClient = new HoodieTableMetaClient(metaClient.getHadoopConf(), basePath, true); Map<HoodieFileGroupId, Pair<String, HoodieCompactionOperation>> res =