From 366bb10d8c4fe98424f09a6cf6f4aee7716451a4 Mon Sep 17 00:00:00 2001 From: Raymond Xu <2701446+xushiyan@users.noreply.github.com> Date: Wed, 6 May 2020 04:15:20 -0700 Subject: [PATCH] [HUDI-812] Migrate hudi common tests to JUnit 5 (#1590) * [HUDI-812] Migrate hudi-common tests to JUnit 5 --- .../hudi/common/HoodieClientTestHarness.java | 4 +- hudi-common/pom.xml | 14 +- .../hudi/common/bloom/TestBloomFilter.java | 51 ++- .../bloom/TestInternalDynamicBloomFilter.java | 10 +- .../apache/hudi/common/fs/TestFSUtils.java | 124 +++--- .../common/fs/inline/TestHFileInLining.java | 34 +- .../fs/inline/TestInLineFileSystem.java | 148 +++---- .../fs/inline/TestInMemoryFileSystem.java | 61 ++- .../hudi/common/model/HoodieTestUtils.java | 6 +- .../model/TestHoodieCommitMetadata.java | 27 +- .../hudi/common/model/TestHoodieRecord.java | 16 +- .../common/model/TestHoodieWriteStat.java | 6 +- .../common/storage/TestStorageSchemes.java | 15 +- .../table/TestHoodieTableMetaClient.java | 4 +- .../common/table/log/TestHoodieLogFormat.java | 363 +++++++++--------- .../log/TestHoodieLogFormatAppendFailure.java | 18 +- .../table/log/TestHoodieLogFormatVersion.java | 6 +- .../timeline/TestHoodieActiveTimeline.java | 73 ++-- .../view/TestHoodieTableFileSystemView.java | 4 +- .../table/view/TestIncrementalFSViewSync.java | 124 +++--- ...TestRocksDBBasedIncrementalFSViewSync.java | 4 +- .../HoodieCommonTestHarness.java | 15 +- .../HoodieCommonTestHarnessJunit5.java | 52 --- .../hudi/common/util/CompactionTestUtils.java | 12 +- .../hudi/common/util/TestBase64CodecUtil.java | 7 +- .../hudi/common/util/TestCompactionUtils.java | 53 +-- .../util/TestDFSPropertiesConfiguration.java | 33 +- .../hudi/common/util/TestFileIOUtils.java | 4 +- .../hudi/common/util/TestNumericUtils.java | 6 +- .../hudi/common/util/TestParquetUtils.java | 67 ++-- .../common/util/TestSerializationUtils.java | 15 +- .../hudi/common/util/TestStringUtils.java | 10 +- .../util/collection/TestDiskBasedMap.java | 16 +- 
.../collection/TestExternalSpillableMap.java | 31 +- .../util/collection/TestRocksDBManager.java | 53 +-- .../util/collection/TestRocksDbBasedMap.java | 13 +- .../hadoop/TestHoodieROTablePathFilter.java | 4 +- .../TestHoodieCombineHiveInputFormat.java | 6 +- .../java/org/apache/hudi/hive/TestUtil.java | 11 +- .../utilities/TestHoodieSnapshotCopier.java | 4 +- .../TestKafkaConnectHdfsProvider.java | 4 +- pom.xml | 8 - style/checkstyle.xml | 6 +- 43 files changed, 714 insertions(+), 828 deletions(-) rename hudi-common/src/test/java/org/apache/hudi/common/{ => testutils}/HoodieCommonTestHarness.java (90%) delete mode 100644 hudi-common/src/test/java/org/apache/hudi/common/testutils/HoodieCommonTestHarnessJunit5.java diff --git a/hudi-client/src/test/java/org/apache/hudi/common/HoodieClientTestHarness.java b/hudi-client/src/test/java/org/apache/hudi/common/HoodieClientTestHarness.java index 624c1b804..1312b23ee 100644 --- a/hudi-client/src/test/java/org/apache/hudi/common/HoodieClientTestHarness.java +++ b/hudi-client/src/test/java/org/apache/hudi/common/HoodieClientTestHarness.java @@ -23,7 +23,7 @@ import org.apache.hudi.common.fs.FSUtils; import org.apache.hudi.common.minicluster.HdfsTestService; import org.apache.hudi.common.model.HoodieTestUtils; import org.apache.hudi.common.table.HoodieTableMetaClient; -import org.apache.hudi.common.testutils.HoodieCommonTestHarnessJunit5; +import org.apache.hudi.common.testutils.HoodieCommonTestHarness; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -45,7 +45,7 @@ import java.util.concurrent.atomic.AtomicInteger; /** * The test harness for resource initialization and cleanup. 
*/ -public abstract class HoodieClientTestHarness extends HoodieCommonTestHarnessJunit5 implements Serializable { +public abstract class HoodieClientTestHarness extends HoodieCommonTestHarness implements Serializable { private static final Logger LOG = LoggerFactory.getLogger(HoodieClientTestHarness.class); diff --git a/hudi-common/pom.xml b/hudi-common/pom.xml index b65cc3473..a05b56920 100644 --- a/hudi-common/pom.xml +++ b/hudi-common/pom.xml @@ -147,12 +147,6 @@ test - - junit - junit - test - - org.junit.jupiter junit-jupiter-api @@ -192,14 +186,8 @@ com.github.stefanbirkner system-rules - 1.16.0 + 1.17.2 test - - - junit - junit-dep - - diff --git a/hudi-common/src/test/java/org/apache/hudi/common/bloom/TestBloomFilter.java b/hudi-common/src/test/java/org/apache/hudi/common/bloom/TestBloomFilter.java index 1e0514cdd..552098e71 100644 --- a/hudi-common/src/test/java/org/apache/hudi/common/bloom/TestBloomFilter.java +++ b/hudi-common/src/test/java/org/apache/hudi/common/bloom/TestBloomFilter.java @@ -18,72 +18,65 @@ package org.apache.hudi.common.bloom; -import org.junit.Assert; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.junit.runners.Parameterized.Parameters; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collection; import java.util.List; import java.util.UUID; +import static org.junit.jupiter.api.Assertions.assertTrue; + /** * Unit tests {@link SimpleBloomFilter} and {@link HoodieDynamicBoundedBloomFilter}. 
*/ -@RunWith(Parameterized.class) public class TestBloomFilter { - private final String versionToTest; - // name attribute is optional, provide a unique name for test // multiple parameters, uses Collection - @Parameters() - public static Collection data() { - return Arrays.asList(new Object[][] { - {BloomFilterTypeCode.SIMPLE.name()}, - {BloomFilterTypeCode.DYNAMIC_V0.name()} - }); + public static List bloomFilterTypeCodes() { + return Arrays.asList( + Arguments.of(BloomFilterTypeCode.SIMPLE.name()), + Arguments.of(BloomFilterTypeCode.DYNAMIC_V0.name()) + ); } - public TestBloomFilter(String versionToTest) { - this.versionToTest = versionToTest; - } - - @Test - public void testAddKey() { + @ParameterizedTest + @MethodSource("bloomFilterTypeCodes") + public void testAddKey(String typeCode) { List inputs; int[] sizes = {100, 1000, 10000}; for (int size : sizes) { inputs = new ArrayList<>(); - BloomFilter filter = getBloomFilter(versionToTest, size, 0.000001, size * 10); + BloomFilter filter = getBloomFilter(typeCode, size, 0.000001, size * 10); for (int i = 0; i < size; i++) { String key = UUID.randomUUID().toString(); inputs.add(key); filter.add(key); } for (java.lang.String key : inputs) { - Assert.assertTrue("Filter should have returned true for " + key, filter.mightContain(key)); + assertTrue(filter.mightContain(key), "Filter should have returned true for " + key); } for (int i = 0; i < 100; i++) { String randomKey = UUID.randomUUID().toString(); if (inputs.contains(randomKey)) { - Assert.assertTrue("Filter should have returned true for " + randomKey, filter.mightContain(randomKey)); + assertTrue(filter.mightContain(randomKey), "Filter should have returned true for " + randomKey); } } } } - @Test - public void testSerialize() { + @ParameterizedTest + @MethodSource("bloomFilterTypeCodes") + public void testSerialize(String typeCode) { List inputs; int[] sizes = {100, 1000, 10000}; for (int size : sizes) { inputs = new ArrayList<>(); - BloomFilter filter = 
getBloomFilter(versionToTest, size, 0.000001, size * 10); + BloomFilter filter = getBloomFilter(typeCode, size, 0.000001, size * 10); for (int i = 0; i < size; i++) { String key = UUID.randomUUID().toString(); inputs.add(key); @@ -92,9 +85,9 @@ public class TestBloomFilter { String serString = filter.serializeToString(); BloomFilter recreatedBloomFilter = BloomFilterFactory - .fromString(serString, versionToTest); + .fromString(serString, typeCode); for (String key : inputs) { - Assert.assertTrue("Filter should have returned true for " + key, recreatedBloomFilter.mightContain(key)); + assertTrue(recreatedBloomFilter.mightContain(key), "Filter should have returned true for " + key); } } } diff --git a/hudi-common/src/test/java/org/apache/hudi/common/bloom/TestInternalDynamicBloomFilter.java b/hudi-common/src/test/java/org/apache/hudi/common/bloom/TestInternalDynamicBloomFilter.java index 58400215e..5940da15d 100644 --- a/hudi-common/src/test/java/org/apache/hudi/common/bloom/TestInternalDynamicBloomFilter.java +++ b/hudi-common/src/test/java/org/apache/hudi/common/bloom/TestInternalDynamicBloomFilter.java @@ -19,11 +19,13 @@ package org.apache.hudi.common.bloom; import org.apache.hadoop.util.hash.Hash; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.util.UUID; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; + /** * Unit tests {@link InternalDynamicBloomFilter} for size bounding. 
*/ @@ -48,9 +50,9 @@ public class TestInternalDynamicBloomFilter { if (index != 0) { int curLength = serString.length(); if (index > indexForMaxGrowth) { - Assert.assertEquals("Length should not increase after hitting max entries", curLength, lastKnownBloomSize); + assertEquals(curLength, lastKnownBloomSize, "Length should not increase after hitting max entries"); } else { - Assert.assertTrue("Length should increase until max entries are reached", curLength > lastKnownBloomSize); + assertTrue(curLength > lastKnownBloomSize, "Length should increase until max entries are reached"); } } lastKnownBloomSize = serString.length(); diff --git a/hudi-common/src/test/java/org/apache/hudi/common/fs/TestFSUtils.java b/hudi-common/src/test/java/org/apache/hudi/common/fs/TestFSUtils.java index fb16cc362..b6a86a1e2 100644 --- a/hudi-common/src/test/java/org/apache/hudi/common/fs/TestFSUtils.java +++ b/hudi-common/src/test/java/org/apache/hudi/common/fs/TestFSUtils.java @@ -18,25 +18,25 @@ package org.apache.hudi.common.fs; -import org.apache.hudi.common.HoodieCommonTestHarness; import org.apache.hudi.common.model.HoodieLogFile; import org.apache.hudi.common.model.HoodieTestUtils; import org.apache.hudi.common.table.HoodieTableMetaClient; import org.apache.hudi.common.table.timeline.HoodieInstant; import org.apache.hudi.common.table.timeline.HoodieTimeline; +import org.apache.hudi.common.testutils.HoodieCommonTestHarness; import org.apache.hudi.exception.HoodieException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; -import org.junit.Assert; -import org.junit.Before; import org.junit.Rule; -import org.junit.Test; import org.junit.contrib.java.lang.system.EnvironmentVariables; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import java.io.File; -import java.io.FilenameFilter; import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Paths; import java.text.SimpleDateFormat; import 
java.util.ArrayList; import java.util.Arrays; @@ -46,13 +46,17 @@ import java.util.UUID; import java.util.stream.Collectors; import java.util.stream.Stream; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; /** * Tests file system utils. */ public class TestFSUtils extends HoodieCommonTestHarness { + private final long minRollbackToKeep = 10; private final long minCleanToKeep = 10; @@ -61,7 +65,7 @@ public class TestFSUtils extends HoodieCommonTestHarness { @Rule public final EnvironmentVariables environmentVariables = new EnvironmentVariables(); - @Before + @BeforeEach public void setUp() throws IOException { initMetaClient(); } @@ -120,10 +124,10 @@ public class TestFSUtils extends HoodieCommonTestHarness { return true; }, true); - assertTrue("Hoodie MetaFolder MUST be skipped but got :" + collected, - collected.stream().noneMatch(s -> s.contains(HoodieTableMetaClient.METAFOLDER_NAME))); + assertTrue(collected.stream().noneMatch(s -> s.contains(HoodieTableMetaClient.METAFOLDER_NAME)), + "Hoodie MetaFolder MUST be skipped but got :" + collected); // Check if only files are listed - Assert.assertEquals(2, collected.size()); + assertEquals(2, collected.size()); // Test including meta-folder final List collected2 = new ArrayList<>(); @@ -132,10 +136,10 @@ public class TestFSUtils extends HoodieCommonTestHarness { return true; }, false); - Assert.assertFalse("Hoodie MetaFolder will be present :" + collected2, - collected2.stream().noneMatch(s -> s.contains(HoodieTableMetaClient.METAFOLDER_NAME))); + assertFalse(collected2.stream().noneMatch(s -> s.contains(HoodieTableMetaClient.METAFOLDER_NAME)), + "Hoodie MetaFolder will be 
present :" + collected2); // Check if only files are listed including hoodie.properties - Assert.assertEquals("Collected=" + collected2, 5, collected2.size()); + assertEquals(5, collected2.size(), "Collected=" + collected2); } @Test @@ -143,7 +147,7 @@ public class TestFSUtils extends HoodieCommonTestHarness { String instantTime = new SimpleDateFormat("yyyyMMddHHmmss").format(new Date()); String fileName = UUID.randomUUID().toString(); String fullFileName = FSUtils.makeDataFileName(instantTime, TEST_WRITE_TOKEN, fileName); - assertEquals(FSUtils.getCommitTime(fullFileName), instantTime); + assertEquals(instantTime, FSUtils.getCommitTime(fullFileName)); } @Test @@ -151,7 +155,7 @@ public class TestFSUtils extends HoodieCommonTestHarness { String instantTime = new SimpleDateFormat("yyyyMMddHHmmss").format(new Date()); String fileName = UUID.randomUUID().toString(); String fullFileName = FSUtils.makeDataFileName(instantTime, TEST_WRITE_TOKEN, fileName); - assertEquals(FSUtils.getFileId(fullFileName), fileName); + assertEquals(fileName, FSUtils.getFileId(fullFileName)); } @Test @@ -204,10 +208,10 @@ public class TestFSUtils extends HoodieCommonTestHarness { assertEquals(fileName, FSUtils.getFileIdFromLogPath(rlPath)); assertEquals("100", FSUtils.getBaseCommitTimeFromLogPath(rlPath)); assertEquals(1, FSUtils.getFileVersionFromLog(rlPath)); - Assert.assertNull(FSUtils.getTaskPartitionIdFromLogPath(rlPath)); - Assert.assertNull(FSUtils.getStageIdFromLogPath(rlPath)); - Assert.assertNull(FSUtils.getTaskAttemptIdFromLogPath(rlPath)); - Assert.assertNull(FSUtils.getWriteTokenFromLogPath(rlPath)); + assertNull(FSUtils.getTaskPartitionIdFromLogPath(rlPath)); + assertNull(FSUtils.getStageIdFromLogPath(rlPath)); + assertNull(FSUtils.getTaskAttemptIdFromLogPath(rlPath)); + assertNull(FSUtils.getWriteTokenFromLogPath(rlPath)); } @Test @@ -222,9 +226,9 @@ public class TestFSUtils extends HoodieCommonTestHarness { assertEquals(fileName, FSUtils.getFileIdFromLogPath(rlPath)); 
assertEquals("100", FSUtils.getBaseCommitTimeFromLogPath(rlPath)); assertEquals(2, FSUtils.getFileVersionFromLog(rlPath)); - assertEquals(new Integer(1), FSUtils.getTaskPartitionIdFromLogPath(rlPath)); - assertEquals(new Integer(0), FSUtils.getStageIdFromLogPath(rlPath)); - assertEquals(new Integer(1), FSUtils.getTaskAttemptIdFromLogPath(rlPath)); + assertEquals(1, FSUtils.getTaskPartitionIdFromLogPath(rlPath)); + assertEquals(0, FSUtils.getStageIdFromLogPath(rlPath)); + assertEquals(1, FSUtils.getTaskAttemptIdFromLogPath(rlPath)); } /** @@ -277,20 +281,19 @@ public class TestFSUtils extends HoodieCommonTestHarness { List hoodieInstants = new ArrayList<>(); // create rollback files for (String instantTime : instantTimes) { - new File(basePath + "/" + HoodieTableMetaClient.METAFOLDER_NAME + "/" - + instantTime + HoodieTimeline.ROLLBACK_EXTENSION).createNewFile(); + Files.createFile(Paths.get(basePath, + HoodieTableMetaClient.METAFOLDER_NAME, + instantTime + HoodieTimeline.ROLLBACK_EXTENSION)); hoodieInstants.add(new HoodieInstant(false, HoodieTimeline.ROLLBACK_ACTION, instantTime)); } + String metaPath = Paths.get(basePath, ".hoodie").toString(); FSUtils.deleteOlderRollbackMetaFiles(FSUtils.getFs(basePath, new Configuration()), - basePath + "/.hoodie", hoodieInstants.stream()); - File[] rollbackFiles = new File(basePath + "/.hoodie").listFiles(new FilenameFilter() { - @Override - public boolean accept(File dir, String name) { - return name.contains(HoodieTimeline.ROLLBACK_EXTENSION); - } - }); - assertTrue(rollbackFiles.length == minRollbackToKeep); + metaPath, hoodieInstants.stream()); + File[] rollbackFiles = new File(metaPath).listFiles((dir, name) + -> name.contains(HoodieTimeline.ROLLBACK_EXTENSION)); + assertNotNull(rollbackFiles); + assertEquals(rollbackFiles.length, minRollbackToKeep); } @Test @@ -301,19 +304,18 @@ public class TestFSUtils extends HoodieCommonTestHarness { List hoodieInstants = new ArrayList<>(); // create rollback files for (String 
instantTime : instantTimes) { - new File(basePath + "/" + HoodieTableMetaClient.METAFOLDER_NAME + "/" - + instantTime + HoodieTimeline.CLEAN_EXTENSION).createNewFile(); + Files.createFile(Paths.get(basePath, + HoodieTableMetaClient.METAFOLDER_NAME, + instantTime + HoodieTimeline.CLEAN_EXTENSION)); hoodieInstants.add(new HoodieInstant(false, HoodieTimeline.CLEAN_ACTION, instantTime)); } + String metaPath = Paths.get(basePath, ".hoodie").toString(); FSUtils.deleteOlderCleanMetaFiles(FSUtils.getFs(basePath, new Configuration()), - basePath + "/.hoodie", hoodieInstants.stream()); - File[] cleanFiles = new File(basePath + "/.hoodie").listFiles(new FilenameFilter() { - @Override - public boolean accept(File dir, String name) { - return name.contains(HoodieTimeline.CLEAN_EXTENSION); - } - }); - assertTrue(cleanFiles.length == minCleanToKeep); + metaPath, hoodieInstants.stream()); + File[] cleanFiles = new File(metaPath).listFiles((dir, name) + -> name.contains(HoodieTimeline.CLEAN_EXTENSION)); + assertNotNull(cleanFiles); + assertEquals(cleanFiles.length, minCleanToKeep); } @Test @@ -329,29 +331,29 @@ public class TestFSUtils extends HoodieCommonTestHarness { // data file name String dataFileName = FSUtils.makeDataFileName(instantTime, writeToken, fileId); - assertTrue(instantTime.equals(FSUtils.getCommitTime(dataFileName))); - assertTrue(fileId.equals(FSUtils.getFileId(dataFileName))); + assertEquals(instantTime, FSUtils.getCommitTime(dataFileName)); + assertEquals(fileId, FSUtils.getFileId(dataFileName)); String logFileName = FSUtils.makeLogFileName(fileId, LOG_EXTENTION, instantTime, version, writeToken); assertTrue(FSUtils.isLogFile(new Path(logFileName))); - assertTrue(instantTime.equals(FSUtils.getBaseCommitTimeFromLogPath(new Path(logFileName)))); - assertTrue(fileId.equals(FSUtils.getFileIdFromLogPath(new Path(logFileName)))); - assertTrue(version == FSUtils.getFileVersionFromLog(new Path(logFileName))); - 
assertTrue(LOG_STR.equals(FSUtils.getFileExtensionFromLog(new Path(logFileName)))); + assertEquals(instantTime, FSUtils.getBaseCommitTimeFromLogPath(new Path(logFileName))); + assertEquals(fileId, FSUtils.getFileIdFromLogPath(new Path(logFileName))); + assertEquals(version, FSUtils.getFileVersionFromLog(new Path(logFileName))); + assertEquals(LOG_STR, FSUtils.getFileExtensionFromLog(new Path(logFileName))); // create three versions of log file - String partitionPath = basePath + "/" + partitionStr; - new File(partitionPath).mkdirs(); - String log1 = FSUtils.makeLogFileName(fileId, LOG_EXTENTION, instantTime, 1, writeToken); - new File(partitionPath + "/" + log1).createNewFile(); - String log2 = FSUtils.makeLogFileName(fileId, LOG_EXTENTION, instantTime, 2, writeToken); - new File(partitionPath + "/" + log2).createNewFile(); - String log3 = FSUtils.makeLogFileName(fileId, LOG_EXTENTION, instantTime, 3, writeToken); - new File(partitionPath + "/" + log3).createNewFile(); + java.nio.file.Path partitionPath = Paths.get(basePath, partitionStr); + Files.createDirectories(partitionPath); + String log1 = FSUtils.makeLogFileName(fileId, LOG_EXTENTION, instantTime, 1, writeToken); + Files.createFile(partitionPath.resolve(log1)); + String log2 = FSUtils.makeLogFileName(fileId, LOG_EXTENTION, instantTime, 2, writeToken); + Files.createFile(partitionPath.resolve(log2)); + String log3 = FSUtils.makeLogFileName(fileId, LOG_EXTENTION, instantTime, 3, writeToken); + Files.createFile(partitionPath.resolve(log3)); - assertTrue(3 == FSUtils.getLatestLogVersion(FSUtils.getFs(basePath, new Configuration()), - new Path(partitionPath), fileId, LOG_EXTENTION, instantTime).get().getLeft()); - assertTrue(4 == FSUtils.computeNextLogVersion(FSUtils.getFs(basePath, new Configuration()), - new Path(partitionPath), fileId, LOG_EXTENTION, instantTime)); + assertEquals(3, (int) FSUtils.getLatestLogVersion(FSUtils.getFs(basePath, new Configuration()), + new Path(partitionPath.toString()), fileId, 
LOG_EXTENTION, instantTime).get().getLeft()); + assertEquals(4, FSUtils.computeNextLogVersion(FSUtils.getFs(basePath, new Configuration()), + new Path(partitionPath.toString()), fileId, LOG_EXTENTION, instantTime)); } } diff --git a/hudi-common/src/test/java/org/apache/hudi/common/fs/inline/TestHFileInLining.java b/hudi-common/src/test/java/org/apache/hudi/common/fs/inline/TestHFileInLining.java index ddf262d37..96d939ad0 100644 --- a/hudi-common/src/test/java/org/apache/hudi/common/fs/inline/TestHFileInLining.java +++ b/hudi-common/src/test/java/org/apache/hudi/common/fs/inline/TestHFileInLining.java @@ -30,14 +30,12 @@ import org.apache.hadoop.hbase.io.hfile.HFileContext; import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; import org.apache.hadoop.hbase.io.hfile.HFileScanner; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.After; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Test; import java.io.File; import java.io.IOException; import java.nio.ByteBuffer; -import java.util.Arrays; import java.util.HashSet; import java.util.Set; import java.util.UUID; @@ -46,6 +44,9 @@ import static org.apache.hudi.common.fs.inline.FileSystemTestUtils.FILE_SCHEME; import static org.apache.hudi.common.fs.inline.FileSystemTestUtils.RANDOM; import static org.apache.hudi.common.fs.inline.FileSystemTestUtils.getPhantomFile; import static org.apache.hudi.common.fs.inline.FileSystemTestUtils.getRandomOuterInMemPath; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; /** * Tests {@link InLineFileSystem} to inline HFile. @@ -66,7 +67,7 @@ public class TestHFileInLining { inlineConf.set("fs." 
+ InLineFileSystem.SCHEME + ".impl", InLineFileSystem.class.getName()); } - @After + @AfterEach public void teardown() throws IOException { if (generatedPath != null) { File filePath = new File(generatedPath.toString().substring(generatedPath.toString().indexOf(':') + 1)); @@ -117,23 +118,22 @@ public class TestHFileInLining { Set rowIdsToSearch = getRandomValidRowIds(10); for (int rowId : rowIdsToSearch) { - Assert.assertTrue("location lookup failed", - scanner.seekTo(KeyValue.createKeyValueFromKey(getSomeKey(rowId))) == 0); + assertEquals(0, scanner.seekTo(KeyValue.createKeyValueFromKey(getSomeKey(rowId))), + "location lookup failed"); // read the key and see if it matches ByteBuffer readKey = scanner.getKey(); - Assert.assertTrue("seeked key does not match", Arrays.equals(getSomeKey(rowId), - Bytes.toBytes(readKey))); + assertArrayEquals(getSomeKey(rowId), Bytes.toBytes(readKey), "seeked key does not match"); scanner.seekTo(KeyValue.createKeyValueFromKey(getSomeKey(rowId))); ByteBuffer val1 = scanner.getValue(); scanner.seekTo(KeyValue.createKeyValueFromKey(getSomeKey(rowId))); ByteBuffer val2 = scanner.getValue(); - Assert.assertTrue(Arrays.equals(Bytes.toBytes(val1), Bytes.toBytes(val2))); + assertArrayEquals(Bytes.toBytes(val1), Bytes.toBytes(val2)); } int[] invalidRowIds = {-4, maxRows, maxRows + 1, maxRows + 120, maxRows + 160, maxRows + 1000}; for (int rowId : invalidRowIds) { - Assert.assertFalse("location lookup should have failed", - scanner.seekTo(KeyValue.createKeyValueFromKey(getSomeKey(rowId))) == 0); + assertNotEquals(0, scanner.seekTo(KeyValue.createKeyValueFromKey(getSomeKey(rowId))), + "location lookup should have failed"); } reader.close(); fin.close(); @@ -197,16 +197,16 @@ public class TestHFileInLining { Bytes.toBytes("qual"), Bytes.toBytes(valStr)); byte[] keyBytes = new KeyValue.KeyOnlyKeyValue(Bytes.toBytes(key), 0, Bytes.toBytes(key).length).getKey(); - Assert.assertTrue("bytes for keys do not match " + keyStr + " " - + 
Bytes.toString(Bytes.toBytes(key)), Arrays.equals(kv.getKey(), keyBytes)); + assertArrayEquals(kv.getKey(), keyBytes, + "bytes for keys do not match " + keyStr + " " + Bytes.toString(Bytes.toBytes(key))); byte[] valBytes = Bytes.toBytes(val); - Assert.assertTrue("bytes for vals do not match " + valStr + " " - + Bytes.toString(valBytes), Arrays.equals(Bytes.toBytes(valStr), valBytes)); + assertArrayEquals(Bytes.toBytes(valStr), valBytes, + "bytes for vals do not match " + valStr + " " + Bytes.toString(valBytes)); if (!scanner.next()) { break; } } - Assert.assertEquals(i, start + n - 1); + assertEquals(i, start + n - 1); return (start + n); } diff --git a/hudi-common/src/test/java/org/apache/hudi/common/fs/inline/TestInLineFileSystem.java b/hudi-common/src/test/java/org/apache/hudi/common/fs/inline/TestInLineFileSystem.java index a3ba38444..d2adbb360 100644 --- a/hudi-common/src/test/java/org/apache/hudi/common/fs/inline/TestInLineFileSystem.java +++ b/hudi-common/src/test/java/org/apache/hudi/common/fs/inline/TestInLineFileSystem.java @@ -25,10 +25,9 @@ import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; -import org.junit.After; -import org.junit.Assert; -import org.junit.Ignore; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; import java.io.File; import java.io.FileNotFoundException; @@ -41,6 +40,11 @@ import java.util.List; import static org.apache.hudi.common.fs.inline.FileSystemTestUtils.RANDOM; import static org.apache.hudi.common.fs.inline.FileSystemTestUtils.getRandomOuterFSPath; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static 
org.junit.jupiter.api.Assertions.assertTrue; /** * Tests {@link InLineFileSystem}. @@ -55,7 +59,7 @@ public class TestInLineFileSystem { this.listOfGeneratedPaths = new ArrayList<>(); } - @After + @AfterEach public void teardown() throws IOException { for (Path pathToDelete : listOfGeneratedPaths) { File filePath = new File(pathToDelete.toString().substring(pathToDelete.toString().indexOf(':') + 1)); @@ -102,32 +106,32 @@ public class TestInLineFileSystem { Path inlinePath = FileSystemTestUtils.getPhantomFile(outerPath, startOffsetLengthPair.getLeft(), startOffsetLengthPair.getRight()); InLineFileSystem inlineFileSystem = (InLineFileSystem) inlinePath.getFileSystem(conf); FSDataInputStream fsDataInputStream = inlineFileSystem.open(inlinePath); - Assert.assertTrue(inlineFileSystem.exists(inlinePath)); + assertTrue(inlineFileSystem.exists(inlinePath)); verifyFileStatus(expectedFileStatus, inlinePath, startOffsetLengthPair.getRight(), inlineFileSystem.getFileStatus(inlinePath)); FileStatus[] actualFileStatuses = inlineFileSystem.listStatus(inlinePath); - Assert.assertEquals(1, actualFileStatuses.length); + assertEquals(1, actualFileStatuses.length); verifyFileStatus(expectedFileStatus, inlinePath, startOffsetLengthPair.getRight(), actualFileStatuses[0]); byte[] actualBytes = new byte[expectedBytes.length]; fsDataInputStream.readFully(0, actualBytes); - Assert.assertArrayEquals(expectedBytes, actualBytes); + assertArrayEquals(expectedBytes, actualBytes); fsDataInputStream.close(); - Assert.assertEquals(InLineFileSystem.SCHEME, inlineFileSystem.getScheme()); - Assert.assertEquals(URI.create(InLineFileSystem.SCHEME), inlineFileSystem.getUri()); + assertEquals(InLineFileSystem.SCHEME, inlineFileSystem.getScheme()); + assertEquals(URI.create(InLineFileSystem.SCHEME), inlineFileSystem.getUri()); } } @Test - @Ignore // Disabling flaky test for now https://issues.apache.org/jira/browse/HUDI-786 + @Disabled("Disabling flaky test for now 
https://issues.apache.org/jira/browse/HUDI-786") public void testFileSystemApis() throws IOException { OuterPathInfo outerPathInfo = generateOuterFileAndGetInfo(1000); Path inlinePath = FileSystemTestUtils.getPhantomFile(outerPathInfo.outerPath, outerPathInfo.startOffset, outerPathInfo.length); InLineFileSystem inlineFileSystem = (InLineFileSystem) inlinePath.getFileSystem(conf); - FSDataInputStream fsDataInputStream = inlineFileSystem.open(inlinePath); + final FSDataInputStream fsDataInputStream = inlineFileSystem.open(inlinePath); byte[] actualBytes = new byte[outerPathInfo.expectedBytes.length]; // verify pos - Assert.assertEquals(0 - outerPathInfo.startOffset, fsDataInputStream.getPos()); + assertEquals(0 - outerPathInfo.startOffset, fsDataInputStream.getPos()); fsDataInputStream.readFully(0, actualBytes); - Assert.assertArrayEquals(outerPathInfo.expectedBytes, actualBytes); + assertArrayEquals(outerPathInfo.expectedBytes, actualBytes); // read partial data // test read(long position, byte[] buffer, int offset, int length) @@ -138,13 +142,9 @@ public class TestInLineFileSystem { fsDataInputStream.read(25, actualBytes, 100, 210); verifyArrayEquality(outerPathInfo.expectedBytes, 25, 210, actualBytes, 100, 210); // give length to read > than actual inline content - actualBytes = new byte[1100]; - try { - fsDataInputStream.read(0, actualBytes, 0, 1101); - Assert.fail("Should have thrown IndexOutOfBoundsException"); - } catch (IndexOutOfBoundsException e) { - // no op - } + assertThrows(IndexOutOfBoundsException.class, () -> { + fsDataInputStream.read(0, new byte[1100], 0, 1101); + }, "Should have thrown IndexOutOfBoundsException"); // test readFully(long position, byte[] buffer, int offset, int length) actualBytes = new byte[100]; @@ -154,13 +154,9 @@ public class TestInLineFileSystem { fsDataInputStream.readFully(25, actualBytes, 100, 210); verifyArrayEquality(outerPathInfo.expectedBytes, 25, 210, actualBytes, 100, 210); // give length to read > than actual inline 
content - actualBytes = new byte[1100]; - try { - fsDataInputStream.readFully(0, actualBytes, 0, 1101); - Assert.fail("Should have thrown IndexOutOfBoundsException"); - } catch (IndexOutOfBoundsException e) { - // no op - } + assertThrows(IndexOutOfBoundsException.class, () -> { + fsDataInputStream.readFully(0, new byte[1100], 0, 1101); + }, "Should have thrown IndexOutOfBoundsException"); // test readFully(long position, byte[] buffer) actualBytes = new byte[100]; @@ -185,28 +181,20 @@ public class TestInLineFileSystem { */ // test read(ByteBuffer buf) ByteBuffer actualByteBuffer = ByteBuffer.allocate(100); - try { + assertThrows(UnsupportedOperationException.class, () -> { fsDataInputStream.read(actualByteBuffer); - Assert.fail("Should have thrown"); - } catch (UnsupportedOperationException e) { - // ignore - } + }, "Should have thrown"); - Assert.assertEquals(outerPathInfo.outerPath.getFileSystem(conf).open(outerPathInfo.outerPath).getFileDescriptor(), fsDataInputStream.getFileDescriptor()); + assertEquals(outerPathInfo.outerPath.getFileSystem(conf).open(outerPathInfo.outerPath).getFileDescriptor(), + fsDataInputStream.getFileDescriptor()); - try { + assertThrows(UnsupportedOperationException.class, () -> { fsDataInputStream.setReadahead(10L); - Assert.fail("Should have thrown exception"); - } catch (UnsupportedOperationException e) { - // ignore - } + }, "Should have thrown exception"); - try { + assertThrows(UnsupportedOperationException.class, () -> { fsDataInputStream.setDropBehind(true); - Assert.fail("Should have thrown exception"); - } catch (UnsupportedOperationException e) { - // ignore - } + }, "Should have thrown exception"); // yet to test // read(ByteBufferPool bufferPool, int maxLength, EnumSet opts) @@ -218,7 +206,8 @@ public class TestInLineFileSystem { private void verifyArrayEquality(byte[] expected, int expectedOffset, int expectedLength, byte[] actual, int actualOffset, int actualLength) { - 
Assert.assertArrayEquals(Arrays.copyOfRange(expected, expectedOffset, expectedOffset + expectedLength), Arrays.copyOfRange(actual, actualOffset, actualOffset + actualLength)); + assertArrayEquals(Arrays.copyOfRange(expected, expectedOffset, expectedOffset + expectedLength), + Arrays.copyOfRange(actual, actualOffset, actualOffset + actualLength)); } private OuterPathInfo generateOuterFileAndGetInfo(int inlineContentSize) throws IOException { @@ -251,84 +240,63 @@ public class TestInLineFileSystem { public void testOpen() throws IOException { Path inlinePath = getRandomInlinePath(); // open non existant path - try { + assertThrows(FileNotFoundException.class, () -> { inlinePath.getFileSystem(conf).open(inlinePath); - Assert.fail("Should have thrown exception"); - } catch (FileNotFoundException e) { - // ignore - } + }, "Should have thrown exception"); } @Test public void testCreate() throws IOException { Path inlinePath = getRandomInlinePath(); - try { + assertThrows(UnsupportedOperationException.class, () -> { inlinePath.getFileSystem(conf).create(inlinePath, true); - Assert.fail("Should have thrown exception"); - } catch (UnsupportedOperationException e) { - // ignore - } + }, "Should have thrown exception"); } @Test public void testAppend() throws IOException { Path inlinePath = getRandomInlinePath(); - try { + assertThrows(UnsupportedOperationException.class, () -> { inlinePath.getFileSystem(conf).append(inlinePath); - Assert.fail("Should have thrown exception"); - } catch (UnsupportedOperationException e) { - // ignore - } + }, "Should have thrown exception"); } @Test public void testRename() throws IOException { Path inlinePath = getRandomInlinePath(); - try { + assertThrows(UnsupportedOperationException.class, () -> { inlinePath.getFileSystem(conf).rename(inlinePath, inlinePath); - Assert.fail("Should have thrown exception"); - } catch (UnsupportedOperationException e) { - // ignore - } + }, "Should have thrown exception"); } @Test public void testDelete() 
throws IOException { Path inlinePath = getRandomInlinePath(); - try { + assertThrows(UnsupportedOperationException.class, () -> { inlinePath.getFileSystem(conf).delete(inlinePath, true); - Assert.fail("Should have thrown exception"); - } catch (UnsupportedOperationException e) { - // ignore - } + }, "Should have thrown exception"); } @Test public void testgetWorkingDir() throws IOException { Path inlinePath = getRandomInlinePath(); - try { + assertThrows(UnsupportedOperationException.class, () -> { inlinePath.getFileSystem(conf).getWorkingDirectory(); - Assert.fail("Should have thrown exception"); - } catch (UnsupportedOperationException e) { - // ignore - } + }, "Should have thrown exception"); } @Test public void testsetWorkingDirectory() throws IOException { Path inlinePath = getRandomInlinePath(); - try { + assertThrows(UnsupportedOperationException.class, () -> { inlinePath.getFileSystem(conf).setWorkingDirectory(inlinePath); - Assert.fail("Should have thrown exception"); - } catch (UnsupportedOperationException e) { - // ignore - } + }, "Should have thrown exception"); } @Test public void testExists() throws IOException { Path inlinePath = getRandomInlinePath(); - Assert.assertFalse(inlinePath.getFileSystem(conf).exists(inlinePath)); + assertFalse(inlinePath.getFileSystem(conf).exists(inlinePath)); } private Path getRandomInlinePath() { @@ -338,15 +306,15 @@ public class TestInLineFileSystem { } private void verifyFileStatus(FileStatus expected, Path inlinePath, long expectedLength, FileStatus actual) { - Assert.assertEquals(inlinePath, actual.getPath()); - Assert.assertEquals(expectedLength, actual.getLen()); - Assert.assertEquals(expected.getAccessTime(), actual.getAccessTime()); - Assert.assertEquals(expected.getBlockSize(), actual.getBlockSize()); - Assert.assertEquals(expected.getGroup(), actual.getGroup()); - Assert.assertEquals(expected.getModificationTime(), actual.getModificationTime()); - Assert.assertEquals(expected.getOwner(), actual.getOwner()); 
- Assert.assertEquals(expected.getPermission(), actual.getPermission()); - Assert.assertEquals(expected.getReplication(), actual.getReplication()); + assertEquals(inlinePath, actual.getPath()); + assertEquals(expectedLength, actual.getLen()); + assertEquals(expected.getAccessTime(), actual.getAccessTime()); + assertEquals(expected.getBlockSize(), actual.getBlockSize()); + assertEquals(expected.getGroup(), actual.getGroup()); + assertEquals(expected.getModificationTime(), actual.getModificationTime()); + assertEquals(expected.getOwner(), actual.getOwner()); + assertEquals(expected.getPermission(), actual.getPermission()); + assertEquals(expected.getReplication(), actual.getReplication()); } class OuterPathInfo { diff --git a/hudi-common/src/test/java/org/apache/hudi/common/fs/inline/TestInMemoryFileSystem.java b/hudi-common/src/test/java/org/apache/hudi/common/fs/inline/TestInMemoryFileSystem.java index 4e3e8892c..260607408 100644 --- a/hudi-common/src/test/java/org/apache/hudi/common/fs/inline/TestInMemoryFileSystem.java +++ b/hudi-common/src/test/java/org/apache/hudi/common/fs/inline/TestInMemoryFileSystem.java @@ -21,14 +21,17 @@ package org.apache.hudi.common.fs.inline; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.io.IOException; import java.net.URI; import static org.apache.hudi.common.fs.inline.FileSystemTestUtils.RANDOM; import static org.apache.hudi.common.fs.inline.FileSystemTestUtils.getRandomOuterInMemPath; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; /** * Unit tests {@link InMemoryFileSystem}. 
@@ -53,93 +56,75 @@ public class TestInMemoryFileSystem { out.close(); InMemoryFileSystem inMemoryFileSystem = (InMemoryFileSystem) outerInMemFSPath.getFileSystem(conf); byte[] bytesRead = inMemoryFileSystem.getFileAsBytes(); - Assert.assertArrayEquals(randomBytes, bytesRead); - Assert.assertEquals(InMemoryFileSystem.SCHEME, inMemoryFileSystem.getScheme()); - Assert.assertEquals(URI.create(outerInMemFSPath.toString()), inMemoryFileSystem.getUri()); + assertArrayEquals(randomBytes, bytesRead); + assertEquals(InMemoryFileSystem.SCHEME, inMemoryFileSystem.getScheme()); + assertEquals(URI.create(outerInMemFSPath.toString()), inMemoryFileSystem.getUri()); } @Test public void testOpen() throws IOException { Path outerInMemFSPath = getRandomOuterInMemPath(); - Assert.assertNull(outerInMemFSPath.getFileSystem(conf).open(outerInMemFSPath)); + assertNull(outerInMemFSPath.getFileSystem(conf).open(outerInMemFSPath)); } @Test public void testAppend() throws IOException { Path outerInMemFSPath = getRandomOuterInMemPath(); - Assert.assertNull(outerInMemFSPath.getFileSystem(conf).append(outerInMemFSPath)); + assertNull(outerInMemFSPath.getFileSystem(conf).append(outerInMemFSPath)); } @Test public void testRename() throws IOException { Path outerInMemFSPath = getRandomOuterInMemPath(); - try { + assertThrows(UnsupportedOperationException.class, () -> { outerInMemFSPath.getFileSystem(conf).rename(outerInMemFSPath, outerInMemFSPath); - Assert.fail("Should have thrown exception"); - } catch (UnsupportedOperationException e) { - // ignore - } + }, "Should have thrown exception"); } @Test public void testDelete() throws IOException { Path outerInMemFSPath = getRandomOuterInMemPath(); - try { + assertThrows(UnsupportedOperationException.class, () -> { outerInMemFSPath.getFileSystem(conf).delete(outerInMemFSPath, true); - Assert.fail("Should have thrown exception"); - } catch (UnsupportedOperationException e) { - // ignore - } + }, "Should have thrown exception"); } @Test public void 
testgetWorkingDir() throws IOException { Path outerInMemFSPath = getRandomOuterInMemPath(); - Assert.assertNull(outerInMemFSPath.getFileSystem(conf).getWorkingDirectory()); + assertNull(outerInMemFSPath.getFileSystem(conf).getWorkingDirectory()); } @Test public void testsetWorkingDirectory() throws IOException { Path outerInMemFSPath = getRandomOuterInMemPath(); - try { + assertThrows(UnsupportedOperationException.class, () -> { outerInMemFSPath.getFileSystem(conf).setWorkingDirectory(outerInMemFSPath); - Assert.fail("Should have thrown exception"); - } catch (UnsupportedOperationException e) { - // ignore - } + }, "Should have thrown exception"); } @Test public void testExists() throws IOException { Path outerInMemFSPath = getRandomOuterInMemPath(); - try { + assertThrows(UnsupportedOperationException.class, () -> { outerInMemFSPath.getFileSystem(conf).exists(outerInMemFSPath); - Assert.fail("Should have thrown exception"); - } catch (UnsupportedOperationException e) { - // ignore - } + }, "Should have thrown exception"); } @Test public void testFileStatus() throws IOException { Path outerInMemFSPath = getRandomOuterInMemPath(); - try { + assertThrows(UnsupportedOperationException.class, () -> { outerInMemFSPath.getFileSystem(conf).getFileStatus(outerInMemFSPath); - Assert.fail("Should have thrown exception"); - } catch (UnsupportedOperationException e) { - // ignore - } + }, "Should have thrown exception"); } @Test public void testListStatus() throws IOException { Path outerInMemFSPath = getRandomOuterInMemPath(); - try { + assertThrows(UnsupportedOperationException.class, () -> { outerInMemFSPath.getFileSystem(conf).listStatus(outerInMemFSPath); - Assert.fail("Should have thrown exception"); - } catch (UnsupportedOperationException e) { - // ignore - } + }, "Should have thrown exception"); } } diff --git a/hudi-common/src/test/java/org/apache/hudi/common/model/HoodieTestUtils.java b/hudi-common/src/test/java/org/apache/hudi/common/model/HoodieTestUtils.java 
index 060b5a3e9..945b8103b 100644 --- a/hudi-common/src/test/java/org/apache/hudi/common/model/HoodieTestUtils.java +++ b/hudi-common/src/test/java/org/apache/hudi/common/model/HoodieTestUtils.java @@ -80,8 +80,8 @@ import java.util.UUID; import java.util.stream.Collectors; import java.util.stream.Stream; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.fail; /** * A utility class for testing. @@ -352,7 +352,7 @@ public class HoodieTestUtils { Iterator iter1 = expected.iterator(); Iterator iter2 = actual.iterator(); while (iter1.hasNext() && iter2.hasNext()) { - assertEquals(message, iter1.next(), iter2.next()); + assertEquals(iter1.next(), iter2.next(), message); } assert !iter1.hasNext() && !iter2.hasNext(); } diff --git a/hudi-common/src/test/java/org/apache/hudi/common/model/TestHoodieCommitMetadata.java b/hudi-common/src/test/java/org/apache/hudi/common/model/TestHoodieCommitMetadata.java index c845f330e..844359f3e 100644 --- a/hudi-common/src/test/java/org/apache/hudi/common/model/TestHoodieCommitMetadata.java +++ b/hudi-common/src/test/java/org/apache/hudi/common/model/TestHoodieCommitMetadata.java @@ -20,11 +20,14 @@ package org.apache.hudi.common.model; import org.apache.hudi.common.util.FileIOUtils; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.util.List; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertSame; +import static org.junit.jupiter.api.Assertions.assertTrue; + /** * Tests hoodie commit metadata {@link HoodieCommitMetadata}. 
*/ @@ -36,17 +39,17 @@ public class TestHoodieCommitMetadata { List fakeHoodieWriteStats = HoodieTestUtils.generateFakeHoodieWriteStat(100); HoodieCommitMetadata commitMetadata = new HoodieCommitMetadata(); fakeHoodieWriteStats.forEach(stat -> commitMetadata.addWriteStat(stat.getPartitionPath(), stat)); - Assert.assertTrue(commitMetadata.getTotalCreateTime() > 0); - Assert.assertTrue(commitMetadata.getTotalUpsertTime() > 0); - Assert.assertTrue(commitMetadata.getTotalScanTime() > 0); - Assert.assertTrue(commitMetadata.getTotalLogFilesCompacted() > 0); + assertTrue(commitMetadata.getTotalCreateTime() > 0); + assertTrue(commitMetadata.getTotalUpsertTime() > 0); + assertTrue(commitMetadata.getTotalScanTime() > 0); + assertTrue(commitMetadata.getTotalLogFilesCompacted() > 0); String serializedCommitMetadata = commitMetadata.toJsonString(); HoodieCommitMetadata metadata = HoodieCommitMetadata.fromJsonString(serializedCommitMetadata, HoodieCommitMetadata.class); // Make sure timing metrics are not written to instant file - Assert.assertEquals(0, (long) metadata.getTotalScanTime()); - Assert.assertTrue(metadata.getTotalLogFilesCompacted() > 0); + assertEquals(0, (long) metadata.getTotalScanTime()); + assertTrue(metadata.getTotalLogFilesCompacted() > 0); } @Test @@ -56,17 +59,17 @@ public class TestHoodieCommitMetadata { FileIOUtils.readAsUTFString(TestHoodieCommitMetadata.class.getResourceAsStream("/old-version.commit")); HoodieCommitMetadata metadata = HoodieCommitMetadata.fromJsonString(serializedCommitMetadata, HoodieCommitMetadata.class); - Assert.assertTrue(metadata.getOperationType() == WriteOperationType.UNKNOWN); + assertSame(metadata.getOperationType(), WriteOperationType.UNKNOWN); // test operate type HoodieCommitMetadata commitMetadata = new HoodieCommitMetadata(); commitMetadata.setOperationType(WriteOperationType.INSERT); - Assert.assertTrue(commitMetadata.getOperationType() == WriteOperationType.INSERT); + assertSame(commitMetadata.getOperationType(), 
WriteOperationType.INSERT); // test serialized serializedCommitMetadata = commitMetadata.toJsonString(); metadata = - HoodieCommitMetadata.fromJsonString(serializedCommitMetadata, HoodieCommitMetadata.class); - Assert.assertTrue(metadata.getOperationType() == WriteOperationType.INSERT); + HoodieCommitMetadata.fromJsonString(serializedCommitMetadata, HoodieCommitMetadata.class); + assertSame(metadata.getOperationType(), WriteOperationType.INSERT); } } diff --git a/hudi-common/src/test/java/org/apache/hudi/common/model/TestHoodieRecord.java b/hudi-common/src/test/java/org/apache/hudi/common/model/TestHoodieRecord.java index 7dd6da748..6fbe9a545 100644 --- a/hudi-common/src/test/java/org/apache/hudi/common/model/TestHoodieRecord.java +++ b/hudi-common/src/test/java/org/apache/hudi/common/model/TestHoodieRecord.java @@ -23,15 +23,14 @@ import org.apache.hudi.common.util.SchemaTestUtil; import org.apache.avro.generic.GenericRecord; import org.apache.avro.generic.IndexedRecord; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import java.util.List; import java.util.UUID; import java.util.stream.Collectors; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertThrows; /** * Tests for {@link HoodieRecord}. 
@@ -40,7 +39,7 @@ public class TestHoodieRecord { private HoodieRecord hoodieRecord; - @Before + @BeforeEach public void setUp() throws Exception { final List indexedRecords = SchemaTestUtil.generateHoodieTestRecords(0, 1); final List hoodieRecords = @@ -53,12 +52,9 @@ public class TestHoodieRecord { public void testModificationAfterSeal() { hoodieRecord.seal(); final HoodieRecordLocation location = new HoodieRecordLocation("100", "0"); - try { + assertThrows(UnsupportedOperationException.class, () -> { hoodieRecord.setCurrentLocation(location); - fail("should fail since modification after sealed is not allowed"); - } catch (Exception e) { - Assert.assertTrue(e instanceof UnsupportedOperationException); - } + }, "should fail since modification after sealed is not allowed"); } @Test diff --git a/hudi-common/src/test/java/org/apache/hudi/common/model/TestHoodieWriteStat.java b/hudi-common/src/test/java/org/apache/hudi/common/model/TestHoodieWriteStat.java index 2f5daa690..a01effa6f 100644 --- a/hudi-common/src/test/java/org/apache/hudi/common/model/TestHoodieWriteStat.java +++ b/hudi-common/src/test/java/org/apache/hudi/common/model/TestHoodieWriteStat.java @@ -21,14 +21,14 @@ package org.apache.hudi.common.model; import org.apache.hudi.common.fs.FSUtils; import org.apache.hadoop.fs.Path; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.text.SimpleDateFormat; import java.util.Date; import java.util.UUID; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; /** * Tests hoodie write stat {@link HoodieWriteStat}. 
diff --git a/hudi-common/src/test/java/org/apache/hudi/common/storage/TestStorageSchemes.java b/hudi-common/src/test/java/org/apache/hudi/common/storage/TestStorageSchemes.java index 4c7d21e30..22507c857 100644 --- a/hudi-common/src/test/java/org/apache/hudi/common/storage/TestStorageSchemes.java +++ b/hudi-common/src/test/java/org/apache/hudi/common/storage/TestStorageSchemes.java @@ -20,11 +20,11 @@ package org.apache.hudi.common.storage; import org.apache.hudi.common.fs.StorageSchemes; -import org.junit.Test; +import org.junit.jupiter.api.Test; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; /** * Tests {@link StorageSchemes}. @@ -42,11 +42,8 @@ public class TestStorageSchemes { assertFalse(StorageSchemes.isAppendSupported("abfs")); assertFalse(StorageSchemes.isAppendSupported("oss")); assertTrue(StorageSchemes.isAppendSupported("viewfs")); - try { + assertThrows(IllegalArgumentException.class, () -> { StorageSchemes.isAppendSupported("s2"); - fail("Should throw exception for unsupported schemes"); - } catch (IllegalArgumentException ignore) { - // expected. 
- } + }, "Should throw exception for unsupported schemes"); } } diff --git a/hudi-common/src/test/java/org/apache/hudi/common/table/TestHoodieTableMetaClient.java b/hudi-common/src/test/java/org/apache/hudi/common/table/TestHoodieTableMetaClient.java index 63653102b..88d356715 100644 --- a/hudi-common/src/test/java/org/apache/hudi/common/table/TestHoodieTableMetaClient.java +++ b/hudi-common/src/test/java/org/apache/hudi/common/table/TestHoodieTableMetaClient.java @@ -22,7 +22,7 @@ import org.apache.hudi.common.model.HoodieTestUtils; import org.apache.hudi.common.table.timeline.HoodieActiveTimeline; import org.apache.hudi.common.table.timeline.HoodieInstant; import org.apache.hudi.common.table.timeline.HoodieTimeline; -import org.apache.hudi.common.testutils.HoodieCommonTestHarnessJunit5; +import org.apache.hudi.common.testutils.HoodieCommonTestHarness; import org.apache.hudi.common.util.Option; import org.junit.jupiter.api.BeforeEach; @@ -41,7 +41,7 @@ import static org.junit.jupiter.api.Assertions.assertTrue; /** * Tests hoodie table meta client {@link HoodieTableMetaClient}. 
*/ -public class TestHoodieTableMetaClient extends HoodieCommonTestHarnessJunit5 { +public class TestHoodieTableMetaClient extends HoodieCommonTestHarness { @BeforeEach public void init() throws IOException { diff --git a/hudi-common/src/test/java/org/apache/hudi/common/table/log/TestHoodieLogFormat.java b/hudi-common/src/test/java/org/apache/hudi/common/table/log/TestHoodieLogFormat.java index 3cc251c19..68f755827 100755 --- a/hudi-common/src/test/java/org/apache/hudi/common/table/log/TestHoodieLogFormat.java +++ b/hudi-common/src/test/java/org/apache/hudi/common/table/log/TestHoodieLogFormat.java @@ -19,7 +19,6 @@ package org.apache.hudi.common.table.log; import org.apache.hudi.avro.HoodieAvroUtils; -import org.apache.hudi.common.HoodieCommonTestHarness; import org.apache.hudi.common.fs.FSUtils; import org.apache.hudi.common.minicluster.MiniClusterUtil; import org.apache.hudi.common.model.HoodieArchivedLogFile; @@ -36,6 +35,7 @@ import org.apache.hudi.common.table.log.block.HoodieDeleteBlock; import org.apache.hudi.common.table.log.block.HoodieLogBlock; import org.apache.hudi.common.table.log.block.HoodieLogBlock.HeaderMetadataType; import org.apache.hudi.common.table.log.block.HoodieLogBlock.HoodieLogBlockType; +import org.apache.hudi.common.testutils.HoodieCommonTestHarness; import org.apache.hudi.common.util.SchemaTestUtil; import org.apache.hudi.exception.CorruptedLogFileException; @@ -46,19 +46,18 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import 
org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; import java.io.IOException; import java.io.UncheckedIOException; import java.net.URISyntaxException; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -69,56 +68,45 @@ import java.util.Set; import java.util.stream.Collectors; import static org.apache.hudi.common.util.SchemaTestUtil.getSimpleSchema; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; /** * Tests hoodie log format {@link HoodieLogFormat}. */ @SuppressWarnings("Duplicates") -@RunWith(Parameterized.class) public class TestHoodieLogFormat extends HoodieCommonTestHarness { private static String BASE_OUTPUT_PATH = "/tmp/"; private FileSystem fs; private Path partitionPath; private int bufferSize = 4096; - private Boolean readBlocksLazily; - public TestHoodieLogFormat(Boolean readBlocksLazily) { - this.readBlocksLazily = readBlocksLazily; - } - - @Parameterized.Parameters(name = "LogBlockReadMode") - public static Collection data() { - return Arrays.asList(new Boolean[][] {{true}, {false}}); - } - - @BeforeClass + @BeforeAll public static void setUpClass() throws IOException, InterruptedException { // Append is not supported in LocalFileSystem. HDFS needs to be setup. 
MiniClusterUtil.setUp(); } - @AfterClass + @AfterAll public static void tearDownClass() { MiniClusterUtil.shutdown(); } - @Before + @BeforeEach public void setUp() throws IOException, InterruptedException { this.fs = MiniClusterUtil.fileSystem; - assertTrue(fs.mkdirs(new Path(folder.getRoot().getPath()))); - this.partitionPath = new Path(folder.getRoot().getPath()); - this.basePath = folder.getRoot().getParent(); + assertTrue(fs.mkdirs(new Path(tempDir.toAbsolutePath().toString()))); + this.partitionPath = new Path(tempDir.toAbsolutePath().toString()); + this.basePath = tempDir.getParent().toString(); HoodieTestUtils.init(MiniClusterUtil.configuration, basePath, HoodieTableType.MERGE_ON_READ); } - @After + @AfterEach public void tearDown() throws IOException { fs.delete(partitionPath, true); } @@ -128,9 +116,9 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness { Writer writer = HoodieLogFormat.newWriterBuilder().onParentPath(partitionPath).withFileExtension(HoodieLogFile.DELTA_EXTENSION) .withFileId("test-fileid1").overBaseCommit("100").withFs(fs).build(); - assertEquals("Just created this log, size should be 0", 0, writer.getCurrentSize()); - assertTrue("Check all log files should start with a .", writer.getLogFile().getFileName().startsWith(".")); - assertEquals("Version should be 1 for new log created", 1, writer.getLogFile().getLogVersion()); + assertEquals(0, writer.getCurrentSize(), "Just created this log, size should be 0"); + assertTrue(writer.getLogFile().getFileName().startsWith("."), "Check all log files should start with a ."); + assertEquals(1, writer.getLogFile().getLogVersion(), "Version should be 1 for new log created"); } @Test @@ -145,9 +133,9 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness { HoodieAvroDataBlock dataBlock = new HoodieAvroDataBlock(records, header); writer = writer.appendBlock(dataBlock); long size = writer.getCurrentSize(); - assertTrue("We just wrote a block - size should be > 0", size > 0); - 
assertEquals("Write should be auto-flushed. The size reported by FileStatus and the writer should match", size, - fs.getFileStatus(writer.getLogFile().getPath()).getLen()); + assertTrue(size > 0, "We just wrote a block - size should be > 0"); + assertEquals(size, fs.getFileStatus(writer.getLogFile().getPath()).getLen(), + "Write should be auto-flushed. The size reported by FileStatus and the writer should match"); writer.close(); } @@ -175,8 +163,8 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness { header.put(HoodieLogBlock.HeaderMetadataType.SCHEMA, getSimpleSchema().toString()); dataBlock = new HoodieAvroDataBlock(records, header); writer = writer.appendBlock(dataBlock); - assertEquals("This should be a new log file and hence size should be 0", 0, writer.getCurrentSize()); - assertEquals("Version should be rolled to 2", 2, writer.getLogFile().getLogVersion()); + assertEquals(0, writer.getCurrentSize(), "This should be a new log file and hence size should be 0"); + assertEquals(2, writer.getLogFile().getLogVersion(), "Version should be rolled to 2"); writer.close(); } @@ -232,7 +220,7 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness { writer.close(); writer2.close(); assertNotNull(logFile1.getLogWriteToken()); - assertEquals("Log Files must have different versions", logFile1.getLogVersion(), logFile2.getLogVersion() - 1); + assertEquals(logFile1.getLogVersion(), logFile2.getLogVersion() - 1, "Log Files must have different versions"); } @Test @@ -257,9 +245,9 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness { dataBlock = new HoodieAvroDataBlock(records, header); writer = writer.appendBlock(dataBlock); long size2 = writer.getCurrentSize(); - assertTrue("We just wrote a new block - size2 should be > size1", size2 > size1); - assertEquals("Write should be auto-flushed. 
The size reported by FileStatus and the writer should match", size2, - fs.getFileStatus(writer.getLogFile().getPath()).getLen()); + assertTrue(size2 > size1, "We just wrote a new block - size2 should be > size1"); + assertEquals(size2, fs.getFileStatus(writer.getLogFile().getPath()).getLen(), + "Write should be auto-flushed. The size reported by FileStatus and the writer should match"); writer.close(); // Close and Open again and append 100 more records @@ -271,18 +259,16 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness { dataBlock = new HoodieAvroDataBlock(records, header); writer = writer.appendBlock(dataBlock); long size3 = writer.getCurrentSize(); - assertTrue("We just wrote a new block - size3 should be > size2", size3 > size2); - assertEquals("Write should be auto-flushed. The size reported by FileStatus and the writer should match", size3, - fs.getFileStatus(writer.getLogFile().getPath()).getLen()); + assertTrue(size3 > size2, "We just wrote a new block - size3 should be > size2"); + assertEquals(size3, fs.getFileStatus(writer.getLogFile().getPath()).getLen(), + "Write should be auto-flushed. 
The size reported by FileStatus and the writer should match"); writer.close(); // Cannot get the current size after closing the log - try { - writer.getCurrentSize(); - fail("getCurrentSize should fail after the logAppender is closed"); - } catch (IllegalStateException e) { - // pass - } + final Writer closedWriter = writer; + assertThrows(IllegalStateException.class, () -> { + closedWriter.getCurrentSize(); + }, "getCurrentSize should fail after the logAppender is closed"); } /* @@ -354,14 +340,14 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness { writer.close(); Reader reader = HoodieLogFormat.newReader(fs, writer.getLogFile(), SchemaTestUtil.getSimpleSchema()); - assertTrue("We wrote a block, we should be able to read it", reader.hasNext()); + assertTrue(reader.hasNext(), "We wrote a block, we should be able to read it"); HoodieLogBlock nextBlock = reader.next(); - assertEquals("The next block should be a data block", HoodieLogBlockType.AVRO_DATA_BLOCK, nextBlock.getBlockType()); + assertEquals(HoodieLogBlockType.AVRO_DATA_BLOCK, nextBlock.getBlockType(), "The next block should be a data block"); HoodieAvroDataBlock dataBlockRead = (HoodieAvroDataBlock) nextBlock; - assertEquals("Read records size should be equal to the written records size", copyOfRecords.size(), - dataBlockRead.getRecords().size()); - assertEquals("Both records lists should be the same. (ordering guaranteed)", copyOfRecords, - dataBlockRead.getRecords()); + assertEquals(copyOfRecords.size(), dataBlockRead.getRecords().size(), + "Read records size should be equal to the written records size"); + assertEquals(copyOfRecords, dataBlockRead.getRecords(), + "Both records lists should be the same. 
(ordering guaranteed)"); reader.close(); } @@ -405,35 +391,37 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness { writer.close(); Reader reader = HoodieLogFormat.newReader(fs, writer.getLogFile(), SchemaTestUtil.getSimpleSchema()); - assertTrue("First block should be available", reader.hasNext()); + assertTrue(reader.hasNext(), "First block should be available"); HoodieLogBlock nextBlock = reader.next(); HoodieAvroDataBlock dataBlockRead = (HoodieAvroDataBlock) nextBlock; - assertEquals("Read records size should be equal to the written records size", copyOfRecords1.size(), - dataBlockRead.getRecords().size()); - assertEquals("Both records lists should be the same. (ordering guaranteed)", copyOfRecords1, - dataBlockRead.getRecords()); + assertEquals(copyOfRecords1.size(), dataBlockRead.getRecords().size(), + "Read records size should be equal to the written records size"); + assertEquals(copyOfRecords1, dataBlockRead.getRecords(), + "Both records lists should be the same. (ordering guaranteed)"); assertEquals(dataBlockRead.getSchema(), getSimpleSchema()); reader.hasNext(); nextBlock = reader.next(); dataBlockRead = (HoodieAvroDataBlock) nextBlock; - assertEquals("Read records size should be equal to the written records size", copyOfRecords2.size(), - dataBlockRead.getRecords().size()); - assertEquals("Both records lists should be the same. (ordering guaranteed)", copyOfRecords2, - dataBlockRead.getRecords()); + assertEquals(copyOfRecords2.size(), dataBlockRead.getRecords().size(), + "Read records size should be equal to the written records size"); + assertEquals(copyOfRecords2, dataBlockRead.getRecords(), + "Both records lists should be the same. 
(ordering guaranteed)"); reader.hasNext(); nextBlock = reader.next(); dataBlockRead = (HoodieAvroDataBlock) nextBlock; - assertEquals("Read records size should be equal to the written records size", copyOfRecords3.size(), - dataBlockRead.getRecords().size()); - assertEquals("Both records lists should be the same. (ordering guaranteed)", copyOfRecords3, - dataBlockRead.getRecords()); + assertEquals(copyOfRecords3.size(), dataBlockRead.getRecords().size(), + "Read records size should be equal to the written records size"); + assertEquals(copyOfRecords3, dataBlockRead.getRecords(), + "Both records lists should be the same. (ordering guaranteed)"); reader.close(); } - @Test - public void testBasicAppendAndScanMultipleFiles() throws IOException, URISyntaxException, InterruptedException { + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testBasicAppendAndScanMultipleFiles(boolean readBlocksLazily) + throws IOException, URISyntaxException, InterruptedException { Writer writer = HoodieLogFormat.newWriterBuilder().onParentPath(partitionPath).withFileExtension(HoodieLogFile.DELTA_EXTENSION) .withSizeThreshold(1024).withFileId("test-fileid1").overBaseCommit("100").withFs(fs).build(); @@ -467,8 +455,8 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness { scannedRecords.add((IndexedRecord) record.getData().getInsertValue(schema).get()); } - assertEquals("Scanner records count should be the same as appended records", scannedRecords.size(), - allRecords.stream().mapToLong(Collection::size).sum()); + assertEquals(scannedRecords.size(), allRecords.stream().mapToLong(Collection::size).sum(), + "Scanner records count should be the same as appended records"); } @@ -511,17 +499,16 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness { writer = writer.appendBlock(dataBlock); writer.close(); - // First round of reads - we should be able to read the first block and then EOF Reader reader = HoodieLogFormat.newReader(fs, 
writer.getLogFile(), SchemaTestUtil.getSimpleSchema()); - assertTrue("First block should be available", reader.hasNext()); + assertTrue(reader.hasNext(), "First block should be available"); reader.next(); - assertTrue("We should have corrupted block next", reader.hasNext()); + assertTrue(reader.hasNext(), "We should have corrupted block next"); HoodieLogBlock block = reader.next(); - assertEquals("The read block should be a corrupt block", HoodieLogBlockType.CORRUPT_BLOCK, block.getBlockType()); - assertTrue("Third block should be available", reader.hasNext()); + assertEquals(HoodieLogBlockType.CORRUPT_BLOCK, block.getBlockType(), "The read block should be a corrupt block"); + assertTrue(reader.hasNext(), "Third block should be available"); reader.next(); - assertFalse("There should be no more block left", reader.hasNext()); + assertFalse(reader.hasNext(), "There should be no more block left"); reader.close(); @@ -552,23 +539,25 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness { // Second round of reads - we should be able to read the first and last block reader = HoodieLogFormat.newReader(fs, writer.getLogFile(), SchemaTestUtil.getSimpleSchema()); - assertTrue("First block should be available", reader.hasNext()); + assertTrue(reader.hasNext(), "First block should be available"); reader.next(); - assertTrue("We should get the 1st corrupted block next", reader.hasNext()); + assertTrue(reader.hasNext(), "We should get the 1st corrupted block next"); reader.next(); - assertTrue("Third block should be available", reader.hasNext()); + assertTrue(reader.hasNext(), "Third block should be available"); reader.next(); - assertTrue("We should get the 2nd corrupted block next", reader.hasNext()); + assertTrue(reader.hasNext(), "We should get the 2nd corrupted block next"); block = reader.next(); - assertEquals("The read block should be a corrupt block", HoodieLogBlockType.CORRUPT_BLOCK, block.getBlockType()); - assertTrue("We should get the last block next", 
reader.hasNext()); + assertEquals(HoodieLogBlockType.CORRUPT_BLOCK, block.getBlockType(), "The read block should be a corrupt block"); + assertTrue(reader.hasNext(), "We should get the last block next"); reader.next(); - assertFalse("We should have no more blocks left", reader.hasNext()); + assertFalse(reader.hasNext(), "We should have no more blocks left"); reader.close(); } - @Test - public void testAvroLogRecordReaderBasic() throws IOException, URISyntaxException, InterruptedException { + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testAvroLogRecordReaderBasic(boolean readBlocksLazily) + throws IOException, URISyntaxException, InterruptedException { Schema schema = HoodieAvroUtils.addMetadataFields(getSimpleSchema()); // Set a small threshold so that every block is a new version Writer writer = @@ -600,19 +589,20 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness { HoodieMergedLogRecordScanner scanner = new HoodieMergedLogRecordScanner(fs, basePath, allLogFiles, schema, "100", 10240L, readBlocksLazily, false, bufferSize, BASE_OUTPUT_PATH); - assertEquals("", 200, scanner.getTotalLogRecords()); + assertEquals(200, scanner.getTotalLogRecords()); Set readKeys = new HashSet<>(200); scanner.forEach(s -> readKeys.add(s.getKey().getRecordKey())); - assertEquals("Stream collect should return all 200 records", 200, readKeys.size()); + assertEquals(200, readKeys.size(), "Stream collect should return all 200 records"); copyOfRecords1.addAll(copyOfRecords2); Set originalKeys = copyOfRecords1.stream().map(s -> ((GenericRecord) s).get(HoodieRecord.RECORD_KEY_METADATA_FIELD).toString()) .collect(Collectors.toSet()); - assertEquals("CompositeAvroLogReader should return 200 records from 2 versions", originalKeys, readKeys); + assertEquals(originalKeys, readKeys, "CompositeAvroLogReader should return 200 records from 2 versions"); } - @Test - public void testAvroLogRecordReaderWithRollbackTombstone() + @ParameterizedTest + 
@ValueSource(booleans = {true, false}) + public void testAvroLogRecordReaderWithRollbackTombstone(boolean readBlocksLazily) throws IOException, URISyntaxException, InterruptedException { Schema schema = HoodieAvroUtils.addMetadataFields(getSimpleSchema()); // Set a small threshold so that every block is a new version @@ -661,15 +651,15 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness { HoodieMergedLogRecordScanner scanner = new HoodieMergedLogRecordScanner(fs, basePath, allLogFiles, schema, "102", 10240L, readBlocksLazily, false, bufferSize, BASE_OUTPUT_PATH); - assertEquals("We read 200 records from 2 write batches", 200, scanner.getTotalLogRecords()); + assertEquals(200, scanner.getTotalLogRecords(), "We read 200 records from 2 write batches"); Set readKeys = new HashSet<>(200); scanner.forEach(s -> readKeys.add(s.getKey().getRecordKey())); - assertEquals("Stream collect should return all 200 records", 200, readKeys.size()); + assertEquals(200, readKeys.size(), "Stream collect should return all 200 records"); copyOfRecords1.addAll(copyOfRecords3); Set originalKeys = copyOfRecords1.stream().map(s -> ((GenericRecord) s).get(HoodieRecord.RECORD_KEY_METADATA_FIELD).toString()) .collect(Collectors.toSet()); - assertEquals("CompositeAvroLogReader should return 200 records from 2 versions", originalKeys, readKeys); + assertEquals(originalKeys, readKeys, "CompositeAvroLogReader should return 200 records from 2 versions"); } @Test @@ -740,19 +730,20 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness { HoodieMergedLogRecordScanner scanner = new HoodieMergedLogRecordScanner(fs, basePath, allLogFiles, schema, "103", 10240L, true, false, bufferSize, BASE_OUTPUT_PATH); - assertEquals("We would read 200 records", 200, scanner.getTotalLogRecords()); + assertEquals(200, scanner.getTotalLogRecords(), "We would read 200 records"); Set readKeys = new HashSet<>(200); scanner.forEach(s -> readKeys.add(s.getKey().getRecordKey())); - 
assertEquals("Stream collect should return all 200 records", 200, readKeys.size()); + assertEquals(200, readKeys.size(), "Stream collect should return all 200 records"); copyOfRecords1.addAll(copyOfRecords3); Set originalKeys = copyOfRecords1.stream().map(s -> ((GenericRecord) s).get(HoodieRecord.RECORD_KEY_METADATA_FIELD).toString()) .collect(Collectors.toSet()); - assertEquals("CompositeAvroLogReader should return 200 records from 2 versions", originalKeys, readKeys); + assertEquals(originalKeys, readKeys, "CompositeAvroLogReader should return 200 records from 2 versions"); } - @Test - public void testAvroLogRecordReaderWithDeleteAndRollback() + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testAvroLogRecordReaderWithDeleteAndRollback(boolean readBlocksLazily) throws IOException, URISyntaxException, InterruptedException { Schema schema = HoodieAvroUtils.addMetadataFields(getSimpleSchema()); // Set a small threshold so that every block is a new version @@ -799,7 +790,7 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness { HoodieMergedLogRecordScanner scanner = new HoodieMergedLogRecordScanner(fs, basePath, allLogFiles, schema, "102", 10240L, readBlocksLazily, false, bufferSize, BASE_OUTPUT_PATH); - assertEquals("We still would read 200 records", 200, scanner.getTotalLogRecords()); + assertEquals(200, scanner.getTotalLogRecords(), "We still would read 200 records"); final List readKeys = new ArrayList<>(200); final List emptyPayloads = new ArrayList<>(); scanner.forEach(s -> readKeys.add(s.getKey().getRecordKey())); @@ -812,12 +803,12 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness { throw new UncheckedIOException(io); } }); - assertEquals("Stream collect should return all 200 records", 200, readKeys.size()); - assertEquals("Stream collect should return all 50 records with empty payloads", 50, emptyPayloads.size()); + assertEquals(200, readKeys.size(), "Stream collect should return all 200 records"); + 
assertEquals(50, emptyPayloads.size(), "Stream collect should return all 50 records with empty payloads"); originalKeys.removeAll(deletedKeys); Collections.sort(originalKeys); Collections.sort(readKeys); - assertEquals("CompositeAvroLogReader should return 150 records from 2 versions", originalKeys, readKeys); + assertEquals(originalKeys, readKeys, "CompositeAvroLogReader should return 150 records from 2 versions"); // Rollback the last block header.put(HoodieLogBlock.HeaderMetadataType.INSTANT_TIME, "103"); @@ -831,11 +822,12 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness { scanner = new HoodieMergedLogRecordScanner(fs, basePath, allLogFiles, schema, "101", 10240L, readBlocksLazily, false, bufferSize, BASE_OUTPUT_PATH); scanner.forEach(s -> readKeys.add(s.getKey().getRecordKey())); - assertEquals("Stream collect should return all 200 records after rollback of delete", 200, readKeys.size()); + assertEquals(200, readKeys.size(), "Stream collect should return all 200 records after rollback of delete"); } - @Test - public void testAvroLogRecordReaderWithFailedRollbacks() + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testAvroLogRecordReaderWithFailedRollbacks(boolean readBlocksLazily) throws IOException, URISyntaxException, InterruptedException { // Write a Data block and Delete block with same InstantTime (written in same batch) @@ -894,15 +886,16 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness { // all data must be rolled back before merge HoodieMergedLogRecordScanner scanner = new HoodieMergedLogRecordScanner(fs, basePath, allLogFiles, schema, "100", 10240L, readBlocksLazily, false, bufferSize, BASE_OUTPUT_PATH); - assertEquals("We would have scanned 0 records because of rollback", 0, scanner.getTotalLogRecords()); + assertEquals(0, scanner.getTotalLogRecords(), "We would have scanned 0 records because of rollback"); final List readKeys = new ArrayList<>(); scanner.forEach(s -> 
readKeys.add(s.getKey().getRecordKey())); - assertEquals("Stream collect should return all 0 records", 0, readKeys.size()); + assertEquals(0, readKeys.size(), "Stream collect should return all 0 records"); } - @Test - public void testAvroLogRecordReaderWithInsertDeleteAndRollback() + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testAvroLogRecordReaderWithInsertDeleteAndRollback(boolean readBlocksLazily) throws IOException, URISyntaxException, InterruptedException { // Write a Data block and Delete block with same InstantTime (written in same batch) @@ -944,11 +937,12 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness { HoodieMergedLogRecordScanner scanner = new HoodieMergedLogRecordScanner(fs, basePath, allLogFiles, schema, "100", 10240L, readBlocksLazily, false, bufferSize, BASE_OUTPUT_PATH); - assertEquals("We would read 0 records", 0, scanner.getTotalLogRecords()); + assertEquals(0, scanner.getTotalLogRecords(), "We would read 0 records"); } - @Test - public void testAvroLogRecordReaderWithInvalidRollback() + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testAvroLogRecordReaderWithInvalidRollback(boolean readBlocksLazily) throws IOException, URISyntaxException, InterruptedException { Schema schema = HoodieAvroUtils.addMetadataFields(getSimpleSchema()); // Set a small threshold so that every block is a new version @@ -977,14 +971,15 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness { HoodieMergedLogRecordScanner scanner = new HoodieMergedLogRecordScanner(fs, basePath, allLogFiles, schema, "100", 10240L, readBlocksLazily, false, bufferSize, BASE_OUTPUT_PATH); - assertEquals("We still would read 100 records", 100, scanner.getTotalLogRecords()); + assertEquals(100, scanner.getTotalLogRecords(), "We still would read 100 records"); final List readKeys = new ArrayList<>(100); scanner.forEach(s -> readKeys.add(s.getKey().getRecordKey())); - assertEquals("Stream collect should 
return all 150 records", 100, readKeys.size()); + assertEquals(100, readKeys.size(), "Stream collect should return all 100 records"); } - @Test - public void testAvroLogRecordReaderWithInsertsDeleteAndRollback + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testAvroLogRecordReaderWithInsertsDeleteAndRollback(boolean readBlocksLazily) throws IOException, URISyntaxException, InterruptedException { // Write a 3 Data blocs with same InstantTime (written in same batch) @@ -1029,11 +1024,12 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness { HoodieMergedLogRecordScanner scanner = new HoodieMergedLogRecordScanner(fs, basePath, allLogFiles, schema, "101", 10240L, readBlocksLazily, false, bufferSize, BASE_OUTPUT_PATH); - assertEquals("We would read 0 records", 0, scanner.getTotalLogRecords()); + assertEquals(0, scanner.getTotalLogRecords(), "We would read 0 records"); } - @Test - public void testAvroLogRecordReaderWithMixedInsertsCorruptsAndRollback + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testAvroLogRecordReaderWithMixedInsertsCorruptsAndRollback(boolean readBlocksLazily) throws IOException, URISyntaxException, InterruptedException { // Write a 3 Data blocs with same InstantTime (written in same batch) @@ -1118,7 +1114,7 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness { HoodieMergedLogRecordScanner scanner = new HoodieMergedLogRecordScanner(fs, basePath, allLogFiles, schema, "101", 10240L, readBlocksLazily, false, bufferSize, BASE_OUTPUT_PATH); - assertEquals("We would read 0 records", 0, scanner.getTotalLogRecords()); + assertEquals(0, scanner.getTotalLogRecords(), "We would read 0 records"); } /* @@ -1134,7 +1130,8 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness { * duplicate data. 
* */ - private void testAvroLogRecordReaderMergingMultipleLogFiles(int numRecordsInLog1, int numRecordsInLog2) { + private void testAvroLogRecordReaderMergingMultipleLogFiles(int numRecordsInLog1, int numRecordsInLog2, + boolean readBlocksLazily) { try { // Write one Data block with same InstantTime (written in same batch) Schema schema = HoodieAvroUtils.addMetadataFields(getSimpleSchema()); @@ -1175,43 +1172,48 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness { HoodieMergedLogRecordScanner scanner = new HoodieMergedLogRecordScanner(fs, basePath, allLogFiles, schema, "100", 10240L, readBlocksLazily, false, bufferSize, BASE_OUTPUT_PATH); - assertEquals("We would read 100 records", - Math.max(numRecordsInLog1, numRecordsInLog2), scanner.getNumMergedRecordsInLog()); + assertEquals(Math.max(numRecordsInLog1, numRecordsInLog2), scanner.getNumMergedRecordsInLog(), + "We would read 100 records"); } catch (Exception e) { e.printStackTrace(); } } - @Test - public void testAvroLogRecordReaderWithFailedTaskInFirstStageAttempt() { + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testAvroLogRecordReaderWithFailedTaskInFirstStageAttempt(boolean readBlocksLazily) { /* * FIRST_ATTEMPT_FAILED: * Original task from the stage attempt failed, but subsequent stage retry succeeded. */ - testAvroLogRecordReaderMergingMultipleLogFiles(77, 100); + testAvroLogRecordReaderMergingMultipleLogFiles(77, 100, readBlocksLazily); } - @Test - public void testAvroLogRecordReaderWithFailedTaskInSecondStageAttempt() { + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testAvroLogRecordReaderWithFailedTaskInSecondStageAttempt(boolean readBlocksLazily) { /* * SECOND_ATTEMPT_FAILED: * Original task from stage attempt succeeded, but subsequent retry attempt failed. 
*/ - testAvroLogRecordReaderMergingMultipleLogFiles(100, 66); + testAvroLogRecordReaderMergingMultipleLogFiles(100, 66, readBlocksLazily); } - @Test - public void testAvroLogRecordReaderTasksSucceededInBothStageAttempts() { + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testAvroLogRecordReaderTasksSucceededInBothStageAttempts(boolean readBlocksLazily) { /* * BOTH_ATTEMPTS_SUCCEEDED: * Original task from the stage attempt and duplicate task from the stage retry succeeded. */ - testAvroLogRecordReaderMergingMultipleLogFiles(100, 100); + testAvroLogRecordReaderMergingMultipleLogFiles(100, 100, readBlocksLazily); } - @Test - public void testBasicAppendAndReadInReverse() throws IOException, URISyntaxException, InterruptedException { + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testBasicAppendAndReadInReverse(boolean readBlocksLazily) + throws IOException, URISyntaxException, InterruptedException { Writer writer = HoodieLogFormat.newWriterBuilder().onParentPath(partitionPath).withFileExtension(HoodieLogFile.DELTA_EXTENSION) .withFileId("test-fileid1").overBaseCommit("100").withFs(fs).build(); @@ -1250,37 +1252,39 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness { HoodieLogFileReader reader = new HoodieLogFileReader(fs, writer.getLogFile(), SchemaTestUtil.getSimpleSchema(), bufferSize, readBlocksLazily, true); - assertTrue("Last block should be available", reader.hasPrev()); + assertTrue(reader.hasPrev(), "Last block should be available"); HoodieLogBlock prevBlock = reader.prev(); HoodieAvroDataBlock dataBlockRead = (HoodieAvroDataBlock) prevBlock; - assertEquals("Third records size should be equal to the written records size", copyOfRecords3.size(), - dataBlockRead.getRecords().size()); - assertEquals("Both records lists should be the same. 
(ordering guaranteed)", copyOfRecords3, - dataBlockRead.getRecords()); + assertEquals(copyOfRecords3.size(), dataBlockRead.getRecords().size(), + "Third records size should be equal to the written records size"); + assertEquals(copyOfRecords3, dataBlockRead.getRecords(), + "Both records lists should be the same. (ordering guaranteed)"); - assertTrue("Second block should be available", reader.hasPrev()); + assertTrue(reader.hasPrev(), "Second block should be available"); prevBlock = reader.prev(); dataBlockRead = (HoodieAvroDataBlock) prevBlock; - assertEquals("Read records size should be equal to the written records size", copyOfRecords2.size(), - dataBlockRead.getRecords().size()); - assertEquals("Both records lists should be the same. (ordering guaranteed)", copyOfRecords2, - dataBlockRead.getRecords()); + assertEquals(copyOfRecords2.size(), dataBlockRead.getRecords().size(), + "Read records size should be equal to the written records size"); + assertEquals(copyOfRecords2, dataBlockRead.getRecords(), + "Both records lists should be the same. (ordering guaranteed)"); - assertTrue("First block should be available", reader.hasPrev()); + assertTrue(reader.hasPrev(), "First block should be available"); prevBlock = reader.prev(); dataBlockRead = (HoodieAvroDataBlock) prevBlock; - assertEquals("Read records size should be equal to the written records size", copyOfRecords1.size(), - dataBlockRead.getRecords().size()); - assertEquals("Both records lists should be the same. (ordering guaranteed)", copyOfRecords1, - dataBlockRead.getRecords()); + assertEquals(copyOfRecords1.size(), dataBlockRead.getRecords().size(), + "Read records size should be equal to the written records size"); + assertEquals(copyOfRecords1, dataBlockRead.getRecords(), + "Both records lists should be the same. 
(ordering guaranteed)"); assertFalse(reader.hasPrev()); reader.close(); } - @Test - public void testAppendAndReadOnCorruptedLogInReverse() throws IOException, URISyntaxException, InterruptedException { + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testAppendAndReadOnCorruptedLogInReverse(boolean readBlocksLazily) + throws IOException, URISyntaxException, InterruptedException { Writer writer = HoodieLogFormat.newWriterBuilder().onParentPath(partitionPath).withFileExtension(HoodieLogFile.DELTA_EXTENSION) .withFileId("test-fileid1").overBaseCommit("100").withFs(fs).build(); @@ -1323,22 +1327,21 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness { HoodieLogFileReader reader = new HoodieLogFileReader(fs, writer.getLogFile(), schema, bufferSize, readBlocksLazily, true); - assertTrue("Last block should be available", reader.hasPrev()); + assertTrue(reader.hasPrev(), "Last block should be available"); HoodieLogBlock block = reader.prev(); - assertTrue("Last block should be datablock", block instanceof HoodieAvroDataBlock); + assertTrue(block instanceof HoodieAvroDataBlock, "Last block should be datablock"); - assertTrue("Last block should be available", reader.hasPrev()); - try { + assertTrue(reader.hasPrev(), "Last block should be available"); + assertThrows(CorruptedLogFileException.class, () -> { reader.prev(); - } catch (CorruptedLogFileException e) { - e.printStackTrace(); - // We should have corrupted block - } + }); reader.close(); } - @Test - public void testBasicAppendAndTraverseInReverse() throws IOException, URISyntaxException, InterruptedException { + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testBasicAppendAndTraverseInReverse(boolean readBlocksLazily) + throws IOException, URISyntaxException, InterruptedException { Writer writer = HoodieLogFormat.newWriterBuilder().onParentPath(partitionPath).withFileExtension(HoodieLogFile.DELTA_EXTENSION) 
.withFileId("test-fileid1").overBaseCommit("100").withFs(fs).build(); @@ -1373,20 +1376,20 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness { HoodieLogFileReader reader = new HoodieLogFileReader(fs, writer.getLogFile(), SchemaTestUtil.getSimpleSchema(), bufferSize, readBlocksLazily, true); - assertTrue("Third block should be available", reader.hasPrev()); + assertTrue(reader.hasPrev(), "Third block should be available"); reader.moveToPrev(); - assertTrue("Second block should be available", reader.hasPrev()); + assertTrue(reader.hasPrev(), "Second block should be available"); reader.moveToPrev(); // After moving twice, this last reader.prev() should read the First block written - assertTrue("First block should be available", reader.hasPrev()); + assertTrue(reader.hasPrev(), "First block should be available"); HoodieLogBlock prevBlock = reader.prev(); HoodieAvroDataBlock dataBlockRead = (HoodieAvroDataBlock) prevBlock; - assertEquals("Read records size should be equal to the written records size", copyOfRecords1.size(), - dataBlockRead.getRecords().size()); - assertEquals("Both records lists should be the same. (ordering guaranteed)", copyOfRecords1, - dataBlockRead.getRecords()); + assertEquals(copyOfRecords1.size(), dataBlockRead.getRecords().size(), + "Read records size should be equal to the written records size"); + assertEquals(copyOfRecords1, dataBlockRead.getRecords(), + "Both records lists should be the same. 
(ordering guaranteed)"); assertFalse(reader.hasPrev()); reader.close(); @@ -1400,15 +1403,15 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness { Schema schema = getSimpleSchema(); List records = SchemaTestUtil.generateTestRecords(0, 100); List recordsCopy = new ArrayList<>(records); - assertEquals(records.size(), 100); - assertEquals(recordsCopy.size(), 100); + assertEquals(100, records.size()); + assertEquals(100, recordsCopy.size()); HoodieAvroDataBlock dataBlock = new HoodieAvroDataBlock(records, schema); byte[] content = dataBlock.getBytes(schema); assertTrue(content.length > 0); HoodieLogBlock logBlock = HoodieAvroDataBlock.getBlock(content, schema); - assertEquals(logBlock.getBlockType(), HoodieLogBlockType.AVRO_DATA_BLOCK); - List readRecords = ((HoodieAvroDataBlock)logBlock).getRecords(); + assertEquals(HoodieLogBlockType.AVRO_DATA_BLOCK, logBlock.getBlockType()); + List readRecords = ((HoodieAvroDataBlock) logBlock).getRecords(); assertEquals(readRecords.size(), recordsCopy.size()); for (int i = 0; i < recordsCopy.size(); ++i) { assertEquals(recordsCopy.get(i), readRecords.get(i)); @@ -1416,8 +1419,8 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness { // Reader schema is optional if it is same as write schema logBlock = HoodieAvroDataBlock.getBlock(content, null); - assertEquals(logBlock.getBlockType(), HoodieLogBlockType.AVRO_DATA_BLOCK); - readRecords = ((HoodieAvroDataBlock)logBlock).getRecords(); + assertEquals(HoodieLogBlockType.AVRO_DATA_BLOCK, logBlock.getBlockType()); + readRecords = ((HoodieAvroDataBlock) logBlock).getRecords(); assertEquals(readRecords.size(), recordsCopy.size()); for (int i = 0; i < recordsCopy.size(); ++i) { assertEquals(recordsCopy.get(i), readRecords.get(i)); diff --git a/hudi-common/src/test/java/org/apache/hudi/common/table/log/TestHoodieLogFormatAppendFailure.java b/hudi-common/src/test/java/org/apache/hudi/common/table/log/TestHoodieLogFormatAppendFailure.java index 4acdf0748..58396462b 
100644 --- a/hudi-common/src/test/java/org/apache/hudi/common/table/log/TestHoodieLogFormatAppendFailure.java +++ b/hudi-common/src/test/java/org/apache/hudi/common/table/log/TestHoodieLogFormatAppendFailure.java @@ -36,10 +36,10 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.server.datanode.DataNode; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import java.io.File; import java.io.IOException; @@ -51,6 +51,7 @@ import java.util.UUID; import java.util.concurrent.TimeoutException; import static org.apache.hudi.common.util.SchemaTestUtil.getSimpleSchema; +import static org.junit.jupiter.api.Assertions.assertNotEquals; /** * This class is intentionally using a different way of setting up the MiniDFSCluster and not relying on @@ -62,7 +63,7 @@ public class TestHoodieLogFormatAppendFailure { private static File baseDir; private static MiniDFSCluster cluster; - @BeforeClass + @BeforeAll public static void setUpClass() throws IOException { // NOTE : The MiniClusterDFS leaves behind the directory under which the cluster was created baseDir = new File("/tmp/" + UUID.randomUUID().toString()); @@ -77,14 +78,15 @@ public class TestHoodieLogFormatAppendFailure { cluster = new MiniDFSCluster.Builder(conf).checkExitOnShutdown(true).numDataNodes(4).build(); } - @AfterClass + @AfterAll public static void tearDownClass() { cluster.shutdown(true); // Force clean up the directory under which the cluster was created FileUtil.fullyDelete(baseDir); } - @Test(timeout = 60000) + @Test + @Timeout(60) public void testFailedToGetAppendStreamFromHDFSNameNode() throws IOException, URISyntaxException, InterruptedException, TimeoutException { 
@@ -137,7 +139,7 @@ public class TestHoodieLogFormatAppendFailure { .withFileExtension(HoodieArchivedLogFile.ARCHIVE_EXTENSION).withFileId("commits.archive") .overBaseCommit("").withFs(fs).build(); // The log version should be different for this new writer - Assert.assertNotEquals(writer.getLogFile().getLogVersion(), logFileVersion); + assertNotEquals(writer.getLogFile().getLogVersion(), logFileVersion); } } diff --git a/hudi-common/src/test/java/org/apache/hudi/common/table/log/TestHoodieLogFormatVersion.java b/hudi-common/src/test/java/org/apache/hudi/common/table/log/TestHoodieLogFormatVersion.java index 0e2a0bbb2..7665e694e 100755 --- a/hudi-common/src/test/java/org/apache/hudi/common/table/log/TestHoodieLogFormatVersion.java +++ b/hudi-common/src/test/java/org/apache/hudi/common/table/log/TestHoodieLogFormatVersion.java @@ -18,10 +18,10 @@ package org.apache.hudi.common.table.log; -import org.junit.Test; +import org.junit.jupiter.api.Test; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; /** * Tests HUDI log format version {@link HoodieLogFormatVersion}. 
diff --git a/hudi-common/src/test/java/org/apache/hudi/common/table/timeline/TestHoodieActiveTimeline.java b/hudi-common/src/test/java/org/apache/hudi/common/table/timeline/TestHoodieActiveTimeline.java index 3d8eff780..0915cf25c 100755 --- a/hudi-common/src/test/java/org/apache/hudi/common/table/timeline/TestHoodieActiveTimeline.java +++ b/hudi-common/src/test/java/org/apache/hudi/common/table/timeline/TestHoodieActiveTimeline.java @@ -18,18 +18,17 @@ package org.apache.hudi.common.table.timeline; -import org.apache.hadoop.fs.Path; -import org.apache.hudi.common.HoodieCommonTestHarness; import org.apache.hudi.common.model.HoodieTestUtils; import org.apache.hudi.common.table.HoodieTableMetaClient; import org.apache.hudi.common.table.timeline.HoodieInstant.State; import org.apache.hudi.common.table.timeline.versioning.TimelineLayoutVersion; +import org.apache.hudi.common.testutils.HoodieCommonTestHarness; import org.apache.hudi.common.util.CollectionUtils; import org.apache.hudi.common.util.Option; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; + +import org.apache.hadoop.fs.Path; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import java.io.IOException; import java.util.ArrayList; @@ -44,9 +43,9 @@ import java.util.stream.Collectors; import java.util.stream.Stream; import static org.apache.hudi.common.table.timeline.versioning.TimelineLayoutVersion.VERSION_0; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; /** * Tests {@link HoodieActiveTimeline}. 
@@ -54,10 +53,8 @@ import static org.junit.Assert.assertTrue; public class TestHoodieActiveTimeline extends HoodieCommonTestHarness { private HoodieActiveTimeline timeline; - @Rule - public final ExpectedException exception = ExpectedException.none(); - @Before + @BeforeEach public void setUp() throws Exception { initMetaClient(); } @@ -95,7 +92,7 @@ public class TestHoodieActiveTimeline extends HoodieCommonTestHarness { timeline.createNewInstant(instant5); timeline = timeline.reload(); - assertEquals("Total instants should be 5", 5, timeline.countInstants()); + assertEquals(5, timeline.countInstants(), "Total instants should be 5"); HoodieTestUtils.assertStreamEquals("Check the instants stream", Stream.of(instant1Complete, instant2Complete, instant3Complete, instant4Complete, instant5), timeline.getInstants()); @@ -124,7 +121,7 @@ public class TestHoodieActiveTimeline extends HoodieCommonTestHarness { // Ensure aux file is present assertTrue(metaClient.getFs().exists(new Path(metaClient.getMetaAuxiliaryPath(), instant6.getFileName()))); // Read 5 bytes - assertEquals(timeline.readCompactionPlanAsBytes(instant6).get().length, 5); + assertEquals(5, timeline.readCompactionPlanAsBytes(instant6).get().length); // Delete auxiliary file to mimic future release where we stop writing to aux metaClient.getFs().delete(new Path(metaClient.getMetaAuxiliaryPath(), instant6.getFileName())); @@ -133,19 +130,19 @@ public class TestHoodieActiveTimeline extends HoodieCommonTestHarness { assertFalse(metaClient.getFs().exists(new Path(metaClient.getMetaAuxiliaryPath(), instant6.getFileName()))); // Now read compaction plan again which should not throw exception - assertEquals(timeline.readCompactionPlanAsBytes(instant6).get().length, 5); + assertEquals(5, timeline.readCompactionPlanAsBytes(instant6).get().length); } @Test public void testTimelineOperationsBasic() { timeline = new HoodieActiveTimeline(metaClient); assertTrue(timeline.empty()); - assertEquals("", 0, 
timeline.countInstants()); - assertEquals("", Option.empty(), timeline.firstInstant()); - assertEquals("", Option.empty(), timeline.nthInstant(5)); - assertEquals("", Option.empty(), timeline.nthInstant(-1)); - assertEquals("", Option.empty(), timeline.lastInstant()); - assertFalse("", timeline.containsInstant(new HoodieInstant(false, HoodieTimeline.COMMIT_ACTION, "01"))); + assertEquals(0, timeline.countInstants()); + assertEquals(Option.empty(), timeline.firstInstant()); + assertEquals(Option.empty(), timeline.nthInstant(5)); + assertEquals(Option.empty(), timeline.nthInstant(-1)); + assertEquals(Option.empty(), timeline.lastInstant()); + assertFalse(timeline.containsInstant(new HoodieInstant(false, HoodieTimeline.COMMIT_ACTION, "01"))); } @Test @@ -163,17 +160,17 @@ public class TestHoodieActiveTimeline extends HoodieCommonTestHarness { .getInstants().map(HoodieInstant::getTimestamp)); assertFalse(timeline.empty()); assertFalse(timeline.getCommitTimeline().filterPendingExcludingCompaction().empty()); - assertEquals("", 12, timeline.countInstants()); + assertEquals(12, timeline.countInstants()); HoodieTimeline activeCommitTimeline = timeline.getCommitTimeline().filterCompletedInstants(); - assertEquals("", 10, activeCommitTimeline.countInstants()); + assertEquals(10, activeCommitTimeline.countInstants()); - assertEquals("", "01", activeCommitTimeline.firstInstant().get().getTimestamp()); - assertEquals("", "11", activeCommitTimeline.nthInstant(5).get().getTimestamp()); - assertEquals("", "19", activeCommitTimeline.lastInstant().get().getTimestamp()); - assertEquals("", "09", activeCommitTimeline.nthFromLastInstant(5).get().getTimestamp()); - assertTrue("", activeCommitTimeline.containsInstant(new HoodieInstant(false, HoodieTimeline.COMMIT_ACTION, "09"))); - assertFalse("", activeCommitTimeline.isBeforeTimelineStarts("02")); - assertTrue("", activeCommitTimeline.isBeforeTimelineStarts("00")); + assertEquals("01", 
activeCommitTimeline.firstInstant().get().getTimestamp()); + assertEquals("11", activeCommitTimeline.nthInstant(5).get().getTimestamp()); + assertEquals("19", activeCommitTimeline.lastInstant().get().getTimestamp()); + assertEquals("09", activeCommitTimeline.nthFromLastInstant(5).get().getTimestamp()); + assertTrue(activeCommitTimeline.containsInstant(new HoodieInstant(false, HoodieTimeline.COMMIT_ACTION, "09"))); + assertFalse(activeCommitTimeline.isBeforeTimelineStarts("02")); + assertTrue(activeCommitTimeline.isBeforeTimelineStarts("00")); } @Test @@ -220,25 +217,25 @@ public class TestHoodieActiveTimeline extends HoodieCommonTestHarness { @Test public void testTimelineInstantOperations() { timeline = new HoodieActiveTimeline(metaClient, true); - assertEquals("No instant present", timeline.countInstants(), 0); + assertEquals(0, timeline.countInstants(), "No instant present"); // revertToInflight HoodieInstant commit = new HoodieInstant(State.COMPLETED, HoodieTimeline.COMMIT_ACTION, "1"); timeline.createNewInstant(commit); timeline = timeline.reload(); - assertEquals(timeline.countInstants(), 1); + assertEquals(1, timeline.countInstants()); assertTrue(timeline.containsInstant(commit)); HoodieInstant inflight = timeline.revertToInflight(commit); // revert creates the .requested file timeline = timeline.reload(); - assertEquals(timeline.countInstants(), 1); + assertEquals(1, timeline.countInstants()); assertTrue(timeline.containsInstant(inflight)); assertFalse(timeline.containsInstant(commit)); // deleteInflight timeline.deleteInflight(inflight); timeline = timeline.reload(); - assertEquals(timeline.countInstants(), 1); + assertEquals(1, timeline.countInstants()); assertFalse(timeline.containsInstant(inflight)); assertFalse(timeline.containsInstant(commit)); @@ -246,10 +243,10 @@ public class TestHoodieActiveTimeline extends HoodieCommonTestHarness { timeline.createNewInstant(commit); timeline.createNewInstant(inflight); timeline = timeline.reload(); - 
assertEquals(timeline.countInstants(), 1); + assertEquals(1, timeline.countInstants()); timeline.deletePending(inflight); timeline = timeline.reload(); - assertEquals(timeline.countInstants(), 1); + assertEquals(1, timeline.countInstants()); assertFalse(timeline.containsInstant(inflight)); assertTrue(timeline.containsInstant(commit)); @@ -257,10 +254,10 @@ public class TestHoodieActiveTimeline extends HoodieCommonTestHarness { HoodieInstant compaction = new HoodieInstant(State.REQUESTED, HoodieTimeline.COMPACTION_ACTION, "2"); timeline.createNewInstant(compaction); timeline = timeline.reload(); - assertEquals(timeline.countInstants(), 2); + assertEquals(2, timeline.countInstants()); timeline.deleteCompactionRequested(compaction); timeline = timeline.reload(); - assertEquals(timeline.countInstants(), 1); + assertEquals(1, timeline.countInstants()); assertFalse(timeline.containsInstant(inflight)); assertFalse(timeline.containsInstant(compaction)); assertTrue(timeline.containsInstant(commit)); diff --git a/hudi-common/src/test/java/org/apache/hudi/common/table/view/TestHoodieTableFileSystemView.java b/hudi-common/src/test/java/org/apache/hudi/common/table/view/TestHoodieTableFileSystemView.java index 6d6b83027..d8b2adb89 100644 --- a/hudi-common/src/test/java/org/apache/hudi/common/table/view/TestHoodieTableFileSystemView.java +++ b/hudi-common/src/test/java/org/apache/hudi/common/table/view/TestHoodieTableFileSystemView.java @@ -34,7 +34,7 @@ import org.apache.hudi.common.table.timeline.HoodieTimeline; import org.apache.hudi.common.table.timeline.TimelineMetadataUtils; import org.apache.hudi.common.table.view.TableFileSystemView.BaseFileOnlyView; import org.apache.hudi.common.table.view.TableFileSystemView.SliceView; -import org.apache.hudi.common.testutils.HoodieCommonTestHarnessJunit5; +import org.apache.hudi.common.testutils.HoodieCommonTestHarness; import org.apache.hudi.common.util.CompactionUtils; import org.apache.hudi.common.util.Option; import 
org.apache.hudi.common.util.collection.Pair; @@ -66,7 +66,7 @@ import static org.junit.jupiter.api.Assertions.assertTrue; * Tests hoodie table file system view {@link HoodieTableFileSystemView}. */ @SuppressWarnings("ResultOfMethodCallIgnored") -public class TestHoodieTableFileSystemView extends HoodieCommonTestHarnessJunit5 { +public class TestHoodieTableFileSystemView extends HoodieCommonTestHarness { private static final Logger LOG = LogManager.getLogger(TestHoodieTableFileSystemView.class); diff --git a/hudi-common/src/test/java/org/apache/hudi/common/table/view/TestIncrementalFSViewSync.java b/hudi-common/src/test/java/org/apache/hudi/common/table/view/TestIncrementalFSViewSync.java index 437269bde..9ffa7f8c5 100644 --- a/hudi-common/src/test/java/org/apache/hudi/common/table/view/TestIncrementalFSViewSync.java +++ b/hudi-common/src/test/java/org/apache/hudi/common/table/view/TestIncrementalFSViewSync.java @@ -23,7 +23,6 @@ import org.apache.hudi.avro.model.HoodieCompactionPlan; import org.apache.hudi.avro.model.HoodieRestoreMetadata; import org.apache.hudi.avro.model.HoodieRollbackMetadata; import org.apache.hudi.common.HoodieCleanStat; -import org.apache.hudi.common.HoodieCommonTestHarness; import org.apache.hudi.common.HoodieRollbackStat; import org.apache.hudi.common.fs.FSUtils; import org.apache.hudi.common.model.CompactionOperation; @@ -40,6 +39,7 @@ import org.apache.hudi.common.table.timeline.HoodieInstant; import org.apache.hudi.common.table.timeline.HoodieInstant.State; import org.apache.hudi.common.table.timeline.HoodieTimeline; import org.apache.hudi.common.table.timeline.TimelineMetadataUtils; +import org.apache.hudi.common.testutils.HoodieCommonTestHarness; import org.apache.hudi.common.util.CleanerUtils; import org.apache.hudi.common.util.CollectionUtils; import org.apache.hudi.common.util.CompactionUtils; @@ -51,13 +51,14 @@ import org.apache.hudi.exception.HoodieException; import org.apache.hadoop.fs.Path; import org.apache.log4j.LogManager; 
import org.apache.log4j.Logger; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import java.io.File; import java.io.IOException; import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Paths; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -70,6 +71,9 @@ import java.util.stream.Collectors; import java.util.stream.IntStream; import static org.apache.hudi.common.table.timeline.HoodieTimeline.COMPACTION_ACTION; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; /** * Tests incremental file system view sync. @@ -84,18 +88,20 @@ public class TestIncrementalFSViewSync extends HoodieCommonTestHarness { private final List fileIdsPerPartition = IntStream.range(0, 10).mapToObj(x -> UUID.randomUUID().toString()).collect(Collectors.toList()); - @Before + @BeforeEach public void init() throws IOException { initMetaClient(); - partitions.forEach(p -> new File(basePath + "/" + p).mkdirs()); + for (String p : partitions) { + Files.createDirectories(Paths.get(basePath, p)); + } refreshFsView(); } @Test public void testEmptyPartitionsAndTimeline() throws IOException { SyncableFileSystemView view = getFileSystemView(metaClient); - Assert.assertFalse(view.getLastInstant().isPresent()); - partitions.forEach(p -> Assert.assertEquals(0, view.getLatestFileSlices(p).count())); + assertFalse(view.getLastInstant().isPresent()); + partitions.forEach(p -> assertEquals(0, view.getLatestFileSlices(p).count())); } @Test @@ -167,11 +173,11 @@ public class TestIncrementalFSViewSync extends HoodieCommonTestHarness { Option.of(metadata.toJsonString().getBytes(StandardCharsets.UTF_8))); view.sync(); - Assert.assertTrue(view.getLastInstant().isPresent()); - Assert.assertEquals("11", 
view.getLastInstant().get().getTimestamp()); - Assert.assertEquals(State.COMPLETED, view.getLastInstant().get().getState()); - Assert.assertEquals(HoodieTimeline.COMMIT_ACTION, view.getLastInstant().get().getAction()); - partitions.forEach(p -> Assert.assertEquals(0, view.getLatestFileSlices(p).count())); + assertTrue(view.getLastInstant().isPresent()); + assertEquals("11", view.getLastInstant().get().getTimestamp()); + assertEquals(State.COMPLETED, view.getLastInstant().get().getState()); + assertEquals(HoodieTimeline.COMMIT_ACTION, view.getLastInstant().get().getAction()); + partitions.forEach(p -> assertEquals(0, view.getLatestFileSlices(p).count())); metaClient.reloadActiveTimeline(); SyncableFileSystemView newView = getFileSystemView(metaClient); @@ -316,7 +322,7 @@ public class TestIncrementalFSViewSync extends HoodieCommonTestHarness { private void testCleans(SyncableFileSystemView view, List newCleanerInstants, Map> deltaInstantMap, Map> instantsToFiles, List cleanedInstants) { - Assert.assertEquals(newCleanerInstants.size(), cleanedInstants.size()); + assertEquals(newCleanerInstants.size(), cleanedInstants.size()); long exp = partitions.stream().mapToLong(p1 -> view.getAllFileSlices(p1).count()).findAny().getAsLong(); LOG.info("Initial File Slices :" + exp); for (int idx = 0; idx < newCleanerInstants.size(); idx++) { @@ -330,17 +336,17 @@ public class TestIncrementalFSViewSync extends HoodieCommonTestHarness { exp -= fileIdsPerPartition.size(); final long expTotalFileSlicesPerPartition = exp; view.sync(); - Assert.assertTrue(view.getLastInstant().isPresent()); - Assert.assertEquals(newCleanerInstants.get(idx), view.getLastInstant().get().getTimestamp()); - Assert.assertEquals(State.COMPLETED, view.getLastInstant().get().getState()); - Assert.assertEquals(HoodieTimeline.CLEAN_ACTION, view.getLastInstant().get().getAction()); + assertTrue(view.getLastInstant().isPresent()); + assertEquals(newCleanerInstants.get(idx), 
view.getLastInstant().get().getTimestamp()); + assertEquals(State.COMPLETED, view.getLastInstant().get().getState()); + assertEquals(HoodieTimeline.CLEAN_ACTION, view.getLastInstant().get().getAction()); partitions.forEach(p -> { LOG.info("PARTITION : " + p); LOG.info("\tFileSlices :" + view.getAllFileSlices(p).collect(Collectors.toList())); }); - partitions.forEach(p -> Assert.assertEquals(fileIdsPerPartition.size(), view.getLatestFileSlices(p).count())); - partitions.forEach(p -> Assert.assertEquals(expTotalFileSlicesPerPartition, view.getAllFileSlices(p).count())); + partitions.forEach(p -> assertEquals(fileIdsPerPartition.size(), view.getLatestFileSlices(p).count())); + partitions.forEach(p -> assertEquals(expTotalFileSlicesPerPartition, view.getAllFileSlices(p).count())); metaClient.reloadActiveTimeline(); SyncableFileSystemView newView = getFileSystemView(metaClient); @@ -364,7 +370,7 @@ public class TestIncrementalFSViewSync extends HoodieCommonTestHarness { private void testRestore(SyncableFileSystemView view, List newRestoreInstants, boolean isDeltaCommit, Map> instantsToFiles, List rolledBackInstants, String emptyRestoreInstant, boolean isRestore) { - Assert.assertEquals(newRestoreInstants.size(), rolledBackInstants.size()); + assertEquals(newRestoreInstants.size(), rolledBackInstants.size()); long initialFileSlices = partitions.stream().mapToLong(p -> view.getAllFileSlices(p).count()).findAny().getAsLong(); IntStream.range(0, newRestoreInstants.size()).forEach(idx -> { String instant = rolledBackInstants.get(idx); @@ -373,21 +379,21 @@ public class TestIncrementalFSViewSync extends HoodieCommonTestHarness { final long expTotalFileSlicesPerPartition = isDeltaCommit ? 
initialFileSlices : initialFileSlices - ((idx + 1) * fileIdsPerPartition.size()); view.sync(); - Assert.assertTrue(view.getLastInstant().isPresent()); + assertTrue(view.getLastInstant().isPresent()); LOG.info("Last Instant is :" + view.getLastInstant().get()); if (isRestore) { - Assert.assertEquals(newRestoreInstants.get(idx), view.getLastInstant().get().getTimestamp()); - Assert.assertEquals(HoodieTimeline.RESTORE_ACTION, view.getLastInstant().get().getAction()); + assertEquals(newRestoreInstants.get(idx), view.getLastInstant().get().getTimestamp()); + assertEquals(HoodieTimeline.RESTORE_ACTION, view.getLastInstant().get().getAction()); } - Assert.assertEquals(State.COMPLETED, view.getLastInstant().get().getState()); + assertEquals(State.COMPLETED, view.getLastInstant().get().getState()); if (HoodieTimeline.compareTimestamps(newRestoreInstants.get(idx), HoodieTimeline.GREATER_THAN_OR_EQUALS, emptyRestoreInstant )) { - partitions.forEach(p -> Assert.assertEquals(0, view.getLatestFileSlices(p).count())); + partitions.forEach(p -> assertEquals(0, view.getLatestFileSlices(p).count())); } else { - partitions.forEach(p -> Assert.assertEquals(fileIdsPerPartition.size(), view.getLatestFileSlices(p).count())); + partitions.forEach(p -> assertEquals(fileIdsPerPartition.size(), view.getLatestFileSlices(p).count())); } - partitions.forEach(p -> Assert.assertEquals(expTotalFileSlicesPerPartition, view.getAllFileSlices(p).count())); + partitions.forEach(p -> assertEquals(expTotalFileSlicesPerPartition, view.getAllFileSlices(p).count())); metaClient.reloadActiveTimeline(); SyncableFileSystemView newView = getFileSystemView(metaClient); @@ -504,14 +510,13 @@ public class TestIncrementalFSViewSync extends HoodieCommonTestHarness { view.sync(); partitions.forEach(p -> { view.getLatestFileSlices(p).forEach(fs -> { - Assert.assertEquals(instantTime, fs.getBaseInstantTime()); - Assert.assertEquals(p, fs.getPartitionPath()); - Assert.assertFalse(fs.getBaseFile().isPresent()); + 
assertEquals(instantTime, fs.getBaseInstantTime()); + assertEquals(p, fs.getPartitionPath()); + assertFalse(fs.getBaseFile().isPresent()); }); view.getLatestMergedFileSlicesBeforeOrOn(p, instantTime).forEach(fs -> { - Assert - .assertTrue(HoodieTimeline.compareTimestamps(instantTime, HoodieTimeline.GREATER_THAN, fs.getBaseInstantTime())); - Assert.assertEquals(p, fs.getPartitionPath()); + assertTrue(HoodieTimeline.compareTimestamps(instantTime, HoodieTimeline.GREATER_THAN, fs.getBaseInstantTime())); + assertEquals(p, fs.getPartitionPath()); }); }); @@ -535,8 +540,8 @@ public class TestIncrementalFSViewSync extends HoodieCommonTestHarness { ValidationUtils.checkArgument(deleted, "Unable to delete compaction instant."); view.sync(); - Assert.assertEquals(newLastInstant, view.getLastInstant().get().getTimestamp()); - partitions.forEach(p -> view.getLatestFileSlices(p).forEach(fs -> Assert.assertEquals(newBaseInstant, fs.getBaseInstantTime()))); + assertEquals(newLastInstant, view.getLastInstant().get().getTimestamp()); + partitions.forEach(p -> view.getLatestFileSlices(p).forEach(fs -> assertEquals(newBaseInstant, fs.getBaseInstantTime()))); } /** @@ -609,22 +614,21 @@ public class TestIncrementalFSViewSync extends HoodieCommonTestHarness { List filePaths = addInstant(metaClient, instant, deltaCommit, deltaCommit ? 
baseInstantForDeltaCommit : instant); view.sync(); - Assert.assertTrue(view.getLastInstant().isPresent()); - Assert.assertEquals(lastInstant.getTimestamp(), view.getLastInstant().get().getTimestamp()); - Assert.assertEquals(State.COMPLETED, view.getLastInstant().get().getState()); - Assert.assertEquals( + assertTrue(view.getLastInstant().isPresent()); + assertEquals(lastInstant.getTimestamp(), view.getLastInstant().get().getTimestamp()); + assertEquals(State.COMPLETED, view.getLastInstant().get().getState()); + assertEquals(lastInstant.getAction(), view.getLastInstant().get().getAction(), "Expected Last=" + lastInstant + ", Found Instants=" - + view.getTimeline().getInstants().collect(Collectors.toList()), - lastInstant.getAction(), view.getLastInstant().get().getAction()); - partitions.forEach(p -> Assert.assertEquals(fileIdsPerPartition.size(), view.getLatestFileSlices(p).count())); + + view.getTimeline().getInstants().collect(Collectors.toList())); + partitions.forEach(p -> assertEquals(fileIdsPerPartition.size(), view.getLatestFileSlices(p).count())); final long expTotalFileSlicesPerPartition = fileIdsPerPartition.size() * multiple; - partitions.forEach(p -> Assert.assertEquals(expTotalFileSlicesPerPartition, view.getAllFileSlices(p).count())); + partitions.forEach(p -> assertEquals(expTotalFileSlicesPerPartition, view.getAllFileSlices(p).count())); if (deltaCommit) { partitions.forEach(p -> - view.getLatestFileSlices(p).forEach(f -> Assert.assertEquals(baseInstantForDeltaCommit, f.getBaseInstantTime())) + view.getLatestFileSlices(p).forEach(f -> assertEquals(baseInstantForDeltaCommit, f.getBaseInstantTime())) ); } else { - partitions.forEach(p -> view.getLatestBaseFiles(p).forEach(f -> Assert.assertEquals(instant, f.getCommitTime()))); + partitions.forEach(p -> view.getLatestBaseFiles(p).forEach(f -> assertEquals(instant, f.getCommitTime()))); } metaClient.reloadActiveTimeline(); @@ -649,7 +653,7 @@ public class TestIncrementalFSViewSync extends 
HoodieCommonTestHarness { // Timeline check HoodieTimeline timeline1 = view1.getTimeline(); HoodieTimeline timeline2 = view2.getTimeline(); - Assert.assertEquals(view1.getLastInstant(), view2.getLastInstant()); + assertEquals(view1.getLastInstant(), view2.getLastInstant()); CollectionUtils.elementsEqual(timeline1.getInstants().iterator(), timeline2.getInstants().iterator()); // View Checks @@ -657,45 +661,45 @@ public class TestIncrementalFSViewSync extends HoodieCommonTestHarness { .collect(Collectors.toMap(HoodieFileGroup::getFileGroupId, fg -> fg)); Map fileGroupsMap2 = partitions.stream().flatMap(view2::getAllFileGroups) .collect(Collectors.toMap(HoodieFileGroup::getFileGroupId, fg -> fg)); - Assert.assertEquals(fileGroupsMap1.keySet(), fileGroupsMap2.keySet()); + assertEquals(fileGroupsMap1.keySet(), fileGroupsMap2.keySet()); long gotSlicesCount = fileGroupsMap1.keySet().stream() .map(k -> Pair.of(fileGroupsMap1.get(k), fileGroupsMap2.get(k))).mapToLong(e -> { HoodieFileGroup fg1 = e.getKey(); HoodieFileGroup fg2 = e.getValue(); - Assert.assertEquals(fg1.getFileGroupId(), fg2.getFileGroupId()); + assertEquals(fg1.getFileGroupId(), fg2.getFileGroupId()); List slices1 = fg1.getAllRawFileSlices().collect(Collectors.toList()); List slices2 = fg2.getAllRawFileSlices().collect(Collectors.toList()); - Assert.assertEquals(slices1.size(), slices2.size()); + assertEquals(slices1.size(), slices2.size()); IntStream.range(0, slices1.size()).mapToObj(idx -> Pair.of(slices1.get(idx), slices2.get(idx))) .forEach(e2 -> { FileSlice slice1 = e2.getKey(); FileSlice slice2 = e2.getValue(); - Assert.assertEquals(slice1.getBaseInstantTime(), slice2.getBaseInstantTime()); - Assert.assertEquals(slice1.getFileId(), slice2.getFileId()); - Assert.assertEquals(slice1.getBaseFile().isPresent(), slice2.getBaseFile().isPresent()); + assertEquals(slice1.getBaseInstantTime(), slice2.getBaseInstantTime()); + assertEquals(slice1.getFileId(), slice2.getFileId()); + 
assertEquals(slice1.getBaseFile().isPresent(), slice2.getBaseFile().isPresent()); if (slice1.getBaseFile().isPresent()) { HoodieBaseFile df1 = slice1.getBaseFile().get(); HoodieBaseFile df2 = slice2.getBaseFile().get(); - Assert.assertEquals(df1.getCommitTime(), df2.getCommitTime()); - Assert.assertEquals(df1.getFileId(), df2.getFileId()); - Assert.assertEquals(df1.getFileName(), df2.getFileName()); - Assert.assertEquals(Path.getPathWithoutSchemeAndAuthority(new Path(df1.getPath())), + assertEquals(df1.getCommitTime(), df2.getCommitTime()); + assertEquals(df1.getFileId(), df2.getFileId()); + assertEquals(df1.getFileName(), df2.getFileName()); + assertEquals(Path.getPathWithoutSchemeAndAuthority(new Path(df1.getPath())), Path.getPathWithoutSchemeAndAuthority(new Path(df2.getPath()))); } List logPaths1 = slice1.getLogFiles() .map(lf -> Path.getPathWithoutSchemeAndAuthority(lf.getPath())).collect(Collectors.toList()); List logPaths2 = slice2.getLogFiles() .map(lf -> Path.getPathWithoutSchemeAndAuthority(lf.getPath())).collect(Collectors.toList()); - Assert.assertEquals(logPaths1, logPaths2); + assertEquals(logPaths1, logPaths2); }); return slices1.size(); }).sum(); - Assert.assertEquals(expectedTotalFileSlices, gotSlicesCount); + assertEquals(expectedTotalFileSlices, gotSlicesCount); // Pending Compaction Operations Check Set> ops1 = view1.getPendingCompactionOperations().collect(Collectors.toSet()); Set> ops2 = view2.getPendingCompactionOperations().collect(Collectors.toSet()); - Assert.assertEquals(ops1, ops2); + assertEquals(ops1, ops2); } private List addInstant(HoodieTableMetaClient metaClient, String instant, boolean deltaCommit, diff --git a/hudi-common/src/test/java/org/apache/hudi/common/table/view/TestRocksDBBasedIncrementalFSViewSync.java b/hudi-common/src/test/java/org/apache/hudi/common/table/view/TestRocksDBBasedIncrementalFSViewSync.java index 355224279..082277e71 100644 --- 
a/hudi-common/src/test/java/org/apache/hudi/common/table/view/TestRocksDBBasedIncrementalFSViewSync.java +++ b/hudi-common/src/test/java/org/apache/hudi/common/table/view/TestRocksDBBasedIncrementalFSViewSync.java @@ -22,6 +22,7 @@ import org.apache.hudi.common.table.HoodieTableMetaClient; import org.apache.hudi.common.table.timeline.HoodieTimeline; import java.io.IOException; +import java.nio.file.Files; /** * Tests rocks db based incremental file system view sync {@link RocksDbBasedFileSystemView}. @@ -31,7 +32,8 @@ public class TestRocksDBBasedIncrementalFSViewSync extends TestIncrementalFSView @Override protected SyncableFileSystemView getFileSystemView(HoodieTableMetaClient metaClient, HoodieTimeline timeline) throws IOException { + String subdirPath = Files.createTempDirectory(tempDir, null).toAbsolutePath().toString(); return new RocksDbBasedFileSystemView(metaClient, timeline, FileSystemViewStorageConfig.newBuilder() - .withRocksDBPath(folder.newFolder().getAbsolutePath()).withIncrementalTimelineSync(true).build()); + .withRocksDBPath(subdirPath).withIncrementalTimelineSync(true).build()); } } diff --git a/hudi-common/src/test/java/org/apache/hudi/common/HoodieCommonTestHarness.java b/hudi-common/src/test/java/org/apache/hudi/common/testutils/HoodieCommonTestHarness.java similarity index 90% rename from hudi-common/src/test/java/org/apache/hudi/common/HoodieCommonTestHarness.java rename to hudi-common/src/test/java/org/apache/hudi/common/testutils/HoodieCommonTestHarness.java index 62527975e..4315b50ab 100644 --- a/hudi-common/src/test/java/org/apache/hudi/common/HoodieCommonTestHarness.java +++ b/hudi-common/src/test/java/org/apache/hudi/common/testutils/HoodieCommonTestHarness.java @@ -16,7 +16,7 @@ * limitations under the License. 
*/ -package org.apache.hudi.common; +package org.apache.hudi.common.testutils; import org.apache.hudi.common.model.HoodieTableType; import org.apache.hudi.common.model.HoodieTestUtils; @@ -25,8 +25,7 @@ import org.apache.hudi.common.table.timeline.HoodieTimeline; import org.apache.hudi.common.table.view.HoodieTableFileSystemView; import org.apache.hudi.common.table.view.SyncableFileSystemView; -import org.junit.Rule; -import org.junit.rules.TemporaryFolder; +import org.junit.jupiter.api.io.TempDir; import java.io.IOException; @@ -36,17 +35,15 @@ import java.io.IOException; public class HoodieCommonTestHarness { protected String basePath = null; - - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - protected transient HoodieTableMetaClient metaClient; + @TempDir + public java.nio.file.Path tempDir; /** * Initializes basePath. */ protected void initPath() { - this.basePath = folder.getRoot().getAbsolutePath(); + this.basePath = tempDir.toAbsolutePath().toString(); } /** @@ -56,7 +53,7 @@ public class HoodieCommonTestHarness { * @throws IOException */ protected void initMetaClient() throws IOException { - metaClient = HoodieTestUtils.init(folder.getRoot().getAbsolutePath(), getTableType()); + metaClient = HoodieTestUtils.init(tempDir.toAbsolutePath().toString(), getTableType()); basePath = metaClient.getBasePath(); } diff --git a/hudi-common/src/test/java/org/apache/hudi/common/testutils/HoodieCommonTestHarnessJunit5.java b/hudi-common/src/test/java/org/apache/hudi/common/testutils/HoodieCommonTestHarnessJunit5.java deleted file mode 100644 index ba250365e..000000000 --- a/hudi-common/src/test/java/org/apache/hudi/common/testutils/HoodieCommonTestHarnessJunit5.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hudi.common.testutils; - -import org.apache.hudi.common.model.HoodieTestUtils; -import org.apache.hudi.common.table.HoodieTableMetaClient; - -import org.junit.jupiter.api.io.TempDir; - -import java.io.IOException; - -/** - * The JUnit 5 version of {@link org.apache.hudi.common.HoodieCommonTestHarness}. - *

- * To incrementally migrate test classes. - */ -public class HoodieCommonTestHarnessJunit5 extends org.apache.hudi.common.HoodieCommonTestHarness { - - @TempDir - public java.nio.file.Path tempDir; - - /** - * Initializes basePath. - */ - protected void initPath() { - this.basePath = tempDir.toAbsolutePath().toString(); - } - - /** - * Initializes an instance of {@link HoodieTableMetaClient} with a special table type specified by {@code getTableType()}. - */ - protected void initMetaClient() throws IOException { - metaClient = HoodieTestUtils.init(tempDir.toAbsolutePath().toString(), getTableType()); - basePath = metaClient.getBasePath(); - } -} diff --git a/hudi-common/src/test/java/org/apache/hudi/common/util/CompactionTestUtils.java b/hudi-common/src/test/java/org/apache/hudi/common/util/CompactionTestUtils.java index 5f6338fe9..baf34e0b2 100644 --- a/hudi-common/src/test/java/org/apache/hudi/common/util/CompactionTestUtils.java +++ b/hudi-common/src/test/java/org/apache/hudi/common/util/CompactionTestUtils.java @@ -34,7 +34,6 @@ import org.apache.hudi.common.util.collection.Pair; import org.apache.hudi.exception.HoodieIOException; import org.apache.hadoop.fs.Path; -import org.junit.Assert; import java.io.IOException; import java.util.Arrays; @@ -49,6 +48,8 @@ import java.util.stream.Stream; import static org.apache.hudi.common.model.HoodieTestUtils.DEFAULT_PARTITION_PATHS; import static org.apache.hudi.common.table.timeline.HoodieTimeline.COMPACTION_ACTION; import static org.apache.hudi.common.table.timeline.HoodieTimeline.DELTA_COMMIT_ACTION; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; /** * The utility class to support testing compaction. 
@@ -95,10 +96,11 @@ public class CompactionTestUtils { List plans = CollectionUtils.createImmutableList(plan1, plan2, plan3, plan4); IntStream.range(0, 4).boxed().forEach(idx -> { if (expectedNumEntries.get(idx) > 0) { - Assert.assertEquals("check if plan " + idx + " has exp entries", expectedNumEntries.get(idx).longValue(), - plans.get(idx).getOperations().size()); + assertEquals(expectedNumEntries.get(idx).longValue(), + plans.get(idx).getOperations().size(), + "check if plan " + idx + " has exp entries"); } else { - Assert.assertNull("Plan " + idx + " has null ops", plans.get(idx).getOperations()); + assertNull(plans.get(idx).getOperations(), "Plan " + idx + " has null ops"); } }); @@ -110,7 +112,7 @@ public class CompactionTestUtils { generateExpectedCompactionOperations(Arrays.asList(plan1, plan2, plan3, plan4), baseInstantsToCompaction); // Ensure Compaction operations are fine. - Assert.assertEquals(expPendingCompactionMap, pendingCompactionMap); + assertEquals(expPendingCompactionMap, pendingCompactionMap); return expPendingCompactionMap; } diff --git a/hudi-common/src/test/java/org/apache/hudi/common/util/TestBase64CodecUtil.java b/hudi-common/src/test/java/org/apache/hudi/common/util/TestBase64CodecUtil.java index e62df4024..9129a8c03 100644 --- a/hudi-common/src/test/java/org/apache/hudi/common/util/TestBase64CodecUtil.java +++ b/hudi-common/src/test/java/org/apache/hudi/common/util/TestBase64CodecUtil.java @@ -18,12 +18,13 @@ package org.apache.hudi.common.util; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.nio.charset.StandardCharsets; import java.util.UUID; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; + public class TestBase64CodecUtil { @Test @@ -39,7 +40,7 @@ public class TestBase64CodecUtil { String encodeData = Base64CodecUtil.encode(originalData); byte[] decodeData = Base64CodecUtil.decode(encodeData); - Assert.assertArrayEquals(originalData, decodeData); + 
assertArrayEquals(originalData, decodeData); } } diff --git a/hudi-common/src/test/java/org/apache/hudi/common/util/TestCompactionUtils.java b/hudi-common/src/test/java/org/apache/hudi/common/util/TestCompactionUtils.java index 045aa9430..269301508 100644 --- a/hudi-common/src/test/java/org/apache/hudi/common/util/TestCompactionUtils.java +++ b/hudi-common/src/test/java/org/apache/hudi/common/util/TestCompactionUtils.java @@ -20,7 +20,6 @@ package org.apache.hudi.common.util; import org.apache.hudi.avro.model.HoodieCompactionOperation; import org.apache.hudi.avro.model.HoodieCompactionPlan; -import org.apache.hudi.common.HoodieCommonTestHarness; import org.apache.hudi.common.fs.FSUtils; import org.apache.hudi.common.model.FileSlice; import org.apache.hudi.common.model.HoodieBaseFile; @@ -29,13 +28,13 @@ import org.apache.hudi.common.model.HoodieLogFile; import org.apache.hudi.common.model.HoodieTableType; import org.apache.hudi.common.table.HoodieTableMetaClient; import org.apache.hudi.common.table.timeline.versioning.compaction.CompactionPlanMigrator; +import org.apache.hudi.common.testutils.HoodieCommonTestHarness; import org.apache.hudi.common.util.CompactionTestUtils.TestHoodieBaseFile; import org.apache.hudi.common.util.collection.Pair; import org.apache.hadoop.fs.Path; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import java.io.IOException; import java.util.Arrays; @@ -52,6 +51,9 @@ import static org.apache.hudi.common.util.CompactionTestUtils.scheduleCompaction import static org.apache.hudi.common.util.CompactionTestUtils.setupAndValidateCompactionOperations; import static org.apache.hudi.common.util.CompactionUtils.COMPACTION_METADATA_VERSION_1; import static org.apache.hudi.common.util.CompactionUtils.LATEST_COMPACTION_METADATA_VERSION; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static 
org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; /** * The utility class for testing compaction. @@ -68,7 +70,7 @@ public class TestCompactionUtils extends HoodieCommonTestHarness { }; private Function, Map> metricsCaptureFn = (partitionFileSlice) -> METRICS; - @Before + @BeforeEach public void init() throws IOException { initMetaClient(); } @@ -81,13 +83,13 @@ public class TestCompactionUtils extends HoodieCommonTestHarness { CompactionPlanMigrator migrator = new CompactionPlanMigrator(metaClient); HoodieCompactionPlan plan = inputAndPlan.getRight(); System.out.println("Plan=" + plan.getOperations()); - Assert.assertEquals(LATEST_COMPACTION_METADATA_VERSION, plan.getVersion()); + assertEquals(LATEST_COMPACTION_METADATA_VERSION, plan.getVersion()); HoodieCompactionPlan oldPlan = migrator.migrateToVersion(plan, plan.getVersion(), COMPACTION_METADATA_VERSION_1); // Check with older version of compaction plan - Assert.assertEquals(COMPACTION_METADATA_VERSION_1, oldPlan.getVersion()); + assertEquals(COMPACTION_METADATA_VERSION_1, oldPlan.getVersion()); testFileSlicesCompactionPlanEquality(inputAndPlan.getKey(), oldPlan); HoodieCompactionPlan newPlan = migrator.upgradeToLatest(plan, plan.getVersion()); - Assert.assertEquals(LATEST_COMPACTION_METADATA_VERSION, newPlan.getVersion()); + assertEquals(LATEST_COMPACTION_METADATA_VERSION, newPlan.getVersion()); testFileSlicesCompactionPlanEquality(inputAndPlan.getKey(), newPlan); HoodieCompactionPlan latestPlan = migrator.migrateToVersion(oldPlan, oldPlan.getVersion(), newPlan.getVersion()); testFileSlicesCompactionPlanEquality(inputAndPlan.getKey(), latestPlan); @@ -169,14 +171,14 @@ public class TestCompactionUtils extends HoodieCommonTestHarness { // Convert to CompactionOperation // Convert back to HoodieCompactionOperation and check for equality List regeneratedOps = originalOps.stream() - .map(CompactionUtils::buildCompactionOperation) - 
.map(CompactionUtils::buildHoodieCompactionOperation) - .collect(Collectors.toList()); - Assert.assertTrue("Transformation did get tested", originalOps.size() > 0); - Assert.assertEquals("All fields set correctly in transformations", originalOps, regeneratedOps); + .map(CompactionUtils::buildCompactionOperation) + .map(CompactionUtils::buildHoodieCompactionOperation) + .collect(Collectors.toList()); + assertTrue(originalOps.size() > 0, "Transformation did get tested"); + assertEquals(originalOps, regeneratedOps, "All fields set correctly in transformations"); } - @Test(expected = IllegalStateException.class) + @Test public void testGetAllPendingCompactionOperationsWithDupFileId() throws IOException { // Case where there is duplicate fileIds in compaction requests HoodieCompactionPlan plan1 = createCompactionPlan(metaClient, "000", "001", 10, true, true); @@ -187,8 +189,9 @@ public class TestCompactionUtils extends HoodieCommonTestHarness { plan1.getOperations().get(0).setDataFilePath("bla"); scheduleCompaction(metaClient, "005", plan1); metaClient = new HoodieTableMetaClient(metaClient.getHadoopConf(), basePath, true); - Map> res = - CompactionUtils.getAllPendingCompactionOperations(metaClient); + assertThrows(IllegalStateException.class, () -> { + CompactionUtils.getAllPendingCompactionOperations(metaClient); + }); } @Test @@ -230,7 +233,7 @@ public class TestCompactionUtils extends HoodieCommonTestHarness { * @param plan Compaction Plan */ private void testFileSlicesCompactionPlanEquality(List> input, HoodieCompactionPlan plan) { - Assert.assertEquals("All file-slices present", input.size(), plan.getOperations().size()); + assertEquals(input.size(), plan.getOperations().size(), "All file-slices present"); IntStream.range(0, input.size()).boxed().forEach(idx -> testFileSliceCompactionOpEquality(input.get(idx).getValue(), plan.getOperations().get(idx), input.get(idx).getKey(), plan.getVersion())); } @@ -244,19 +247,19 @@ public class TestCompactionUtils extends 
HoodieCommonTestHarness { */ private void testFileSliceCompactionOpEquality(FileSlice slice, HoodieCompactionOperation op, String expPartitionPath, int version) { - Assert.assertEquals("Partition path is correct", expPartitionPath, op.getPartitionPath()); - Assert.assertEquals("Same base-instant", slice.getBaseInstantTime(), op.getBaseInstantTime()); - Assert.assertEquals("Same file-id", slice.getFileId(), op.getFileId()); + assertEquals(expPartitionPath, op.getPartitionPath(), "Partition path is correct"); + assertEquals(slice.getBaseInstantTime(), op.getBaseInstantTime(), "Same base-instant"); + assertEquals(slice.getFileId(), op.getFileId(), "Same file-id"); if (slice.getBaseFile().isPresent()) { HoodieBaseFile df = slice.getBaseFile().get(); - Assert.assertEquals("Same data-file", version == COMPACTION_METADATA_VERSION_1 ? df.getPath() : df.getFileName(), - op.getDataFilePath()); + assertEquals(version == COMPACTION_METADATA_VERSION_1 ? df.getPath() : df.getFileName(), + op.getDataFilePath(), "Same data-file"); } List paths = slice.getLogFiles().map(l -> l.getPath().toString()).collect(Collectors.toList()); - IntStream.range(0, paths.size()).boxed().forEach(idx -> Assert.assertEquals("Log File Index " + idx, + IntStream.range(0, paths.size()).boxed().forEach(idx -> assertEquals( version == COMPACTION_METADATA_VERSION_1 ? 
paths.get(idx) : new Path(paths.get(idx)).getName(), - op.getDeltaFilePaths().get(idx))); - Assert.assertEquals("Metrics set", METRICS, op.getMetrics()); + op.getDeltaFilePaths().get(idx), "Log File Index " + idx)); + assertEquals(METRICS, op.getMetrics(), "Metrics set"); } @Override diff --git a/hudi-common/src/test/java/org/apache/hudi/common/util/TestDFSPropertiesConfiguration.java b/hudi-common/src/test/java/org/apache/hudi/common/util/TestDFSPropertiesConfiguration.java index 6a06d7ddc..792eded41 100644 --- a/hudi-common/src/test/java/org/apache/hudi/common/util/TestDFSPropertiesConfiguration.java +++ b/hudi-common/src/test/java/org/apache/hudi/common/util/TestDFSPropertiesConfiguration.java @@ -25,17 +25,17 @@ import org.apache.hudi.common.minicluster.HdfsTestService; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; import java.io.IOException; import java.io.PrintStream; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; /** * Tests basic functionality of {@link DFSPropertiesConfiguration}. 
@@ -47,7 +47,7 @@ public class TestDFSPropertiesConfiguration { private static MiniDFSCluster dfsCluster; private static DistributedFileSystem dfs; - @BeforeClass + @BeforeAll public static void initClass() throws Exception { hdfsTestService = new HdfsTestService(); dfsCluster = hdfsTestService.start(true); @@ -72,7 +72,7 @@ public class TestDFSPropertiesConfiguration { writePropertiesFile(filePath, new String[] {"double.prop=838.3", "include = t4.props"}); } - @AfterClass + @AfterAll public static void cleanupClass() throws Exception { if (hdfsTestService != null) { hdfsTestService.stop(); @@ -93,12 +93,9 @@ public class TestDFSPropertiesConfiguration { DFSPropertiesConfiguration cfg = new DFSPropertiesConfiguration(dfs, new Path(dfsBasePath + "/t1.props")); TypedProperties props = cfg.getConfig(); assertEquals(5, props.size()); - try { + assertThrows(IllegalArgumentException.class, () -> { props.getString("invalid.key"); - fail("Should error out here."); - } catch (IllegalArgumentException iae) { - // ignore - } + }, "Should error out here."); assertEquals(123, props.getInteger("int.prop")); assertEquals(113.4, props.getDouble("double.prop"), 0.001); @@ -129,12 +126,8 @@ public class TestDFSPropertiesConfiguration { assertTrue(props.getBoolean("boolean.prop")); assertEquals("t3.value", props.getString("string.prop")); assertEquals(1354354354, props.getLong("long.prop")); - - try { + assertThrows(IllegalStateException.class, () -> { new DFSPropertiesConfiguration(dfs, new Path(dfsBasePath + "/t4.props")); - fail("Should error out on a self-included file."); - } catch (IllegalStateException ise) { - // ignore - } + }, "Should error out on a self-included file."); } } diff --git a/hudi-common/src/test/java/org/apache/hudi/common/util/TestFileIOUtils.java b/hudi-common/src/test/java/org/apache/hudi/common/util/TestFileIOUtils.java index 5292f07a6..0c2ac7cf6 100644 --- a/hudi-common/src/test/java/org/apache/hudi/common/util/TestFileIOUtils.java +++ 
b/hudi-common/src/test/java/org/apache/hudi/common/util/TestFileIOUtils.java @@ -18,7 +18,7 @@ package org.apache.hudi.common.util; -import org.apache.hudi.common.testutils.HoodieCommonTestHarnessJunit5; +import org.apache.hudi.common.testutils.HoodieCommonTestHarness; import org.junit.jupiter.api.Test; @@ -35,7 +35,7 @@ import static org.junit.jupiter.api.Assertions.fail; /** * Tests file I/O utils. */ -public class TestFileIOUtils extends HoodieCommonTestHarnessJunit5 { +public class TestFileIOUtils extends HoodieCommonTestHarness { @Test public void testMkdirAndDelete() throws IOException { diff --git a/hudi-common/src/test/java/org/apache/hudi/common/util/TestNumericUtils.java b/hudi-common/src/test/java/org/apache/hudi/common/util/TestNumericUtils.java index cd76383f6..9a74b63c6 100644 --- a/hudi-common/src/test/java/org/apache/hudi/common/util/TestNumericUtils.java +++ b/hudi-common/src/test/java/org/apache/hudi/common/util/TestNumericUtils.java @@ -18,12 +18,12 @@ package org.apache.hudi.common.util; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.util.Arrays; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; /** * Tests numeric utils. 
diff --git a/hudi-common/src/test/java/org/apache/hudi/common/util/TestParquetUtils.java b/hudi-common/src/test/java/org/apache/hudi/common/util/TestParquetUtils.java index 58c4580de..2d2084d5a 100644 --- a/hudi-common/src/test/java/org/apache/hudi/common/util/TestParquetUtils.java +++ b/hudi-common/src/test/java/org/apache/hudi/common/util/TestParquetUtils.java @@ -20,12 +20,12 @@ package org.apache.hudi.common.util; import org.apache.hudi.avro.HoodieAvroUtils; import org.apache.hudi.avro.HoodieAvroWriteSupport; -import org.apache.hudi.common.HoodieCommonTestHarness; import org.apache.hudi.common.bloom.BloomFilter; import org.apache.hudi.common.bloom.BloomFilterFactory; import org.apache.hudi.common.bloom.BloomFilterTypeCode; import org.apache.hudi.common.model.HoodieRecord; import org.apache.hudi.common.model.HoodieTestUtils; +import org.apache.hudi.common.testutils.HoodieCommonTestHarness; import org.apache.avro.Schema; import org.apache.avro.generic.GenericData; @@ -34,58 +34,50 @@ import org.apache.hadoop.fs.Path; import org.apache.parquet.avro.AvroSchemaConverter; import org.apache.parquet.hadoop.ParquetWriter; import org.apache.parquet.hadoop.metadata.CompressionCodecName; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.junit.runners.Parameterized.Parameters; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import java.nio.file.Paths; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collection; import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Set; import java.util.UUID; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static 
org.junit.jupiter.api.Assertions.assertTrue; /** * Tests parquet utils. */ -@RunWith(Parameterized.class) public class TestParquetUtils extends HoodieCommonTestHarness { - String bloomFilterTypeToTest; - - @Parameters() - public static Collection data() { - return Arrays.asList(new Object[][] { - {BloomFilterTypeCode.SIMPLE.name()}, - {BloomFilterTypeCode.DYNAMIC_V0.name()} - }); + public static List bloomFilterTypeCodes() { + return Arrays.asList( + Arguments.of(BloomFilterTypeCode.SIMPLE.name()), + Arguments.of(BloomFilterTypeCode.DYNAMIC_V0.name()) + ); } - public TestParquetUtils(String bloomFilterTypeToTest) { - this.bloomFilterTypeToTest = bloomFilterTypeToTest; - } - - @Before + @BeforeEach public void setup() { initPath(); } - @Test - public void testHoodieWriteSupport() throws Exception { + @ParameterizedTest + @MethodSource("bloomFilterTypeCodes") + public void testHoodieWriteSupport(String typeCode) throws Exception { List rowKeys = new ArrayList<>(); for (int i = 0; i < 1000; i++) { rowKeys.add(UUID.randomUUID().toString()); } - String filePath = basePath + "/test.parquet"; - writeParquetFile(filePath, rowKeys); + String filePath = Paths.get(basePath, "test.parquet").toString(); + writeParquetFile(typeCode, filePath, rowKeys); // Read and verify List rowKeysInFile = new ArrayList<>( @@ -93,16 +85,17 @@ public class TestParquetUtils extends HoodieCommonTestHarness { Collections.sort(rowKeysInFile); Collections.sort(rowKeys); - assertEquals("Did not read back the expected list of keys", rowKeys, rowKeysInFile); + assertEquals(rowKeys, rowKeysInFile, "Did not read back the expected list of keys"); BloomFilter filterInFile = ParquetUtils.readBloomFilterFromParquetMetadata(HoodieTestUtils.getDefaultHadoopConf(), new Path(filePath)); for (String rowKey : rowKeys) { - assertTrue("key should be found in bloom filter", filterInFile.mightContain(rowKey)); + assertTrue(filterInFile.mightContain(rowKey), "key should be found in bloom filter"); } } - @Test - public 
void testFilterParquetRowKeys() throws Exception { + @ParameterizedTest + @MethodSource("bloomFilterTypeCodes") + public void testFilterParquetRowKeys(String typeCode) throws Exception { List rowKeys = new ArrayList<>(); Set filter = new HashSet<>(); for (int i = 0; i < 1000; i++) { @@ -113,25 +106,25 @@ public class TestParquetUtils extends HoodieCommonTestHarness { } } - String filePath = basePath + "/test.parquet"; - writeParquetFile(filePath, rowKeys); + String filePath = Paths.get(basePath, "test.parquet").toString(); + writeParquetFile(typeCode, filePath, rowKeys); // Read and verify Set filtered = ParquetUtils.filterParquetRowKeys(HoodieTestUtils.getDefaultHadoopConf(), new Path(filePath), filter); - assertEquals("Filtered count does not match", filter.size(), filtered.size()); + assertEquals(filter.size(), filtered.size(), "Filtered count does not match"); for (String rowKey : filtered) { - assertTrue("filtered key must be in the given filter", filter.contains(rowKey)); + assertTrue(filter.contains(rowKey), "filtered key must be in the given filter"); } } - private void writeParquetFile(String filePath, List rowKeys) throws Exception { + private void writeParquetFile(String typeCode, String filePath, List rowKeys) throws Exception { // Write out a parquet file Schema schema = HoodieAvroUtils.getRecordKeySchema(); BloomFilter filter = BloomFilterFactory - .createBloomFilter(1000, 0.0001, 10000, bloomFilterTypeToTest); + .createBloomFilter(1000, 0.0001, 10000, typeCode); HoodieAvroWriteSupport writeSupport = new HoodieAvroWriteSupport(new AvroSchemaConverter().convert(schema), schema, filter); ParquetWriter writer = new ParquetWriter(new Path(filePath), writeSupport, CompressionCodecName.GZIP, diff --git a/hudi-common/src/test/java/org/apache/hudi/common/util/TestSerializationUtils.java b/hudi-common/src/test/java/org/apache/hudi/common/util/TestSerializationUtils.java index e6d0cc63d..9d6c1b81b 100644 --- 
a/hudi-common/src/test/java/org/apache/hudi/common/util/TestSerializationUtils.java +++ b/hudi-common/src/test/java/org/apache/hudi/common/util/TestSerializationUtils.java @@ -19,14 +19,18 @@ package org.apache.hudi.common.util; import org.apache.avro.util.Utf8; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.io.IOException; import java.util.Arrays; import java.util.LinkedList; import java.util.Objects; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + /** * Tests serialization utils. */ @@ -50,13 +54,14 @@ public class TestSerializationUtils { private void verifyObject(T expectedValue) throws IOException { byte[] serializedObject = SerializationUtils.serialize(expectedValue); - Assert.assertTrue(serializedObject != null && serializedObject.length > 0); + assertNotNull(serializedObject); + assertTrue(serializedObject.length > 0); final T deserializedValue = SerializationUtils.deserialize(serializedObject); if (expectedValue == null) { - Assert.assertNull(deserializedValue); + assertNull(deserializedValue); } else { - Assert.assertTrue(expectedValue.equals(deserializedValue)); + assertEquals(expectedValue, deserializedValue); } } diff --git a/hudi-common/src/test/java/org/apache/hudi/common/util/TestStringUtils.java b/hudi-common/src/test/java/org/apache/hudi/common/util/TestStringUtils.java index 05a0825fd..b402996fa 100644 --- a/hudi-common/src/test/java/org/apache/hudi/common/util/TestStringUtils.java +++ b/hudi-common/src/test/java/org/apache/hudi/common/util/TestStringUtils.java @@ -18,12 +18,12 @@ package org.apache.hudi.common.util; -import org.junit.Test; +import org.junit.jupiter.api.Test; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotEquals; -import static 
org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; public class TestStringUtils { diff --git a/hudi-common/src/test/java/org/apache/hudi/common/util/collection/TestDiskBasedMap.java b/hudi-common/src/test/java/org/apache/hudi/common/util/collection/TestDiskBasedMap.java index fd4f4388d..32119cc1e 100755 --- a/hudi-common/src/test/java/org/apache/hudi/common/util/collection/TestDiskBasedMap.java +++ b/hudi-common/src/test/java/org/apache/hudi/common/util/collection/TestDiskBasedMap.java @@ -19,13 +19,13 @@ package org.apache.hudi.common.util.collection; import org.apache.hudi.avro.HoodieAvroUtils; -import org.apache.hudi.common.HoodieCommonTestHarness; import org.apache.hudi.common.model.AvroBinaryTestPayload; import org.apache.hudi.common.model.HoodieAvroPayload; import org.apache.hudi.common.model.HoodieKey; import org.apache.hudi.common.model.HoodieRecord; import org.apache.hudi.common.model.HoodieRecordPayload; import org.apache.hudi.common.table.timeline.HoodieActiveTimeline; +import org.apache.hudi.common.testutils.HoodieCommonTestHarness; import org.apache.hudi.common.util.HoodieRecordSizeEstimator; import org.apache.hudi.common.util.Option; import org.apache.hudi.common.util.SchemaTestUtil; @@ -35,9 +35,9 @@ import org.apache.hudi.common.util.SpillableMapUtils; import org.apache.avro.Schema; import org.apache.avro.generic.GenericRecord; import org.apache.avro.generic.IndexedRecord; -import org.junit.Before; -import org.junit.Ignore; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; import java.io.IOException; import java.io.UncheckedIOException; @@ -53,15 +53,15 @@ import java.util.UUID; import 
java.util.stream.Collectors; import static org.apache.hudi.common.util.SchemaTestUtil.getSimpleSchema; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; /** * Tests dis based map {@link DiskBasedMap}. */ public class TestDiskBasedMap extends HoodieCommonTestHarness { - @Before + @BeforeEach public void setup() { initPath(); } @@ -212,7 +212,7 @@ public class TestDiskBasedMap extends HoodieCommonTestHarness { /** * @na: Leaving this test here for a quick performance test */ - @Ignore + @Disabled @Test public void testSizeEstimatorPerformance() throws IOException, URISyntaxException { // Test sizeEstimatorPerformance with simpleSchema diff --git a/hudi-common/src/test/java/org/apache/hudi/common/util/collection/TestExternalSpillableMap.java b/hudi-common/src/test/java/org/apache/hudi/common/util/collection/TestExternalSpillableMap.java index 5f2c45ed0..23a14d906 100644 --- a/hudi-common/src/test/java/org/apache/hudi/common/util/collection/TestExternalSpillableMap.java +++ b/hudi-common/src/test/java/org/apache/hudi/common/util/collection/TestExternalSpillableMap.java @@ -19,12 +19,12 @@ package org.apache.hudi.common.util.collection; import org.apache.hudi.avro.HoodieAvroUtils; -import org.apache.hudi.common.HoodieCommonTestHarness; import org.apache.hudi.common.model.HoodieAvroPayload; import org.apache.hudi.common.model.HoodieKey; import org.apache.hudi.common.model.HoodieRecord; import org.apache.hudi.common.model.HoodieRecordPayload; import org.apache.hudi.common.table.timeline.HoodieActiveTimeline; +import org.apache.hudi.common.testutils.HoodieCommonTestHarness; import org.apache.hudi.common.util.DefaultSizeEstimator; import org.apache.hudi.common.util.HoodieRecordSizeEstimator; import org.apache.hudi.common.util.Option; @@ -34,10 +34,10 @@ import org.apache.hudi.common.util.SpillableMapTestUtils; 
import org.apache.avro.Schema; import org.apache.avro.generic.GenericRecord; import org.apache.avro.generic.IndexedRecord; -import org.junit.Before; -import org.junit.FixMethodOrder; -import org.junit.Test; -import org.junit.runners.MethodSorters; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.MethodOrderer.Alphanumeric; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestMethodOrder; import java.io.IOException; import java.io.UncheckedIOException; @@ -46,19 +46,20 @@ import java.util.ArrayList; import java.util.Iterator; import java.util.List; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; /** * Tests external spillable map {@link ExternalSpillableMap}. 
*/ -@FixMethodOrder(MethodSorters.NAME_ASCENDING) +@TestMethodOrder(Alphanumeric.class) public class TestExternalSpillableMap extends HoodieCommonTestHarness { private static String failureOutputPath; - @Before + @BeforeEach public void setUp() { initPath(); failureOutputPath = basePath + "/test_fail"; @@ -175,7 +176,7 @@ public class TestExternalSpillableMap extends HoodieCommonTestHarness { assertTrue(records.size() == 0); } - @Test(expected = IOException.class) + @Test public void simpleTestWithException() throws IOException, URISyntaxException { Schema schema = HoodieAvroUtils.addMetadataFields(SchemaTestUtil.getSimpleSchema()); @@ -186,9 +187,11 @@ public class TestExternalSpillableMap extends HoodieCommonTestHarness { List recordKeys = SpillableMapTestUtils.upsertRecords(iRecords, records); assert (recordKeys.size() == 100); Iterator> itr = records.iterator(); - while (itr.hasNext()) { - throw new IOException("Testing failures..."); - } + assertThrows(IOException.class, () -> { + while (itr.hasNext()) { + throw new IOException("Testing failures..."); + } + }); } @Test diff --git a/hudi-common/src/test/java/org/apache/hudi/common/util/collection/TestRocksDBManager.java b/hudi-common/src/test/java/org/apache/hudi/common/util/collection/TestRocksDBManager.java index 5f30fa15b..2a5691e02 100644 --- a/hudi-common/src/test/java/org/apache/hudi/common/util/collection/TestRocksDBManager.java +++ b/hudi-common/src/test/java/org/apache/hudi/common/util/collection/TestRocksDBManager.java @@ -20,10 +20,9 @@ package org.apache.hudi.common.util.collection; import org.apache.hudi.common.table.view.FileSystemViewStorageConfig; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import java.io.File; import java.io.Serializable; @@ -38,6 +37,11 @@ import java.util.UUID; import java.util.stream.Collectors; import 
java.util.stream.IntStream; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + /** * Tests RocksDB manager {@link RocksDBDAO}. */ @@ -45,13 +49,13 @@ public class TestRocksDBManager { private RocksDBDAO dbManager; - @Before + @BeforeEach public void setUpClass() { dbManager = new RocksDBDAO("/dummy/path/" + UUID.randomUUID().toString(), FileSystemViewStorageConfig.newBuilder().build().newBuilder().build().getRocksdbBasePath()); } - @After + @AfterEach public void tearDownClass() { if (dbManager != null) { dbManager.close(); @@ -104,35 +108,36 @@ public class TestRocksDBManager { List> gotPayloads = dbManager.prefixSearch(family, prefix).collect(Collectors.toList()); Integer expCount = countsMap.get(family).get(prefix); - Assert.assertEquals("Size check for prefix (" + prefix + ") and family (" + family + ")", - expCount == null ? 0L : expCount.longValue(), gotPayloads.size()); + assertEquals(expCount == null ? 
0L : expCount.longValue(), gotPayloads.size(), + "Size check for prefix (" + prefix + ") and family (" + family + ")"); gotPayloads.forEach(p -> { - Assert.assertEquals(p.getRight().getFamily(), family); - Assert.assertTrue(p.getRight().getKey().toString().startsWith(prefix)); + assertEquals(p.getRight().getFamily(), family); + assertTrue(p.getRight().getKey().toString().startsWith(prefix)); }); }); }); payloads.stream().filter(p -> !p.getPrefix().equalsIgnoreCase(prefix1)).forEach(payload -> { Payload p = dbManager.get(payload.getFamily(), payload.getKey()); - Assert.assertEquals("Retrieved correct payload for key :" + payload.getKey(), payload, p); + assertEquals(payload, p, "Retrieved correct payload for key :" + payload.getKey()); dbManager.delete(payload.getFamily(), payload.getKey()); Payload p2 = dbManager.get(payload.getFamily(), payload.getKey()); - Assert.assertNull("Retrieved correct payload for key :" + payload.getKey(), p2); + assertNull(p2, "Retrieved correct payload for key :" + payload.getKey()); }); colFamilies.forEach(family -> { dbManager.prefixDelete(family, prefix1); int got = dbManager.prefixSearch(family, prefix1).collect(Collectors.toList()).size(); - Assert.assertEquals("Expected prefix delete to leave at least one item for family: " + family, countsMap.get(family).get(prefix1) == null ? 0 : 1, got); + assertEquals(countsMap.get(family).get(prefix1) == null ? 
0 : 1, got, + "Expected prefix delete to leave at least one item for family: " + family); }); payloads.stream().filter(p -> !p.getPrefix().equalsIgnoreCase(prefix1)).forEach(payload -> { Payload p2 = dbManager.get(payload.getFamily(), payload.getKey()); - Assert.assertNull("Retrieved correct payload for key :" + payload.getKey(), p2); + assertNull(p2, "Retrieved correct payload for key :" + payload.getKey()); }); // Now do a prefix search @@ -140,14 +145,14 @@ public class TestRocksDBManager { prefixes.stream().filter(p -> !p.equalsIgnoreCase(prefix1)).forEach(prefix -> { List> gotPayloads = dbManager.prefixSearch(family, prefix).collect(Collectors.toList()); - Assert.assertEquals("Size check for prefix (" + prefix + ") and family (" + family + ")", 0, - gotPayloads.size()); + assertEquals(0, gotPayloads.size(), + "Size check for prefix (" + prefix + ") and family (" + family + ")"); }); }); String rocksDBBasePath = dbManager.getRocksDBBasePath(); dbManager.close(); - Assert.assertFalse(new File(rocksDBBasePath).exists()); + assertFalse(new File(rocksDBBasePath).exists()); } @Test @@ -196,26 +201,26 @@ public class TestRocksDBManager { payloads.forEach(payload -> { Payload p = dbManager.get(payload.getFamily(), payload.getKey()); - Assert.assertEquals("Retrieved correct payload for key :" + payload.getKey(), payload, p); + assertEquals(payload, p, "Retrieved correct payload for key :" + payload.getKey()); }); payloadSplits.next().forEach(payload -> { dbManager.delete(payload.getFamily(), payload.getKey()); Payload want = dbManager.get(payload.getFamily(), payload.getKey()); - Assert.assertNull("Verify deleted during single delete for key :" + payload.getKey(), want); + assertNull(want, "Verify deleted during single delete for key :" + payload.getKey()); }); dbManager.writeBatch(batch -> { payloadSplits.next().forEach(payload -> { dbManager.deleteInBatch(batch, payload.getFamily(), payload.getKey()); Payload want = dbManager.get(payload.getFamily(), 
payload.getKey()); - Assert.assertEquals("Verify not deleted during batch delete in progress for key :" + payload.getKey(), payload, want); + assertEquals(payload, want, "Verify not deleted during batch delete in progress for key :" + payload.getKey()); }); }); payloads.forEach(payload -> { Payload want = dbManager.get(payload.getFamily(), payload.getKey()); - Assert.assertNull("Verify delete for key :" + payload.getKey(), want); + assertNull(want, "Verify delete for key :" + payload.getKey()); }); // Now do a prefix search @@ -223,14 +228,14 @@ public class TestRocksDBManager { prefixes.forEach(prefix -> { List> gotPayloads = dbManager.prefixSearch(family, prefix).collect(Collectors.toList()); - Assert.assertEquals("Size check for prefix (" + prefix + ") and family (" + family + ")", 0, - gotPayloads.size()); + assertEquals(0, gotPayloads.size(), + "Size check for prefix (" + prefix + ") and family (" + family + ")"); }); }); String rocksDBBasePath = dbManager.getRocksDBBasePath(); dbManager.close(); - Assert.assertFalse(new File(rocksDBBasePath).exists()); + assertFalse(new File(rocksDBBasePath).exists()); } public static class PayloadKey implements Serializable { diff --git a/hudi-common/src/test/java/org/apache/hudi/common/util/collection/TestRocksDbBasedMap.java b/hudi-common/src/test/java/org/apache/hudi/common/util/collection/TestRocksDbBasedMap.java index 1877a9444..86e1f0c21 100644 --- a/hudi-common/src/test/java/org/apache/hudi/common/util/collection/TestRocksDbBasedMap.java +++ b/hudi-common/src/test/java/org/apache/hudi/common/util/collection/TestRocksDbBasedMap.java @@ -18,17 +18,16 @@ package org.apache.hudi.common.util.collection; -import org.apache.hudi.common.HoodieCommonTestHarness; import org.apache.hudi.common.model.HoodieRecord; import org.apache.hudi.common.model.HoodieRecordPayload; +import org.apache.hudi.common.testutils.HoodieCommonTestHarness; import org.apache.hudi.common.util.SchemaTestUtil; import 
org.apache.hudi.common.util.SpillableMapTestUtils; import org.apache.avro.generic.GenericRecord; import org.apache.avro.generic.IndexedRecord; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import java.io.IOException; import java.net.URISyntaxException; @@ -36,12 +35,14 @@ import java.util.ArrayList; import java.util.Iterator; import java.util.List; +import static org.junit.jupiter.api.Assertions.assertEquals; + /** * Tests RocksDB based map {@link RocksDBBasedMap}. */ public class TestRocksDbBasedMap extends HoodieCommonTestHarness { - @Before + @BeforeEach public void setUp() { initPath(); } @@ -61,6 +62,6 @@ public class TestRocksDbBasedMap extends HoodieCommonTestHarness { oRecords.add(rec); assert recordKeys.contains(rec.getRecordKey()); } - Assert.assertEquals(recordKeys.size(), oRecords.size()); + assertEquals(recordKeys.size(), oRecords.size()); } } diff --git a/hudi-hadoop-mr/src/test/java/org/apache/hudi/hadoop/TestHoodieROTablePathFilter.java b/hudi-hadoop-mr/src/test/java/org/apache/hudi/hadoop/TestHoodieROTablePathFilter.java index f41f09c19..040a89b46 100644 --- a/hudi-hadoop-mr/src/test/java/org/apache/hudi/hadoop/TestHoodieROTablePathFilter.java +++ b/hudi-hadoop-mr/src/test/java/org/apache/hudi/hadoop/TestHoodieROTablePathFilter.java @@ -20,7 +20,7 @@ package org.apache.hudi.hadoop; import org.apache.hudi.common.model.HoodieTestUtils; import org.apache.hudi.common.table.HoodieTableMetaClient; -import org.apache.hudi.common.testutils.HoodieCommonTestHarnessJunit5; +import org.apache.hudi.common.testutils.HoodieCommonTestHarness; import org.apache.hadoop.fs.Path; import org.junit.jupiter.api.BeforeEach; @@ -37,7 +37,7 @@ import static org.junit.jupiter.api.Assertions.assertTrue; /** * */ -public class TestHoodieROTablePathFilter extends HoodieCommonTestHarnessJunit5 { +public class TestHoodieROTablePathFilter extends HoodieCommonTestHarness { 
@BeforeEach public void setUp() throws Exception { diff --git a/hudi-hadoop-mr/src/test/java/org/apache/hudi/hadoop/realtime/TestHoodieCombineHiveInputFormat.java b/hudi-hadoop-mr/src/test/java/org/apache/hudi/hadoop/realtime/TestHoodieCombineHiveInputFormat.java index 8ef295e89..3f4be1661 100644 --- a/hudi-hadoop-mr/src/test/java/org/apache/hudi/hadoop/realtime/TestHoodieCombineHiveInputFormat.java +++ b/hudi-hadoop-mr/src/test/java/org/apache/hudi/hadoop/realtime/TestHoodieCombineHiveInputFormat.java @@ -23,7 +23,7 @@ import org.apache.hudi.common.minicluster.MiniClusterUtil; import org.apache.hudi.common.model.HoodieTableType; import org.apache.hudi.common.model.HoodieTestUtils; import org.apache.hudi.common.table.log.HoodieLogFormat; -import org.apache.hudi.common.testutils.HoodieCommonTestHarnessJunit5; +import org.apache.hudi.common.testutils.HoodieCommonTestHarness; import org.apache.hudi.common.util.SchemaTestUtil; import org.apache.hudi.hadoop.InputFormatTestUtil; import org.apache.hudi.hadoop.hive.HoodieCombineHiveInputFormat; @@ -58,7 +58,7 @@ import static org.apache.hadoop.hive.ql.exec.Utilities.MAPRED_MAPPER_CLASS; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; -public class TestHoodieCombineHiveInputFormat extends HoodieCommonTestHarnessJunit5 { +public class TestHoodieCombineHiveInputFormat extends HoodieCommonTestHarness { private JobConf jobConf; private FileSystem fs; @@ -80,7 +80,7 @@ public class TestHoodieCombineHiveInputFormat extends HoodieCommonTestHarnessJun this.fs = MiniClusterUtil.fileSystem; jobConf = new JobConf(); hadoopConf = HoodieTestUtils.getDefaultHadoopConf(); - assertTrue(fs.mkdirs(new Path(folder.getRoot().getPath()))); + assertTrue(fs.mkdirs(new Path(tempDir.toAbsolutePath().toString()))); HoodieTestUtils.init(MiniClusterUtil.configuration, tempDir.toAbsolutePath().toString(), HoodieTableType.MERGE_ON_READ); } diff --git 
a/hudi-hive-sync/src/test/java/org/apache/hudi/hive/TestUtil.java b/hudi-hive-sync/src/test/java/org/apache/hudi/hive/TestUtil.java index 90c399524..86c78fb78 100644 --- a/hudi-hive-sync/src/test/java/org/apache/hudi/hive/TestUtil.java +++ b/hudi-hive-sync/src/test/java/org/apache/hudi/hive/TestUtil.java @@ -59,7 +59,7 @@ import org.apache.zookeeper.server.ZooKeeperServer; import org.joda.time.DateTime; import org.joda.time.format.DateTimeFormat; import org.joda.time.format.DateTimeFormatter; -import org.junit.runners.model.InitializationError; +import org.junit.platform.commons.JUnitException; import java.io.File; import java.io.IOException; @@ -156,7 +156,7 @@ public class TestUtil { } static void createCOWTable(String instantTime, int numberOfPartitions) - throws IOException, InitializationError, URISyntaxException { + throws IOException, URISyntaxException { Path path = new Path(hiveSyncConfig.basePath); FileIOUtils.deleteDirectory(new File(hiveSyncConfig.basePath)); HoodieTableMetaClient.initTableType(configuration, hiveSyncConfig.basePath, HoodieTableType.COPY_ON_WRITE, @@ -170,8 +170,7 @@ public class TestUtil { } static void createMORTable(String commitTime, String deltaCommitTime, int numberOfPartitions, - boolean createDeltaCommit) - throws IOException, InitializationError, URISyntaxException, InterruptedException { + boolean createDeltaCommit) throws IOException, URISyntaxException, InterruptedException { Path path = new Path(hiveSyncConfig.basePath); FileIOUtils.deleteDirectory(new File(hiveSyncConfig.basePath)); HoodieTableMetaClient.initTableType(configuration, hiveSyncConfig.basePath, HoodieTableType.MERGE_ON_READ, @@ -312,9 +311,9 @@ public class TestUtil { return logWriter.getLogFile(); } - private static void checkResult(boolean result) throws InitializationError { + private static void checkResult(boolean result) { if (!result) { - throw new InitializationError("Could not initialize"); + throw new JUnitException("Could not initialize"); } } diff 
--git a/hudi-utilities/src/test/java/org/apache/hudi/utilities/TestHoodieSnapshotCopier.java b/hudi-utilities/src/test/java/org/apache/hudi/utilities/TestHoodieSnapshotCopier.java index 97ab12cc2..8b202d3f4 100644 --- a/hudi-utilities/src/test/java/org/apache/hudi/utilities/TestHoodieSnapshotCopier.java +++ b/hudi-utilities/src/test/java/org/apache/hudi/utilities/TestHoodieSnapshotCopier.java @@ -21,7 +21,7 @@ package org.apache.hudi.utilities; import org.apache.hudi.common.HoodieTestDataGenerator; import org.apache.hudi.common.fs.FSUtils; import org.apache.hudi.common.model.HoodieTestUtils; -import org.apache.hudi.common.testutils.HoodieCommonTestHarnessJunit5; +import org.apache.hudi.common.testutils.HoodieCommonTestHarness; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -39,7 +39,7 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; -public class TestHoodieSnapshotCopier extends HoodieCommonTestHarnessJunit5 { +public class TestHoodieSnapshotCopier extends HoodieCommonTestHarness { private static final String TEST_WRITE_TOKEN = "1-0-1"; diff --git a/hudi-utilities/src/test/java/org/apache/hudi/utilities/checkpointing/TestKafkaConnectHdfsProvider.java b/hudi-utilities/src/test/java/org/apache/hudi/utilities/checkpointing/TestKafkaConnectHdfsProvider.java index 8a59bf909..9aca63575 100644 --- a/hudi-utilities/src/test/java/org/apache/hudi/utilities/checkpointing/TestKafkaConnectHdfsProvider.java +++ b/hudi-utilities/src/test/java/org/apache/hudi/utilities/checkpointing/TestKafkaConnectHdfsProvider.java @@ -20,7 +20,7 @@ package org.apache.hudi.utilities.checkpointing; import org.apache.hudi.common.config.TypedProperties; import org.apache.hudi.common.model.HoodieTestUtils; -import org.apache.hudi.common.testutils.HoodieCommonTestHarnessJunit5; +import 
org.apache.hudi.common.testutils.HoodieCommonTestHarness; import org.apache.hudi.exception.HoodieException; import org.apache.hadoop.conf.Configuration; @@ -32,7 +32,7 @@ import java.io.File; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; -public class TestKafkaConnectHdfsProvider extends HoodieCommonTestHarnessJunit5 { +public class TestKafkaConnectHdfsProvider extends HoodieCommonTestHarness { private String topicPath = null; private Configuration hadoopConf = null; diff --git a/pom.xml b/pom.xml index ecfd35645..f43f7f570 100644 --- a/pom.xml +++ b/pom.xml @@ -82,7 +82,6 @@ 2.0.0 2.17 1.10.1 - 4.12 5.6.1 5.6.1 3.3.3 @@ -815,13 +814,6 @@ test - - junit - junit - ${junit.version} - test - - org.junit.jupiter junit-jupiter-api diff --git a/style/checkstyle.xml b/style/checkstyle.xml index f0c423c26..9e03882d1 100644 --- a/style/checkstyle.xml +++ b/style/checkstyle.xml @@ -264,8 +264,10 @@ - - + + +