[HUDI-812] Migrate hudi common tests to JUnit 5 (#1590)

* [HUDI-812] Migrate hudi-common tests to JUnit 5
Raymond Xu
2020-05-06 04:15:20 -07:00
committed by GitHub
parent e21441ad83
commit 366bb10d8c
43 changed files with 714 additions and 828 deletions


@@ -23,7 +23,7 @@ import org.apache.hudi.common.fs.FSUtils;
 import org.apache.hudi.common.minicluster.HdfsTestService;
 import org.apache.hudi.common.model.HoodieTestUtils;
 import org.apache.hudi.common.table.HoodieTableMetaClient;
-import org.apache.hudi.common.testutils.HoodieCommonTestHarnessJunit5;
+import org.apache.hudi.common.testutils.HoodieCommonTestHarness;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -45,7 +45,7 @@ import java.util.concurrent.atomic.AtomicInteger;
 /**
  * The test harness for resource initialization and cleanup.
  */
-public abstract class HoodieClientTestHarness extends HoodieCommonTestHarnessJunit5 implements Serializable {
+public abstract class HoodieClientTestHarness extends HoodieCommonTestHarness implements Serializable {
 private static final Logger LOG = LoggerFactory.getLogger(HoodieClientTestHarness.class);


@@ -147,12 +147,6 @@
     <scope>test</scope>
   </dependency>
-  <dependency>
-    <groupId>junit</groupId>
-    <artifactId>junit</artifactId>
-    <scope>test</scope>
-  </dependency>
   <dependency>
     <groupId>org.junit.jupiter</groupId>
     <artifactId>junit-jupiter-api</artifactId>
@@ -192,14 +186,8 @@
   <dependency>
     <groupId>com.github.stefanbirkner</groupId>
     <artifactId>system-rules</artifactId>
-    <version>1.16.0</version>
+    <version>1.17.2</version>
     <scope>test</scope>
-    <exclusions>
-      <exclusion>
-        <groupId>junit</groupId>
-        <artifactId>junit-dep</artifactId>
-      </exclusion>
-    </exclusions>
   </dependency>
   <!-- HBase -->


@@ -18,72 +18,65 @@
 package org.apache.hudi.common.bloom;
-import org.junit.Assert;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-import org.junit.runners.Parameterized.Parameters;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.Collection;
 import java.util.List;
 import java.util.UUID;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 /**
  * Unit tests {@link SimpleBloomFilter} and {@link HoodieDynamicBoundedBloomFilter}.
  */
-@RunWith(Parameterized.class)
 public class TestBloomFilter {
-  private final String versionToTest;
   // name attribute is optional, provide a unique name for test
   // multiple parameters, uses Collection<Object[]>
-  @Parameters()
-  public static Collection<Object[]> data() {
-    return Arrays.asList(new Object[][] {
-        {BloomFilterTypeCode.SIMPLE.name()},
-        {BloomFilterTypeCode.DYNAMIC_V0.name()}
-    });
+  public static List<Arguments> bloomFilterTypeCodes() {
+    return Arrays.asList(
+        Arguments.of(BloomFilterTypeCode.SIMPLE.name()),
+        Arguments.of(BloomFilterTypeCode.DYNAMIC_V0.name())
+    );
   }
-  public TestBloomFilter(String versionToTest) {
-    this.versionToTest = versionToTest;
-  }
-  @Test
-  public void testAddKey() {
+  @ParameterizedTest
+  @MethodSource("bloomFilterTypeCodes")
+  public void testAddKey(String typeCode) {
     List<String> inputs;
     int[] sizes = {100, 1000, 10000};
     for (int size : sizes) {
       inputs = new ArrayList<>();
-      BloomFilter filter = getBloomFilter(versionToTest, size, 0.000001, size * 10);
+      BloomFilter filter = getBloomFilter(typeCode, size, 0.000001, size * 10);
       for (int i = 0; i < size; i++) {
         String key = UUID.randomUUID().toString();
         inputs.add(key);
         filter.add(key);
       }
       for (java.lang.String key : inputs) {
-        Assert.assertTrue("Filter should have returned true for " + key, filter.mightContain(key));
+        assertTrue(filter.mightContain(key), "Filter should have returned true for " + key);
       }
       for (int i = 0; i < 100; i++) {
         String randomKey = UUID.randomUUID().toString();
         if (inputs.contains(randomKey)) {
-          Assert.assertTrue("Filter should have returned true for " + randomKey, filter.mightContain(randomKey));
+          assertTrue(filter.mightContain(randomKey), "Filter should have returned true for " + randomKey);
         }
       }
     }
   }
-  @Test
-  public void testSerialize() {
+  @ParameterizedTest
+  @MethodSource("bloomFilterTypeCodes")
+  public void testSerialize(String typeCode) {
     List<String> inputs;
     int[] sizes = {100, 1000, 10000};
     for (int size : sizes) {
       inputs = new ArrayList<>();
-      BloomFilter filter = getBloomFilter(versionToTest, size, 0.000001, size * 10);
+      BloomFilter filter = getBloomFilter(typeCode, size, 0.000001, size * 10);
       for (int i = 0; i < size; i++) {
         String key = UUID.randomUUID().toString();
         inputs.add(key);
@@ -92,9 +85,9 @@ public class TestBloomFilter {
       String serString = filter.serializeToString();
       BloomFilter recreatedBloomFilter = BloomFilterFactory
-          .fromString(serString, versionToTest);
+          .fromString(serString, typeCode);
       for (String key : inputs) {
-        Assert.assertTrue("Filter should have returned true for " + key, recreatedBloomFilter.mightContain(key));
+        assertTrue(recreatedBloomFilter.mightContain(key), "Filter should have returned true for " + key);
       }
     }
   }

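The TestBloomFilter hunk above is the commit's template for parameterized tests: the JUnit 4 @RunWith(Parameterized.class) runner, constructor injection, and @Parameters factory collapse into a static @MethodSource factory whose arguments are injected into each test method. A minimal, self-contained sketch of that pattern (class and value names here are illustrative, not taken from the commit):

    import java.util.stream.Stream;
    import org.junit.jupiter.params.ParameterizedTest;
    import org.junit.jupiter.params.provider.Arguments;
    import org.junit.jupiter.params.provider.MethodSource;
    import static org.junit.jupiter.api.Assertions.assertTrue;

    class ParameterizedMigrationSketch {
      // Static factory replaces the JUnit 4 @Parameters method; one Arguments per invocation.
      static Stream<Arguments> codecs() {
        return Stream.of(Arguments.of("SIMPLE"), Arguments.of("DYNAMIC_V0"));
      }

      // The parameter arrives per-invocation instead of through a constructor field.
      @ParameterizedTest
      @MethodSource("codecs")
      void roundTrips(String codec) {
        assertTrue(codec.length() > 0, "codec name should be non-empty: " + codec);
      }
    }

One consequence of this shape: parameters can differ per test method, whereas the JUnit 4 runner forced every test in the class to share one parameter set.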

@@ -19,11 +19,13 @@
 package org.apache.hudi.common.bloom;
 import org.apache.hadoop.util.hash.Hash;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import java.util.UUID;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 /**
  * Unit tests {@link InternalDynamicBloomFilter} for size bounding.
  */
@@ -48,9 +50,9 @@ public class TestInternalDynamicBloomFilter {
     if (index != 0) {
       int curLength = serString.length();
       if (index > indexForMaxGrowth) {
-        Assert.assertEquals("Length should not increase after hitting max entries", curLength, lastKnownBloomSize);
+        assertEquals(curLength, lastKnownBloomSize, "Length should not increase after hitting max entries");
       } else {
-        Assert.assertTrue("Length should increase until max entries are reached", curLength > lastKnownBloomSize);
+        assertTrue(curLength > lastKnownBloomSize, "Length should increase until max entries are reached");
       }
     }
     lastKnownBloomSize = serString.length();

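A detail that recurs through the rest of this commit: JUnit 4 assertions take the failure message as the first argument, while JUnit 5 assertions take it as the last, optionally as a Supplier<String> that is only evaluated on failure. A small sketch of both JUnit 5 shapes (names illustrative):

    import static org.junit.jupiter.api.Assertions.assertEquals;

    class MessageOrderSketch {
      void check(int curLength, int lastKnownBloomSize) {
        // JUnit 4 was: Assert.assertEquals("message", expected, actual); the message now goes last.
        assertEquals(curLength, lastKnownBloomSize, "Length should not increase after hitting max entries");
        // Alternatively, defer building the message until the assertion actually fails.
        assertEquals(curLength, lastKnownBloomSize,
            () -> "Length should not increase after hitting max entries, got " + curLength);
      }
    }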

@@ -18,25 +18,25 @@
 package org.apache.hudi.common.fs;
-import org.apache.hudi.common.HoodieCommonTestHarness;
 import org.apache.hudi.common.model.HoodieLogFile;
 import org.apache.hudi.common.model.HoodieTestUtils;
 import org.apache.hudi.common.table.HoodieTableMetaClient;
 import org.apache.hudi.common.table.timeline.HoodieInstant;
 import org.apache.hudi.common.table.timeline.HoodieTimeline;
+import org.apache.hudi.common.testutils.HoodieCommonTestHarness;
 import org.apache.hudi.exception.HoodieException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
-import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Rule;
-import org.junit.Test;
 import org.junit.contrib.java.lang.system.EnvironmentVariables;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import java.io.File;
-import java.io.FilenameFilter;
 import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Paths;
 import java.text.SimpleDateFormat;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -46,13 +46,17 @@ import java.util.UUID;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 /**
  * Tests file system utils.
  */
 public class TestFSUtils extends HoodieCommonTestHarness {
   private final long minRollbackToKeep = 10;
   private final long minCleanToKeep = 10;
@@ -61,7 +65,7 @@ public class TestFSUtils extends HoodieCommonTestHarness {
   @Rule
   public final EnvironmentVariables environmentVariables = new EnvironmentVariables();
-  @Before
+  @BeforeEach
   public void setUp() throws IOException {
     initMetaClient();
   }
@@ -120,10 +124,10 @@ public class TestFSUtils extends HoodieCommonTestHarness {
       return true;
     }, true);
-    assertTrue("Hoodie MetaFolder MUST be skipped but got :" + collected,
-        collected.stream().noneMatch(s -> s.contains(HoodieTableMetaClient.METAFOLDER_NAME)));
+    assertTrue(collected.stream().noneMatch(s -> s.contains(HoodieTableMetaClient.METAFOLDER_NAME)),
+        "Hoodie MetaFolder MUST be skipped but got :" + collected);
     // Check if only files are listed
-    Assert.assertEquals(2, collected.size());
+    assertEquals(2, collected.size());
     // Test including meta-folder
     final List<String> collected2 = new ArrayList<>();
@@ -132,10 +136,10 @@ public class TestFSUtils extends HoodieCommonTestHarness {
       return true;
     }, false);
-    Assert.assertFalse("Hoodie MetaFolder will be present :" + collected2,
-        collected2.stream().noneMatch(s -> s.contains(HoodieTableMetaClient.METAFOLDER_NAME)));
+    assertFalse(collected2.stream().noneMatch(s -> s.contains(HoodieTableMetaClient.METAFOLDER_NAME)),
+        "Hoodie MetaFolder will be present :" + collected2);
     // Check if only files are listed including hoodie.properties
-    Assert.assertEquals("Collected=" + collected2, 5, collected2.size());
+    assertEquals(5, collected2.size(), "Collected=" + collected2);
   }
   @Test
@@ -143,7 +147,7 @@ public class TestFSUtils extends HoodieCommonTestHarness {
     String instantTime = new SimpleDateFormat("yyyyMMddHHmmss").format(new Date());
     String fileName = UUID.randomUUID().toString();
     String fullFileName = FSUtils.makeDataFileName(instantTime, TEST_WRITE_TOKEN, fileName);
-    assertEquals(FSUtils.getCommitTime(fullFileName), instantTime);
+    assertEquals(instantTime, FSUtils.getCommitTime(fullFileName));
   }
   @Test
@@ -151,7 +155,7 @@ public class TestFSUtils extends HoodieCommonTestHarness {
     String instantTime = new SimpleDateFormat("yyyyMMddHHmmss").format(new Date());
     String fileName = UUID.randomUUID().toString();
     String fullFileName = FSUtils.makeDataFileName(instantTime, TEST_WRITE_TOKEN, fileName);
-    assertEquals(FSUtils.getFileId(fullFileName), fileName);
+    assertEquals(fileName, FSUtils.getFileId(fullFileName));
   }
   @Test
@@ -204,10 +208,10 @@ public class TestFSUtils extends HoodieCommonTestHarness {
     assertEquals(fileName, FSUtils.getFileIdFromLogPath(rlPath));
     assertEquals("100", FSUtils.getBaseCommitTimeFromLogPath(rlPath));
     assertEquals(1, FSUtils.getFileVersionFromLog(rlPath));
-    Assert.assertNull(FSUtils.getTaskPartitionIdFromLogPath(rlPath));
-    Assert.assertNull(FSUtils.getStageIdFromLogPath(rlPath));
-    Assert.assertNull(FSUtils.getTaskAttemptIdFromLogPath(rlPath));
-    Assert.assertNull(FSUtils.getWriteTokenFromLogPath(rlPath));
+    assertNull(FSUtils.getTaskPartitionIdFromLogPath(rlPath));
+    assertNull(FSUtils.getStageIdFromLogPath(rlPath));
+    assertNull(FSUtils.getTaskAttemptIdFromLogPath(rlPath));
+    assertNull(FSUtils.getWriteTokenFromLogPath(rlPath));
   }
   @Test
@@ -222,9 +226,9 @@ public class TestFSUtils extends HoodieCommonTestHarness {
     assertEquals(fileName, FSUtils.getFileIdFromLogPath(rlPath));
     assertEquals("100", FSUtils.getBaseCommitTimeFromLogPath(rlPath));
     assertEquals(2, FSUtils.getFileVersionFromLog(rlPath));
-    assertEquals(new Integer(1), FSUtils.getTaskPartitionIdFromLogPath(rlPath));
-    assertEquals(new Integer(0), FSUtils.getStageIdFromLogPath(rlPath));
-    assertEquals(new Integer(1), FSUtils.getTaskAttemptIdFromLogPath(rlPath));
+    assertEquals(1, FSUtils.getTaskPartitionIdFromLogPath(rlPath));
+    assertEquals(0, FSUtils.getStageIdFromLogPath(rlPath));
+    assertEquals(1, FSUtils.getTaskAttemptIdFromLogPath(rlPath));
   }
   /**
@@ -277,20 +281,19 @@ public class TestFSUtils extends HoodieCommonTestHarness {
     List<HoodieInstant> hoodieInstants = new ArrayList<>();
     // create rollback files
     for (String instantTime : instantTimes) {
-      new File(basePath + "/" + HoodieTableMetaClient.METAFOLDER_NAME + "/"
-          + instantTime + HoodieTimeline.ROLLBACK_EXTENSION).createNewFile();
+      Files.createFile(Paths.get(basePath,
+          HoodieTableMetaClient.METAFOLDER_NAME,
+          instantTime + HoodieTimeline.ROLLBACK_EXTENSION));
      hoodieInstants.add(new HoodieInstant(false, HoodieTimeline.ROLLBACK_ACTION, instantTime));
     }
+    String metaPath = Paths.get(basePath, ".hoodie").toString();
     FSUtils.deleteOlderRollbackMetaFiles(FSUtils.getFs(basePath, new Configuration()),
-        basePath + "/.hoodie", hoodieInstants.stream());
-    File[] rollbackFiles = new File(basePath + "/.hoodie").listFiles(new FilenameFilter() {
-      @Override
-      public boolean accept(File dir, String name) {
-        return name.contains(HoodieTimeline.ROLLBACK_EXTENSION);
-      }
-    });
-    assertTrue(rollbackFiles.length == minRollbackToKeep);
+        metaPath, hoodieInstants.stream());
+    File[] rollbackFiles = new File(metaPath).listFiles((dir, name)
+        -> name.contains(HoodieTimeline.ROLLBACK_EXTENSION));
+    assertNotNull(rollbackFiles);
+    assertEquals(rollbackFiles.length, minRollbackToKeep);
   }
   @Test
@@ -301,19 +304,18 @@ public class TestFSUtils extends HoodieCommonTestHarness {
     List<HoodieInstant> hoodieInstants = new ArrayList<>();
     // create rollback files
     for (String instantTime : instantTimes) {
-      new File(basePath + "/" + HoodieTableMetaClient.METAFOLDER_NAME + "/"
-          + instantTime + HoodieTimeline.CLEAN_EXTENSION).createNewFile();
+      Files.createFile(Paths.get(basePath,
+          HoodieTableMetaClient.METAFOLDER_NAME,
+          instantTime + HoodieTimeline.CLEAN_EXTENSION));
       hoodieInstants.add(new HoodieInstant(false, HoodieTimeline.CLEAN_ACTION, instantTime));
     }
+    String metaPath = Paths.get(basePath, ".hoodie").toString();
     FSUtils.deleteOlderCleanMetaFiles(FSUtils.getFs(basePath, new Configuration()),
-        basePath + "/.hoodie", hoodieInstants.stream());
-    File[] cleanFiles = new File(basePath + "/.hoodie").listFiles(new FilenameFilter() {
-      @Override
-      public boolean accept(File dir, String name) {
-        return name.contains(HoodieTimeline.CLEAN_EXTENSION);
-      }
-    });
-    assertTrue(cleanFiles.length == minCleanToKeep);
+        metaPath, hoodieInstants.stream());
+    File[] cleanFiles = new File(metaPath).listFiles((dir, name)
+        -> name.contains(HoodieTimeline.CLEAN_EXTENSION));
+    assertNotNull(cleanFiles);
+    assertEquals(cleanFiles.length, minCleanToKeep);
   }
   @Test
@@ -329,29 +331,29 @@ public class TestFSUtils extends HoodieCommonTestHarness {
     // data file name
     String dataFileName = FSUtils.makeDataFileName(instantTime, writeToken, fileId);
-    assertTrue(instantTime.equals(FSUtils.getCommitTime(dataFileName)));
-    assertTrue(fileId.equals(FSUtils.getFileId(dataFileName)));
+    assertEquals(instantTime, FSUtils.getCommitTime(dataFileName));
+    assertEquals(fileId, FSUtils.getFileId(dataFileName));
     String logFileName = FSUtils.makeLogFileName(fileId, LOG_EXTENTION, instantTime, version, writeToken);
     assertTrue(FSUtils.isLogFile(new Path(logFileName)));
-    assertTrue(instantTime.equals(FSUtils.getBaseCommitTimeFromLogPath(new Path(logFileName))));
-    assertTrue(fileId.equals(FSUtils.getFileIdFromLogPath(new Path(logFileName))));
-    assertTrue(version == FSUtils.getFileVersionFromLog(new Path(logFileName)));
-    assertTrue(LOG_STR.equals(FSUtils.getFileExtensionFromLog(new Path(logFileName))));
+    assertEquals(instantTime, FSUtils.getBaseCommitTimeFromLogPath(new Path(logFileName)));
+    assertEquals(fileId, FSUtils.getFileIdFromLogPath(new Path(logFileName)));
+    assertEquals(version, FSUtils.getFileVersionFromLog(new Path(logFileName)));
+    assertEquals(LOG_STR, FSUtils.getFileExtensionFromLog(new Path(logFileName)));
     // create three versions of log file
-    String partitionPath = basePath + "/" + partitionStr;
-    new File(partitionPath).mkdirs();
+    java.nio.file.Path partitionPath = Paths.get(basePath, partitionStr);
+    Files.createDirectories(partitionPath);
     String log1 = FSUtils.makeLogFileName(fileId, LOG_EXTENTION, instantTime, 1, writeToken);
-    new File(partitionPath + "/" + log1).createNewFile();
+    Files.createFile(partitionPath.resolve(log1));
     String log2 = FSUtils.makeLogFileName(fileId, LOG_EXTENTION, instantTime, 2, writeToken);
-    new File(partitionPath + "/" + log2).createNewFile();
+    Files.createFile(partitionPath.resolve(log2));
     String log3 = FSUtils.makeLogFileName(fileId, LOG_EXTENTION, instantTime, 3, writeToken);
-    new File(partitionPath + "/" + log3).createNewFile();
-    assertTrue(3 == FSUtils.getLatestLogVersion(FSUtils.getFs(basePath, new Configuration()),
-        new Path(partitionPath), fileId, LOG_EXTENTION, instantTime).get().getLeft());
-    assertTrue(4 == FSUtils.computeNextLogVersion(FSUtils.getFs(basePath, new Configuration()),
-        new Path(partitionPath), fileId, LOG_EXTENTION, instantTime));
+    Files.createFile(partitionPath.resolve(log3));
+    assertEquals(3, (int) FSUtils.getLatestLogVersion(FSUtils.getFs(basePath, new Configuration()),
+        new Path(partitionPath.toString()), fileId, LOG_EXTENTION, instantTime).get().getLeft());
+    assertEquals(4, FSUtils.computeNextLogVersion(FSUtils.getFs(basePath, new Configuration()),
+        new Path(partitionPath.toString()), fileId, LOG_EXTENTION, instantTime));
   }
 }

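Besides swapping assertions, the TestFSUtils hunks above modernize file setup: java.io.File.createNewFile()/mkdirs(), whose boolean return values were silently ignored, give way to java.nio.file.Files calls that throw IOException on failure, and anonymous FilenameFilter classes become lambdas. A short sketch of the same idiom, assuming a caller-supplied temp directory (names illustrative):

    import java.io.File;
    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;

    class NioTestSetupSketch {
      // basePath is an assumed, caller-provided temp directory.
      static File[] rollbackFiles(String basePath) throws IOException {
        Path metaDir = Paths.get(basePath, ".hoodie");
        Files.createDirectories(metaDir);                  // throws instead of returning false
        Files.createFile(metaDir.resolve("001.rollback")); // fails loudly if the file already exists
        // Lambda replaces the anonymous java.io.FilenameFilter class.
        return metaDir.toFile().listFiles((dir, name) -> name.endsWith(".rollback"));
      }
    }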

@@ -30,14 +30,12 @@ import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Test;
 import java.io.File;
 import java.io.IOException;
 import java.nio.ByteBuffer;
-import java.util.Arrays;
 import java.util.HashSet;
 import java.util.Set;
 import java.util.UUID;
@@ -46,6 +44,9 @@ import static org.apache.hudi.common.fs.inline.FileSystemTestUtils.FILE_SCHEME;
 import static org.apache.hudi.common.fs.inline.FileSystemTestUtils.RANDOM;
 import static org.apache.hudi.common.fs.inline.FileSystemTestUtils.getPhantomFile;
 import static org.apache.hudi.common.fs.inline.FileSystemTestUtils.getRandomOuterInMemPath;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
 /**
  * Tests {@link InLineFileSystem} to inline HFile.
@@ -66,7 +67,7 @@ public class TestHFileInLining {
     inlineConf.set("fs." + InLineFileSystem.SCHEME + ".impl", InLineFileSystem.class.getName());
   }
-  @After
+  @AfterEach
   public void teardown() throws IOException {
     if (generatedPath != null) {
       File filePath = new File(generatedPath.toString().substring(generatedPath.toString().indexOf(':') + 1));
@@ -117,23 +118,22 @@ public class TestHFileInLining {
     Set<Integer> rowIdsToSearch = getRandomValidRowIds(10);
     for (int rowId : rowIdsToSearch) {
-      Assert.assertTrue("location lookup failed",
-          scanner.seekTo(KeyValue.createKeyValueFromKey(getSomeKey(rowId))) == 0);
+      assertEquals(0, scanner.seekTo(KeyValue.createKeyValueFromKey(getSomeKey(rowId))),
+          "location lookup failed");
       // read the key and see if it matches
       ByteBuffer readKey = scanner.getKey();
-      Assert.assertTrue("seeked key does not match", Arrays.equals(getSomeKey(rowId),
-          Bytes.toBytes(readKey)));
+      assertArrayEquals(getSomeKey(rowId), Bytes.toBytes(readKey), "seeked key does not match");
       scanner.seekTo(KeyValue.createKeyValueFromKey(getSomeKey(rowId)));
       ByteBuffer val1 = scanner.getValue();
       scanner.seekTo(KeyValue.createKeyValueFromKey(getSomeKey(rowId)));
       ByteBuffer val2 = scanner.getValue();
-      Assert.assertTrue(Arrays.equals(Bytes.toBytes(val1), Bytes.toBytes(val2)));
+      assertArrayEquals(Bytes.toBytes(val1), Bytes.toBytes(val2));
     }
     int[] invalidRowIds = {-4, maxRows, maxRows + 1, maxRows + 120, maxRows + 160, maxRows + 1000};
     for (int rowId : invalidRowIds) {
-      Assert.assertFalse("location lookup should have failed",
-          scanner.seekTo(KeyValue.createKeyValueFromKey(getSomeKey(rowId))) == 0);
+      assertNotEquals(0, scanner.seekTo(KeyValue.createKeyValueFromKey(getSomeKey(rowId))),
+          "location lookup should have failed");
     }
     reader.close();
     fin.close();
@@ -197,16 +197,16 @@ public class TestHFileInLining {
         Bytes.toBytes("qual"), Bytes.toBytes(valStr));
     byte[] keyBytes = new KeyValue.KeyOnlyKeyValue(Bytes.toBytes(key), 0,
         Bytes.toBytes(key).length).getKey();
-    Assert.assertTrue("bytes for keys do not match " + keyStr + " "
-        + Bytes.toString(Bytes.toBytes(key)), Arrays.equals(kv.getKey(), keyBytes));
+    assertArrayEquals(kv.getKey(), keyBytes,
+        "bytes for keys do not match " + keyStr + " " + Bytes.toString(Bytes.toBytes(key)));
     byte[] valBytes = Bytes.toBytes(val);
-    Assert.assertTrue("bytes for vals do not match " + valStr + " "
-        + Bytes.toString(valBytes), Arrays.equals(Bytes.toBytes(valStr), valBytes));
+    assertArrayEquals(Bytes.toBytes(valStr), valBytes,
+        "bytes for vals do not match " + valStr + " " + Bytes.toString(valBytes));
     if (!scanner.next()) {
       break;
     }
   }
-  Assert.assertEquals(i, start + n - 1);
+  assertEquals(i, start + n - 1);
   return (start + n);
 }


@@ -25,10 +25,9 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Ignore;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Disabled;
+import org.junit.jupiter.api.Test;
 import java.io.File;
 import java.io.FileNotFoundException;
@@ -41,6 +40,11 @@ import java.util.List;
 import static org.apache.hudi.common.fs.inline.FileSystemTestUtils.RANDOM;
 import static org.apache.hudi.common.fs.inline.FileSystemTestUtils.getRandomOuterFSPath;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 /**
  * Tests {@link InLineFileSystem}.
@@ -55,7 +59,7 @@ public class TestInLineFileSystem {
     this.listOfGeneratedPaths = new ArrayList<>();
   }
-  @After
+  @AfterEach
  public void teardown() throws IOException {
     for (Path pathToDelete : listOfGeneratedPaths) {
       File filePath = new File(pathToDelete.toString().substring(pathToDelete.toString().indexOf(':') + 1));
@@ -102,32 +106,32 @@ public class TestInLineFileSystem {
       Path inlinePath = FileSystemTestUtils.getPhantomFile(outerPath, startOffsetLengthPair.getLeft(), startOffsetLengthPair.getRight());
       InLineFileSystem inlineFileSystem = (InLineFileSystem) inlinePath.getFileSystem(conf);
       FSDataInputStream fsDataInputStream = inlineFileSystem.open(inlinePath);
-      Assert.assertTrue(inlineFileSystem.exists(inlinePath));
+      assertTrue(inlineFileSystem.exists(inlinePath));
       verifyFileStatus(expectedFileStatus, inlinePath, startOffsetLengthPair.getRight(), inlineFileSystem.getFileStatus(inlinePath));
       FileStatus[] actualFileStatuses = inlineFileSystem.listStatus(inlinePath);
-      Assert.assertEquals(1, actualFileStatuses.length);
+      assertEquals(1, actualFileStatuses.length);
       verifyFileStatus(expectedFileStatus, inlinePath, startOffsetLengthPair.getRight(), actualFileStatuses[0]);
       byte[] actualBytes = new byte[expectedBytes.length];
       fsDataInputStream.readFully(0, actualBytes);
-      Assert.assertArrayEquals(expectedBytes, actualBytes);
+      assertArrayEquals(expectedBytes, actualBytes);
       fsDataInputStream.close();
-      Assert.assertEquals(InLineFileSystem.SCHEME, inlineFileSystem.getScheme());
-      Assert.assertEquals(URI.create(InLineFileSystem.SCHEME), inlineFileSystem.getUri());
+      assertEquals(InLineFileSystem.SCHEME, inlineFileSystem.getScheme());
+      assertEquals(URI.create(InLineFileSystem.SCHEME), inlineFileSystem.getUri());
     }
   }
   @Test
-  @Ignore // Disabling flaky test for now https://issues.apache.org/jira/browse/HUDI-786
+  @Disabled("Disabling flaky test for now https://issues.apache.org/jira/browse/HUDI-786")
   public void testFileSystemApis() throws IOException {
     OuterPathInfo outerPathInfo = generateOuterFileAndGetInfo(1000);
     Path inlinePath = FileSystemTestUtils.getPhantomFile(outerPathInfo.outerPath, outerPathInfo.startOffset, outerPathInfo.length);
     InLineFileSystem inlineFileSystem = (InLineFileSystem) inlinePath.getFileSystem(conf);
-    FSDataInputStream fsDataInputStream = inlineFileSystem.open(inlinePath);
+    final FSDataInputStream fsDataInputStream = inlineFileSystem.open(inlinePath);
     byte[] actualBytes = new byte[outerPathInfo.expectedBytes.length];
     // verify pos
-    Assert.assertEquals(0 - outerPathInfo.startOffset, fsDataInputStream.getPos());
+    assertEquals(0 - outerPathInfo.startOffset, fsDataInputStream.getPos());
     fsDataInputStream.readFully(0, actualBytes);
-    Assert.assertArrayEquals(outerPathInfo.expectedBytes, actualBytes);
+    assertArrayEquals(outerPathInfo.expectedBytes, actualBytes);
     // read partial data
     // test read(long position, byte[] buffer, int offset, int length)
@@ -138,13 +142,9 @@ public class TestInLineFileSystem {
     fsDataInputStream.read(25, actualBytes, 100, 210);
     verifyArrayEquality(outerPathInfo.expectedBytes, 25, 210, actualBytes, 100, 210);
     // give length to read > than actual inline content
-    actualBytes = new byte[1100];
-    try {
-      fsDataInputStream.read(0, actualBytes, 0, 1101);
-      Assert.fail("Should have thrown IndexOutOfBoundsException");
-    } catch (IndexOutOfBoundsException e) {
-      // no op
-    }
+    assertThrows(IndexOutOfBoundsException.class, () -> {
+      fsDataInputStream.read(0, new byte[1100], 0, 1101);
+    }, "Should have thrown IndexOutOfBoundsException");
     // test readFully(long position, byte[] buffer, int offset, int length)
     actualBytes = new byte[100];
@@ -154,13 +154,9 @@ public class TestInLineFileSystem {
     fsDataInputStream.readFully(25, actualBytes, 100, 210);
     verifyArrayEquality(outerPathInfo.expectedBytes, 25, 210, actualBytes, 100, 210);
     // give length to read > than actual inline content
-    actualBytes = new byte[1100];
-    try {
-      fsDataInputStream.readFully(0, actualBytes, 0, 1101);
-      Assert.fail("Should have thrown IndexOutOfBoundsException");
-    } catch (IndexOutOfBoundsException e) {
-      // no op
-    }
+    assertThrows(IndexOutOfBoundsException.class, () -> {
+      fsDataInputStream.readFully(0, new byte[1100], 0, 1101);
+    }, "Should have thrown IndexOutOfBoundsException");
     // test readFully(long position, byte[] buffer)
     actualBytes = new byte[100];
@@ -185,28 +181,20 @@ public class TestInLineFileSystem {
     */
     // test read(ByteBuffer buf)
     ByteBuffer actualByteBuffer = ByteBuffer.allocate(100);
-    try {
-      fsDataInputStream.read(actualByteBuffer);
-      Assert.fail("Should have thrown");
-    } catch (UnsupportedOperationException e) {
-      // ignore
-    }
+    assertThrows(UnsupportedOperationException.class, () -> {
+      fsDataInputStream.read(actualByteBuffer);
+    }, "Should have thrown");
-    Assert.assertEquals(outerPathInfo.outerPath.getFileSystem(conf).open(outerPathInfo.outerPath).getFileDescriptor(), fsDataInputStream.getFileDescriptor());
+    assertEquals(outerPathInfo.outerPath.getFileSystem(conf).open(outerPathInfo.outerPath).getFileDescriptor(),
+        fsDataInputStream.getFileDescriptor());
-    try {
-      fsDataInputStream.setReadahead(10L);
-      Assert.fail("Should have thrown exception");
-    } catch (UnsupportedOperationException e) {
-      // ignore
-    }
+    assertThrows(UnsupportedOperationException.class, () -> {
+      fsDataInputStream.setReadahead(10L);
+    }, "Should have thrown exception");
-    try {
-      fsDataInputStream.setDropBehind(true);
-      Assert.fail("Should have thrown exception");
-    } catch (UnsupportedOperationException e) {
-      // ignore
-    }
+    assertThrows(UnsupportedOperationException.class, () -> {
+      fsDataInputStream.setDropBehind(true);
+    }, "Should have thrown exception");
     // yet to test
     // read(ByteBufferPool bufferPool, int maxLength, EnumSet<ReadOption> opts)
@@ -218,7 +206,8 @@ public class TestInLineFileSystem {
   private void verifyArrayEquality(byte[] expected, int expectedOffset, int expectedLength,
                                    byte[] actual, int actualOffset, int actualLength) {
-    Assert.assertArrayEquals(Arrays.copyOfRange(expected, expectedOffset, expectedOffset + expectedLength), Arrays.copyOfRange(actual, actualOffset, actualOffset + actualLength));
+    assertArrayEquals(Arrays.copyOfRange(expected, expectedOffset, expectedOffset + expectedLength),
+        Arrays.copyOfRange(actual, actualOffset, actualOffset + actualLength));
   }
   private OuterPathInfo generateOuterFileAndGetInfo(int inlineContentSize) throws IOException {
@@ -251,84 +240,63 @@ public class TestInLineFileSystem {
   public void testOpen() throws IOException {
     Path inlinePath = getRandomInlinePath();
     // open non existant path
-    try {
-      inlinePath.getFileSystem(conf).open(inlinePath);
-      Assert.fail("Should have thrown exception");
-    } catch (FileNotFoundException e) {
-      // ignore
-    }
+    assertThrows(FileNotFoundException.class, () -> {
+      inlinePath.getFileSystem(conf).open(inlinePath);
+    }, "Should have thrown exception");
   }
   @Test
   public void testCreate() throws IOException {
     Path inlinePath = getRandomInlinePath();
-    try {
-      inlinePath.getFileSystem(conf).create(inlinePath, true);
-      Assert.fail("Should have thrown exception");
-    } catch (UnsupportedOperationException e) {
-      // ignore
-    }
+    assertThrows(UnsupportedOperationException.class, () -> {
+      inlinePath.getFileSystem(conf).create(inlinePath, true);
+    }, "Should have thrown exception");
   }
   @Test
   public void testAppend() throws IOException {
     Path inlinePath = getRandomInlinePath();
-    try {
-      inlinePath.getFileSystem(conf).append(inlinePath);
-      Assert.fail("Should have thrown exception");
-    } catch (UnsupportedOperationException e) {
-      // ignore
-    }
+    assertThrows(UnsupportedOperationException.class, () -> {
+      inlinePath.getFileSystem(conf).append(inlinePath);
+    }, "Should have thrown exception");
   }
   @Test
   public void testRename() throws IOException {
     Path inlinePath = getRandomInlinePath();
-    try {
-      inlinePath.getFileSystem(conf).rename(inlinePath, inlinePath);
-      Assert.fail("Should have thrown exception");
-    } catch (UnsupportedOperationException e) {
-      // ignore
-    }
+    assertThrows(UnsupportedOperationException.class, () -> {
+      inlinePath.getFileSystem(conf).rename(inlinePath, inlinePath);
+    }, "Should have thrown exception");
   }
   @Test
   public void testDelete() throws IOException {
     Path inlinePath = getRandomInlinePath();
-    try {
-      inlinePath.getFileSystem(conf).delete(inlinePath, true);
-      Assert.fail("Should have thrown exception");
-    } catch (UnsupportedOperationException e) {
-      // ignore
-    }
+    assertThrows(UnsupportedOperationException.class, () -> {
+      inlinePath.getFileSystem(conf).delete(inlinePath, true);
+    }, "Should have thrown exception");
   }
   @Test
   public void testgetWorkingDir() throws IOException {
     Path inlinePath = getRandomInlinePath();
-    try {
-      inlinePath.getFileSystem(conf).getWorkingDirectory();
-      Assert.fail("Should have thrown exception");
-    } catch (UnsupportedOperationException e) {
-      // ignore
-    }
+    assertThrows(UnsupportedOperationException.class, () -> {
+      inlinePath.getFileSystem(conf).getWorkingDirectory();
+    }, "Should have thrown exception");
   }
   @Test
   public void testsetWorkingDirectory() throws IOException {
     Path inlinePath = getRandomInlinePath();
-    try {
-      inlinePath.getFileSystem(conf).setWorkingDirectory(inlinePath);
-      Assert.fail("Should have thrown exception");
-    } catch (UnsupportedOperationException e) {
-      // ignore
-    }
+    assertThrows(UnsupportedOperationException.class, () -> {
+      inlinePath.getFileSystem(conf).setWorkingDirectory(inlinePath);
+    }, "Should have thrown exception");
   }
   @Test
   public void testExists() throws IOException {
     Path inlinePath = getRandomInlinePath();
-    Assert.assertFalse(inlinePath.getFileSystem(conf).exists(inlinePath));
+    assertFalse(inlinePath.getFileSystem(conf).exists(inlinePath));
   }
   private Path getRandomInlinePath() {
@@ -338,15 +306,15 @@ public class TestInLineFileSystem {
   }
   private void verifyFileStatus(FileStatus expected, Path inlinePath, long expectedLength, FileStatus actual) {
-    Assert.assertEquals(inlinePath, actual.getPath());
-    Assert.assertEquals(expectedLength, actual.getLen());
-    Assert.assertEquals(expected.getAccessTime(), actual.getAccessTime());
-    Assert.assertEquals(expected.getBlockSize(), actual.getBlockSize());
-    Assert.assertEquals(expected.getGroup(), actual.getGroup());
-    Assert.assertEquals(expected.getModificationTime(), actual.getModificationTime());
-    Assert.assertEquals(expected.getOwner(), actual.getOwner());
-    Assert.assertEquals(expected.getPermission(), actual.getPermission());
-    Assert.assertEquals(expected.getReplication(), actual.getReplication());
+    assertEquals(inlinePath, actual.getPath());
+    assertEquals(expectedLength, actual.getLen());
+    assertEquals(expected.getAccessTime(), actual.getAccessTime());
+    assertEquals(expected.getBlockSize(), actual.getBlockSize());
+    assertEquals(expected.getGroup(), actual.getGroup());
+    assertEquals(expected.getModificationTime(), actual.getModificationTime());
+    assertEquals(expected.getOwner(), actual.getOwner());
+    assertEquals(expected.getPermission(), actual.getPermission());
+    assertEquals(expected.getReplication(), actual.getReplication());
   }
   class OuterPathInfo {

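TestInLineFileSystem above concentrates the commit's most frequent rewrite: each try { call(); Assert.fail(...); } catch (ExpectedException e) { } block becomes a single Assertions.assertThrows call, @Ignore with a trailing comment becomes @Disabled with the reason as the annotation value, and the input stream is marked final, making it safely capturable by the assertThrows lambdas. A minimal sketch (the path and messages are illustrative):

    import java.io.FileInputStream;
    import java.io.FileNotFoundException;
    import org.junit.jupiter.api.Disabled;
    import org.junit.jupiter.api.Test;
    import static org.junit.jupiter.api.Assertions.assertThrows;

    class AssertThrowsSketch {
      @Test
      void openMissingFileFails() {
        // One call replaces try/fail/catch, and returns the exception for further checks if needed.
        FileNotFoundException e = assertThrows(FileNotFoundException.class,
            () -> new FileInputStream("/no/such/file").close(),
            "Should have thrown exception");
      }

      @Test
      @Disabled("the reason travels with the annotation instead of a comment")
      void knownFlakyTest() {
      }
    }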

@@ -21,14 +21,17 @@ package org.apache.hudi.common.fs.inline;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import java.io.IOException;
 import java.net.URI;
 import static org.apache.hudi.common.fs.inline.FileSystemTestUtils.RANDOM;
 import static org.apache.hudi.common.fs.inline.FileSystemTestUtils.getRandomOuterInMemPath;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertThrows;
 /**
  * Unit tests {@link InMemoryFileSystem}.
@@ -53,93 +56,75 @@ public class TestInMemoryFileSystem {
     out.close();
     InMemoryFileSystem inMemoryFileSystem = (InMemoryFileSystem) outerInMemFSPath.getFileSystem(conf);
     byte[] bytesRead = inMemoryFileSystem.getFileAsBytes();
-    Assert.assertArrayEquals(randomBytes, bytesRead);
-    Assert.assertEquals(InMemoryFileSystem.SCHEME, inMemoryFileSystem.getScheme());
-    Assert.assertEquals(URI.create(outerInMemFSPath.toString()), inMemoryFileSystem.getUri());
+    assertArrayEquals(randomBytes, bytesRead);
+    assertEquals(InMemoryFileSystem.SCHEME, inMemoryFileSystem.getScheme());
+    assertEquals(URI.create(outerInMemFSPath.toString()), inMemoryFileSystem.getUri());
   }
   @Test
   public void testOpen() throws IOException {
     Path outerInMemFSPath = getRandomOuterInMemPath();
-    Assert.assertNull(outerInMemFSPath.getFileSystem(conf).open(outerInMemFSPath));
+    assertNull(outerInMemFSPath.getFileSystem(conf).open(outerInMemFSPath));
   }
   @Test
   public void testAppend() throws IOException {
     Path outerInMemFSPath = getRandomOuterInMemPath();
-    Assert.assertNull(outerInMemFSPath.getFileSystem(conf).append(outerInMemFSPath));
+    assertNull(outerInMemFSPath.getFileSystem(conf).append(outerInMemFSPath));
   }
   @Test
   public void testRename() throws IOException {
     Path outerInMemFSPath = getRandomOuterInMemPath();
-    try {
-      outerInMemFSPath.getFileSystem(conf).rename(outerInMemFSPath, outerInMemFSPath);
-      Assert.fail("Should have thrown exception");
-    } catch (UnsupportedOperationException e) {
-      // ignore
-    }
+    assertThrows(UnsupportedOperationException.class, () -> {
+      outerInMemFSPath.getFileSystem(conf).rename(outerInMemFSPath, outerInMemFSPath);
+    }, "Should have thrown exception");
   }
   @Test
   public void testDelete() throws IOException {
     Path outerInMemFSPath = getRandomOuterInMemPath();
-    try {
-      outerInMemFSPath.getFileSystem(conf).delete(outerInMemFSPath, true);
-      Assert.fail("Should have thrown exception");
-    } catch (UnsupportedOperationException e) {
-      // ignore
-    }
+    assertThrows(UnsupportedOperationException.class, () -> {
+      outerInMemFSPath.getFileSystem(conf).delete(outerInMemFSPath, true);
+    }, "Should have thrown exception");
   }
   @Test
   public void testgetWorkingDir() throws IOException {
     Path outerInMemFSPath = getRandomOuterInMemPath();
-    Assert.assertNull(outerInMemFSPath.getFileSystem(conf).getWorkingDirectory());
+    assertNull(outerInMemFSPath.getFileSystem(conf).getWorkingDirectory());
   }
   @Test
   public void testsetWorkingDirectory() throws IOException {
     Path outerInMemFSPath = getRandomOuterInMemPath();
-    try {
-      outerInMemFSPath.getFileSystem(conf).setWorkingDirectory(outerInMemFSPath);
-      Assert.fail("Should have thrown exception");
-    } catch (UnsupportedOperationException e) {
-      // ignore
-    }
+    assertThrows(UnsupportedOperationException.class, () -> {
+      outerInMemFSPath.getFileSystem(conf).setWorkingDirectory(outerInMemFSPath);
+    }, "Should have thrown exception");
   }
   @Test
   public void testExists() throws IOException {
     Path outerInMemFSPath = getRandomOuterInMemPath();
-    try {
-      outerInMemFSPath.getFileSystem(conf).exists(outerInMemFSPath);
-      Assert.fail("Should have thrown exception");
-    } catch (UnsupportedOperationException e) {
-      // ignore
-    }
+    assertThrows(UnsupportedOperationException.class, () -> {
+      outerInMemFSPath.getFileSystem(conf).exists(outerInMemFSPath);
+    }, "Should have thrown exception");
   }
   @Test
   public void testFileStatus() throws IOException {
     Path outerInMemFSPath = getRandomOuterInMemPath();
-    try {
-      outerInMemFSPath.getFileSystem(conf).getFileStatus(outerInMemFSPath);
-      Assert.fail("Should have thrown exception");
-    } catch (UnsupportedOperationException e) {
-      // ignore
-    }
+    assertThrows(UnsupportedOperationException.class, () -> {
+      outerInMemFSPath.getFileSystem(conf).getFileStatus(outerInMemFSPath);
+    }, "Should have thrown exception");
  }
   @Test
   public void testListStatus() throws IOException {
     Path outerInMemFSPath = getRandomOuterInMemPath();
-    try {
-      outerInMemFSPath.getFileSystem(conf).listStatus(outerInMemFSPath);
-      Assert.fail("Should have thrown exception");
-    } catch (UnsupportedOperationException e) {
-      // ignore
-    }
+    assertThrows(UnsupportedOperationException.class, () -> {
+      outerInMemFSPath.getFileSystem(conf).listStatus(outerInMemFSPath);
    }, "Should have thrown exception");
   }
 }


@@ -80,8 +80,8 @@ import java.util.UUID;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.fail;
 /**
  * A utility class for testing.
@@ -352,7 +352,7 @@ public class HoodieTestUtils {
     Iterator<?> iter1 = expected.iterator();
     Iterator<?> iter2 = actual.iterator();
     while (iter1.hasNext() && iter2.hasNext()) {
-      assertEquals(message, iter1.next(), iter2.next());
+      assertEquals(iter1.next(), iter2.next(), message);
     }
     assert !iter1.hasNext() && !iter2.hasNext();
   }


@@ -20,11 +20,14 @@ package org.apache.hudi.common.model;
 import org.apache.hudi.common.util.FileIOUtils;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import java.util.List;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertSame;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 /**
  * Tests hoodie commit metadata {@link HoodieCommitMetadata}.
  */
@@ -36,17 +39,17 @@ public class TestHoodieCommitMetadata {
     List<HoodieWriteStat> fakeHoodieWriteStats = HoodieTestUtils.generateFakeHoodieWriteStat(100);
     HoodieCommitMetadata commitMetadata = new HoodieCommitMetadata();
     fakeHoodieWriteStats.forEach(stat -> commitMetadata.addWriteStat(stat.getPartitionPath(), stat));
-    Assert.assertTrue(commitMetadata.getTotalCreateTime() > 0);
-    Assert.assertTrue(commitMetadata.getTotalUpsertTime() > 0);
-    Assert.assertTrue(commitMetadata.getTotalScanTime() > 0);
-    Assert.assertTrue(commitMetadata.getTotalLogFilesCompacted() > 0);
+    assertTrue(commitMetadata.getTotalCreateTime() > 0);
+    assertTrue(commitMetadata.getTotalUpsertTime() > 0);
+    assertTrue(commitMetadata.getTotalScanTime() > 0);
+    assertTrue(commitMetadata.getTotalLogFilesCompacted() > 0);
     String serializedCommitMetadata = commitMetadata.toJsonString();
     HoodieCommitMetadata metadata =
         HoodieCommitMetadata.fromJsonString(serializedCommitMetadata, HoodieCommitMetadata.class);
     // Make sure timing metrics are not written to instant file
-    Assert.assertEquals(0, (long) metadata.getTotalScanTime());
-    Assert.assertTrue(metadata.getTotalLogFilesCompacted() > 0);
+    assertEquals(0, (long) metadata.getTotalScanTime());
+    assertTrue(metadata.getTotalLogFilesCompacted() > 0);
   }
   @Test
@@ -56,17 +59,17 @@ public class TestHoodieCommitMetadata {
         FileIOUtils.readAsUTFString(TestHoodieCommitMetadata.class.getResourceAsStream("/old-version.commit"));
     HoodieCommitMetadata metadata =
         HoodieCommitMetadata.fromJsonString(serializedCommitMetadata, HoodieCommitMetadata.class);
-    Assert.assertTrue(metadata.getOperationType() == WriteOperationType.UNKNOWN);
+    assertSame(metadata.getOperationType(), WriteOperationType.UNKNOWN);
     // test operate type
     HoodieCommitMetadata commitMetadata = new HoodieCommitMetadata();
     commitMetadata.setOperationType(WriteOperationType.INSERT);
-    Assert.assertTrue(commitMetadata.getOperationType() == WriteOperationType.INSERT);
+    assertSame(commitMetadata.getOperationType(), WriteOperationType.INSERT);
     // test serialized
     serializedCommitMetadata = commitMetadata.toJsonString();
     metadata =
         HoodieCommitMetadata.fromJsonString(serializedCommitMetadata, HoodieCommitMetadata.class);
-    Assert.assertTrue(metadata.getOperationType() == WriteOperationType.INSERT);
+    assertSame(metadata.getOperationType(), WriteOperationType.INSERT);
   }
 }


@@ -23,15 +23,14 @@ import org.apache.hudi.common.util.SchemaTestUtil;
 import org.apache.avro.generic.GenericRecord;
 import org.apache.avro.generic.IndexedRecord;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import java.util.List;
 import java.util.UUID;
 import java.util.stream.Collectors;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertThrows;
 /**
  * Tests for {@link HoodieRecord}.
@@ -40,7 +39,7 @@ public class TestHoodieRecord {
   private HoodieRecord hoodieRecord;
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     final List<IndexedRecord> indexedRecords = SchemaTestUtil.generateHoodieTestRecords(0, 1);
     final List<HoodieRecord> hoodieRecords =
@@ -53,12 +52,9 @@ public class TestHoodieRecord {
   public void testModificationAfterSeal() {
     hoodieRecord.seal();
     final HoodieRecordLocation location = new HoodieRecordLocation("100", "0");
-    try {
-      hoodieRecord.setCurrentLocation(location);
-      fail("should fail since modification after sealed is not allowed");
-    } catch (Exception e) {
-      Assert.assertTrue(e instanceof UnsupportedOperationException);
-    }
+    assertThrows(UnsupportedOperationException.class, () -> {
+      hoodieRecord.setCurrentLocation(location);
+    }, "should fail since modification after sealed is not allowed");
   }
   @Test

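TestHoodieRecord also shows the lifecycle-annotation mapping that runs through this commit: org.junit.Before and org.junit.After become org.junit.jupiter.api.BeforeEach and AfterEach, and (though no such case appears in this diff) JUnit 4's @BeforeClass/@AfterClass correspond to JUnit 5's @BeforeAll/@AfterAll. A compact lifecycle skeleton for reference (method bodies are placeholders):

    import org.junit.jupiter.api.AfterEach;
    import org.junit.jupiter.api.BeforeAll;
    import org.junit.jupiter.api.BeforeEach;
    import org.junit.jupiter.api.Test;

    class LifecycleSketch {
      @BeforeAll               // JUnit 4: @BeforeClass; must be static by default in JUnit 5
      static void onceBeforeAll() {
      }

      @BeforeEach              // JUnit 4: @Before
      void setUp() {
      }

      @Test                    // org.junit.jupiter.api.Test; classes and methods may be package-private
      void behaves() {
      }

      @AfterEach               // JUnit 4: @After
      void tearDown() {
      }
    }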

@@ -21,14 +21,14 @@ package org.apache.hudi.common.model;
 import org.apache.hudi.common.fs.FSUtils;
 import org.apache.hadoop.fs.Path;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import java.text.SimpleDateFormat;
 import java.util.Date;
 import java.util.UUID;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNull;
 /**
  * Tests hoodie write stat {@link HoodieWriteStat}.
View File

@@ -20,11 +20,11 @@ package org.apache.hudi.common.storage;
import org.apache.hudi.common.fs.StorageSchemes; import org.apache.hudi.common.fs.StorageSchemes;
import org.junit.Test; import org.junit.jupiter.api.Test;
import static org.junit.Assert.assertFalse; import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.Assert.assertTrue; import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.Assert.fail; import static org.junit.jupiter.api.Assertions.assertTrue;
/** /**
* Tests {@link StorageSchemes}. * Tests {@link StorageSchemes}.
@@ -42,11 +42,8 @@ public class TestStorageSchemes {
assertFalse(StorageSchemes.isAppendSupported("abfs")); assertFalse(StorageSchemes.isAppendSupported("abfs"));
assertFalse(StorageSchemes.isAppendSupported("oss")); assertFalse(StorageSchemes.isAppendSupported("oss"));
assertTrue(StorageSchemes.isAppendSupported("viewfs")); assertTrue(StorageSchemes.isAppendSupported("viewfs"));
try { assertThrows(IllegalArgumentException.class, () -> {
StorageSchemes.isAppendSupported("s2"); StorageSchemes.isAppendSupported("s2");
fail("Should throw exception for unsupported schemes"); }, "Should throw exception for unsupported schemes");
} catch (IllegalArgumentException ignore) {
// expected.
}
} }
} }

View File

@@ -22,7 +22,7 @@ import org.apache.hudi.common.model.HoodieTestUtils;
import org.apache.hudi.common.table.timeline.HoodieActiveTimeline; import org.apache.hudi.common.table.timeline.HoodieActiveTimeline;
import org.apache.hudi.common.table.timeline.HoodieInstant; import org.apache.hudi.common.table.timeline.HoodieInstant;
import org.apache.hudi.common.table.timeline.HoodieTimeline; import org.apache.hudi.common.table.timeline.HoodieTimeline;
import org.apache.hudi.common.testutils.HoodieCommonTestHarnessJunit5; import org.apache.hudi.common.testutils.HoodieCommonTestHarness;
import org.apache.hudi.common.util.Option; import org.apache.hudi.common.util.Option;
import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.BeforeEach;
@@ -41,7 +41,7 @@ import static org.junit.jupiter.api.Assertions.assertTrue;
/** /**
* Tests hoodie table meta client {@link HoodieTableMetaClient}. * Tests hoodie table meta client {@link HoodieTableMetaClient}.
*/ */
public class TestHoodieTableMetaClient extends HoodieCommonTestHarnessJunit5 { public class TestHoodieTableMetaClient extends HoodieCommonTestHarness {
@BeforeEach @BeforeEach
public void init() throws IOException { public void init() throws IOException {

View File

@@ -19,7 +19,6 @@
package org.apache.hudi.common.table.log; package org.apache.hudi.common.table.log;
import org.apache.hudi.avro.HoodieAvroUtils; import org.apache.hudi.avro.HoodieAvroUtils;
import org.apache.hudi.common.HoodieCommonTestHarness;
import org.apache.hudi.common.fs.FSUtils; import org.apache.hudi.common.fs.FSUtils;
import org.apache.hudi.common.minicluster.MiniClusterUtil; import org.apache.hudi.common.minicluster.MiniClusterUtil;
import org.apache.hudi.common.model.HoodieArchivedLogFile; import org.apache.hudi.common.model.HoodieArchivedLogFile;
@@ -36,6 +35,7 @@ import org.apache.hudi.common.table.log.block.HoodieDeleteBlock;
import org.apache.hudi.common.table.log.block.HoodieLogBlock; import org.apache.hudi.common.table.log.block.HoodieLogBlock;
import org.apache.hudi.common.table.log.block.HoodieLogBlock.HeaderMetadataType; import org.apache.hudi.common.table.log.block.HoodieLogBlock.HeaderMetadataType;
import org.apache.hudi.common.table.log.block.HoodieLogBlock.HoodieLogBlockType; import org.apache.hudi.common.table.log.block.HoodieLogBlock.HoodieLogBlockType;
import org.apache.hudi.common.testutils.HoodieCommonTestHarness;
import org.apache.hudi.common.util.SchemaTestUtil; import org.apache.hudi.common.util.SchemaTestUtil;
import org.apache.hudi.exception.CorruptedLogFileException; import org.apache.hudi.exception.CorruptedLogFileException;
@@ -46,19 +46,18 @@ import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.junit.After; import org.junit.jupiter.api.AfterAll;
import org.junit.AfterClass; import org.junit.jupiter.api.AfterEach;
import org.junit.Before; import org.junit.jupiter.api.BeforeAll;
import org.junit.BeforeClass; import org.junit.jupiter.api.BeforeEach;
import org.junit.Test; import org.junit.jupiter.api.Test;
import org.junit.runner.RunWith; import org.junit.jupiter.params.ParameterizedTest;
import org.junit.runners.Parameterized; import org.junit.jupiter.params.provider.ValueSource;
import java.io.IOException; import java.io.IOException;
import java.io.UncheckedIOException; import java.io.UncheckedIOException;
import java.net.URISyntaxException; import java.net.URISyntaxException;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection; import java.util.Collection;
import java.util.Collections; import java.util.Collections;
import java.util.HashMap; import java.util.HashMap;
@@ -69,56 +68,45 @@ import java.util.Set;
import java.util.stream.Collectors; import java.util.stream.Collectors;
import static org.apache.hudi.common.util.SchemaTestUtil.getSimpleSchema; import static org.apache.hudi.common.util.SchemaTestUtil.getSimpleSchema;
import static org.junit.Assert.assertEquals; import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.Assert.assertFalse; import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.Assert.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.Assert.assertTrue; import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.Assert.fail; import static org.junit.jupiter.api.Assertions.assertTrue;
/** /**
* Tests hoodie log format {@link HoodieLogFormat}. * Tests hoodie log format {@link HoodieLogFormat}.
*/ */
@SuppressWarnings("Duplicates") @SuppressWarnings("Duplicates")
@RunWith(Parameterized.class)
public class TestHoodieLogFormat extends HoodieCommonTestHarness { public class TestHoodieLogFormat extends HoodieCommonTestHarness {
private static String BASE_OUTPUT_PATH = "/tmp/"; private static String BASE_OUTPUT_PATH = "/tmp/";
private FileSystem fs; private FileSystem fs;
private Path partitionPath; private Path partitionPath;
private int bufferSize = 4096; private int bufferSize = 4096;
private Boolean readBlocksLazily;
public TestHoodieLogFormat(Boolean readBlocksLazily) { @BeforeAll
this.readBlocksLazily = readBlocksLazily;
}
@Parameterized.Parameters(name = "LogBlockReadMode")
public static Collection<Boolean[]> data() {
return Arrays.asList(new Boolean[][] {{true}, {false}});
}
@BeforeClass
public static void setUpClass() throws IOException, InterruptedException { public static void setUpClass() throws IOException, InterruptedException {
// Append is not supported in LocalFileSystem. HDFS needs to be set up. // Append is not supported in LocalFileSystem. HDFS needs to be set up.
MiniClusterUtil.setUp(); MiniClusterUtil.setUp();
} }
@AfterClass @AfterAll
public static void tearDownClass() { public static void tearDownClass() {
MiniClusterUtil.shutdown(); MiniClusterUtil.shutdown();
} }
@Before @BeforeEach
public void setUp() throws IOException, InterruptedException { public void setUp() throws IOException, InterruptedException {
this.fs = MiniClusterUtil.fileSystem; this.fs = MiniClusterUtil.fileSystem;
assertTrue(fs.mkdirs(new Path(folder.getRoot().getPath()))); assertTrue(fs.mkdirs(new Path(tempDir.toAbsolutePath().toString())));
this.partitionPath = new Path(folder.getRoot().getPath()); this.partitionPath = new Path(tempDir.toAbsolutePath().toString());
this.basePath = folder.getRoot().getParent(); this.basePath = tempDir.getParent().toString();
HoodieTestUtils.init(MiniClusterUtil.configuration, basePath, HoodieTableType.MERGE_ON_READ); HoodieTestUtils.init(MiniClusterUtil.configuration, basePath, HoodieTableType.MERGE_ON_READ);
} }
@After @AfterEach
public void tearDown() throws IOException { public void tearDown() throws IOException {
fs.delete(partitionPath, true); fs.delete(partitionPath, true);
} }
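Taken together, the hunks above replace constructor injection via @RunWith(Parameterized.class) with per-method parameters, @BeforeClass/@AfterClass with @BeforeAll/@AfterAll, and the JUnit 4 TemporaryFolder rule with the harness-provided tempDir. A rough, self-contained sketch of the target shape (class and method names are illustrative, and @TempDir here stands in for the directory the common test harness supplies):

import static org.junit.jupiter.api.Assertions.assertTrue;

import java.nio.file.Files;
import java.nio.file.Path;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.io.TempDir;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.ValueSource;

class ParameterizedMigrationExample {

  @TempDir
  Path tempDir; // injected fresh per test; replaces the TemporaryFolder @Rule

  @BeforeAll // replaces @BeforeClass
  static void setUpClass() { /* start shared fixtures, e.g. a mini cluster */ }

  @AfterAll // replaces @AfterClass
  static void tearDownClass() { /* stop shared fixtures */ }

  // The parameter now arrives per method instead of via a constructor, so a
  // single class can freely mix parameterized and plain @Test methods.
  @ParameterizedTest
  @ValueSource(booleans = {true, false})
  void readsBlocks(boolean readBlocksLazily) {
    assertTrue(Files.isDirectory(tempDir));
  }
}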
@@ -128,9 +116,9 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness {
Writer writer = Writer writer =
HoodieLogFormat.newWriterBuilder().onParentPath(partitionPath).withFileExtension(HoodieLogFile.DELTA_EXTENSION) HoodieLogFormat.newWriterBuilder().onParentPath(partitionPath).withFileExtension(HoodieLogFile.DELTA_EXTENSION)
.withFileId("test-fileid1").overBaseCommit("100").withFs(fs).build(); .withFileId("test-fileid1").overBaseCommit("100").withFs(fs).build();
assertEquals("Just created this log, size should be 0", 0, writer.getCurrentSize()); assertEquals(0, writer.getCurrentSize(), "Just created this log, size should be 0");
assertTrue("Check all log files should start with a .", writer.getLogFile().getFileName().startsWith(".")); assertTrue(writer.getLogFile().getFileName().startsWith("."), "Check all log files should start with a .");
assertEquals("Version should be 1 for new log created", 1, writer.getLogFile().getLogVersion()); assertEquals(1, writer.getLogFile().getLogVersion(), "Version should be 1 for new log created");
} }
@Test @Test
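Note the argument reordering in this and the following hunks: JUnit 4's assertEquals(message, expected, actual) becomes JUnit 5's assertEquals(expected, actual, message). Only the message moves; expected still precedes actual. A small illustration:

import static org.junit.jupiter.api.Assertions.assertEquals;

import org.junit.jupiter.api.Test;

class MessageOrderExample {

  @Test
  void messageMovesLast() {
    long size = 0L;
    // JUnit 4: assertEquals("Just created this log, size should be 0", 0, size);
    // JUnit 5 takes the failure message as the last parameter instead.
    assertEquals(0, size, "Just created this log, size should be 0");
  }
}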
@@ -145,9 +133,9 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness {
HoodieAvroDataBlock dataBlock = new HoodieAvroDataBlock(records, header); HoodieAvroDataBlock dataBlock = new HoodieAvroDataBlock(records, header);
writer = writer.appendBlock(dataBlock); writer = writer.appendBlock(dataBlock);
long size = writer.getCurrentSize(); long size = writer.getCurrentSize();
assertTrue("We just wrote a block - size should be > 0", size > 0); assertTrue(size > 0, "We just wrote a block - size should be > 0");
assertEquals("Write should be auto-flushed. The size reported by FileStatus and the writer should match", size, assertEquals(size, fs.getFileStatus(writer.getLogFile().getPath()).getLen(),
fs.getFileStatus(writer.getLogFile().getPath()).getLen()); "Write should be auto-flushed. The size reported by FileStatus and the writer should match");
writer.close(); writer.close();
} }
@@ -175,8 +163,8 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness {
header.put(HoodieLogBlock.HeaderMetadataType.SCHEMA, getSimpleSchema().toString()); header.put(HoodieLogBlock.HeaderMetadataType.SCHEMA, getSimpleSchema().toString());
dataBlock = new HoodieAvroDataBlock(records, header); dataBlock = new HoodieAvroDataBlock(records, header);
writer = writer.appendBlock(dataBlock); writer = writer.appendBlock(dataBlock);
assertEquals("This should be a new log file and hence size should be 0", 0, writer.getCurrentSize()); assertEquals(0, writer.getCurrentSize(), "This should be a new log file and hence size should be 0");
assertEquals("Version should be rolled to 2", 2, writer.getLogFile().getLogVersion()); assertEquals(2, writer.getLogFile().getLogVersion(), "Version should be rolled to 2");
writer.close(); writer.close();
} }
@@ -232,7 +220,7 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness {
writer.close(); writer.close();
writer2.close(); writer2.close();
assertNotNull(logFile1.getLogWriteToken()); assertNotNull(logFile1.getLogWriteToken());
assertEquals("Log Files must have different versions", logFile1.getLogVersion(), logFile2.getLogVersion() - 1); assertEquals(logFile1.getLogVersion(), logFile2.getLogVersion() - 1, "Log Files must have different versions");
} }
@Test @Test
@@ -257,9 +245,9 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness {
dataBlock = new HoodieAvroDataBlock(records, header); dataBlock = new HoodieAvroDataBlock(records, header);
writer = writer.appendBlock(dataBlock); writer = writer.appendBlock(dataBlock);
long size2 = writer.getCurrentSize(); long size2 = writer.getCurrentSize();
assertTrue("We just wrote a new block - size2 should be > size1", size2 > size1); assertTrue(size2 > size1, "We just wrote a new block - size2 should be > size1");
assertEquals("Write should be auto-flushed. The size reported by FileStatus and the writer should match", size2, assertEquals(size2, fs.getFileStatus(writer.getLogFile().getPath()).getLen(),
fs.getFileStatus(writer.getLogFile().getPath()).getLen()); "Write should be auto-flushed. The size reported by FileStatus and the writer should match");
writer.close(); writer.close();
// Close and Open again and append 100 more records // Close and Open again and append 100 more records
@@ -271,18 +259,16 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness {
dataBlock = new HoodieAvroDataBlock(records, header); dataBlock = new HoodieAvroDataBlock(records, header);
writer = writer.appendBlock(dataBlock); writer = writer.appendBlock(dataBlock);
long size3 = writer.getCurrentSize(); long size3 = writer.getCurrentSize();
assertTrue("We just wrote a new block - size3 should be > size2", size3 > size2); assertTrue(size3 > size2, "We just wrote a new block - size3 should be > size2");
assertEquals("Write should be auto-flushed. The size reported by FileStatus and the writer should match", size3, assertEquals(size3, fs.getFileStatus(writer.getLogFile().getPath()).getLen(),
fs.getFileStatus(writer.getLogFile().getPath()).getLen()); "Write should be auto-flushed. The size reported by FileStatus and the writer should match");
writer.close(); writer.close();
// Cannot get the current size after closing the log // Cannot get the current size after closing the log
try { final Writer closedWriter = writer;
writer.getCurrentSize(); assertThrows(IllegalStateException.class, () -> {
fail("getCurrentSize should fail after the logAppender is closed"); closedWriter.getCurrentSize();
} catch (IllegalStateException e) { }, "getCurrentSize should fail after the logAppender is closed");
// pass
}
} }
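One wrinkle in the hunk above: writer is reassigned by each appendBlock call, so it is not effectively final and cannot be captured by the assertThrows lambda directly; the migration first copies it into a final local. A minimal sketch of the same constraint, with illustrative types only:

import static org.junit.jupiter.api.Assertions.assertThrows;

import org.junit.jupiter.api.Test;

class EffectivelyFinalCaptureExample {

  @Test
  void lambdaNeedsEffectivelyFinalCapture() {
    StringBuilder sb = new StringBuilder("log");
    sb = sb.append("-block");           // reassigned, like writer = writer.appendBlock(...)
    final StringBuilder captured = sb;  // copy into an effectively final local for the lambda
    assertThrows(StringIndexOutOfBoundsException.class, () -> captured.charAt(99));
  }
}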
/* /*
@@ -354,14 +340,14 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness {
writer.close(); writer.close();
Reader reader = HoodieLogFormat.newReader(fs, writer.getLogFile(), SchemaTestUtil.getSimpleSchema()); Reader reader = HoodieLogFormat.newReader(fs, writer.getLogFile(), SchemaTestUtil.getSimpleSchema());
assertTrue("We wrote a block, we should be able to read it", reader.hasNext()); assertTrue(reader.hasNext(), "We wrote a block, we should be able to read it");
HoodieLogBlock nextBlock = reader.next(); HoodieLogBlock nextBlock = reader.next();
assertEquals("The next block should be a data block", HoodieLogBlockType.AVRO_DATA_BLOCK, nextBlock.getBlockType()); assertEquals(HoodieLogBlockType.AVRO_DATA_BLOCK, nextBlock.getBlockType(), "The next block should be a data block");
HoodieAvroDataBlock dataBlockRead = (HoodieAvroDataBlock) nextBlock; HoodieAvroDataBlock dataBlockRead = (HoodieAvroDataBlock) nextBlock;
assertEquals("Read records size should be equal to the written records size", copyOfRecords.size(), assertEquals(copyOfRecords.size(), dataBlockRead.getRecords().size(),
dataBlockRead.getRecords().size()); "Read records size should be equal to the written records size");
assertEquals("Both records lists should be the same. (ordering guaranteed)", copyOfRecords, assertEquals(copyOfRecords, dataBlockRead.getRecords(),
dataBlockRead.getRecords()); "Both records lists should be the same. (ordering guaranteed)");
reader.close(); reader.close();
} }
@@ -405,35 +391,37 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness {
writer.close(); writer.close();
Reader reader = HoodieLogFormat.newReader(fs, writer.getLogFile(), SchemaTestUtil.getSimpleSchema()); Reader reader = HoodieLogFormat.newReader(fs, writer.getLogFile(), SchemaTestUtil.getSimpleSchema());
assertTrue("First block should be available", reader.hasNext()); assertTrue(reader.hasNext(), "First block should be available");
HoodieLogBlock nextBlock = reader.next(); HoodieLogBlock nextBlock = reader.next();
HoodieAvroDataBlock dataBlockRead = (HoodieAvroDataBlock) nextBlock; HoodieAvroDataBlock dataBlockRead = (HoodieAvroDataBlock) nextBlock;
assertEquals("Read records size should be equal to the written records size", copyOfRecords1.size(), assertEquals(copyOfRecords1.size(), dataBlockRead.getRecords().size(),
dataBlockRead.getRecords().size()); "Read records size should be equal to the written records size");
assertEquals("Both records lists should be the same. (ordering guaranteed)", copyOfRecords1, assertEquals(copyOfRecords1, dataBlockRead.getRecords(),
dataBlockRead.getRecords()); "Both records lists should be the same. (ordering guaranteed)");
assertEquals(dataBlockRead.getSchema(), getSimpleSchema()); assertEquals(dataBlockRead.getSchema(), getSimpleSchema());
reader.hasNext(); reader.hasNext();
nextBlock = reader.next(); nextBlock = reader.next();
dataBlockRead = (HoodieAvroDataBlock) nextBlock; dataBlockRead = (HoodieAvroDataBlock) nextBlock;
assertEquals("Read records size should be equal to the written records size", copyOfRecords2.size(), assertEquals(copyOfRecords2.size(), dataBlockRead.getRecords().size(),
dataBlockRead.getRecords().size()); "Read records size should be equal to the written records size");
assertEquals("Both records lists should be the same. (ordering guaranteed)", copyOfRecords2, assertEquals(copyOfRecords2, dataBlockRead.getRecords(),
dataBlockRead.getRecords()); "Both records lists should be the same. (ordering guaranteed)");
reader.hasNext(); reader.hasNext();
nextBlock = reader.next(); nextBlock = reader.next();
dataBlockRead = (HoodieAvroDataBlock) nextBlock; dataBlockRead = (HoodieAvroDataBlock) nextBlock;
assertEquals("Read records size should be equal to the written records size", copyOfRecords3.size(), assertEquals(copyOfRecords3.size(), dataBlockRead.getRecords().size(),
dataBlockRead.getRecords().size()); "Read records size should be equal to the written records size");
assertEquals("Both records lists should be the same. (ordering guaranteed)", copyOfRecords3, assertEquals(copyOfRecords3, dataBlockRead.getRecords(),
dataBlockRead.getRecords()); "Both records lists should be the same. (ordering guaranteed)");
reader.close(); reader.close();
} }
@Test @ParameterizedTest
public void testBasicAppendAndScanMultipleFiles() throws IOException, URISyntaxException, InterruptedException { @ValueSource(booleans = {true, false})
public void testBasicAppendAndScanMultipleFiles(boolean readBlocksLazily)
throws IOException, URISyntaxException, InterruptedException {
Writer writer = Writer writer =
HoodieLogFormat.newWriterBuilder().onParentPath(partitionPath).withFileExtension(HoodieLogFile.DELTA_EXTENSION) HoodieLogFormat.newWriterBuilder().onParentPath(partitionPath).withFileExtension(HoodieLogFile.DELTA_EXTENSION)
.withSizeThreshold(1024).withFileId("test-fileid1").overBaseCommit("100").withFs(fs).build(); .withSizeThreshold(1024).withFileId("test-fileid1").overBaseCommit("100").withFs(fs).build();
@@ -467,8 +455,8 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness {
scannedRecords.add((IndexedRecord) record.getData().getInsertValue(schema).get()); scannedRecords.add((IndexedRecord) record.getData().getInsertValue(schema).get());
} }
assertEquals("Scanner records count should be the same as appended records", scannedRecords.size(), assertEquals(scannedRecords.size(), allRecords.stream().mapToLong(Collection::size).sum(),
allRecords.stream().mapToLong(Collection::size).sum()); "Scanner records count should be the same as appended records");
} }
@@ -511,17 +499,16 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness {
writer = writer.appendBlock(dataBlock); writer = writer.appendBlock(dataBlock);
writer.close(); writer.close();
// First round of reads - we should be able to read the first block and then EOF // First round of reads - we should be able to read the first block and then EOF
Reader reader = HoodieLogFormat.newReader(fs, writer.getLogFile(), SchemaTestUtil.getSimpleSchema()); Reader reader = HoodieLogFormat.newReader(fs, writer.getLogFile(), SchemaTestUtil.getSimpleSchema());
assertTrue("First block should be available", reader.hasNext()); assertTrue(reader.hasNext(), "First block should be available");
reader.next(); reader.next();
assertTrue("We should have corrupted block next", reader.hasNext()); assertTrue(reader.hasNext(), "We should have corrupted block next");
HoodieLogBlock block = reader.next(); HoodieLogBlock block = reader.next();
assertEquals("The read block should be a corrupt block", HoodieLogBlockType.CORRUPT_BLOCK, block.getBlockType()); assertEquals(HoodieLogBlockType.CORRUPT_BLOCK, block.getBlockType(), "The read block should be a corrupt block");
assertTrue("Third block should be available", reader.hasNext()); assertTrue(reader.hasNext(), "Third block should be available");
reader.next(); reader.next();
assertFalse("There should be no more block left", reader.hasNext()); assertFalse(reader.hasNext(), "There should be no more block left");
reader.close(); reader.close();
@@ -552,23 +539,25 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness {
// Second round of reads - we should be able to read the first and last block // Second round of reads - we should be able to read the first and last block
reader = HoodieLogFormat.newReader(fs, writer.getLogFile(), SchemaTestUtil.getSimpleSchema()); reader = HoodieLogFormat.newReader(fs, writer.getLogFile(), SchemaTestUtil.getSimpleSchema());
assertTrue("First block should be available", reader.hasNext()); assertTrue(reader.hasNext(), "First block should be available");
reader.next(); reader.next();
assertTrue("We should get the 1st corrupted block next", reader.hasNext()); assertTrue(reader.hasNext(), "We should get the 1st corrupted block next");
reader.next(); reader.next();
assertTrue("Third block should be available", reader.hasNext()); assertTrue(reader.hasNext(), "Third block should be available");
reader.next(); reader.next();
assertTrue("We should get the 2nd corrupted block next", reader.hasNext()); assertTrue(reader.hasNext(), "We should get the 2nd corrupted block next");
block = reader.next(); block = reader.next();
assertEquals("The read block should be a corrupt block", HoodieLogBlockType.CORRUPT_BLOCK, block.getBlockType()); assertEquals(HoodieLogBlockType.CORRUPT_BLOCK, block.getBlockType(), "The read block should be a corrupt block");
assertTrue("We should get the last block next", reader.hasNext()); assertTrue(reader.hasNext(), "We should get the last block next");
reader.next(); reader.next();
assertFalse("We should have no more blocks left", reader.hasNext()); assertFalse(reader.hasNext(), "We should have no more blocks left");
reader.close(); reader.close();
} }
@Test @ParameterizedTest
public void testAvroLogRecordReaderBasic() throws IOException, URISyntaxException, InterruptedException { @ValueSource(booleans = {true, false})
public void testAvroLogRecordReaderBasic(boolean readBlocksLazily)
throws IOException, URISyntaxException, InterruptedException {
Schema schema = HoodieAvroUtils.addMetadataFields(getSimpleSchema()); Schema schema = HoodieAvroUtils.addMetadataFields(getSimpleSchema());
// Set a small threshold so that every block is a new version // Set a small threshold so that every block is a new version
Writer writer = Writer writer =
@@ -600,19 +589,20 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness {
HoodieMergedLogRecordScanner scanner = new HoodieMergedLogRecordScanner(fs, basePath, allLogFiles, schema, "100", HoodieMergedLogRecordScanner scanner = new HoodieMergedLogRecordScanner(fs, basePath, allLogFiles, schema, "100",
10240L, readBlocksLazily, false, bufferSize, BASE_OUTPUT_PATH); 10240L, readBlocksLazily, false, bufferSize, BASE_OUTPUT_PATH);
assertEquals("", 200, scanner.getTotalLogRecords()); assertEquals(200, scanner.getTotalLogRecords());
Set<String> readKeys = new HashSet<>(200); Set<String> readKeys = new HashSet<>(200);
scanner.forEach(s -> readKeys.add(s.getKey().getRecordKey())); scanner.forEach(s -> readKeys.add(s.getKey().getRecordKey()));
assertEquals("Stream collect should return all 200 records", 200, readKeys.size()); assertEquals(200, readKeys.size(), "Stream collect should return all 200 records");
copyOfRecords1.addAll(copyOfRecords2); copyOfRecords1.addAll(copyOfRecords2);
Set<String> originalKeys = Set<String> originalKeys =
copyOfRecords1.stream().map(s -> ((GenericRecord) s).get(HoodieRecord.RECORD_KEY_METADATA_FIELD).toString()) copyOfRecords1.stream().map(s -> ((GenericRecord) s).get(HoodieRecord.RECORD_KEY_METADATA_FIELD).toString())
.collect(Collectors.toSet()); .collect(Collectors.toSet());
assertEquals("CompositeAvroLogReader should return 200 records from 2 versions", originalKeys, readKeys); assertEquals(originalKeys, readKeys, "CompositeAvroLogReader should return 200 records from 2 versions");
} }
@Test @ParameterizedTest
public void testAvroLogRecordReaderWithRollbackTombstone() @ValueSource(booleans = {true, false})
public void testAvroLogRecordReaderWithRollbackTombstone(boolean readBlocksLazily)
throws IOException, URISyntaxException, InterruptedException { throws IOException, URISyntaxException, InterruptedException {
Schema schema = HoodieAvroUtils.addMetadataFields(getSimpleSchema()); Schema schema = HoodieAvroUtils.addMetadataFields(getSimpleSchema());
// Set a small threshold so that every block is a new version // Set a small threshold so that every block is a new version
@@ -661,15 +651,15 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness {
HoodieMergedLogRecordScanner scanner = new HoodieMergedLogRecordScanner(fs, basePath, allLogFiles, schema, "102", HoodieMergedLogRecordScanner scanner = new HoodieMergedLogRecordScanner(fs, basePath, allLogFiles, schema, "102",
10240L, readBlocksLazily, false, bufferSize, BASE_OUTPUT_PATH); 10240L, readBlocksLazily, false, bufferSize, BASE_OUTPUT_PATH);
assertEquals("We read 200 records from 2 write batches", 200, scanner.getTotalLogRecords()); assertEquals(200, scanner.getTotalLogRecords(), "We read 200 records from 2 write batches");
Set<String> readKeys = new HashSet<>(200); Set<String> readKeys = new HashSet<>(200);
scanner.forEach(s -> readKeys.add(s.getKey().getRecordKey())); scanner.forEach(s -> readKeys.add(s.getKey().getRecordKey()));
assertEquals("Stream collect should return all 200 records", 200, readKeys.size()); assertEquals(200, readKeys.size(), "Stream collect should return all 200 records");
copyOfRecords1.addAll(copyOfRecords3); copyOfRecords1.addAll(copyOfRecords3);
Set<String> originalKeys = Set<String> originalKeys =
copyOfRecords1.stream().map(s -> ((GenericRecord) s).get(HoodieRecord.RECORD_KEY_METADATA_FIELD).toString()) copyOfRecords1.stream().map(s -> ((GenericRecord) s).get(HoodieRecord.RECORD_KEY_METADATA_FIELD).toString())
.collect(Collectors.toSet()); .collect(Collectors.toSet());
assertEquals("CompositeAvroLogReader should return 200 records from 2 versions", originalKeys, readKeys); assertEquals(originalKeys, readKeys, "CompositeAvroLogReader should return 200 records from 2 versions");
} }
@Test @Test
@@ -740,19 +730,20 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness {
HoodieMergedLogRecordScanner scanner = new HoodieMergedLogRecordScanner(fs, basePath, allLogFiles, schema, "103", HoodieMergedLogRecordScanner scanner = new HoodieMergedLogRecordScanner(fs, basePath, allLogFiles, schema, "103",
10240L, true, false, bufferSize, BASE_OUTPUT_PATH); 10240L, true, false, bufferSize, BASE_OUTPUT_PATH);
assertEquals("We would read 200 records", 200, scanner.getTotalLogRecords()); assertEquals(200, scanner.getTotalLogRecords(), "We would read 200 records");
Set<String> readKeys = new HashSet<>(200); Set<String> readKeys = new HashSet<>(200);
scanner.forEach(s -> readKeys.add(s.getKey().getRecordKey())); scanner.forEach(s -> readKeys.add(s.getKey().getRecordKey()));
assertEquals("Stream collect should return all 200 records", 200, readKeys.size()); assertEquals(200, readKeys.size(), "Stream collect should return all 200 records");
copyOfRecords1.addAll(copyOfRecords3); copyOfRecords1.addAll(copyOfRecords3);
Set<String> originalKeys = Set<String> originalKeys =
copyOfRecords1.stream().map(s -> ((GenericRecord) s).get(HoodieRecord.RECORD_KEY_METADATA_FIELD).toString()) copyOfRecords1.stream().map(s -> ((GenericRecord) s).get(HoodieRecord.RECORD_KEY_METADATA_FIELD).toString())
.collect(Collectors.toSet()); .collect(Collectors.toSet());
assertEquals("CompositeAvroLogReader should return 200 records from 2 versions", originalKeys, readKeys); assertEquals(originalKeys, readKeys, "CompositeAvroLogReader should return 200 records from 2 versions");
} }
@Test @ParameterizedTest
public void testAvroLogRecordReaderWithDeleteAndRollback() @ValueSource(booleans = {true, false})
public void testAvroLogRecordReaderWithDeleteAndRollback(boolean readBlocksLazily)
throws IOException, URISyntaxException, InterruptedException { throws IOException, URISyntaxException, InterruptedException {
Schema schema = HoodieAvroUtils.addMetadataFields(getSimpleSchema()); Schema schema = HoodieAvroUtils.addMetadataFields(getSimpleSchema());
// Set a small threshold so that every block is a new version // Set a small threshold so that every block is a new version
@@ -799,7 +790,7 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness {
HoodieMergedLogRecordScanner scanner = new HoodieMergedLogRecordScanner(fs, basePath, allLogFiles, schema, "102", HoodieMergedLogRecordScanner scanner = new HoodieMergedLogRecordScanner(fs, basePath, allLogFiles, schema, "102",
10240L, readBlocksLazily, false, bufferSize, BASE_OUTPUT_PATH); 10240L, readBlocksLazily, false, bufferSize, BASE_OUTPUT_PATH);
assertEquals("We still would read 200 records", 200, scanner.getTotalLogRecords()); assertEquals(200, scanner.getTotalLogRecords(), "We still would read 200 records");
final List<String> readKeys = new ArrayList<>(200); final List<String> readKeys = new ArrayList<>(200);
final List<Boolean> emptyPayloads = new ArrayList<>(); final List<Boolean> emptyPayloads = new ArrayList<>();
scanner.forEach(s -> readKeys.add(s.getKey().getRecordKey())); scanner.forEach(s -> readKeys.add(s.getKey().getRecordKey()));
@@ -812,12 +803,12 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness {
throw new UncheckedIOException(io); throw new UncheckedIOException(io);
} }
}); });
assertEquals("Stream collect should return all 200 records", 200, readKeys.size()); assertEquals(200, readKeys.size(), "Stream collect should return all 200 records");
assertEquals("Stream collect should return all 50 records with empty payloads", 50, emptyPayloads.size()); assertEquals(50, emptyPayloads.size(), "Stream collect should return all 50 records with empty payloads");
originalKeys.removeAll(deletedKeys); originalKeys.removeAll(deletedKeys);
Collections.sort(originalKeys); Collections.sort(originalKeys);
Collections.sort(readKeys); Collections.sort(readKeys);
assertEquals("CompositeAvroLogReader should return 150 records from 2 versions", originalKeys, readKeys); assertEquals(originalKeys, readKeys, "CompositeAvroLogReader should return 150 records from 2 versions");
// Rollback the last block // Rollback the last block
header.put(HoodieLogBlock.HeaderMetadataType.INSTANT_TIME, "103"); header.put(HoodieLogBlock.HeaderMetadataType.INSTANT_TIME, "103");
@@ -831,11 +822,12 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness {
scanner = new HoodieMergedLogRecordScanner(fs, basePath, allLogFiles, schema, "101", 10240L, readBlocksLazily, scanner = new HoodieMergedLogRecordScanner(fs, basePath, allLogFiles, schema, "101", 10240L, readBlocksLazily,
false, bufferSize, BASE_OUTPUT_PATH); false, bufferSize, BASE_OUTPUT_PATH);
scanner.forEach(s -> readKeys.add(s.getKey().getRecordKey())); scanner.forEach(s -> readKeys.add(s.getKey().getRecordKey()));
assertEquals("Stream collect should return all 200 records after rollback of delete", 200, readKeys.size()); assertEquals(200, readKeys.size(), "Stream collect should return all 200 records after rollback of delete");
} }
@Test @ParameterizedTest
public void testAvroLogRecordReaderWithFailedRollbacks() @ValueSource(booleans = {true, false})
public void testAvroLogRecordReaderWithFailedRollbacks(boolean readBlocksLazily)
throws IOException, URISyntaxException, InterruptedException { throws IOException, URISyntaxException, InterruptedException {
// Write a Data block and Delete block with same InstantTime (written in same batch) // Write a Data block and Delete block with same InstantTime (written in same batch)
@@ -894,15 +886,16 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness {
// all data must be rolled back before merge // all data must be rolled back before merge
HoodieMergedLogRecordScanner scanner = new HoodieMergedLogRecordScanner(fs, basePath, allLogFiles, schema, "100", HoodieMergedLogRecordScanner scanner = new HoodieMergedLogRecordScanner(fs, basePath, allLogFiles, schema, "100",
10240L, readBlocksLazily, false, bufferSize, BASE_OUTPUT_PATH); 10240L, readBlocksLazily, false, bufferSize, BASE_OUTPUT_PATH);
assertEquals("We would have scanned 0 records because of rollback", 0, scanner.getTotalLogRecords()); assertEquals(0, scanner.getTotalLogRecords(), "We would have scanned 0 records because of rollback");
final List<String> readKeys = new ArrayList<>(); final List<String> readKeys = new ArrayList<>();
scanner.forEach(s -> readKeys.add(s.getKey().getRecordKey())); scanner.forEach(s -> readKeys.add(s.getKey().getRecordKey()));
assertEquals("Stream collect should return all 0 records", 0, readKeys.size()); assertEquals(0, readKeys.size(), "Stream collect should return all 0 records");
} }
@Test @ParameterizedTest
public void testAvroLogRecordReaderWithInsertDeleteAndRollback() @ValueSource(booleans = {true, false})
public void testAvroLogRecordReaderWithInsertDeleteAndRollback(boolean readBlocksLazily)
throws IOException, URISyntaxException, InterruptedException { throws IOException, URISyntaxException, InterruptedException {
// Write a Data block and Delete block with same InstantTime (written in same batch) // Write a Data block and Delete block with same InstantTime (written in same batch)
@@ -944,11 +937,12 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness {
HoodieMergedLogRecordScanner scanner = new HoodieMergedLogRecordScanner(fs, basePath, allLogFiles, schema, "100", HoodieMergedLogRecordScanner scanner = new HoodieMergedLogRecordScanner(fs, basePath, allLogFiles, schema, "100",
10240L, readBlocksLazily, false, bufferSize, BASE_OUTPUT_PATH); 10240L, readBlocksLazily, false, bufferSize, BASE_OUTPUT_PATH);
assertEquals("We would read 0 records", 0, scanner.getTotalLogRecords()); assertEquals(0, scanner.getTotalLogRecords(), "We would read 0 records");
} }
@Test @ParameterizedTest
public void testAvroLogRecordReaderWithInvalidRollback() @ValueSource(booleans = {true, false})
public void testAvroLogRecordReaderWithInvalidRollback(boolean readBlocksLazily)
throws IOException, URISyntaxException, InterruptedException { throws IOException, URISyntaxException, InterruptedException {
Schema schema = HoodieAvroUtils.addMetadataFields(getSimpleSchema()); Schema schema = HoodieAvroUtils.addMetadataFields(getSimpleSchema());
// Set a small threshold so that every block is a new version // Set a small threshold so that every block is a new version
@@ -977,14 +971,15 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness {
HoodieMergedLogRecordScanner scanner = new HoodieMergedLogRecordScanner(fs, basePath, allLogFiles, schema, "100", HoodieMergedLogRecordScanner scanner = new HoodieMergedLogRecordScanner(fs, basePath, allLogFiles, schema, "100",
10240L, readBlocksLazily, false, bufferSize, BASE_OUTPUT_PATH); 10240L, readBlocksLazily, false, bufferSize, BASE_OUTPUT_PATH);
assertEquals("We still would read 100 records", 100, scanner.getTotalLogRecords()); assertEquals(100, scanner.getTotalLogRecords(), "We still would read 100 records");
final List<String> readKeys = new ArrayList<>(100); final List<String> readKeys = new ArrayList<>(100);
scanner.forEach(s -> readKeys.add(s.getKey().getRecordKey())); scanner.forEach(s -> readKeys.add(s.getKey().getRecordKey()));
assertEquals("Stream collect should return all 150 records", 100, readKeys.size()); assertEquals(100, readKeys.size(), "Stream collect should return all 150 records");
} }
@Test @ParameterizedTest
public void testAvroLogRecordReaderWithInsertsDeleteAndRollback() @ValueSource(booleans = {true, false})
public void testAvroLogRecordReaderWithInsertsDeleteAndRollback(boolean readBlocksLazily)
throws IOException, URISyntaxException, InterruptedException { throws IOException, URISyntaxException, InterruptedException {
// Write 3 data blocks with the same InstantTime (written in same batch) // Write 3 data blocks with the same InstantTime (written in same batch)
@@ -1029,11 +1024,12 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness {
HoodieMergedLogRecordScanner scanner = new HoodieMergedLogRecordScanner(fs, basePath, allLogFiles, schema, "101", HoodieMergedLogRecordScanner scanner = new HoodieMergedLogRecordScanner(fs, basePath, allLogFiles, schema, "101",
10240L, readBlocksLazily, false, bufferSize, BASE_OUTPUT_PATH); 10240L, readBlocksLazily, false, bufferSize, BASE_OUTPUT_PATH);
assertEquals("We would read 0 records", 0, scanner.getTotalLogRecords()); assertEquals(0, scanner.getTotalLogRecords(), "We would read 0 records");
} }
@Test @ParameterizedTest
public void testAvroLogRecordReaderWithMixedInsertsCorruptsAndRollback() @ValueSource(booleans = {true, false})
public void testAvroLogRecordReaderWithMixedInsertsCorruptsAndRollback(boolean readBlocksLazily)
throws IOException, URISyntaxException, InterruptedException { throws IOException, URISyntaxException, InterruptedException {
// Write 3 data blocks with the same InstantTime (written in same batch) // Write 3 data blocks with the same InstantTime (written in same batch)
@@ -1118,7 +1114,7 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness {
HoodieMergedLogRecordScanner scanner = new HoodieMergedLogRecordScanner(fs, basePath, allLogFiles, schema, "101", HoodieMergedLogRecordScanner scanner = new HoodieMergedLogRecordScanner(fs, basePath, allLogFiles, schema, "101",
10240L, readBlocksLazily, false, bufferSize, BASE_OUTPUT_PATH); 10240L, readBlocksLazily, false, bufferSize, BASE_OUTPUT_PATH);
assertEquals("We would read 0 records", 0, scanner.getTotalLogRecords()); assertEquals(0, scanner.getTotalLogRecords(), "We would read 0 records");
} }
/* /*
@@ -1134,7 +1130,8 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness {
* duplicate data. * duplicate data.
* *
*/ */
private void testAvroLogRecordReaderMergingMultipleLogFiles(int numRecordsInLog1, int numRecordsInLog2) { private void testAvroLogRecordReaderMergingMultipleLogFiles(int numRecordsInLog1, int numRecordsInLog2,
boolean readBlocksLazily) {
try { try {
// Write one Data block with same InstantTime (written in same batch) // Write one Data block with same InstantTime (written in same batch)
Schema schema = HoodieAvroUtils.addMetadataFields(getSimpleSchema()); Schema schema = HoodieAvroUtils.addMetadataFields(getSimpleSchema());
@@ -1175,43 +1172,48 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness {
HoodieMergedLogRecordScanner scanner = new HoodieMergedLogRecordScanner(fs, basePath, allLogFiles, schema, HoodieMergedLogRecordScanner scanner = new HoodieMergedLogRecordScanner(fs, basePath, allLogFiles, schema,
"100", 10240L, readBlocksLazily, false, bufferSize, BASE_OUTPUT_PATH); "100", 10240L, readBlocksLazily, false, bufferSize, BASE_OUTPUT_PATH);
assertEquals("We would read 100 records", assertEquals(Math.max(numRecordsInLog1, numRecordsInLog2), scanner.getNumMergedRecordsInLog(),
Math.max(numRecordsInLog1, numRecordsInLog2), scanner.getNumMergedRecordsInLog()); "We would read 100 records");
} catch (Exception e) { } catch (Exception e) {
e.printStackTrace(); e.printStackTrace();
} }
} }
@Test @ParameterizedTest
public void testAvroLogRecordReaderWithFailedTaskInFirstStageAttempt() { @ValueSource(booleans = {true, false})
public void testAvroLogRecordReaderWithFailedTaskInFirstStageAttempt(boolean readBlocksLazily) {
/* /*
* FIRST_ATTEMPT_FAILED: * FIRST_ATTEMPT_FAILED:
* Original task from the stage attempt failed, but subsequent stage retry succeeded. * Original task from the stage attempt failed, but subsequent stage retry succeeded.
*/ */
testAvroLogRecordReaderMergingMultipleLogFiles(77, 100); testAvroLogRecordReaderMergingMultipleLogFiles(77, 100, readBlocksLazily);
} }
@Test @ParameterizedTest
public void testAvroLogRecordReaderWithFailedTaskInSecondStageAttempt() { @ValueSource(booleans = {true, false})
public void testAvroLogRecordReaderWithFailedTaskInSecondStageAttempt(boolean readBlocksLazily) {
/* /*
* SECOND_ATTEMPT_FAILED: * SECOND_ATTEMPT_FAILED:
* Original task from stage attempt succeeded, but subsequent retry attempt failed. * Original task from stage attempt succeeded, but subsequent retry attempt failed.
*/ */
testAvroLogRecordReaderMergingMultipleLogFiles(100, 66); testAvroLogRecordReaderMergingMultipleLogFiles(100, 66, readBlocksLazily);
} }
@Test @ParameterizedTest
public void testAvroLogRecordReaderTasksSucceededInBothStageAttempts() { @ValueSource(booleans = {true, false})
public void testAvroLogRecordReaderTasksSucceededInBothStageAttempts(boolean readBlocksLazily) {
/* /*
* BOTH_ATTEMPTS_SUCCEEDED: * BOTH_ATTEMPTS_SUCCEEDED:
* Original task from the stage attempt and duplicate task from the stage retry succeeded. * Original task from the stage attempt and duplicate task from the stage retry succeeded.
*/ */
testAvroLogRecordReaderMergingMultipleLogFiles(100, 100); testAvroLogRecordReaderMergingMultipleLogFiles(100, 100, readBlocksLazily);
} }
@Test @ParameterizedTest
public void testBasicAppendAndReadInReverse() throws IOException, URISyntaxException, InterruptedException { @ValueSource(booleans = {true, false})
public void testBasicAppendAndReadInReverse(boolean readBlocksLazily)
throws IOException, URISyntaxException, InterruptedException {
Writer writer = Writer writer =
HoodieLogFormat.newWriterBuilder().onParentPath(partitionPath).withFileExtension(HoodieLogFile.DELTA_EXTENSION) HoodieLogFormat.newWriterBuilder().onParentPath(partitionPath).withFileExtension(HoodieLogFile.DELTA_EXTENSION)
.withFileId("test-fileid1").overBaseCommit("100").withFs(fs).build(); .withFileId("test-fileid1").overBaseCommit("100").withFs(fs).build();
@@ -1250,37 +1252,39 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness {
HoodieLogFileReader reader = new HoodieLogFileReader(fs, writer.getLogFile(), SchemaTestUtil.getSimpleSchema(), HoodieLogFileReader reader = new HoodieLogFileReader(fs, writer.getLogFile(), SchemaTestUtil.getSimpleSchema(),
bufferSize, readBlocksLazily, true); bufferSize, readBlocksLazily, true);
assertTrue("Last block should be available", reader.hasPrev()); assertTrue(reader.hasPrev(), "Last block should be available");
HoodieLogBlock prevBlock = reader.prev(); HoodieLogBlock prevBlock = reader.prev();
HoodieAvroDataBlock dataBlockRead = (HoodieAvroDataBlock) prevBlock; HoodieAvroDataBlock dataBlockRead = (HoodieAvroDataBlock) prevBlock;
assertEquals("Third records size should be equal to the written records size", copyOfRecords3.size(), assertEquals(copyOfRecords3.size(), dataBlockRead.getRecords().size(),
dataBlockRead.getRecords().size()); "Third records size should be equal to the written records size");
assertEquals("Both records lists should be the same. (ordering guaranteed)", copyOfRecords3, assertEquals(copyOfRecords3, dataBlockRead.getRecords(),
dataBlockRead.getRecords()); "Both records lists should be the same. (ordering guaranteed)");
assertTrue("Second block should be available", reader.hasPrev()); assertTrue(reader.hasPrev(), "Second block should be available");
prevBlock = reader.prev(); prevBlock = reader.prev();
dataBlockRead = (HoodieAvroDataBlock) prevBlock; dataBlockRead = (HoodieAvroDataBlock) prevBlock;
assertEquals("Read records size should be equal to the written records size", copyOfRecords2.size(), assertEquals(copyOfRecords2.size(), dataBlockRead.getRecords().size(),
dataBlockRead.getRecords().size()); "Read records size should be equal to the written records size");
assertEquals("Both records lists should be the same. (ordering guaranteed)", copyOfRecords2, assertEquals(copyOfRecords2, dataBlockRead.getRecords(),
dataBlockRead.getRecords()); "Both records lists should be the same. (ordering guaranteed)");
assertTrue("First block should be available", reader.hasPrev()); assertTrue(reader.hasPrev(), "First block should be available");
prevBlock = reader.prev(); prevBlock = reader.prev();
dataBlockRead = (HoodieAvroDataBlock) prevBlock; dataBlockRead = (HoodieAvroDataBlock) prevBlock;
assertEquals("Read records size should be equal to the written records size", copyOfRecords1.size(), assertEquals(copyOfRecords1.size(), dataBlockRead.getRecords().size(),
dataBlockRead.getRecords().size()); "Read records size should be equal to the written records size");
assertEquals("Both records lists should be the same. (ordering guaranteed)", copyOfRecords1, assertEquals(copyOfRecords1, dataBlockRead.getRecords(),
dataBlockRead.getRecords()); "Both records lists should be the same. (ordering guaranteed)");
assertFalse(reader.hasPrev()); assertFalse(reader.hasPrev());
reader.close(); reader.close();
} }
@Test @ParameterizedTest
public void testAppendAndReadOnCorruptedLogInReverse() throws IOException, URISyntaxException, InterruptedException { @ValueSource(booleans = {true, false})
public void testAppendAndReadOnCorruptedLogInReverse(boolean readBlocksLazily)
throws IOException, URISyntaxException, InterruptedException {
Writer writer = Writer writer =
HoodieLogFormat.newWriterBuilder().onParentPath(partitionPath).withFileExtension(HoodieLogFile.DELTA_EXTENSION) HoodieLogFormat.newWriterBuilder().onParentPath(partitionPath).withFileExtension(HoodieLogFile.DELTA_EXTENSION)
.withFileId("test-fileid1").overBaseCommit("100").withFs(fs).build(); .withFileId("test-fileid1").overBaseCommit("100").withFs(fs).build();
@@ -1323,22 +1327,21 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness {
HoodieLogFileReader reader = HoodieLogFileReader reader =
new HoodieLogFileReader(fs, writer.getLogFile(), schema, bufferSize, readBlocksLazily, true); new HoodieLogFileReader(fs, writer.getLogFile(), schema, bufferSize, readBlocksLazily, true);
assertTrue("Last block should be available", reader.hasPrev()); assertTrue(reader.hasPrev(), "Last block should be available");
HoodieLogBlock block = reader.prev(); HoodieLogBlock block = reader.prev();
assertTrue("Last block should be datablock", block instanceof HoodieAvroDataBlock); assertTrue(block instanceof HoodieAvroDataBlock, "Last block should be datablock");
assertTrue("Last block should be available", reader.hasPrev()); assertTrue(reader.hasPrev(), "Last block should be available");
try { assertThrows(CorruptedLogFileException.class, () -> {
reader.prev(); reader.prev();
} catch (CorruptedLogFileException e) { });
e.printStackTrace();
// We should have corrupted block
}
reader.close(); reader.close();
} }
@Test @ParameterizedTest
public void testBasicAppendAndTraverseInReverse() throws IOException, URISyntaxException, InterruptedException { @ValueSource(booleans = {true, false})
public void testBasicAppendAndTraverseInReverse(boolean readBlocksLazily)
throws IOException, URISyntaxException, InterruptedException {
Writer writer = Writer writer =
HoodieLogFormat.newWriterBuilder().onParentPath(partitionPath).withFileExtension(HoodieLogFile.DELTA_EXTENSION) HoodieLogFormat.newWriterBuilder().onParentPath(partitionPath).withFileExtension(HoodieLogFile.DELTA_EXTENSION)
.withFileId("test-fileid1").overBaseCommit("100").withFs(fs).build(); .withFileId("test-fileid1").overBaseCommit("100").withFs(fs).build();
@@ -1373,20 +1376,20 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness {
HoodieLogFileReader reader = new HoodieLogFileReader(fs, writer.getLogFile(), SchemaTestUtil.getSimpleSchema(), HoodieLogFileReader reader = new HoodieLogFileReader(fs, writer.getLogFile(), SchemaTestUtil.getSimpleSchema(),
bufferSize, readBlocksLazily, true); bufferSize, readBlocksLazily, true);
assertTrue("Third block should be available", reader.hasPrev()); assertTrue(reader.hasPrev(), "Third block should be available");
reader.moveToPrev(); reader.moveToPrev();
assertTrue("Second block should be available", reader.hasPrev()); assertTrue(reader.hasPrev(), "Second block should be available");
reader.moveToPrev(); reader.moveToPrev();
// After moving twice, this last reader.prev() should read the First block written // After moving twice, this last reader.prev() should read the First block written
assertTrue("First block should be available", reader.hasPrev()); assertTrue(reader.hasPrev(), "First block should be available");
HoodieLogBlock prevBlock = reader.prev(); HoodieLogBlock prevBlock = reader.prev();
HoodieAvroDataBlock dataBlockRead = (HoodieAvroDataBlock) prevBlock; HoodieAvroDataBlock dataBlockRead = (HoodieAvroDataBlock) prevBlock;
assertEquals("Read records size should be equal to the written records size", copyOfRecords1.size(), assertEquals(copyOfRecords1.size(), dataBlockRead.getRecords().size(),
dataBlockRead.getRecords().size()); "Read records size should be equal to the written records size");
assertEquals("Both records lists should be the same. (ordering guaranteed)", copyOfRecords1, assertEquals(copyOfRecords1, dataBlockRead.getRecords(),
dataBlockRead.getRecords()); "Both records lists should be the same. (ordering guaranteed)");
assertFalse(reader.hasPrev()); assertFalse(reader.hasPrev());
reader.close(); reader.close();
@@ -1400,14 +1403,14 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness {
Schema schema = getSimpleSchema(); Schema schema = getSimpleSchema();
List<IndexedRecord> records = SchemaTestUtil.generateTestRecords(0, 100); List<IndexedRecord> records = SchemaTestUtil.generateTestRecords(0, 100);
List<IndexedRecord> recordsCopy = new ArrayList<>(records); List<IndexedRecord> recordsCopy = new ArrayList<>(records);
assertEquals(records.size(), 100); assertEquals(100, records.size());
assertEquals(recordsCopy.size(), 100); assertEquals(100, recordsCopy.size());
HoodieAvroDataBlock dataBlock = new HoodieAvroDataBlock(records, schema); HoodieAvroDataBlock dataBlock = new HoodieAvroDataBlock(records, schema);
byte[] content = dataBlock.getBytes(schema); byte[] content = dataBlock.getBytes(schema);
assertTrue(content.length > 0); assertTrue(content.length > 0);
HoodieLogBlock logBlock = HoodieAvroDataBlock.getBlock(content, schema); HoodieLogBlock logBlock = HoodieAvroDataBlock.getBlock(content, schema);
assertEquals(logBlock.getBlockType(), HoodieLogBlockType.AVRO_DATA_BLOCK); assertEquals(HoodieLogBlockType.AVRO_DATA_BLOCK, logBlock.getBlockType());
List<IndexedRecord> readRecords = ((HoodieAvroDataBlock) logBlock).getRecords(); List<IndexedRecord> readRecords = ((HoodieAvroDataBlock) logBlock).getRecords();
assertEquals(readRecords.size(), recordsCopy.size()); assertEquals(readRecords.size(), recordsCopy.size());
for (int i = 0; i < recordsCopy.size(); ++i) { for (int i = 0; i < recordsCopy.size(); ++i) {
@@ -1416,7 +1419,7 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness {
// Reader schema is optional if it is same as write schema // Reader schema is optional if it is same as write schema
logBlock = HoodieAvroDataBlock.getBlock(content, null); logBlock = HoodieAvroDataBlock.getBlock(content, null);
assertEquals(logBlock.getBlockType(), HoodieLogBlockType.AVRO_DATA_BLOCK); assertEquals(HoodieLogBlockType.AVRO_DATA_BLOCK, logBlock.getBlockType());
readRecords = ((HoodieAvroDataBlock) logBlock).getRecords(); readRecords = ((HoodieAvroDataBlock) logBlock).getRecords();
assertEquals(readRecords.size(), recordsCopy.size()); assertEquals(readRecords.size(), recordsCopy.size());
for (int i = 0; i < recordsCopy.size(); ++i) { for (int i = 0; i < recordsCopy.size(); ++i) {
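The first two assertions in this hunk also flip their arguments: assertEquals(records.size(), 100) passes either way, but on failure it reports expected and actual with their roles swapped, so the migration puts the literal expected value first. For instance:

import static org.junit.jupiter.api.Assertions.assertEquals;

import java.util.Arrays;
import java.util.List;
import org.junit.jupiter.api.Test;

class ExpectedFirstExample {

  @Test
  void expectedComesFirst() {
    List<Integer> records = Arrays.asList(1, 2, 3);
    // With the arguments reversed, a failure would print the hard-coded 3 as
    // the "actual" value; keeping the expected literal first keeps the
    // diagnostic honest.
    assertEquals(3, records.size());
  }
}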

View File

@@ -36,10 +36,10 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.junit.AfterClass; import org.junit.jupiter.api.AfterAll;
import org.junit.Assert; import org.junit.jupiter.api.BeforeAll;
import org.junit.BeforeClass; import org.junit.jupiter.api.Test;
import org.junit.Test; import org.junit.jupiter.api.Timeout;
import java.io.File; import java.io.File;
import java.io.IOException; import java.io.IOException;
@@ -51,6 +51,7 @@ import java.util.UUID;
import java.util.concurrent.TimeoutException; import java.util.concurrent.TimeoutException;
import static org.apache.hudi.common.util.SchemaTestUtil.getSimpleSchema; import static org.apache.hudi.common.util.SchemaTestUtil.getSimpleSchema;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
/** /**
* This class is intentionally using a different way of setting up the MiniDFSCluster and not relying on * This class is intentionally using a different way of setting up the MiniDFSCluster and not relying on
@@ -62,7 +63,7 @@ public class TestHoodieLogFormatAppendFailure {
private static File baseDir; private static File baseDir;
private static MiniDFSCluster cluster; private static MiniDFSCluster cluster;
@BeforeClass @BeforeAll
public static void setUpClass() throws IOException { public static void setUpClass() throws IOException {
// NOTE : The MiniClusterDFS leaves behind the directory under which the cluster was created // NOTE : The MiniClusterDFS leaves behind the directory under which the cluster was created
baseDir = new File("/tmp/" + UUID.randomUUID().toString()); baseDir = new File("/tmp/" + UUID.randomUUID().toString());
@@ -77,14 +78,15 @@ public class TestHoodieLogFormatAppendFailure {
cluster = new MiniDFSCluster.Builder(conf).checkExitOnShutdown(true).numDataNodes(4).build(); cluster = new MiniDFSCluster.Builder(conf).checkExitOnShutdown(true).numDataNodes(4).build();
} }
@AfterClass @AfterAll
public static void tearDownClass() { public static void tearDownClass() {
cluster.shutdown(true); cluster.shutdown(true);
// Force clean up the directory under which the cluster was created // Force clean up the directory under which the cluster was created
FileUtil.fullyDelete(baseDir); FileUtil.fullyDelete(baseDir);
} }
@Test(timeout = 60000) @Test
@Timeout(60)
public void testFailedToGetAppendStreamFromHDFSNameNode() public void testFailedToGetAppendStreamFromHDFSNameNode()
throws IOException, URISyntaxException, InterruptedException, TimeoutException { throws IOException, URISyntaxException, InterruptedException, TimeoutException {
@@ -137,7 +139,7 @@ public class TestHoodieLogFormatAppendFailure {
.withFileExtension(HoodieArchivedLogFile.ARCHIVE_EXTENSION).withFileId("commits.archive") .withFileExtension(HoodieArchivedLogFile.ARCHIVE_EXTENSION).withFileId("commits.archive")
.overBaseCommit("").withFs(fs).build(); .overBaseCommit("").withFs(fs).build();
// The log version should be different for this new writer // The log version should be different for this new writer
Assert.assertNotEquals(writer.getLogFile().getLogVersion(), logFileVersion); assertNotEquals(writer.getLogFile().getLogVersion(), logFileVersion);
} }
} }
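JUnit 5 drops the timeout attribute from @Test; the replacement used above is the separate @Timeout annotation, which defaults to seconds and accepts an explicit unit (available in JUnit Jupiter 5.5+). A brief sketch:

import java.util.concurrent.TimeUnit;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;

class TimeoutExample {

  @Test
  @Timeout(60) // seconds; equivalent to JUnit 4's @Test(timeout = 60000)
  void finishesWithinAMinute() throws InterruptedException {
    Thread.sleep(10); // stand-in for the real work
  }

  @Test
  @Timeout(value = 500, unit = TimeUnit.MILLISECONDS) // finer-grained variant
  void finishesQuickly() {
  }
}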

View File

@@ -18,10 +18,10 @@
package org.apache.hudi.common.table.log; package org.apache.hudi.common.table.log;
import org.junit.Test; import org.junit.jupiter.api.Test;
import static org.junit.Assert.assertFalse; import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.Assert.assertTrue; import static org.junit.jupiter.api.Assertions.assertTrue;
/** /**
* Tests HUDI log format version {@link HoodieLogFormatVersion}. * Tests HUDI log format version {@link HoodieLogFormatVersion}.

View File

@@ -18,18 +18,17 @@
package org.apache.hudi.common.table.timeline; package org.apache.hudi.common.table.timeline;
import org.apache.hadoop.fs.Path;
import org.apache.hudi.common.HoodieCommonTestHarness;
import org.apache.hudi.common.model.HoodieTestUtils; import org.apache.hudi.common.model.HoodieTestUtils;
import org.apache.hudi.common.table.HoodieTableMetaClient; import org.apache.hudi.common.table.HoodieTableMetaClient;
import org.apache.hudi.common.table.timeline.HoodieInstant.State; import org.apache.hudi.common.table.timeline.HoodieInstant.State;
import org.apache.hudi.common.table.timeline.versioning.TimelineLayoutVersion; import org.apache.hudi.common.table.timeline.versioning.TimelineLayoutVersion;
import org.apache.hudi.common.testutils.HoodieCommonTestHarness;
import org.apache.hudi.common.util.CollectionUtils; import org.apache.hudi.common.util.CollectionUtils;
import org.apache.hudi.common.util.Option; import org.apache.hudi.common.util.Option;
import org.junit.Before;
import org.junit.Rule; import org.apache.hadoop.fs.Path;
import org.junit.Test; import org.junit.jupiter.api.BeforeEach;
import org.junit.rules.ExpectedException; import org.junit.jupiter.api.Test;
import java.io.IOException; import java.io.IOException;
import java.util.ArrayList; import java.util.ArrayList;
@@ -44,9 +43,9 @@ import java.util.stream.Collectors;
import java.util.stream.Stream; import java.util.stream.Stream;
import static org.apache.hudi.common.table.timeline.versioning.TimelineLayoutVersion.VERSION_0; import static org.apache.hudi.common.table.timeline.versioning.TimelineLayoutVersion.VERSION_0;
import static org.junit.Assert.assertEquals; import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.Assert.assertFalse; import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.Assert.assertTrue; import static org.junit.jupiter.api.Assertions.assertTrue;
/** /**
* Tests {@link HoodieActiveTimeline}. * Tests {@link HoodieActiveTimeline}.
@@ -54,10 +53,8 @@ import static org.junit.Assert.assertTrue;
public class TestHoodieActiveTimeline extends HoodieCommonTestHarness { public class TestHoodieActiveTimeline extends HoodieCommonTestHarness {
private HoodieActiveTimeline timeline; private HoodieActiveTimeline timeline;
@Rule
public final ExpectedException exception = ExpectedException.none();
@Before @BeforeEach
public void setUp() throws Exception { public void setUp() throws Exception {
initMetaClient(); initMetaClient();
} }
@@ -95,7 +92,7 @@ public class TestHoodieActiveTimeline extends HoodieCommonTestHarness {
timeline.createNewInstant(instant5); timeline.createNewInstant(instant5);
timeline = timeline.reload(); timeline = timeline.reload();
assertEquals("Total instants should be 5", 5, timeline.countInstants()); assertEquals(5, timeline.countInstants(), "Total instants should be 5");
HoodieTestUtils.assertStreamEquals("Check the instants stream", HoodieTestUtils.assertStreamEquals("Check the instants stream",
Stream.of(instant1Complete, instant2Complete, instant3Complete, instant4Complete, instant5), Stream.of(instant1Complete, instant2Complete, instant3Complete, instant4Complete, instant5),
timeline.getInstants()); timeline.getInstants());
@@ -124,7 +121,7 @@ public class TestHoodieActiveTimeline extends HoodieCommonTestHarness {
// Ensure aux file is present // Ensure aux file is present
assertTrue(metaClient.getFs().exists(new Path(metaClient.getMetaAuxiliaryPath(), instant6.getFileName()))); assertTrue(metaClient.getFs().exists(new Path(metaClient.getMetaAuxiliaryPath(), instant6.getFileName())));
// Read 5 bytes // Read 5 bytes
assertEquals(timeline.readCompactionPlanAsBytes(instant6).get().length, 5); assertEquals(5, timeline.readCompactionPlanAsBytes(instant6).get().length);
// Delete auxiliary file to mimic future release where we stop writing to aux // Delete auxiliary file to mimic future release where we stop writing to aux
metaClient.getFs().delete(new Path(metaClient.getMetaAuxiliaryPath(), instant6.getFileName())); metaClient.getFs().delete(new Path(metaClient.getMetaAuxiliaryPath(), instant6.getFileName()));
@@ -133,19 +130,19 @@ public class TestHoodieActiveTimeline extends HoodieCommonTestHarness {
assertFalse(metaClient.getFs().exists(new Path(metaClient.getMetaAuxiliaryPath(), instant6.getFileName()))); assertFalse(metaClient.getFs().exists(new Path(metaClient.getMetaAuxiliaryPath(), instant6.getFileName())));
// Now read compaction plan again which should not throw exception // Now read compaction plan again which should not throw exception
assertEquals(timeline.readCompactionPlanAsBytes(instant6).get().length, 5); assertEquals(5, timeline.readCompactionPlanAsBytes(instant6).get().length);
} }
@Test @Test
public void testTimelineOperationsBasic() { public void testTimelineOperationsBasic() {
timeline = new HoodieActiveTimeline(metaClient); timeline = new HoodieActiveTimeline(metaClient);
assertTrue(timeline.empty()); assertTrue(timeline.empty());
assertEquals("", 0, timeline.countInstants()); assertEquals(0, timeline.countInstants());
assertEquals("", Option.empty(), timeline.firstInstant()); assertEquals(Option.empty(), timeline.firstInstant());
assertEquals("", Option.empty(), timeline.nthInstant(5)); assertEquals(Option.empty(), timeline.nthInstant(5));
assertEquals("", Option.empty(), timeline.nthInstant(-1)); assertEquals(Option.empty(), timeline.nthInstant(-1));
assertEquals("", Option.empty(), timeline.lastInstant()); assertEquals(Option.empty(), timeline.lastInstant());
assertFalse("", timeline.containsInstant(new HoodieInstant(false, HoodieTimeline.COMMIT_ACTION, "01"))); assertFalse(timeline.containsInstant(new HoodieInstant(false, HoodieTimeline.COMMIT_ACTION, "01")));
} }
@Test @Test
@@ -163,17 +160,17 @@ public class TestHoodieActiveTimeline extends HoodieCommonTestHarness {
.getInstants().map(HoodieInstant::getTimestamp)); .getInstants().map(HoodieInstant::getTimestamp));
assertFalse(timeline.empty()); assertFalse(timeline.empty());
assertFalse(timeline.getCommitTimeline().filterPendingExcludingCompaction().empty()); assertFalse(timeline.getCommitTimeline().filterPendingExcludingCompaction().empty());
assertEquals("", 12, timeline.countInstants()); assertEquals(12, timeline.countInstants());
HoodieTimeline activeCommitTimeline = timeline.getCommitTimeline().filterCompletedInstants(); HoodieTimeline activeCommitTimeline = timeline.getCommitTimeline().filterCompletedInstants();
assertEquals("", 10, activeCommitTimeline.countInstants()); assertEquals(10, activeCommitTimeline.countInstants());
assertEquals("", "01", activeCommitTimeline.firstInstant().get().getTimestamp()); assertEquals("01", activeCommitTimeline.firstInstant().get().getTimestamp());
assertEquals("", "11", activeCommitTimeline.nthInstant(5).get().getTimestamp()); assertEquals("11", activeCommitTimeline.nthInstant(5).get().getTimestamp());
assertEquals("", "19", activeCommitTimeline.lastInstant().get().getTimestamp()); assertEquals("19", activeCommitTimeline.lastInstant().get().getTimestamp());
assertEquals("", "09", activeCommitTimeline.nthFromLastInstant(5).get().getTimestamp()); assertEquals("09", activeCommitTimeline.nthFromLastInstant(5).get().getTimestamp());
assertTrue("", activeCommitTimeline.containsInstant(new HoodieInstant(false, HoodieTimeline.COMMIT_ACTION, "09"))); assertTrue(activeCommitTimeline.containsInstant(new HoodieInstant(false, HoodieTimeline.COMMIT_ACTION, "09")));
assertFalse("", activeCommitTimeline.isBeforeTimelineStarts("02")); assertFalse(activeCommitTimeline.isBeforeTimelineStarts("02"));
assertTrue("", activeCommitTimeline.isBeforeTimelineStarts("00")); assertTrue(activeCommitTimeline.isBeforeTimelineStarts("00"));
} }
@Test @Test
@@ -220,25 +217,25 @@ public class TestHoodieActiveTimeline extends HoodieCommonTestHarness {
@Test @Test
public void testTimelineInstantOperations() { public void testTimelineInstantOperations() {
timeline = new HoodieActiveTimeline(metaClient, true); timeline = new HoodieActiveTimeline(metaClient, true);
assertEquals("No instant present", timeline.countInstants(), 0); assertEquals(0, timeline.countInstants(), "No instant present");
// revertToInflight // revertToInflight
HoodieInstant commit = new HoodieInstant(State.COMPLETED, HoodieTimeline.COMMIT_ACTION, "1"); HoodieInstant commit = new HoodieInstant(State.COMPLETED, HoodieTimeline.COMMIT_ACTION, "1");
timeline.createNewInstant(commit); timeline.createNewInstant(commit);
timeline = timeline.reload(); timeline = timeline.reload();
assertEquals(timeline.countInstants(), 1); assertEquals(1, timeline.countInstants());
assertTrue(timeline.containsInstant(commit)); assertTrue(timeline.containsInstant(commit));
HoodieInstant inflight = timeline.revertToInflight(commit); HoodieInstant inflight = timeline.revertToInflight(commit);
// revert creates the .requested file // revert creates the .requested file
timeline = timeline.reload(); timeline = timeline.reload();
assertEquals(timeline.countInstants(), 1); assertEquals(1, timeline.countInstants());
assertTrue(timeline.containsInstant(inflight)); assertTrue(timeline.containsInstant(inflight));
assertFalse(timeline.containsInstant(commit)); assertFalse(timeline.containsInstant(commit));
// deleteInflight // deleteInflight
timeline.deleteInflight(inflight); timeline.deleteInflight(inflight);
timeline = timeline.reload(); timeline = timeline.reload();
assertEquals(timeline.countInstants(), 1); assertEquals(1, timeline.countInstants());
assertFalse(timeline.containsInstant(inflight)); assertFalse(timeline.containsInstant(inflight));
assertFalse(timeline.containsInstant(commit)); assertFalse(timeline.containsInstant(commit));
@@ -246,10 +243,10 @@ public class TestHoodieActiveTimeline extends HoodieCommonTestHarness {
timeline.createNewInstant(commit); timeline.createNewInstant(commit);
timeline.createNewInstant(inflight); timeline.createNewInstant(inflight);
timeline = timeline.reload(); timeline = timeline.reload();
assertEquals(timeline.countInstants(), 1); assertEquals(1, timeline.countInstants());
timeline.deletePending(inflight); timeline.deletePending(inflight);
timeline = timeline.reload(); timeline = timeline.reload();
assertEquals(timeline.countInstants(), 1); assertEquals(1, timeline.countInstants());
assertFalse(timeline.containsInstant(inflight)); assertFalse(timeline.containsInstant(inflight));
assertTrue(timeline.containsInstant(commit)); assertTrue(timeline.containsInstant(commit));
@@ -257,10 +254,10 @@ public class TestHoodieActiveTimeline extends HoodieCommonTestHarness {
HoodieInstant compaction = new HoodieInstant(State.REQUESTED, HoodieTimeline.COMPACTION_ACTION, "2"); HoodieInstant compaction = new HoodieInstant(State.REQUESTED, HoodieTimeline.COMPACTION_ACTION, "2");
timeline.createNewInstant(compaction); timeline.createNewInstant(compaction);
timeline = timeline.reload(); timeline = timeline.reload();
assertEquals(timeline.countInstants(), 2); assertEquals(2, timeline.countInstants());
timeline.deleteCompactionRequested(compaction); timeline.deleteCompactionRequested(compaction);
timeline = timeline.reload(); timeline = timeline.reload();
assertEquals(timeline.countInstants(), 1); assertEquals(1, timeline.countInstants());
assertFalse(timeline.containsInstant(inflight)); assertFalse(timeline.containsInstant(inflight));
assertFalse(timeline.containsInstant(compaction)); assertFalse(timeline.containsInstant(compaction));
assertTrue(timeline.containsInstant(commit)); assertTrue(timeline.containsInstant(commit));
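
Most of the churn in this file is the assertion argument order: JUnit 4's Assert takes the optional message first, JUnit 5's Assertions takes it last. Because both orders compile when the arguments are Objects, the expected/actual positions deserve a second look during review. A small before/after sketch (AssertionOrderExample is hypothetical):

import static org.junit.jupiter.api.Assertions.assertEquals;

import org.junit.jupiter.api.Test;

class AssertionOrderExample {

  @Test
  void messageMovesToTheEnd() {
    int countInstants = 5; // stand-in for timeline.countInstants()
    // JUnit 4: Assert.assertEquals("Total instants should be 5", 5, countInstants);
    assertEquals(5, countInstants, "Total instants should be 5");
  }
}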

View File

@@ -34,7 +34,7 @@ import org.apache.hudi.common.table.timeline.HoodieTimeline;
import org.apache.hudi.common.table.timeline.TimelineMetadataUtils; import org.apache.hudi.common.table.timeline.TimelineMetadataUtils;
import org.apache.hudi.common.table.view.TableFileSystemView.BaseFileOnlyView; import org.apache.hudi.common.table.view.TableFileSystemView.BaseFileOnlyView;
import org.apache.hudi.common.table.view.TableFileSystemView.SliceView; import org.apache.hudi.common.table.view.TableFileSystemView.SliceView;
import org.apache.hudi.common.testutils.HoodieCommonTestHarnessJunit5; import org.apache.hudi.common.testutils.HoodieCommonTestHarness;
import org.apache.hudi.common.util.CompactionUtils; import org.apache.hudi.common.util.CompactionUtils;
import org.apache.hudi.common.util.Option; import org.apache.hudi.common.util.Option;
import org.apache.hudi.common.util.collection.Pair; import org.apache.hudi.common.util.collection.Pair;
@@ -66,7 +66,7 @@ import static org.junit.jupiter.api.Assertions.assertTrue;
* Tests hoodie table file system view {@link HoodieTableFileSystemView}. * Tests hoodie table file system view {@link HoodieTableFileSystemView}.
*/ */
@SuppressWarnings("ResultOfMethodCallIgnored") @SuppressWarnings("ResultOfMethodCallIgnored")
public class TestHoodieTableFileSystemView extends HoodieCommonTestHarnessJunit5 { public class TestHoodieTableFileSystemView extends HoodieCommonTestHarness {
private static final Logger LOG = LogManager.getLogger(TestHoodieTableFileSystemView.class); private static final Logger LOG = LogManager.getLogger(TestHoodieTableFileSystemView.class);

View File

@@ -23,7 +23,6 @@ import org.apache.hudi.avro.model.HoodieCompactionPlan;
import org.apache.hudi.avro.model.HoodieRestoreMetadata; import org.apache.hudi.avro.model.HoodieRestoreMetadata;
import org.apache.hudi.avro.model.HoodieRollbackMetadata; import org.apache.hudi.avro.model.HoodieRollbackMetadata;
import org.apache.hudi.common.HoodieCleanStat; import org.apache.hudi.common.HoodieCleanStat;
import org.apache.hudi.common.HoodieCommonTestHarness;
import org.apache.hudi.common.HoodieRollbackStat; import org.apache.hudi.common.HoodieRollbackStat;
import org.apache.hudi.common.fs.FSUtils; import org.apache.hudi.common.fs.FSUtils;
import org.apache.hudi.common.model.CompactionOperation; import org.apache.hudi.common.model.CompactionOperation;
@@ -40,6 +39,7 @@ import org.apache.hudi.common.table.timeline.HoodieInstant;
import org.apache.hudi.common.table.timeline.HoodieInstant.State; import org.apache.hudi.common.table.timeline.HoodieInstant.State;
import org.apache.hudi.common.table.timeline.HoodieTimeline; import org.apache.hudi.common.table.timeline.HoodieTimeline;
import org.apache.hudi.common.table.timeline.TimelineMetadataUtils; import org.apache.hudi.common.table.timeline.TimelineMetadataUtils;
import org.apache.hudi.common.testutils.HoodieCommonTestHarness;
import org.apache.hudi.common.util.CleanerUtils; import org.apache.hudi.common.util.CleanerUtils;
import org.apache.hudi.common.util.CollectionUtils; import org.apache.hudi.common.util.CollectionUtils;
import org.apache.hudi.common.util.CompactionUtils; import org.apache.hudi.common.util.CompactionUtils;
@@ -51,13 +51,14 @@ import org.apache.hudi.exception.HoodieException;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.log4j.LogManager; import org.apache.log4j.LogManager;
import org.apache.log4j.Logger; import org.apache.log4j.Logger;
import org.junit.Assert; import org.junit.jupiter.api.BeforeEach;
import org.junit.Before; import org.junit.jupiter.api.Test;
import org.junit.Test;
import java.io.File; import java.io.File;
import java.io.IOException; import java.io.IOException;
import java.nio.charset.StandardCharsets; import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Arrays; import java.util.Arrays;
import java.util.Collections; import java.util.Collections;
@@ -70,6 +71,9 @@ import java.util.stream.Collectors;
import java.util.stream.IntStream; import java.util.stream.IntStream;
import static org.apache.hudi.common.table.timeline.HoodieTimeline.COMPACTION_ACTION; import static org.apache.hudi.common.table.timeline.HoodieTimeline.COMPACTION_ACTION;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
/** /**
* Tests incremental file system view sync. * Tests incremental file system view sync.
@@ -84,18 +88,20 @@ public class TestIncrementalFSViewSync extends HoodieCommonTestHarness {
private final List<String> fileIdsPerPartition = private final List<String> fileIdsPerPartition =
IntStream.range(0, 10).mapToObj(x -> UUID.randomUUID().toString()).collect(Collectors.toList()); IntStream.range(0, 10).mapToObj(x -> UUID.randomUUID().toString()).collect(Collectors.toList());
@Before @BeforeEach
public void init() throws IOException { public void init() throws IOException {
initMetaClient(); initMetaClient();
partitions.forEach(p -> new File(basePath + "/" + p).mkdirs()); for (String p : partitions) {
Files.createDirectories(Paths.get(basePath, p));
}
refreshFsView(); refreshFsView();
} }
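
Swapping File.mkdirs() for Files.createDirectories() also explains why the forEach above became a plain loop: createDirectories throws a checked IOException that a Consumer lambda cannot propagate, and unlike mkdirs() it fails loudly instead of returning false. A self-contained sketch of the idiom (PartitionDirs and the sample partition names are illustrative):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.List;

class PartitionDirs {

  static void create(String basePath, List<String> partitions) throws IOException {
    for (String p : partitions) {
      // throws IOException on failure rather than silently returning false
      Files.createDirectories(Paths.get(basePath, p));
    }
  }

  public static void main(String[] args) throws IOException {
    create(System.getProperty("java.io.tmpdir"), Arrays.asList("2020/05/01", "2020/05/02"));
  }
}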
@Test @Test
public void testEmptyPartitionsAndTimeline() throws IOException { public void testEmptyPartitionsAndTimeline() throws IOException {
SyncableFileSystemView view = getFileSystemView(metaClient); SyncableFileSystemView view = getFileSystemView(metaClient);
Assert.assertFalse(view.getLastInstant().isPresent()); assertFalse(view.getLastInstant().isPresent());
partitions.forEach(p -> Assert.assertEquals(0, view.getLatestFileSlices(p).count())); partitions.forEach(p -> assertEquals(0, view.getLatestFileSlices(p).count()));
} }
@Test @Test
@@ -167,11 +173,11 @@ public class TestIncrementalFSViewSync extends HoodieCommonTestHarness {
Option.of(metadata.toJsonString().getBytes(StandardCharsets.UTF_8))); Option.of(metadata.toJsonString().getBytes(StandardCharsets.UTF_8)));
view.sync(); view.sync();
Assert.assertTrue(view.getLastInstant().isPresent()); assertTrue(view.getLastInstant().isPresent());
Assert.assertEquals("11", view.getLastInstant().get().getTimestamp()); assertEquals("11", view.getLastInstant().get().getTimestamp());
Assert.assertEquals(State.COMPLETED, view.getLastInstant().get().getState()); assertEquals(State.COMPLETED, view.getLastInstant().get().getState());
Assert.assertEquals(HoodieTimeline.COMMIT_ACTION, view.getLastInstant().get().getAction()); assertEquals(HoodieTimeline.COMMIT_ACTION, view.getLastInstant().get().getAction());
partitions.forEach(p -> Assert.assertEquals(0, view.getLatestFileSlices(p).count())); partitions.forEach(p -> assertEquals(0, view.getLatestFileSlices(p).count()));
metaClient.reloadActiveTimeline(); metaClient.reloadActiveTimeline();
SyncableFileSystemView newView = getFileSystemView(metaClient); SyncableFileSystemView newView = getFileSystemView(metaClient);
@@ -316,7 +322,7 @@ public class TestIncrementalFSViewSync extends HoodieCommonTestHarness {
private void testCleans(SyncableFileSystemView view, List<String> newCleanerInstants, private void testCleans(SyncableFileSystemView view, List<String> newCleanerInstants,
Map<String, List<String>> deltaInstantMap, Map<String, List<String>> instantsToFiles, Map<String, List<String>> deltaInstantMap, Map<String, List<String>> instantsToFiles,
List<String> cleanedInstants) { List<String> cleanedInstants) {
Assert.assertEquals(newCleanerInstants.size(), cleanedInstants.size()); assertEquals(newCleanerInstants.size(), cleanedInstants.size());
long exp = partitions.stream().mapToLong(p1 -> view.getAllFileSlices(p1).count()).findAny().getAsLong(); long exp = partitions.stream().mapToLong(p1 -> view.getAllFileSlices(p1).count()).findAny().getAsLong();
LOG.info("Initial File Slices :" + exp); LOG.info("Initial File Slices :" + exp);
for (int idx = 0; idx < newCleanerInstants.size(); idx++) { for (int idx = 0; idx < newCleanerInstants.size(); idx++) {
@@ -330,17 +336,17 @@ public class TestIncrementalFSViewSync extends HoodieCommonTestHarness {
exp -= fileIdsPerPartition.size(); exp -= fileIdsPerPartition.size();
final long expTotalFileSlicesPerPartition = exp; final long expTotalFileSlicesPerPartition = exp;
view.sync(); view.sync();
Assert.assertTrue(view.getLastInstant().isPresent()); assertTrue(view.getLastInstant().isPresent());
Assert.assertEquals(newCleanerInstants.get(idx), view.getLastInstant().get().getTimestamp()); assertEquals(newCleanerInstants.get(idx), view.getLastInstant().get().getTimestamp());
Assert.assertEquals(State.COMPLETED, view.getLastInstant().get().getState()); assertEquals(State.COMPLETED, view.getLastInstant().get().getState());
Assert.assertEquals(HoodieTimeline.CLEAN_ACTION, view.getLastInstant().get().getAction()); assertEquals(HoodieTimeline.CLEAN_ACTION, view.getLastInstant().get().getAction());
partitions.forEach(p -> { partitions.forEach(p -> {
LOG.info("PARTITION : " + p); LOG.info("PARTITION : " + p);
LOG.info("\tFileSlices :" + view.getAllFileSlices(p).collect(Collectors.toList())); LOG.info("\tFileSlices :" + view.getAllFileSlices(p).collect(Collectors.toList()));
}); });
partitions.forEach(p -> Assert.assertEquals(fileIdsPerPartition.size(), view.getLatestFileSlices(p).count())); partitions.forEach(p -> assertEquals(fileIdsPerPartition.size(), view.getLatestFileSlices(p).count()));
partitions.forEach(p -> Assert.assertEquals(expTotalFileSlicesPerPartition, view.getAllFileSlices(p).count())); partitions.forEach(p -> assertEquals(expTotalFileSlicesPerPartition, view.getAllFileSlices(p).count()));
metaClient.reloadActiveTimeline(); metaClient.reloadActiveTimeline();
SyncableFileSystemView newView = getFileSystemView(metaClient); SyncableFileSystemView newView = getFileSystemView(metaClient);
@@ -364,7 +370,7 @@ public class TestIncrementalFSViewSync extends HoodieCommonTestHarness {
private void testRestore(SyncableFileSystemView view, List<String> newRestoreInstants, boolean isDeltaCommit, private void testRestore(SyncableFileSystemView view, List<String> newRestoreInstants, boolean isDeltaCommit,
Map<String, List<String>> instantsToFiles, List<String> rolledBackInstants, String emptyRestoreInstant, Map<String, List<String>> instantsToFiles, List<String> rolledBackInstants, String emptyRestoreInstant,
boolean isRestore) { boolean isRestore) {
Assert.assertEquals(newRestoreInstants.size(), rolledBackInstants.size()); assertEquals(newRestoreInstants.size(), rolledBackInstants.size());
long initialFileSlices = partitions.stream().mapToLong(p -> view.getAllFileSlices(p).count()).findAny().getAsLong(); long initialFileSlices = partitions.stream().mapToLong(p -> view.getAllFileSlices(p).count()).findAny().getAsLong();
IntStream.range(0, newRestoreInstants.size()).forEach(idx -> { IntStream.range(0, newRestoreInstants.size()).forEach(idx -> {
String instant = rolledBackInstants.get(idx); String instant = rolledBackInstants.get(idx);
@@ -373,21 +379,21 @@ public class TestIncrementalFSViewSync extends HoodieCommonTestHarness {
final long expTotalFileSlicesPerPartition = final long expTotalFileSlicesPerPartition =
isDeltaCommit ? initialFileSlices : initialFileSlices - ((idx + 1) * fileIdsPerPartition.size()); isDeltaCommit ? initialFileSlices : initialFileSlices - ((idx + 1) * fileIdsPerPartition.size());
view.sync(); view.sync();
Assert.assertTrue(view.getLastInstant().isPresent()); assertTrue(view.getLastInstant().isPresent());
LOG.info("Last Instant is :" + view.getLastInstant().get()); LOG.info("Last Instant is :" + view.getLastInstant().get());
if (isRestore) { if (isRestore) {
Assert.assertEquals(newRestoreInstants.get(idx), view.getLastInstant().get().getTimestamp()); assertEquals(newRestoreInstants.get(idx), view.getLastInstant().get().getTimestamp());
Assert.assertEquals(HoodieTimeline.RESTORE_ACTION, view.getLastInstant().get().getAction()); assertEquals(HoodieTimeline.RESTORE_ACTION, view.getLastInstant().get().getAction());
} }
Assert.assertEquals(State.COMPLETED, view.getLastInstant().get().getState()); assertEquals(State.COMPLETED, view.getLastInstant().get().getState());
if (HoodieTimeline.compareTimestamps(newRestoreInstants.get(idx), HoodieTimeline.GREATER_THAN_OR_EQUALS, emptyRestoreInstant if (HoodieTimeline.compareTimestamps(newRestoreInstants.get(idx), HoodieTimeline.GREATER_THAN_OR_EQUALS, emptyRestoreInstant
)) { )) {
partitions.forEach(p -> Assert.assertEquals(0, view.getLatestFileSlices(p).count())); partitions.forEach(p -> assertEquals(0, view.getLatestFileSlices(p).count()));
} else { } else {
partitions.forEach(p -> Assert.assertEquals(fileIdsPerPartition.size(), view.getLatestFileSlices(p).count())); partitions.forEach(p -> assertEquals(fileIdsPerPartition.size(), view.getLatestFileSlices(p).count()));
} }
partitions.forEach(p -> Assert.assertEquals(expTotalFileSlicesPerPartition, view.getAllFileSlices(p).count())); partitions.forEach(p -> assertEquals(expTotalFileSlicesPerPartition, view.getAllFileSlices(p).count()));
metaClient.reloadActiveTimeline(); metaClient.reloadActiveTimeline();
SyncableFileSystemView newView = getFileSystemView(metaClient); SyncableFileSystemView newView = getFileSystemView(metaClient);
@@ -504,14 +510,13 @@ public class TestIncrementalFSViewSync extends HoodieCommonTestHarness {
view.sync(); view.sync();
partitions.forEach(p -> { partitions.forEach(p -> {
view.getLatestFileSlices(p).forEach(fs -> { view.getLatestFileSlices(p).forEach(fs -> {
Assert.assertEquals(instantTime, fs.getBaseInstantTime()); assertEquals(instantTime, fs.getBaseInstantTime());
Assert.assertEquals(p, fs.getPartitionPath()); assertEquals(p, fs.getPartitionPath());
Assert.assertFalse(fs.getBaseFile().isPresent()); assertFalse(fs.getBaseFile().isPresent());
}); });
view.getLatestMergedFileSlicesBeforeOrOn(p, instantTime).forEach(fs -> { view.getLatestMergedFileSlicesBeforeOrOn(p, instantTime).forEach(fs -> {
Assert.assertTrue(HoodieTimeline.compareTimestamps(instantTime, HoodieTimeline.GREATER_THAN, fs.getBaseInstantTime())); assertTrue(HoodieTimeline.compareTimestamps(instantTime, HoodieTimeline.GREATER_THAN, fs.getBaseInstantTime()));
Assert.assertEquals(p, fs.getPartitionPath()); assertEquals(p, fs.getPartitionPath());
}); });
}); });
@@ -535,8 +540,8 @@ public class TestIncrementalFSViewSync extends HoodieCommonTestHarness {
ValidationUtils.checkArgument(deleted, "Unable to delete compaction instant."); ValidationUtils.checkArgument(deleted, "Unable to delete compaction instant.");
view.sync(); view.sync();
Assert.assertEquals(newLastInstant, view.getLastInstant().get().getTimestamp()); assertEquals(newLastInstant, view.getLastInstant().get().getTimestamp());
partitions.forEach(p -> view.getLatestFileSlices(p).forEach(fs -> Assert.assertEquals(newBaseInstant, fs.getBaseInstantTime()))); partitions.forEach(p -> view.getLatestFileSlices(p).forEach(fs -> assertEquals(newBaseInstant, fs.getBaseInstantTime())));
} }
/** /**
@@ -609,22 +614,21 @@ public class TestIncrementalFSViewSync extends HoodieCommonTestHarness {
List<String> filePaths = List<String> filePaths =
addInstant(metaClient, instant, deltaCommit, deltaCommit ? baseInstantForDeltaCommit : instant); addInstant(metaClient, instant, deltaCommit, deltaCommit ? baseInstantForDeltaCommit : instant);
view.sync(); view.sync();
Assert.assertTrue(view.getLastInstant().isPresent()); assertTrue(view.getLastInstant().isPresent());
Assert.assertEquals(lastInstant.getTimestamp(), view.getLastInstant().get().getTimestamp()); assertEquals(lastInstant.getTimestamp(), view.getLastInstant().get().getTimestamp());
Assert.assertEquals(State.COMPLETED, view.getLastInstant().get().getState()); assertEquals(State.COMPLETED, view.getLastInstant().get().getState());
Assert.assertEquals("Expected Last=" + lastInstant + ", Found Instants=" + view.getTimeline().getInstants().collect(Collectors.toList()), lastInstant.getAction(), view.getLastInstant().get().getAction()); assertEquals(lastInstant.getAction(), view.getLastInstant().get().getAction(), "Expected Last=" + lastInstant + ", Found Instants=" + view.getTimeline().getInstants().collect(Collectors.toList()));
partitions.forEach(p -> Assert.assertEquals(fileIdsPerPartition.size(), view.getLatestFileSlices(p).count())); partitions.forEach(p -> assertEquals(fileIdsPerPartition.size(), view.getLatestFileSlices(p).count()));
final long expTotalFileSlicesPerPartition = fileIdsPerPartition.size() * multiple; final long expTotalFileSlicesPerPartition = fileIdsPerPartition.size() * multiple;
partitions.forEach(p -> Assert.assertEquals(expTotalFileSlicesPerPartition, view.getAllFileSlices(p).count())); partitions.forEach(p -> assertEquals(expTotalFileSlicesPerPartition, view.getAllFileSlices(p).count()));
if (deltaCommit) { if (deltaCommit) {
partitions.forEach(p -> partitions.forEach(p ->
view.getLatestFileSlices(p).forEach(f -> Assert.assertEquals(baseInstantForDeltaCommit, f.getBaseInstantTime())) view.getLatestFileSlices(p).forEach(f -> assertEquals(baseInstantForDeltaCommit, f.getBaseInstantTime()))
); );
} else { } else {
partitions.forEach(p -> view.getLatestBaseFiles(p).forEach(f -> Assert.assertEquals(instant, f.getCommitTime()))); partitions.forEach(p -> view.getLatestBaseFiles(p).forEach(f -> assertEquals(instant, f.getCommitTime())));
} }
metaClient.reloadActiveTimeline(); metaClient.reloadActiveTimeline();
@@ -649,7 +653,7 @@ public class TestIncrementalFSViewSync extends HoodieCommonTestHarness {
// Timeline check // Timeline check
HoodieTimeline timeline1 = view1.getTimeline(); HoodieTimeline timeline1 = view1.getTimeline();
HoodieTimeline timeline2 = view2.getTimeline(); HoodieTimeline timeline2 = view2.getTimeline();
Assert.assertEquals(view1.getLastInstant(), view2.getLastInstant()); assertEquals(view1.getLastInstant(), view2.getLastInstant());
CollectionUtils.elementsEqual(timeline1.getInstants().iterator(), timeline2.getInstants().iterator()); CollectionUtils.elementsEqual(timeline1.getInstants().iterator(), timeline2.getInstants().iterator());
// View Checks // View Checks
@@ -657,45 +661,45 @@ public class TestIncrementalFSViewSync extends HoodieCommonTestHarness {
.collect(Collectors.toMap(HoodieFileGroup::getFileGroupId, fg -> fg)); .collect(Collectors.toMap(HoodieFileGroup::getFileGroupId, fg -> fg));
Map<HoodieFileGroupId, HoodieFileGroup> fileGroupsMap2 = partitions.stream().flatMap(view2::getAllFileGroups) Map<HoodieFileGroupId, HoodieFileGroup> fileGroupsMap2 = partitions.stream().flatMap(view2::getAllFileGroups)
.collect(Collectors.toMap(HoodieFileGroup::getFileGroupId, fg -> fg)); .collect(Collectors.toMap(HoodieFileGroup::getFileGroupId, fg -> fg));
Assert.assertEquals(fileGroupsMap1.keySet(), fileGroupsMap2.keySet()); assertEquals(fileGroupsMap1.keySet(), fileGroupsMap2.keySet());
long gotSlicesCount = fileGroupsMap1.keySet().stream() long gotSlicesCount = fileGroupsMap1.keySet().stream()
.map(k -> Pair.of(fileGroupsMap1.get(k), fileGroupsMap2.get(k))).mapToLong(e -> { .map(k -> Pair.of(fileGroupsMap1.get(k), fileGroupsMap2.get(k))).mapToLong(e -> {
HoodieFileGroup fg1 = e.getKey(); HoodieFileGroup fg1 = e.getKey();
HoodieFileGroup fg2 = e.getValue(); HoodieFileGroup fg2 = e.getValue();
Assert.assertEquals(fg1.getFileGroupId(), fg2.getFileGroupId()); assertEquals(fg1.getFileGroupId(), fg2.getFileGroupId());
List<FileSlice> slices1 = fg1.getAllRawFileSlices().collect(Collectors.toList()); List<FileSlice> slices1 = fg1.getAllRawFileSlices().collect(Collectors.toList());
List<FileSlice> slices2 = fg2.getAllRawFileSlices().collect(Collectors.toList()); List<FileSlice> slices2 = fg2.getAllRawFileSlices().collect(Collectors.toList());
Assert.assertEquals(slices1.size(), slices2.size()); assertEquals(slices1.size(), slices2.size());
IntStream.range(0, slices1.size()).mapToObj(idx -> Pair.of(slices1.get(idx), slices2.get(idx))) IntStream.range(0, slices1.size()).mapToObj(idx -> Pair.of(slices1.get(idx), slices2.get(idx)))
.forEach(e2 -> { .forEach(e2 -> {
FileSlice slice1 = e2.getKey(); FileSlice slice1 = e2.getKey();
FileSlice slice2 = e2.getValue(); FileSlice slice2 = e2.getValue();
Assert.assertEquals(slice1.getBaseInstantTime(), slice2.getBaseInstantTime()); assertEquals(slice1.getBaseInstantTime(), slice2.getBaseInstantTime());
Assert.assertEquals(slice1.getFileId(), slice2.getFileId()); assertEquals(slice1.getFileId(), slice2.getFileId());
Assert.assertEquals(slice1.getBaseFile().isPresent(), slice2.getBaseFile().isPresent()); assertEquals(slice1.getBaseFile().isPresent(), slice2.getBaseFile().isPresent());
if (slice1.getBaseFile().isPresent()) { if (slice1.getBaseFile().isPresent()) {
HoodieBaseFile df1 = slice1.getBaseFile().get(); HoodieBaseFile df1 = slice1.getBaseFile().get();
HoodieBaseFile df2 = slice2.getBaseFile().get(); HoodieBaseFile df2 = slice2.getBaseFile().get();
Assert.assertEquals(df1.getCommitTime(), df2.getCommitTime()); assertEquals(df1.getCommitTime(), df2.getCommitTime());
Assert.assertEquals(df1.getFileId(), df2.getFileId()); assertEquals(df1.getFileId(), df2.getFileId());
Assert.assertEquals(df1.getFileName(), df2.getFileName()); assertEquals(df1.getFileName(), df2.getFileName());
Assert.assertEquals(Path.getPathWithoutSchemeAndAuthority(new Path(df1.getPath())), assertEquals(Path.getPathWithoutSchemeAndAuthority(new Path(df1.getPath())),
Path.getPathWithoutSchemeAndAuthority(new Path(df2.getPath()))); Path.getPathWithoutSchemeAndAuthority(new Path(df2.getPath())));
} }
List<Path> logPaths1 = slice1.getLogFiles() List<Path> logPaths1 = slice1.getLogFiles()
.map(lf -> Path.getPathWithoutSchemeAndAuthority(lf.getPath())).collect(Collectors.toList()); .map(lf -> Path.getPathWithoutSchemeAndAuthority(lf.getPath())).collect(Collectors.toList());
List<Path> logPaths2 = slice2.getLogFiles() List<Path> logPaths2 = slice2.getLogFiles()
.map(lf -> Path.getPathWithoutSchemeAndAuthority(lf.getPath())).collect(Collectors.toList()); .map(lf -> Path.getPathWithoutSchemeAndAuthority(lf.getPath())).collect(Collectors.toList());
Assert.assertEquals(logPaths1, logPaths2); assertEquals(logPaths1, logPaths2);
}); });
return slices1.size(); return slices1.size();
}).sum(); }).sum();
Assert.assertEquals(expectedTotalFileSlices, gotSlicesCount); assertEquals(expectedTotalFileSlices, gotSlicesCount);
// Pending Compaction Operations Check // Pending Compaction Operations Check
Set<Pair<String, CompactionOperation>> ops1 = view1.getPendingCompactionOperations().collect(Collectors.toSet()); Set<Pair<String, CompactionOperation>> ops1 = view1.getPendingCompactionOperations().collect(Collectors.toSet());
Set<Pair<String, CompactionOperation>> ops2 = view2.getPendingCompactionOperations().collect(Collectors.toSet()); Set<Pair<String, CompactionOperation>> ops2 = view2.getPendingCompactionOperations().collect(Collectors.toSet());
Assert.assertEquals(ops1, ops2); assertEquals(ops1, ops2);
} }
private List<String> addInstant(HoodieTableMetaClient metaClient, String instant, boolean deltaCommit, private List<String> addInstant(HoodieTableMetaClient metaClient, String instant, boolean deltaCommit,

View File

@@ -22,6 +22,7 @@ import org.apache.hudi.common.table.HoodieTableMetaClient;
import org.apache.hudi.common.table.timeline.HoodieTimeline; import org.apache.hudi.common.table.timeline.HoodieTimeline;
import java.io.IOException; import java.io.IOException;
import java.nio.file.Files;
/** /**
* Tests rocks db based incremental file system view sync {@link RocksDbBasedFileSystemView}. * Tests rocks db based incremental file system view sync {@link RocksDbBasedFileSystemView}.
@@ -31,7 +32,8 @@ public class TestRocksDBBasedIncrementalFSViewSync extends TestIncrementalFSView
@Override @Override
protected SyncableFileSystemView getFileSystemView(HoodieTableMetaClient metaClient, HoodieTimeline timeline) protected SyncableFileSystemView getFileSystemView(HoodieTableMetaClient metaClient, HoodieTimeline timeline)
throws IOException { throws IOException {
String subdirPath = Files.createTempDirectory(tempDir, null).toAbsolutePath().toString();
return new RocksDbBasedFileSystemView(metaClient, timeline, FileSystemViewStorageConfig.newBuilder() return new RocksDbBasedFileSystemView(metaClient, timeline, FileSystemViewStorageConfig.newBuilder()
.withRocksDBPath(folder.newFolder().getAbsolutePath()).withIncrementalTimelineSync(true).build()); .withRocksDBPath(subdirPath).withIncrementalTimelineSync(true).build());
} }
} }
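
With the TemporaryFolder rule gone, per-test scratch directories come from Files.createTempDirectory rooted under the inherited @TempDir path, which JUnit 5 deletes wholesale after each test. The idiom in isolation (TempDirExample is a hypothetical name):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;

class TempDirExample {

  @TempDir
  Path tempDir; // injected by JUnit 5, cleaned up automatically

  @Test
  void createsIsolatedSubdirectory() throws IOException {
    // a null prefix mirrors the call in the diff above
    String subdirPath = Files.createTempDirectory(tempDir, null).toAbsolutePath().toString();
    // hand subdirPath to code needing scratch space, e.g. a RocksDB path
  }
}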

View File

@@ -16,7 +16,7 @@
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hudi.common; package org.apache.hudi.common.testutils;
import org.apache.hudi.common.model.HoodieTableType; import org.apache.hudi.common.model.HoodieTableType;
import org.apache.hudi.common.model.HoodieTestUtils; import org.apache.hudi.common.model.HoodieTestUtils;
@@ -25,8 +25,7 @@ import org.apache.hudi.common.table.timeline.HoodieTimeline;
import org.apache.hudi.common.table.view.HoodieTableFileSystemView; import org.apache.hudi.common.table.view.HoodieTableFileSystemView;
import org.apache.hudi.common.table.view.SyncableFileSystemView; import org.apache.hudi.common.table.view.SyncableFileSystemView;
import org.junit.Rule; import org.junit.jupiter.api.io.TempDir;
import org.junit.rules.TemporaryFolder;
import java.io.IOException; import java.io.IOException;
@@ -36,17 +35,15 @@ import java.io.IOException;
public class HoodieCommonTestHarness { public class HoodieCommonTestHarness {
protected String basePath = null; protected String basePath = null;
@Rule
public TemporaryFolder folder = new TemporaryFolder();
protected transient HoodieTableMetaClient metaClient; protected transient HoodieTableMetaClient metaClient;
@TempDir
public java.nio.file.Path tempDir;
/** /**
* Initializes basePath. * Initializes basePath.
*/ */
protected void initPath() { protected void initPath() {
this.basePath = folder.getRoot().getAbsolutePath(); this.basePath = tempDir.toAbsolutePath().toString();
} }
/** /**
@@ -56,7 +53,7 @@ public class HoodieCommonTestHarness {
* @throws IOException * @throws IOException
*/ */
protected void initMetaClient() throws IOException { protected void initMetaClient() throws IOException {
metaClient = HoodieTestUtils.init(folder.getRoot().getAbsolutePath(), getTableType()); metaClient = HoodieTestUtils.init(tempDir.toAbsolutePath().toString(), getTableType());
basePath = metaClient.getBasePath(); basePath = metaClient.getBasePath();
} }
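
The harness itself swaps JUnit 4's TemporaryFolder rule for @TempDir field injection; the field is inherited by every test subclass, and the framework creates and deletes the directory around each test without any rule plumbing. A hedged sketch of the equivalence (ExampleHarness is illustrative):

import java.nio.file.Path;

import org.junit.jupiter.api.io.TempDir;

public abstract class ExampleHarness {

  // JUnit 4 equivalent:
  //   @Rule
  //   public TemporaryFolder folder = new TemporaryFolder();
  //   basePath = folder.getRoot().getAbsolutePath();
  @TempDir
  public Path tempDir;

  protected String initPath() {
    return tempDir.toAbsolutePath().toString();
  }
}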

View File

@@ -1,52 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hudi.common.testutils;
import org.apache.hudi.common.model.HoodieTestUtils;
import org.apache.hudi.common.table.HoodieTableMetaClient;
import org.junit.jupiter.api.io.TempDir;
import java.io.IOException;
/**
* The JUnit 5 version of {@link org.apache.hudi.common.HoodieCommonTestHarness}.
* <p>
* To incrementally migrate test classes.
*/
public class HoodieCommonTestHarnessJunit5 extends org.apache.hudi.common.HoodieCommonTestHarness {
@TempDir
public java.nio.file.Path tempDir;
/**
* Initializes basePath.
*/
protected void initPath() {
this.basePath = tempDir.toAbsolutePath().toString();
}
/**
* Initializes an instance of {@link HoodieTableMetaClient} with a special table type specified by {@code getTableType()}.
*/
protected void initMetaClient() throws IOException {
metaClient = HoodieTestUtils.init(tempDir.toAbsolutePath().toString(), getTableType());
basePath = metaClient.getBasePath();
}
}

View File

@@ -34,7 +34,6 @@ import org.apache.hudi.common.util.collection.Pair;
import org.apache.hudi.exception.HoodieIOException; import org.apache.hudi.exception.HoodieIOException;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.junit.Assert;
import java.io.IOException; import java.io.IOException;
import java.util.Arrays; import java.util.Arrays;
@@ -49,6 +48,8 @@ import java.util.stream.Stream;
import static org.apache.hudi.common.model.HoodieTestUtils.DEFAULT_PARTITION_PATHS; import static org.apache.hudi.common.model.HoodieTestUtils.DEFAULT_PARTITION_PATHS;
import static org.apache.hudi.common.table.timeline.HoodieTimeline.COMPACTION_ACTION; import static org.apache.hudi.common.table.timeline.HoodieTimeline.COMPACTION_ACTION;
import static org.apache.hudi.common.table.timeline.HoodieTimeline.DELTA_COMMIT_ACTION; import static org.apache.hudi.common.table.timeline.HoodieTimeline.DELTA_COMMIT_ACTION;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNull;
/** /**
* The utility class to support testing compaction. * The utility class to support testing compaction.
@@ -95,10 +96,11 @@ public class CompactionTestUtils {
List<HoodieCompactionPlan> plans = CollectionUtils.createImmutableList(plan1, plan2, plan3, plan4); List<HoodieCompactionPlan> plans = CollectionUtils.createImmutableList(plan1, plan2, plan3, plan4);
IntStream.range(0, 4).boxed().forEach(idx -> { IntStream.range(0, 4).boxed().forEach(idx -> {
if (expectedNumEntries.get(idx) > 0) { if (expectedNumEntries.get(idx) > 0) {
Assert.assertEquals("check if plan " + idx + " has exp entries", expectedNumEntries.get(idx).longValue(), assertEquals(expectedNumEntries.get(idx).longValue(),
plans.get(idx).getOperations().size()); plans.get(idx).getOperations().size(),
"check if plan " + idx + " has exp entries");
} else { } else {
Assert.assertNull("Plan " + idx + " has null ops", plans.get(idx).getOperations()); assertNull(plans.get(idx).getOperations(), "Plan " + idx + " has null ops");
} }
}); });
@@ -110,7 +112,7 @@ public class CompactionTestUtils {
generateExpectedCompactionOperations(Arrays.asList(plan1, plan2, plan3, plan4), baseInstantsToCompaction); generateExpectedCompactionOperations(Arrays.asList(plan1, plan2, plan3, plan4), baseInstantsToCompaction);
// Ensure Compaction operations are fine. // Ensure Compaction operations are fine.
Assert.assertEquals(expPendingCompactionMap, pendingCompactionMap); assertEquals(expPendingCompactionMap, pendingCompactionMap);
return expPendingCompactionMap; return expPendingCompactionMap;
} }
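
Since the reordered messages here are built by string concatenation inside a loop, it is worth noting that the JUnit 5 assertions also accept a Supplier<String>, deferring message construction to the failure path. A minimal sketch (LazyMessageExample is illustrative):

import static org.junit.jupiter.api.Assertions.assertEquals;

class LazyMessageExample {

  static void checkPlanSize(int idx, long expected, long actual) {
    // the lambda is evaluated only if the assertion fails
    assertEquals(expected, actual, () -> "check if plan " + idx + " has exp entries");
  }
}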

View File

@@ -18,12 +18,13 @@
package org.apache.hudi.common.util; package org.apache.hudi.common.util;
import org.junit.Assert; import org.junit.jupiter.api.Test;
import org.junit.Test;
import java.nio.charset.StandardCharsets; import java.nio.charset.StandardCharsets;
import java.util.UUID; import java.util.UUID;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
public class TestBase64CodecUtil { public class TestBase64CodecUtil {
@Test @Test
@@ -39,7 +40,7 @@ public class TestBase64CodecUtil {
String encodeData = Base64CodecUtil.encode(originalData); String encodeData = Base64CodecUtil.encode(originalData);
byte[] decodeData = Base64CodecUtil.decode(encodeData); byte[] decodeData = Base64CodecUtil.decode(encodeData);
Assert.assertArrayEquals(originalData, decodeData); assertArrayEquals(originalData, decodeData);
} }
} }

View File

@@ -20,7 +20,6 @@ package org.apache.hudi.common.util;
import org.apache.hudi.avro.model.HoodieCompactionOperation; import org.apache.hudi.avro.model.HoodieCompactionOperation;
import org.apache.hudi.avro.model.HoodieCompactionPlan; import org.apache.hudi.avro.model.HoodieCompactionPlan;
import org.apache.hudi.common.HoodieCommonTestHarness;
import org.apache.hudi.common.fs.FSUtils; import org.apache.hudi.common.fs.FSUtils;
import org.apache.hudi.common.model.FileSlice; import org.apache.hudi.common.model.FileSlice;
import org.apache.hudi.common.model.HoodieBaseFile; import org.apache.hudi.common.model.HoodieBaseFile;
@@ -29,13 +28,13 @@ import org.apache.hudi.common.model.HoodieLogFile;
import org.apache.hudi.common.model.HoodieTableType; import org.apache.hudi.common.model.HoodieTableType;
import org.apache.hudi.common.table.HoodieTableMetaClient; import org.apache.hudi.common.table.HoodieTableMetaClient;
import org.apache.hudi.common.table.timeline.versioning.compaction.CompactionPlanMigrator; import org.apache.hudi.common.table.timeline.versioning.compaction.CompactionPlanMigrator;
import org.apache.hudi.common.testutils.HoodieCommonTestHarness;
import org.apache.hudi.common.util.CompactionTestUtils.TestHoodieBaseFile; import org.apache.hudi.common.util.CompactionTestUtils.TestHoodieBaseFile;
import org.apache.hudi.common.util.collection.Pair; import org.apache.hudi.common.util.collection.Pair;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.junit.Assert; import org.junit.jupiter.api.BeforeEach;
import org.junit.Before; import org.junit.jupiter.api.Test;
import org.junit.Test;
import java.io.IOException; import java.io.IOException;
import java.util.Arrays; import java.util.Arrays;
@@ -52,6 +51,9 @@ import static org.apache.hudi.common.util.CompactionTestUtils.scheduleCompaction
import static org.apache.hudi.common.util.CompactionTestUtils.setupAndValidateCompactionOperations; import static org.apache.hudi.common.util.CompactionTestUtils.setupAndValidateCompactionOperations;
import static org.apache.hudi.common.util.CompactionUtils.COMPACTION_METADATA_VERSION_1; import static org.apache.hudi.common.util.CompactionUtils.COMPACTION_METADATA_VERSION_1;
import static org.apache.hudi.common.util.CompactionUtils.LATEST_COMPACTION_METADATA_VERSION; import static org.apache.hudi.common.util.CompactionUtils.LATEST_COMPACTION_METADATA_VERSION;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
/** /**
* The utility class for testing compaction. * The utility class for testing compaction.
@@ -68,7 +70,7 @@ public class TestCompactionUtils extends HoodieCommonTestHarness {
}; };
private Function<Pair<String, FileSlice>, Map<String, Double>> metricsCaptureFn = (partitionFileSlice) -> METRICS; private Function<Pair<String, FileSlice>, Map<String, Double>> metricsCaptureFn = (partitionFileSlice) -> METRICS;
@Before @BeforeEach
public void init() throws IOException { public void init() throws IOException {
initMetaClient(); initMetaClient();
} }
@@ -81,13 +83,13 @@ public class TestCompactionUtils extends HoodieCommonTestHarness {
CompactionPlanMigrator migrator = new CompactionPlanMigrator(metaClient); CompactionPlanMigrator migrator = new CompactionPlanMigrator(metaClient);
HoodieCompactionPlan plan = inputAndPlan.getRight(); HoodieCompactionPlan plan = inputAndPlan.getRight();
System.out.println("Plan=" + plan.getOperations()); System.out.println("Plan=" + plan.getOperations());
Assert.assertEquals(LATEST_COMPACTION_METADATA_VERSION, plan.getVersion()); assertEquals(LATEST_COMPACTION_METADATA_VERSION, plan.getVersion());
HoodieCompactionPlan oldPlan = migrator.migrateToVersion(plan, plan.getVersion(), COMPACTION_METADATA_VERSION_1); HoodieCompactionPlan oldPlan = migrator.migrateToVersion(plan, plan.getVersion(), COMPACTION_METADATA_VERSION_1);
// Check with older version of compaction plan // Check with older version of compaction plan
Assert.assertEquals(COMPACTION_METADATA_VERSION_1, oldPlan.getVersion()); assertEquals(COMPACTION_METADATA_VERSION_1, oldPlan.getVersion());
testFileSlicesCompactionPlanEquality(inputAndPlan.getKey(), oldPlan); testFileSlicesCompactionPlanEquality(inputAndPlan.getKey(), oldPlan);
HoodieCompactionPlan newPlan = migrator.upgradeToLatest(plan, plan.getVersion()); HoodieCompactionPlan newPlan = migrator.upgradeToLatest(plan, plan.getVersion());
Assert.assertEquals(LATEST_COMPACTION_METADATA_VERSION, newPlan.getVersion()); assertEquals(LATEST_COMPACTION_METADATA_VERSION, newPlan.getVersion());
testFileSlicesCompactionPlanEquality(inputAndPlan.getKey(), newPlan); testFileSlicesCompactionPlanEquality(inputAndPlan.getKey(), newPlan);
HoodieCompactionPlan latestPlan = migrator.migrateToVersion(oldPlan, oldPlan.getVersion(), newPlan.getVersion()); HoodieCompactionPlan latestPlan = migrator.migrateToVersion(oldPlan, oldPlan.getVersion(), newPlan.getVersion());
testFileSlicesCompactionPlanEquality(inputAndPlan.getKey(), latestPlan); testFileSlicesCompactionPlanEquality(inputAndPlan.getKey(), latestPlan);
@@ -172,11 +174,11 @@ public class TestCompactionUtils extends HoodieCommonTestHarness {
.map(CompactionUtils::buildCompactionOperation) .map(CompactionUtils::buildCompactionOperation)
.map(CompactionUtils::buildHoodieCompactionOperation) .map(CompactionUtils::buildHoodieCompactionOperation)
.collect(Collectors.toList()); .collect(Collectors.toList());
Assert.assertTrue("Transformation did get tested", originalOps.size() > 0); assertTrue(originalOps.size() > 0, "Transformation did get tested");
Assert.assertEquals("All fields set correctly in transformations", originalOps, regeneratedOps); assertEquals(originalOps, regeneratedOps, "All fields set correctly in transformations");
} }
@Test(expected = IllegalStateException.class) @Test
public void testGetAllPendingCompactionOperationsWithDupFileId() throws IOException { public void testGetAllPendingCompactionOperationsWithDupFileId() throws IOException {
// Case where there is duplicate fileIds in compaction requests // Case where there is duplicate fileIds in compaction requests
HoodieCompactionPlan plan1 = createCompactionPlan(metaClient, "000", "001", 10, true, true); HoodieCompactionPlan plan1 = createCompactionPlan(metaClient, "000", "001", 10, true, true);
@@ -187,8 +189,9 @@ public class TestCompactionUtils extends HoodieCommonTestHarness {
plan1.getOperations().get(0).setDataFilePath("bla"); plan1.getOperations().get(0).setDataFilePath("bla");
scheduleCompaction(metaClient, "005", plan1); scheduleCompaction(metaClient, "005", plan1);
metaClient = new HoodieTableMetaClient(metaClient.getHadoopConf(), basePath, true); metaClient = new HoodieTableMetaClient(metaClient.getHadoopConf(), basePath, true);
Map<HoodieFileGroupId, Pair<String, HoodieCompactionOperation>> res = assertThrows(IllegalStateException.class, () -> {
CompactionUtils.getAllPendingCompactionOperations(metaClient); CompactionUtils.getAllPendingCompactionOperations(metaClient);
});
} }
@Test @Test
@@ -230,7 +233,7 @@ public class TestCompactionUtils extends HoodieCommonTestHarness {
* @param plan Compaction Plan * @param plan Compaction Plan
*/ */
private void testFileSlicesCompactionPlanEquality(List<Pair<String, FileSlice>> input, HoodieCompactionPlan plan) { private void testFileSlicesCompactionPlanEquality(List<Pair<String, FileSlice>> input, HoodieCompactionPlan plan) {
Assert.assertEquals("All file-slices present", input.size(), plan.getOperations().size()); assertEquals(input.size(), plan.getOperations().size(), "All file-slices present");
IntStream.range(0, input.size()).boxed().forEach(idx -> testFileSliceCompactionOpEquality(input.get(idx).getValue(), IntStream.range(0, input.size()).boxed().forEach(idx -> testFileSliceCompactionOpEquality(input.get(idx).getValue(),
plan.getOperations().get(idx), input.get(idx).getKey(), plan.getVersion())); plan.getOperations().get(idx), input.get(idx).getKey(), plan.getVersion()));
} }
@@ -244,19 +247,19 @@ public class TestCompactionUtils extends HoodieCommonTestHarness {
*/ */
private void testFileSliceCompactionOpEquality(FileSlice slice, HoodieCompactionOperation op, String expPartitionPath, private void testFileSliceCompactionOpEquality(FileSlice slice, HoodieCompactionOperation op, String expPartitionPath,
int version) { int version) {
Assert.assertEquals("Partition path is correct", expPartitionPath, op.getPartitionPath()); assertEquals(expPartitionPath, op.getPartitionPath(), "Partition path is correct");
Assert.assertEquals("Same base-instant", slice.getBaseInstantTime(), op.getBaseInstantTime()); assertEquals(slice.getBaseInstantTime(), op.getBaseInstantTime(), "Same base-instant");
Assert.assertEquals("Same file-id", slice.getFileId(), op.getFileId()); assertEquals(slice.getFileId(), op.getFileId(), "Same file-id");
if (slice.getBaseFile().isPresent()) { if (slice.getBaseFile().isPresent()) {
HoodieBaseFile df = slice.getBaseFile().get(); HoodieBaseFile df = slice.getBaseFile().get();
Assert.assertEquals("Same data-file", version == COMPACTION_METADATA_VERSION_1 ? df.getPath() : df.getFileName(), assertEquals(version == COMPACTION_METADATA_VERSION_1 ? df.getPath() : df.getFileName(),
op.getDataFilePath()); op.getDataFilePath(), "Same data-file");
} }
List<String> paths = slice.getLogFiles().map(l -> l.getPath().toString()).collect(Collectors.toList()); List<String> paths = slice.getLogFiles().map(l -> l.getPath().toString()).collect(Collectors.toList());
IntStream.range(0, paths.size()).boxed().forEach(idx -> Assert.assertEquals("Log File Index " + idx, IntStream.range(0, paths.size()).boxed().forEach(idx -> assertEquals(
version == COMPACTION_METADATA_VERSION_1 ? paths.get(idx) : new Path(paths.get(idx)).getName(), version == COMPACTION_METADATA_VERSION_1 ? paths.get(idx) : new Path(paths.get(idx)).getName(),
op.getDeltaFilePaths().get(idx))); op.getDeltaFilePaths().get(idx), "Log File Index " + idx));
Assert.assertEquals("Metrics set", METRICS, op.getMetrics()); assertEquals(METRICS, op.getMetrics(), "Metrics set");
} }
@Override @Override

View File

@@ -25,17 +25,17 @@ import org.apache.hudi.common.minicluster.HdfsTestService;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 import java.io.IOException;
 import java.io.PrintStream;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 /**
  * Tests basic functionality of {@link DFSPropertiesConfiguration}.
@@ -47,7 +47,7 @@ public class TestDFSPropertiesConfiguration {
   private static MiniDFSCluster dfsCluster;
   private static DistributedFileSystem dfs;
-  @BeforeClass
+  @BeforeAll
   public static void initClass() throws Exception {
     hdfsTestService = new HdfsTestService();
     dfsCluster = hdfsTestService.start(true);
@@ -72,7 +72,7 @@ public class TestDFSPropertiesConfiguration {
     writePropertiesFile(filePath, new String[] {"double.prop=838.3", "include = t4.props"});
   }
-  @AfterClass
+  @AfterAll
   public static void cleanupClass() throws Exception {
     if (hdfsTestService != null) {
       hdfsTestService.stop();
@@ -93,12 +93,9 @@ public class TestDFSPropertiesConfiguration {
     DFSPropertiesConfiguration cfg = new DFSPropertiesConfiguration(dfs, new Path(dfsBasePath + "/t1.props"));
     TypedProperties props = cfg.getConfig();
     assertEquals(5, props.size());
-    try {
-      props.getString("invalid.key");
-      fail("Should error out here.");
-    } catch (IllegalArgumentException iae) {
-      // ignore
-    }
+    assertThrows(IllegalArgumentException.class, () -> {
+      props.getString("invalid.key");
+    }, "Should error out here.");
     assertEquals(123, props.getInteger("int.prop"));
     assertEquals(113.4, props.getDouble("double.prop"), 0.001);
@@ -129,12 +126,8 @@ public class TestDFSPropertiesConfiguration {
     assertTrue(props.getBoolean("boolean.prop"));
     assertEquals("t3.value", props.getString("string.prop"));
     assertEquals(1354354354, props.getLong("long.prop"));
-    try {
-      new DFSPropertiesConfiguration(dfs, new Path(dfsBasePath + "/t4.props"));
-      fail("Should error out on a self-included file.");
-    } catch (IllegalStateException ise) {
-      // ignore
-    }
+    assertThrows(IllegalStateException.class, () -> {
+      new DFSPropertiesConfiguration(dfs, new Path(dfsBasePath + "/t4.props"));
+    }, "Should error out on a self-included file.");
   }
 }
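The hunks above show the standard replacement for the try/fail/catch idiom: `assertThrows` runs the lambda, passes if the expected exception is thrown, and fails with the given message otherwise. A self-contained sketch of the same pattern (the exception type and input here are illustrative only):

import static org.junit.jupiter.api.Assertions.assertThrows;

class AssertThrowsSketch {
  void rejectsBadInput() {
    // JUnit 4 idiom being replaced:
    //   try {
    //     Integer.parseInt("not-a-number");
    //     fail("Should error out here.");
    //   } catch (NumberFormatException e) {
    //     // ignore
    //   }
    assertThrows(NumberFormatException.class,
        () -> Integer.parseInt("not-a-number"),
        "Should error out here.");
  }
}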

View File

@@ -18,7 +18,7 @@
 package org.apache.hudi.common.util;
-import org.apache.hudi.common.testutils.HoodieCommonTestHarnessJunit5;
+import org.apache.hudi.common.testutils.HoodieCommonTestHarness;
 import org.junit.jupiter.api.Test;
@@ -35,7 +35,7 @@ import static org.junit.jupiter.api.Assertions.fail;
 /**
  * Tests file I/O utils.
  */
-public class TestFileIOUtils extends HoodieCommonTestHarnessJunit5 {
+public class TestFileIOUtils extends HoodieCommonTestHarness {
   @Test
   public void testMkdirAndDelete() throws IOException {

View File

@@ -18,12 +18,12 @@
 package org.apache.hudi.common.util;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import java.util.Arrays;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
 /**
  * Tests numeric utils.

View File

@@ -20,12 +20,12 @@ package org.apache.hudi.common.util;
 import org.apache.hudi.avro.HoodieAvroUtils;
 import org.apache.hudi.avro.HoodieAvroWriteSupport;
-import org.apache.hudi.common.HoodieCommonTestHarness;
 import org.apache.hudi.common.bloom.BloomFilter;
 import org.apache.hudi.common.bloom.BloomFilterFactory;
 import org.apache.hudi.common.bloom.BloomFilterTypeCode;
 import org.apache.hudi.common.model.HoodieRecord;
 import org.apache.hudi.common.model.HoodieTestUtils;
+import org.apache.hudi.common.testutils.HoodieCommonTestHarness;
 import org.apache.avro.Schema;
 import org.apache.avro.generic.GenericData;
@@ -34,58 +34,50 @@ import org.apache.hadoop.fs.Path;
 import org.apache.parquet.avro.AvroSchemaConverter;
 import org.apache.parquet.hadoop.ParquetWriter;
 import org.apache.parquet.hadoop.metadata.CompressionCodecName;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-import org.junit.runners.Parameterized.Parameters;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
+import java.nio.file.Paths;
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.Collection;
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
 import java.util.UUID;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 /**
  * Tests parquet utils.
 */
-@RunWith(Parameterized.class)
 public class TestParquetUtils extends HoodieCommonTestHarness {
-  String bloomFilterTypeToTest;
-  @Parameters()
-  public static Collection<Object[]> data() {
-    return Arrays.asList(new Object[][] {
-        {BloomFilterTypeCode.SIMPLE.name()},
-        {BloomFilterTypeCode.DYNAMIC_V0.name()}
-    });
+  public static List<Arguments> bloomFilterTypeCodes() {
+    return Arrays.asList(
+        Arguments.of(BloomFilterTypeCode.SIMPLE.name()),
+        Arguments.of(BloomFilterTypeCode.DYNAMIC_V0.name())
+    );
   }
-  public TestParquetUtils(String bloomFilterTypeToTest) {
-    this.bloomFilterTypeToTest = bloomFilterTypeToTest;
-  }
-  @Before
+  @BeforeEach
   public void setup() {
     initPath();
   }
-  @Test
-  public void testHoodieWriteSupport() throws Exception {
+  @ParameterizedTest
+  @MethodSource("bloomFilterTypeCodes")
+  public void testHoodieWriteSupport(String typeCode) throws Exception {
     List<String> rowKeys = new ArrayList<>();
     for (int i = 0; i < 1000; i++) {
       rowKeys.add(UUID.randomUUID().toString());
     }
-    String filePath = basePath + "/test.parquet";
-    writeParquetFile(filePath, rowKeys);
+    String filePath = Paths.get(basePath, "test.parquet").toString();
+    writeParquetFile(typeCode, filePath, rowKeys);
     // Read and verify
     List<String> rowKeysInFile = new ArrayList<>(
@@ -93,16 +85,17 @@ public class TestParquetUtils extends HoodieCommonTestHarness {
     Collections.sort(rowKeysInFile);
     Collections.sort(rowKeys);
-    assertEquals("Did not read back the expected list of keys", rowKeys, rowKeysInFile);
+    assertEquals(rowKeys, rowKeysInFile, "Did not read back the expected list of keys");
     BloomFilter filterInFile =
         ParquetUtils.readBloomFilterFromParquetMetadata(HoodieTestUtils.getDefaultHadoopConf(), new Path(filePath));
     for (String rowKey : rowKeys) {
-      assertTrue("key should be found in bloom filter", filterInFile.mightContain(rowKey));
+      assertTrue(filterInFile.mightContain(rowKey), "key should be found in bloom filter");
     }
   }
-  @Test
-  public void testFilterParquetRowKeys() throws Exception {
+  @ParameterizedTest
+  @MethodSource("bloomFilterTypeCodes")
+  public void testFilterParquetRowKeys(String typeCode) throws Exception {
     List<String> rowKeys = new ArrayList<>();
     Set<String> filter = new HashSet<>();
     for (int i = 0; i < 1000; i++) {
@@ -113,25 +106,25 @@ public class TestParquetUtils extends HoodieCommonTestHarness {
       }
     }
-    String filePath = basePath + "/test.parquet";
-    writeParquetFile(filePath, rowKeys);
+    String filePath = Paths.get(basePath, "test.parquet").toString();
+    writeParquetFile(typeCode, filePath, rowKeys);
     // Read and verify
     Set<String> filtered =
         ParquetUtils.filterParquetRowKeys(HoodieTestUtils.getDefaultHadoopConf(), new Path(filePath), filter);
-    assertEquals("Filtered count does not match", filter.size(), filtered.size());
+    assertEquals(filter.size(), filtered.size(), "Filtered count does not match");
     for (String rowKey : filtered) {
-      assertTrue("filtered key must be in the given filter", filter.contains(rowKey));
+      assertTrue(filter.contains(rowKey), "filtered key must be in the given filter");
     }
   }
-  private void writeParquetFile(String filePath, List<String> rowKeys) throws Exception {
+  private void writeParquetFile(String typeCode, String filePath, List<String> rowKeys) throws Exception {
     // Write out a parquet file
     Schema schema = HoodieAvroUtils.getRecordKeySchema();
     BloomFilter filter = BloomFilterFactory
-        .createBloomFilter(1000, 0.0001, 10000, bloomFilterTypeToTest);
+        .createBloomFilter(1000, 0.0001, 10000, typeCode);
     HoodieAvroWriteSupport writeSupport =
         new HoodieAvroWriteSupport(new AvroSchemaConverter().convert(schema), schema, filter);
     ParquetWriter writer = new ParquetWriter(new Path(filePath), writeSupport, CompressionCodecName.GZIP,
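This file shows the biggest structural change in the migration: JUnit 4's class-level `@RunWith(Parameterized.class)` plus constructor injection becomes method-level parameterization. Each test declares its own argument source and receives the value as a parameter, so the `bloomFilterTypeToTest` field and the constructor disappear. A stripped-down sketch of the mechanism (names hypothetical; requires the junit-jupiter-params artifact):

import java.util.Arrays;
import java.util.List;

import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;

import static org.junit.jupiter.api.Assertions.assertTrue;

class ParameterizedSketch {

  // Plays the role of the old @Parameters factory; no constructor needed.
  static List<Arguments> codes() {
    return Arrays.asList(Arguments.of("SIMPLE"), Arguments.of("DYNAMIC_V0"));
  }

  // Invoked once per argument; the value arrives as a method parameter.
  @ParameterizedTest
  @MethodSource("codes")
  void acceptsKnownCodes(String code) {
    assertTrue(code.equals("SIMPLE") || code.equals("DYNAMIC_V0"));
  }
}

A side benefit visible in the report output: each invocation is listed with its argument, instead of one anonymous run per constructor instance.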

View File

@@ -19,14 +19,18 @@
 package org.apache.hudi.common.util;
 import org.apache.avro.util.Utf8;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.LinkedList;
 import java.util.Objects;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 /**
  * Tests serialization utils.
 */
@@ -50,13 +54,14 @@ public class TestSerializationUtils {
   private <T> void verifyObject(T expectedValue) throws IOException {
     byte[] serializedObject = SerializationUtils.serialize(expectedValue);
-    Assert.assertTrue(serializedObject != null && serializedObject.length > 0);
+    assertNotNull(serializedObject);
+    assertTrue(serializedObject.length > 0);
     final T deserializedValue = SerializationUtils.<T>deserialize(serializedObject);
     if (expectedValue == null) {
-      Assert.assertNull(deserializedValue);
+      assertNull(deserializedValue);
     } else {
-      Assert.assertTrue(expectedValue.equals(deserializedValue));
+      assertEquals(expectedValue, deserializedValue);
     }
   }
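Note that the migration also splits the compound `assertTrue(x != null && x.length > 0)` into two assertions: when the check fails, the report then says which half failed instead of a bare `expected: true`. A sketch of the same idea (hypothetical method):

import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertTrue;

class SplitAssertionSketch {
  void verifySerialized(byte[] serialized) {
    // Two targeted assertions instead of one compound boolean check.
    assertNotNull(serialized);
    assertTrue(serialized.length > 0);
  }
}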

View File

@@ -18,12 +18,12 @@
 package org.apache.hudi.common.util;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 public class TestStringUtils {

View File

@@ -19,13 +19,13 @@
 package org.apache.hudi.common.util.collection;
 import org.apache.hudi.avro.HoodieAvroUtils;
-import org.apache.hudi.common.HoodieCommonTestHarness;
 import org.apache.hudi.common.model.AvroBinaryTestPayload;
 import org.apache.hudi.common.model.HoodieAvroPayload;
 import org.apache.hudi.common.model.HoodieKey;
 import org.apache.hudi.common.model.HoodieRecord;
 import org.apache.hudi.common.model.HoodieRecordPayload;
 import org.apache.hudi.common.table.timeline.HoodieActiveTimeline;
+import org.apache.hudi.common.testutils.HoodieCommonTestHarness;
 import org.apache.hudi.common.util.HoodieRecordSizeEstimator;
 import org.apache.hudi.common.util.Option;
 import org.apache.hudi.common.util.SchemaTestUtil;
@@ -35,9 +35,9 @@ import org.apache.hudi.common.util.SpillableMapUtils;
 import org.apache.avro.Schema;
 import org.apache.avro.generic.GenericRecord;
 import org.apache.avro.generic.IndexedRecord;
-import org.junit.Before;
-import org.junit.Ignore;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Disabled;
+import org.junit.jupiter.api.Test;
 import java.io.IOException;
 import java.io.UncheckedIOException;
@@ -53,15 +53,15 @@ import java.util.UUID;
 import java.util.stream.Collectors;
 import static org.apache.hudi.common.util.SchemaTestUtil.getSimpleSchema;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 /**
  * Tests disk based map {@link DiskBasedMap}.
 */
 public class TestDiskBasedMap extends HoodieCommonTestHarness {
-  @Before
+  @BeforeEach
   public void setup() {
     initPath();
   }
@@ -212,7 +212,7 @@ public class TestDiskBasedMap extends HoodieCommonTestHarness {
   /**
    * @na: Leaving this test here for a quick performance test
   */
-  @Ignore
+  @Disabled
   @Test
   public void testSizeEstimatorPerformance() throws IOException, URISyntaxException {
     // Test sizeEstimatorPerformance with simpleSchema
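The lifecycle and skip annotations map one-for-one, as this file illustrates: `@Before`/`@After` become `@BeforeEach`/`@AfterEach`, `@BeforeClass`/`@AfterClass` become `@BeforeAll`/`@AfterAll`, and `@Ignore` becomes `@Disabled`. A compact sketch (hypothetical test class):

import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.Test;

class AnnotationMappingSketch {

  @BeforeEach // was @Before in JUnit 4
  void setUp() {
    // per-test initialization
  }

  @Disabled("kept for manual performance runs") // was @Ignore
  @Test
  void slowBenchmark() {
    // skipped by default, but stays compiled and visible in reports
  }
}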

View File

@@ -19,12 +19,12 @@
 package org.apache.hudi.common.util.collection;
 import org.apache.hudi.avro.HoodieAvroUtils;
-import org.apache.hudi.common.HoodieCommonTestHarness;
 import org.apache.hudi.common.model.HoodieAvroPayload;
 import org.apache.hudi.common.model.HoodieKey;
 import org.apache.hudi.common.model.HoodieRecord;
 import org.apache.hudi.common.model.HoodieRecordPayload;
 import org.apache.hudi.common.table.timeline.HoodieActiveTimeline;
+import org.apache.hudi.common.testutils.HoodieCommonTestHarness;
 import org.apache.hudi.common.util.DefaultSizeEstimator;
 import org.apache.hudi.common.util.HoodieRecordSizeEstimator;
 import org.apache.hudi.common.util.Option;
@@ -34,10 +34,10 @@ import org.apache.hudi.common.util.SpillableMapTestUtils;
 import org.apache.avro.Schema;
 import org.apache.avro.generic.GenericRecord;
 import org.apache.avro.generic.IndexedRecord;
-import org.junit.Before;
-import org.junit.FixMethodOrder;
-import org.junit.Test;
-import org.junit.runners.MethodSorters;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.MethodOrderer.Alphanumeric;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.TestMethodOrder;
 import java.io.IOException;
 import java.io.UncheckedIOException;
@@ -46,19 +46,20 @@ import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 /**
  * Tests external spillable map {@link ExternalSpillableMap}.
 */
-@FixMethodOrder(MethodSorters.NAME_ASCENDING)
+@TestMethodOrder(Alphanumeric.class)
 public class TestExternalSpillableMap extends HoodieCommonTestHarness {
   private static String failureOutputPath;
-  @Before
+  @BeforeEach
   public void setUp() {
     initPath();
     failureOutputPath = basePath + "/test_fail";
@@ -175,7 +176,7 @@ public class TestExternalSpillableMap extends HoodieCommonTestHarness {
     assertTrue(records.size() == 0);
   }
-  @Test(expected = IOException.class)
+  @Test
   public void simpleTestWithException() throws IOException, URISyntaxException {
     Schema schema = HoodieAvroUtils.addMetadataFields(SchemaTestUtil.getSimpleSchema());
@@ -186,9 +187,11 @@ public class TestExternalSpillableMap extends HoodieCommonTestHarness {
     List<String> recordKeys = SpillableMapTestUtils.upsertRecords(iRecords, records);
     assert (recordKeys.size() == 100);
     Iterator<HoodieRecord<? extends HoodieRecordPayload>> itr = records.iterator();
+    assertThrows(IOException.class, () -> {
       while (itr.hasNext()) {
         throw new IOException("Testing failures...");
       }
+    });
   }
   @Test
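JUnit 5 drops the `expected` attribute of `@Test`, and the replacement shown above is stricter in a useful way: `@Test(expected = IOException.class)` passed if the exception escaped from anywhere in the method body, whereas `assertThrows` pins it to the exact statements wrapped in the lambda. A sketch of the difference (fixture method hypothetical):

import java.io.IOException;

import org.junit.jupiter.api.Test;

import static org.junit.jupiter.api.Assertions.assertThrows;

class ExpectedExceptionSketch {

  @Test
  void failsOnlyWhereExpected() {
    prepareFixture(); // an unexpected exception here now fails the test
    assertThrows(IOException.class, () -> {
      throw new IOException("Testing failures...");
    });
  }

  private void prepareFixture() {
    // hypothetical setup that must not throw
  }
}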

View File

@@ -20,10 +20,9 @@ package org.apache.hudi.common.util.collection;
 import org.apache.hudi.common.table.view.FileSystemViewStorageConfig;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import java.io.File;
 import java.io.Serializable;
@@ -38,6 +37,11 @@ import java.util.UUID;
 import java.util.stream.Collectors;
 import java.util.stream.IntStream;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 /**
  * Tests RocksDB manager {@link RocksDBDAO}.
 */
@@ -45,13 +49,13 @@ public class TestRocksDBManager {
   private RocksDBDAO dbManager;
-  @Before
+  @BeforeEach
   public void setUpClass() {
     dbManager = new RocksDBDAO("/dummy/path/" + UUID.randomUUID().toString(),
         FileSystemViewStorageConfig.newBuilder().build().newBuilder().build().getRocksdbBasePath());
   }
-  @After
+  @AfterEach
   public void tearDownClass() {
     if (dbManager != null) {
       dbManager.close();
@@ -104,35 +108,36 @@ public class TestRocksDBManager {
         List<Pair<String, Payload>> gotPayloads =
             dbManager.<Payload>prefixSearch(family, prefix).collect(Collectors.toList());
         Integer expCount = countsMap.get(family).get(prefix);
-        Assert.assertEquals("Size check for prefix (" + prefix + ") and family (" + family + ")",
-            expCount == null ? 0L : expCount.longValue(), gotPayloads.size());
+        assertEquals(expCount == null ? 0L : expCount.longValue(), gotPayloads.size(),
+            "Size check for prefix (" + prefix + ") and family (" + family + ")");
         gotPayloads.forEach(p -> {
-          Assert.assertEquals(p.getRight().getFamily(), family);
-          Assert.assertTrue(p.getRight().getKey().toString().startsWith(prefix));
+          assertEquals(p.getRight().getFamily(), family);
+          assertTrue(p.getRight().getKey().toString().startsWith(prefix));
         });
       });
     });
     payloads.stream().filter(p -> !p.getPrefix().equalsIgnoreCase(prefix1)).forEach(payload -> {
       Payload p = dbManager.get(payload.getFamily(), payload.getKey());
-      Assert.assertEquals("Retrieved correct payload for key :" + payload.getKey(), payload, p);
+      assertEquals(payload, p, "Retrieved correct payload for key :" + payload.getKey());
      dbManager.delete(payload.getFamily(), payload.getKey());
       Payload p2 = dbManager.get(payload.getFamily(), payload.getKey());
-      Assert.assertNull("Retrieved correct payload for key :" + payload.getKey(), p2);
+      assertNull(p2, "Retrieved correct payload for key :" + payload.getKey());
     });
     colFamilies.forEach(family -> {
       dbManager.prefixDelete(family, prefix1);
       int got = dbManager.prefixSearch(family, prefix1).collect(Collectors.toList()).size();
-      Assert.assertEquals("Expected prefix delete to leave at least one item for family: " + family, countsMap.get(family).get(prefix1) == null ? 0 : 1, got);
+      assertEquals(countsMap.get(family).get(prefix1) == null ? 0 : 1, got,
+          "Expected prefix delete to leave at least one item for family: " + family);
     });
     payloads.stream().filter(p -> !p.getPrefix().equalsIgnoreCase(prefix1)).forEach(payload -> {
       Payload p2 = dbManager.get(payload.getFamily(), payload.getKey());
-      Assert.assertNull("Retrieved correct payload for key :" + payload.getKey(), p2);
+      assertNull(p2, "Retrieved correct payload for key :" + payload.getKey());
     });
     // Now do a prefix search
@@ -140,14 +145,14 @@ public class TestRocksDBManager {
     prefixes.stream().filter(p -> !p.equalsIgnoreCase(prefix1)).forEach(prefix -> {
       List<Pair<String, Payload>> gotPayloads =
           dbManager.<Payload>prefixSearch(family, prefix).collect(Collectors.toList());
-      Assert.assertEquals("Size check for prefix (" + prefix + ") and family (" + family + ")", 0,
-          gotPayloads.size());
+      assertEquals(0, gotPayloads.size(),
+          "Size check for prefix (" + prefix + ") and family (" + family + ")");
     });
   });
   String rocksDBBasePath = dbManager.getRocksDBBasePath();
   dbManager.close();
-  Assert.assertFalse(new File(rocksDBBasePath).exists());
+  assertFalse(new File(rocksDBBasePath).exists());
 }
 @Test
@@ -196,26 +201,26 @@ public class TestRocksDBManager {
     payloads.forEach(payload -> {
       Payload p = dbManager.get(payload.getFamily(), payload.getKey());
-      Assert.assertEquals("Retrieved correct payload for key :" + payload.getKey(), payload, p);
+      assertEquals(payload, p, "Retrieved correct payload for key :" + payload.getKey());
     });
     payloadSplits.next().forEach(payload -> {
       dbManager.delete(payload.getFamily(), payload.getKey());
       Payload want = dbManager.get(payload.getFamily(), payload.getKey());
-      Assert.assertNull("Verify deleted during single delete for key :" + payload.getKey(), want);
+      assertNull(want, "Verify deleted during single delete for key :" + payload.getKey());
     });
     dbManager.writeBatch(batch -> {
       payloadSplits.next().forEach(payload -> {
         dbManager.deleteInBatch(batch, payload.getFamily(), payload.getKey());
         Payload want = dbManager.get(payload.getFamily(), payload.getKey());
-        Assert.assertEquals("Verify not deleted during batch delete in progress for key :" + payload.getKey(), payload, want);
+        assertEquals(payload, want, "Verify not deleted during batch delete in progress for key :" + payload.getKey());
       });
     });
     payloads.forEach(payload -> {
       Payload want = dbManager.get(payload.getFamily(), payload.getKey());
-      Assert.assertNull("Verify delete for key :" + payload.getKey(), want);
+      assertNull(want, "Verify delete for key :" + payload.getKey());
    });
     // Now do a prefix search
@@ -223,14 +228,14 @@ public class TestRocksDBManager {
     prefixes.forEach(prefix -> {
       List<Pair<String, Payload>> gotPayloads =
           dbManager.<Payload>prefixSearch(family, prefix).collect(Collectors.toList());
-      Assert.assertEquals("Size check for prefix (" + prefix + ") and family (" + family + ")", 0,
-          gotPayloads.size());
+      assertEquals(0, gotPayloads.size(),
+          "Size check for prefix (" + prefix + ") and family (" + family + ")");
     });
   });
   String rocksDBBasePath = dbManager.getRocksDBBasePath();
   dbManager.close();
-  Assert.assertFalse(new File(rocksDBBasePath).exists());
+  assertFalse(new File(rocksDBBasePath).exists());
 }
 public static class PayloadKey implements Serializable {

View File

@@ -18,17 +18,16 @@
 package org.apache.hudi.common.util.collection;
-import org.apache.hudi.common.HoodieCommonTestHarness;
 import org.apache.hudi.common.model.HoodieRecord;
 import org.apache.hudi.common.model.HoodieRecordPayload;
+import org.apache.hudi.common.testutils.HoodieCommonTestHarness;
 import org.apache.hudi.common.util.SchemaTestUtil;
 import org.apache.hudi.common.util.SpillableMapTestUtils;
 import org.apache.avro.generic.GenericRecord;
 import org.apache.avro.generic.IndexedRecord;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import java.io.IOException;
 import java.net.URISyntaxException;
@@ -36,12 +35,14 @@ import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 /**
  * Tests RocksDB based map {@link RocksDBBasedMap}.
 */
 public class TestRocksDbBasedMap extends HoodieCommonTestHarness {
-  @Before
+  @BeforeEach
   public void setUp() {
     initPath();
   }
@@ -61,6 +62,6 @@ public class TestRocksDbBasedMap extends HoodieCommonTestHarness {
       oRecords.add(rec);
       assert recordKeys.contains(rec.getRecordKey());
     }
-    Assert.assertEquals(recordKeys.size(), oRecords.size());
+    assertEquals(recordKeys.size(), oRecords.size());
   }
 }

View File

@@ -20,7 +20,7 @@ package org.apache.hudi.hadoop;
 import org.apache.hudi.common.model.HoodieTestUtils;
 import org.apache.hudi.common.table.HoodieTableMetaClient;
-import org.apache.hudi.common.testutils.HoodieCommonTestHarnessJunit5;
+import org.apache.hudi.common.testutils.HoodieCommonTestHarness;
 import org.apache.hadoop.fs.Path;
 import org.junit.jupiter.api.BeforeEach;
@@ -37,7 +37,7 @@ import static org.junit.jupiter.api.Assertions.assertTrue;
 /**
  *
 */
-public class TestHoodieROTablePathFilter extends HoodieCommonTestHarnessJunit5 {
+public class TestHoodieROTablePathFilter extends HoodieCommonTestHarness {
   @BeforeEach
   public void setUp() throws Exception {

View File

@@ -23,7 +23,7 @@ import org.apache.hudi.common.minicluster.MiniClusterUtil;
 import org.apache.hudi.common.model.HoodieTableType;
 import org.apache.hudi.common.model.HoodieTestUtils;
 import org.apache.hudi.common.table.log.HoodieLogFormat;
-import org.apache.hudi.common.testutils.HoodieCommonTestHarnessJunit5;
+import org.apache.hudi.common.testutils.HoodieCommonTestHarness;
 import org.apache.hudi.common.util.SchemaTestUtil;
 import org.apache.hudi.hadoop.InputFormatTestUtil;
 import org.apache.hudi.hadoop.hive.HoodieCombineHiveInputFormat;
@@ -58,7 +58,7 @@ import static org.apache.hadoop.hive.ql.exec.Utilities.MAPRED_MAPPER_CLASS;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertTrue;
-public class TestHoodieCombineHiveInputFormat extends HoodieCommonTestHarnessJunit5 {
+public class TestHoodieCombineHiveInputFormat extends HoodieCommonTestHarness {
   private JobConf jobConf;
   private FileSystem fs;
@@ -80,7 +80,7 @@ public class TestHoodieCombineHiveInputFormat extends HoodieCommonTestHarnessJun
     this.fs = MiniClusterUtil.fileSystem;
     jobConf = new JobConf();
     hadoopConf = HoodieTestUtils.getDefaultHadoopConf();
-    assertTrue(fs.mkdirs(new Path(folder.getRoot().getPath())));
+    assertTrue(fs.mkdirs(new Path(tempDir.toAbsolutePath().toString())));
     HoodieTestUtils.init(MiniClusterUtil.configuration, tempDir.toAbsolutePath().toString(), HoodieTableType.MERGE_ON_READ);
   }
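The switch from `folder.getRoot()` to `tempDir` suggests the JUnit 4 `TemporaryFolder` rule was replaced by Jupiter's `@TempDir` injection, which creates a fresh directory before each test and deletes it afterwards. A minimal sketch, assuming JUnit 5.4+ (class and file names hypothetical):

import java.nio.file.Files;
import java.nio.file.Path;

import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;

import static org.junit.jupiter.api.Assertions.assertTrue;

class TempDirSketch {

  // Replaces: @Rule public TemporaryFolder folder = new TemporaryFolder();
  @TempDir
  Path tempDir;

  @Test
  void writesIntoManagedDirectory() throws Exception {
    Path file = Files.createFile(tempDir.resolve("test.parquet"));
    assertTrue(Files.exists(file));
  }
}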

View File

@@ -59,7 +59,7 @@ import org.apache.zookeeper.server.ZooKeeperServer;
 import org.joda.time.DateTime;
 import org.joda.time.format.DateTimeFormat;
 import org.joda.time.format.DateTimeFormatter;
-import org.junit.runners.model.InitializationError;
+import org.junit.platform.commons.JUnitException;
 import java.io.File;
 import java.io.IOException;
@@ -156,7 +156,7 @@ public class TestUtil {
   }
   static void createCOWTable(String instantTime, int numberOfPartitions)
-      throws IOException, InitializationError, URISyntaxException {
+      throws IOException, URISyntaxException {
     Path path = new Path(hiveSyncConfig.basePath);
     FileIOUtils.deleteDirectory(new File(hiveSyncConfig.basePath));
     HoodieTableMetaClient.initTableType(configuration, hiveSyncConfig.basePath, HoodieTableType.COPY_ON_WRITE,
@@ -170,8 +170,7 @@
   }
   static void createMORTable(String commitTime, String deltaCommitTime, int numberOfPartitions,
-      boolean createDeltaCommit)
-      throws IOException, InitializationError, URISyntaxException, InterruptedException {
+      boolean createDeltaCommit) throws IOException, URISyntaxException, InterruptedException {
     Path path = new Path(hiveSyncConfig.basePath);
     FileIOUtils.deleteDirectory(new File(hiveSyncConfig.basePath));
     HoodieTableMetaClient.initTableType(configuration, hiveSyncConfig.basePath, HoodieTableType.MERGE_ON_READ,
@@ -312,9 +311,9 @@ public class TestUtil {
     return logWriter.getLogFile();
   }
-  private static void checkResult(boolean result) throws InitializationError {
+  private static void checkResult(boolean result) {
     if (!result) {
-      throw new InitializationError("Could not initialize");
+      throw new JUnitException("Could not initialize");
     }
   }
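`InitializationError` belongs to the JUnit 4 runner model; `org.junit.platform.commons.JUnitException` is the closest JUnit Platform analogue and, being an unchecked `RuntimeException`, it lets every `throws InitializationError` clause above disappear. The resulting shape, condensed from the hunks above:

import org.junit.platform.commons.JUnitException;

class CheckResultSketch {
  // Unchecked, so callers no longer declare the exception.
  static void checkResult(boolean result) {
    if (!result) {
      throw new JUnitException("Could not initialize");
    }
  }
}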

View File

@@ -21,7 +21,7 @@ package org.apache.hudi.utilities;
 import org.apache.hudi.common.HoodieTestDataGenerator;
 import org.apache.hudi.common.fs.FSUtils;
 import org.apache.hudi.common.model.HoodieTestUtils;
-import org.apache.hudi.common.testutils.HoodieCommonTestHarnessJunit5;
+import org.apache.hudi.common.testutils.HoodieCommonTestHarness;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -39,7 +39,7 @@ import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertFalse;
 import static org.junit.jupiter.api.Assertions.assertTrue;
-public class TestHoodieSnapshotCopier extends HoodieCommonTestHarnessJunit5 {
+public class TestHoodieSnapshotCopier extends HoodieCommonTestHarness {
   private static final String TEST_WRITE_TOKEN = "1-0-1";

View File

@@ -20,7 +20,7 @@ package org.apache.hudi.utilities.checkpointing;
 import org.apache.hudi.common.config.TypedProperties;
 import org.apache.hudi.common.model.HoodieTestUtils;
-import org.apache.hudi.common.testutils.HoodieCommonTestHarnessJunit5;
+import org.apache.hudi.common.testutils.HoodieCommonTestHarness;
 import org.apache.hudi.exception.HoodieException;
 import org.apache.hadoop.conf.Configuration;
@@ -32,7 +32,7 @@ import java.io.File;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertThrows;
-public class TestKafkaConnectHdfsProvider extends HoodieCommonTestHarnessJunit5 {
+public class TestKafkaConnectHdfsProvider extends HoodieCommonTestHarness {
   private String topicPath = null;
   private Configuration hadoopConf = null;

View File

@@ -82,7 +82,6 @@
     <kafka.version>2.0.0</kafka.version>
     <glassfish.version>2.17</glassfish.version>
     <parquet.version>1.10.1</parquet.version>
-    <junit.version>4.12</junit.version>
     <junit.jupiter.version>5.6.1</junit.jupiter.version>
     <junit.vintage.version>5.6.1</junit.vintage.version>
     <mockito.jupiter.version>3.3.3</mockito.jupiter.version>
@@ -815,13 +814,6 @@
         <scope>test</scope>
       </dependency>
-      <dependency>
-        <groupId>junit</groupId>
-        <artifactId>junit</artifactId>
-        <version>${junit.version}</version>
-        <scope>test</scope>
-      </dependency>
       <dependency>
         <groupId>org.junit.jupiter</groupId>
         <artifactId>junit-jupiter-api</artifactId>

View File

@@ -264,8 +264,10 @@
         </module>
         <module name="CommentsIndentation"/>
         <module name="IllegalImport">
-            <property name="illegalPkgs" value="org.apache.commons, com.google.common" />
-            <property name="illegalClasses" value="java.util.Optional" />
+            <property name="regexp" value="true"/>
+            <property name="illegalPkgs" value="org\.apache\.commons, com\.google\.common"/>
+            <property name="illegalClasses"
+                      value="^java\.util\.Optional, ^org\.junit\.(?!jupiter|platform|contrib|Rule)(.*)"/>
         </module>
         <module name="EmptyStatement" />
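With `regexp="true"`, the `illegalClasses` pattern uses a negative lookahead to ban every `org.junit.*` import except the `jupiter`, `platform`, `contrib`, and `Rule` namespaces, which keeps JUnit 4 APIs from creeping back into the codebase after this migration. A Java illustration of what the rule accepts and flags (assumed behavior, inferred from the regex above):

// Allowed: exempted by the (?!jupiter|platform|contrib|Rule) lookahead.
import org.junit.jupiter.api.Test;

class ImportPolicySketch {
  // Flagged by the new rule, since they match the banned pattern:
  //   import org.junit.Test;
  //   import static org.junit.Assert.assertEquals;
  //   import org.junit.runners.Parameterized;
  @Test
  void compliant() {
    // compiles and passes checkstyle
  }
}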