Remove stateful fs member from HoodieTestUtils & FSUtils

vinothchandar
2018-01-03 16:05:30 -08:00
committed by vinoth chandar
parent cf7f7aabb9
commit 21ce846f18
19 changed files with 74 additions and 103 deletions
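
The theme across all 19 files is the same: stop reading a mutable static FileSystem off FSUtils / HoodieTestUtils and instead resolve it from the path and Hadoop configuration at the call site, then pass it in explicitly. A minimal sketch of the before/after calling pattern, using only signatures that appear in the hunks below (basePath and jsc come from the surrounding test fixtures):

    // Before: helpers consulted process-wide static state.
    // FSUtils.setFs(dfs);              // test-only hook, deleted in this commit
    // HoodieTestUtils.init(basePath);  // read HoodieTestUtils.fs internally

    // After: the caller owns the FileSystem and threads it through.
    FileSystem fs = FSUtils.getFs(basePath, jsc.hadoopConfiguration());
    HoodieTestUtils.init(fs, basePath);
    HoodieTestUtils.initTableType(fs, basePath, HoodieTableType.MERGE_ON_READ);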

@@ -102,7 +102,7 @@ public class TestHoodieClientOnCopyOnWriteStorage implements Serializable {
     folder.create();
     basePath = folder.getRoot().getAbsolutePath();
     fs = FSUtils.getFs(basePath.toString(), jsc.hadoopConfiguration());
-    HoodieTestUtils.init(basePath);
+    HoodieTestUtils.init(fs, basePath);
     dataGen = new HoodieTestDataGenerator();
   }
@@ -1247,27 +1247,27 @@ public class TestHoodieClientOnCopyOnWriteStorage implements Serializable {
         .retainFileVersions(1).build()).build();
     HoodieTableMetaClient metaClient = HoodieTestUtils
-        .initTableType(basePath, HoodieTableType.MERGE_ON_READ);
+        .initTableType(fs, basePath, HoodieTableType.MERGE_ON_READ);
     // Make 3 files, one base file and 2 log files associated with base file
     String file1P0 = HoodieTestUtils.createNewDataFile(basePath, partitionPaths[0], "000");
     String file2P0L0 = HoodieTestUtils
-        .createNewLogFile(basePath, partitionPaths[0], "000", file1P0, Optional.empty());
+        .createNewLogFile(fs, basePath, partitionPaths[0], "000", file1P0, Optional.empty());
     String file2P0L1 = HoodieTestUtils
-        .createNewLogFile(basePath, partitionPaths[0], "000", file1P0, Optional.of(2));
+        .createNewLogFile(fs, basePath, partitionPaths[0], "000", file1P0, Optional.of(2));
     // make 1 compaction commit
-    HoodieTestUtils.createCompactionCommitFiles(basePath, "000");
+    HoodieTestUtils.createCompactionCommitFiles(fs, basePath, "000");
     // Make 4 files, one base file and 3 log files associated with base file
     HoodieTestUtils.createDataFile(basePath, partitionPaths[0], "001", file1P0);
     file2P0L0 = HoodieTestUtils
-        .createNewLogFile(basePath, partitionPaths[0], "001", file1P0, Optional.empty());
+        .createNewLogFile(fs, basePath, partitionPaths[0], "001", file1P0, Optional.empty());
     file2P0L0 = HoodieTestUtils
-        .createNewLogFile(basePath, partitionPaths[0], "001", file1P0, Optional.of(2));
+        .createNewLogFile(fs, basePath, partitionPaths[0], "001", file1P0, Optional.of(2));
     file2P0L0 = HoodieTestUtils
-        .createNewLogFile(basePath, partitionPaths[0], "001", file1P0, Optional.of(3));
+        .createNewLogFile(fs, basePath, partitionPaths[0], "001", file1P0, Optional.of(3));
     // make 1 compaction commit
-    HoodieTestUtils.createCompactionCommitFiles(basePath, "001");
+    HoodieTestUtils.createCompactionCommitFiles(fs, basePath, "001");
     HoodieTable table = HoodieTable.getHoodieTable(metaClient, config);
     List<HoodieCleanStat> hoodieCleanStats = table.clean(jsc);

@@ -47,8 +47,8 @@ import org.apache.spark.api.java.JavaSparkContext;
 import org.apache.spark.sql.Dataset;
 import org.apache.spark.sql.Row;
 import org.apache.spark.sql.SQLContext;
-import org.junit.After;
-import org.junit.Before;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
 import org.junit.Test;
 public class TestMultiFS implements Serializable {
@@ -64,8 +64,8 @@ public class TestMultiFS implements Serializable {
   private static JavaSparkContext jsc;
   private static SQLContext sqlContext;
-  @Before
-  public void initClass() throws Exception {
+  @BeforeClass
+  public static void initClass() throws Exception {
     hdfsTestService = new HdfsTestService();
     dfsCluster = hdfsTestService.start(true);
@@ -82,15 +82,18 @@ public class TestMultiFS implements Serializable {
     sqlContext = new SQLContext(jsc);
   }
-  @After
-  public void cleanupClass() throws Exception {
-    if (hdfsTestService != null) {
-      hdfsTestService.stop();
-    }
+  @AfterClass
+  public static void cleanupClass() throws Exception {
     if (jsc != null) {
       jsc.stop();
     }
-    FSUtils.setFs(null);
+    if (hdfsTestService != null) {
+      hdfsTestService.stop();
+      dfsCluster.shutdown();
+    }
+    // Need to closeAll to clear FileSystem.Cache, required because DFS and LocalFS used in the same JVM
+    FileSystem.closeAll();
   }
   @Test
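
The closeAll() comment above relies on Hadoop caching FileSystem instances per (scheme, authority, user); when a DFS mini-cluster and the local file system share one JVM, a stale cached client can leak from one test class into the next. A small self-contained sketch of that cache behaviour (illustration only, not part of this commit; the class name is made up):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    public class FsCacheDemo {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem a = FileSystem.get(URI.create("file:///"), conf);
        FileSystem b = FileSystem.get(URI.create("file:///"), conf);
        System.out.println(a == b);   // true: same cached instance
        FileSystem.closeAll();        // closes and evicts every cached entry
        FileSystem c = FileSystem.get(URI.create("file:///"), conf);
        System.out.println(a == c);   // false: a fresh instance after closeAll()
      }
    }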

@@ -48,7 +48,7 @@ public class TestUpdateMapFunction {
     TemporaryFolder folder = new TemporaryFolder();
     folder.create();
     this.basePath = folder.getRoot().getAbsolutePath();
-    HoodieTestUtils.init(basePath);
+    HoodieTestUtils.init(FSUtils.getFs(basePath, HoodieTestUtils.getDefaultHadoopConf()), basePath);
   }
   @Test

@@ -88,7 +88,7 @@ public class TestHoodieBloomIndex {
     folder.create();
     basePath = folder.getRoot().getAbsolutePath();
     fs = FSUtils.getFs(basePath, jsc.hadoopConfiguration());
-    HoodieTestUtils.init(basePath);
+    HoodieTestUtils.init(fs, basePath);
     // We have some records to be tagged (two different partitions)
     schemaStr = IOUtils.toString(getClass().getResourceAsStream("/exampleSchema.txt"), "UTF-8");
     schema = HoodieAvroUtils.addMetadataFields(new Schema.Parser().parse(schemaStr));

@@ -54,8 +54,8 @@ public class TestHoodieCommitArchiveLog {
     TemporaryFolder folder = new TemporaryFolder();
     folder.create();
     basePath = folder.getRoot().getAbsolutePath();
-    HoodieTestUtils.init(basePath);
     fs = FSUtils.getFs(basePath, HoodieTestUtils.getDefaultHadoopConf());
+    HoodieTestUtils.init(fs, basePath);
   }
   @Test
@@ -75,7 +75,7 @@ public class TestHoodieCommitArchiveLog {
         .withSchema(HoodieTestDataGenerator.TRIP_EXAMPLE_SCHEMA).withParallelism(2, 2)
         .withCompactionConfig(HoodieCompactionConfig.newBuilder().archiveCommitsWith(2, 4).build())
         .forTable("test-trip-table").build();
-    HoodieTestUtils.init(basePath);
+    HoodieTestUtils.init(fs, basePath);
     HoodieTestDataGenerator.createCommitFile(basePath, "100");
     HoodieTestDataGenerator.createCommitFile(basePath, "101");
     HoodieTestDataGenerator.createCommitFile(basePath, "102");

@@ -31,6 +31,7 @@ import com.uber.hoodie.common.model.HoodieTestUtils;
 import com.uber.hoodie.common.table.HoodieTableMetaClient;
 import com.uber.hoodie.common.table.HoodieTimeline;
 import com.uber.hoodie.common.table.timeline.HoodieActiveTimeline;
+import com.uber.hoodie.common.util.FSUtils;
 import com.uber.hoodie.config.HoodieCompactionConfig;
 import com.uber.hoodie.config.HoodieIndexConfig;
 import com.uber.hoodie.config.HoodieStorageConfig;
@@ -44,6 +45,7 @@ import java.io.File;
 import java.io.IOException;
 import java.util.List;
 import java.util.stream.Collectors;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.spark.api.java.JavaRDD;
 import org.apache.spark.api.java.JavaSparkContext;
 import org.junit.After;
@@ -57,6 +59,7 @@ public class TestHoodieCompactor {
   private String basePath = null;
   private HoodieCompactor compactor;
   private transient HoodieTestDataGenerator dataGen = null;
+  private transient FileSystem fs;
   @Before
   public void init() throws IOException {
@@ -67,7 +70,8 @@ public class TestHoodieCompactor {
     TemporaryFolder folder = new TemporaryFolder();
     folder.create();
     basePath = folder.getRoot().getAbsolutePath();
-    HoodieTestUtils.initTableType(basePath, HoodieTableType.MERGE_ON_READ);
+    fs = FSUtils.getFs(basePath, HoodieTestUtils.getDefaultHadoopConf());
+    HoodieTestUtils.initTableType(fs, basePath, HoodieTableType.MERGE_ON_READ);
     dataGen = new HoodieTestDataGenerator();
     compactor = new HoodieRealtimeTableCompactor();
@@ -100,7 +104,7 @@ public class TestHoodieCompactor {
   @Test(expected = IllegalArgumentException.class)
   public void testCompactionOnCopyOnWriteFail() throws Exception {
-    HoodieTestUtils.initTableType(basePath, HoodieTableType.COPY_ON_WRITE);
+    HoodieTestUtils.initTableType(fs, basePath, HoodieTableType.COPY_ON_WRITE);
     HoodieTableMetaClient metaClient = new HoodieTableMetaClient(jsc.hadoopConfiguration(),
         basePath);
     HoodieTable table = HoodieTable.getHoodieTable(metaClient, getConfig());
@@ -155,7 +159,7 @@ public class TestHoodieCompactor {
     // Write them to corresponding avro logfiles
     HoodieTestUtils
-        .writeRecordsToLogFiles(metaClient.getBasePath(), HoodieTestDataGenerator.avroSchema,
+        .writeRecordsToLogFiles(fs, metaClient.getBasePath(), HoodieTestDataGenerator.avroSchema,
             updatedRecords);
     // Verify that all data file has one log file

@@ -76,7 +76,7 @@ public class TestCopyOnWriteTable {
     TemporaryFolder folder = new TemporaryFolder();
     folder.create();
     this.basePath = folder.getRoot().getAbsolutePath();
-    HoodieTestUtils.init(basePath);
+    HoodieTestUtils.init(FSUtils.getFs(basePath, jsc.hadoopConfiguration()), basePath);
   }
   @Test

@@ -42,7 +42,6 @@ import com.uber.hoodie.common.table.TableFileSystemView;
 import com.uber.hoodie.common.table.timeline.HoodieActiveTimeline;
 import com.uber.hoodie.common.table.timeline.HoodieInstant;
 import com.uber.hoodie.common.table.view.HoodieTableFileSystemView;
-import com.uber.hoodie.common.util.FSUtils;
 import com.uber.hoodie.config.HoodieCompactionConfig;
 import com.uber.hoodie.config.HoodieIndexConfig;
 import com.uber.hoodie.config.HoodieStorageConfig;
@@ -80,7 +79,6 @@ public class TestMergeOnReadTable {
   private transient SQLContext sqlContext;
   private static String basePath = null;
   private HoodieCompactor compactor;
-  private FileSystem fs;
   //NOTE : Be careful in using DFS (FileSystem.class) vs LocalFs(RawLocalFileSystem.class)
   //The implementation and gurantees of many API's differ, for example check rename(src,dst)
@@ -94,10 +92,8 @@ public class TestMergeOnReadTable {
       hdfsTestService.stop();
       dfsCluster.shutdown();
     }
-    FSUtils.setFs(null);
     // Need to closeAll to clear FileSystem.Cache, required because DFS and LocalFS used in the same JVM
     FileSystem.closeAll();
-    HoodieTestUtils.resetFS(basePath);
   }
   @BeforeClass
@@ -110,8 +106,6 @@ public class TestMergeOnReadTable {
       // Create a temp folder as the base path
       dfs = dfsCluster.getFileSystem();
     }
-    FSUtils.setFs(dfs);
-    HoodieTestUtils.resetFS(basePath);
   }
   @Before
@@ -124,12 +118,10 @@ public class TestMergeOnReadTable {
     TemporaryFolder folder = new TemporaryFolder();
     folder.create();
     basePath = folder.getRoot().getAbsolutePath();
-    fs = FSUtils.getFs(basePath, jsc.hadoopConfiguration());
-    jsc.hadoopConfiguration().addResource(fs.getConf());
+    jsc.hadoopConfiguration().addResource(dfs.getConf());
     dfs.mkdirs(new Path(basePath));
-    FSUtils.setFs(dfs);
-    HoodieTestUtils.initTableType(basePath, HoodieTableType.MERGE_ON_READ);
+    HoodieTestUtils.initTableType(dfs, basePath, HoodieTableType.MERGE_ON_READ);
     sqlContext = new SQLContext(jsc); // SQLContext stuff
     compactor = new HoodieRealtimeTableCompactor();
@@ -219,7 +211,7 @@ public class TestMergeOnReadTable {
     compactor.compact(jsc, getConfig(true), table, HoodieActiveTimeline.createNewCommitTime());
-    allFiles = HoodieTestUtils.listAllDataFilesInPath(fs, cfg.getBasePath());
+    allFiles = HoodieTestUtils.listAllDataFilesInPath(dfs, cfg.getBasePath());
     roView = new HoodieTableFileSystemView(metaClient, hoodieTable.getCompletedCommitTimeline(),
         allFiles);
     dataFilesToRead = roView.getLatestDataFiles();
@@ -339,7 +331,7 @@ public class TestMergeOnReadTable {
     commit = metaClient.getActiveTimeline().getCommitTimeline().firstInstant();
     assertFalse(commit.isPresent());
-    allFiles = HoodieTestUtils.listAllDataFilesInPath(fs, cfg.getBasePath());
+    allFiles = HoodieTestUtils.listAllDataFilesInPath(dfs, cfg.getBasePath());
     roView = new HoodieTableFileSystemView(metaClient, hoodieTable.getCompletedCommitTimeline(),
         allFiles);
     dataFilesToRead = roView.getLatestDataFiles();
@@ -357,7 +349,7 @@ public class TestMergeOnReadTable {
   public void testCOWToMORConvertedDatasetRollback() throws Exception {
     //Set TableType to COW
-    HoodieTestUtils.initTableType(basePath, HoodieTableType.COPY_ON_WRITE);
+    HoodieTestUtils.initTableType(dfs, basePath, HoodieTableType.COPY_ON_WRITE);
     HoodieWriteConfig cfg = getConfig(true);
     HoodieWriteClient client = new HoodieWriteClient(jsc, cfg);
@@ -396,7 +388,7 @@ public class TestMergeOnReadTable {
     assertNoWriteErrors(statuses);
     //Set TableType to MOR
-    HoodieTestUtils.initTableType(basePath, HoodieTableType.MERGE_ON_READ);
+    HoodieTestUtils.initTableType(dfs, basePath, HoodieTableType.MERGE_ON_READ);
     //rollback a COW commit when TableType is MOR
     client.rollback(newCommitTime);

@@ -16,7 +16,6 @@
 package com.uber.hoodie.common.util;
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.uber.hoodie.common.model.HoodieLogFile;
 import com.uber.hoodie.common.model.HoodiePartitionMetadata;
@@ -57,15 +56,6 @@ public class FSUtils {
   private static final long MIN_CLEAN_TO_KEEP = 10;
   private static final long MIN_ROLLBACK_TO_KEEP = 10;
   private static final String HOODIE_ENV_PROPS_PREFIX = "HOODIE_ENV_";
-  private static FileSystem fs;
-  /**
-   * Only to be used for testing.
-   */
-  @VisibleForTesting
-  public static void setFs(FileSystem fs) {
-    FSUtils.fs = fs;
-  }
   public static Configuration prepareHadoopConf(Configuration conf) {
     conf.set("fs.hdfs.impl", org.apache.hadoop.hdfs.DistributedFileSystem.class.getName());
@@ -86,9 +76,6 @@ public class FSUtils {
   public static FileSystem getFs(String path, Configuration conf) {
-    if (fs != null) {
-      return fs;
-    }
     FileSystem fs;
     conf = prepareHadoopConf(conf);
     try {
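
With the static member and the setFs(...) hook gone, getFs resolves the FileSystem from the path on every call. Roughly what the surviving method body looks like, pieced together from the context lines above (a sketch; the exact exception wrapping in FSUtils may differ):

    public static FileSystem getFs(String path, Configuration conf) {
      FileSystem fs;
      conf = prepareHadoopConf(conf);
      try {
        // Resolve the FileSystem for this path from the Hadoop configuration.
        fs = new Path(path).getFileSystem(conf);
      } catch (IOException e) {
        throw new HoodieIOException("Failed to get instance of " + FileSystem.class.getName(), e);
      }
      return fs;
    }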

@@ -55,7 +55,6 @@ public class HdfsTestService {
   private MiniDFSCluster miniDfsCluster;
   public HdfsTestService() {
-    hadoopConf = HoodieTestUtils.getDefaultHadoopConf();
     workDir = Files.createTempDir().getAbsolutePath();
   }
@@ -66,10 +65,7 @@
   public MiniDFSCluster start(boolean format) throws IOException {
     Preconditions
         .checkState(workDir != null, "The work dir must be set before starting cluster.");
-    if (hadoopConf == null) {
-      hadoopConf = HoodieTestUtils.getDefaultHadoopConf();
-    }
+    hadoopConf = HoodieTestUtils.getDefaultHadoopConf();
     // If clean, then remove the work dir so we can start fresh.
     String localDFSLocation = getDFSLocation(workDir);
@@ -91,8 +87,8 @@
   }
   public void stop() throws IOException {
     logger.info("HDFS Minicluster service being shut down.");
     miniDfsCluster.shutdown();
+    logger.info("HDFS Minicluster service shut down.");
     miniDfsCluster = null;
-    hadoopConf = null;
   }

@@ -70,27 +70,22 @@ import org.junit.rules.TemporaryFolder;
 public class HoodieTestUtils {
-  public static FileSystem fs;
   public static final String TEST_EXTENSION = ".test";
   public static final String RAW_TRIPS_TEST_NAME = "raw_trips";
   public static final int DEFAULT_TASK_PARTITIONID = 1;
   public static final String[] DEFAULT_PARTITION_PATHS = {"2016/03/15", "2015/03/16", "2015/03/17"};
   private static Random rand = new Random(46474747);
-  public static void resetFS(String basePath) {
-    HoodieTestUtils.fs = FSUtils.getFs(basePath, HoodieTestUtils.getDefaultHadoopConf());
-  }
   public static Configuration getDefaultHadoopConf() {
     return new Configuration();
   }
-  public static HoodieTableMetaClient init(String basePath) throws IOException {
-    fs = FSUtils.getFs(basePath, HoodieTestUtils.getDefaultHadoopConf());
-    return initTableType(basePath, HoodieTableType.COPY_ON_WRITE);
+  public static HoodieTableMetaClient init(FileSystem fs, String basePath) throws IOException {
+    return initTableType(fs, basePath, HoodieTableType.COPY_ON_WRITE);
   }
-  public static HoodieTableMetaClient initTableType(String basePath, HoodieTableType tableType)
+  public static HoodieTableMetaClient initTableType(FileSystem fs, String basePath,
+      HoodieTableType tableType)
       throws IOException {
     Properties properties = new Properties();
     properties.setProperty(HoodieTableConfig.HOODIE_TABLE_NAME_PROP_NAME, RAW_TRIPS_TEST_NAME);
@@ -105,7 +100,8 @@ public class HoodieTestUtils {
     TemporaryFolder folder = new TemporaryFolder();
     folder.create();
     String basePath = folder.getRoot().getAbsolutePath();
-    return HoodieTestUtils.init(basePath);
+    return HoodieTestUtils
+        .init(FSUtils.getFs(basePath, HoodieTestUtils.getDefaultHadoopConf()), basePath);
   }
   public static String makeNewCommitTime() {
@@ -143,7 +139,7 @@ public class HoodieTestUtils {
     return fileID;
   }
-  public static final String createNewLogFile(String basePath, String partitionPath,
+  public static final String createNewLogFile(FileSystem fs, String basePath, String partitionPath,
       String commitTime, String fileID, Optional<Integer> version) throws IOException {
     String folderPath = basePath + "/" + partitionPath + "/";
     boolean makeDir = fs.mkdirs(new Path(folderPath));
@@ -159,7 +155,8 @@ public class HoodieTestUtils {
     return fileID;
   }
-  public static final void createCompactionCommitFiles(String basePath, String... commitTimes)
+  public static final void createCompactionCommitFiles(FileSystem fs, String basePath,
+      String... commitTimes)
       throws IOException {
     for (String commitTime : commitTimes) {
       boolean createFile = fs.createNewFile(new Path(
@@ -268,7 +265,7 @@ public class HoodieTestUtils {
     return deseralizedObject;
   }
-  public static void writeRecordsToLogFiles(String basePath, Schema schema,
+  public static void writeRecordsToLogFiles(FileSystem fs, String basePath, Schema schema,
       List<HoodieRecord> updatedRecords) {
     Map<HoodieRecordLocation, List<HoodieRecord>> groupedUpdated = updatedRecords.stream()
         .collect(Collectors.groupingBy(HoodieRecord::getCurrentLocation));
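
Tests that previously managed their own TemporaryFolder (HoodieTableMetaClientTest and HoodieTableFileSystemViewTest below) now call initOnTemp() and read everything back from the returned meta client instead of a static field. A short usage sketch built only from calls visible in this commit (variable names are illustrative):

    HoodieTableMetaClient metaClient = HoodieTestUtils.initOnTemp();
    String basePath = metaClient.getBasePath();
    // The meta client, not HoodieTestUtils.fs, now hands out the FileSystem and Hadoop conf.
    FileStatus[] statuses = metaClient.getFs().listStatus(new Path(basePath));
    Configuration conf = metaClient.getHadoopConf();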

@@ -36,7 +36,6 @@ import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.Text;
 import org.junit.Before;
 import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
 public class HoodieTableMetaClientTest {
@@ -45,10 +44,8 @@ public class HoodieTableMetaClientTest {
   @Before
   public void init() throws IOException {
-    TemporaryFolder folder = new TemporaryFolder();
-    folder.create();
-    this.basePath = folder.getRoot().getAbsolutePath();
-    metaClient = HoodieTestUtils.init(basePath);
+    metaClient = HoodieTestUtils.initOnTemp();
+    basePath = metaClient.getBasePath();
   }
   @Test
@@ -109,7 +106,7 @@ public class HoodieTableMetaClientTest {
   public void checkArchiveCommitTimeline() throws IOException {
     Path archiveLogPath = HoodieArchivedTimeline.getArchiveLogPath(metaClient.getArchivePath());
     SequenceFile.Writer writer = SequenceFile
-        .createWriter(HoodieTestUtils.fs.getConf(), SequenceFile.Writer.file(archiveLogPath),
+        .createWriter(metaClient.getHadoopConf(), SequenceFile.Writer.file(archiveLogPath),
            SequenceFile.Writer.keyClass(Text.class),
            SequenceFile.Writer.valueClass(Text.class));

@@ -80,7 +80,6 @@ public class HoodieLogFormatTest {
   @AfterClass
   public static void tearDownClass() {
     MiniClusterUtil.shutdown();
-    HoodieTestUtils.resetFS(basePath);
   }
   @Before
@@ -91,8 +90,7 @@
     assertTrue(fs.mkdirs(new Path(folder.getRoot().getPath())));
     this.partitionPath = new Path(folder.getRoot().getPath());
     this.basePath = folder.getRoot().getParent();
-    HoodieTestUtils.fs = fs;
-    HoodieTestUtils.initTableType(basePath, HoodieTableType.MERGE_ON_READ);
+    HoodieTestUtils.initTableType(fs, basePath, HoodieTableType.MERGE_ON_READ);
   }
   @After

@@ -49,7 +49,7 @@ public class HoodieActiveTimelineTest {
   @After
   public void tearDown() throws Exception {
-    HoodieTestUtils.fs.delete(new Path(this.metaClient.getBasePath()), true);
+    metaClient.getFs().delete(new Path(this.metaClient.getBasePath()), true);
   }
   @Test

@@ -45,7 +45,6 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.junit.Before;
 import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
 @SuppressWarnings("ResultOfMethodCallIgnored")
 public class HoodieTableFileSystemViewTest {
@@ -58,10 +57,8 @@ public class HoodieTableFileSystemViewTest {
   @Before
   public void init() throws IOException {
-    TemporaryFolder folder = new TemporaryFolder();
-    folder.create();
-    this.basePath = folder.getRoot().getAbsolutePath();
-    metaClient = HoodieTestUtils.init(basePath);
+    metaClient = HoodieTestUtils.initOnTemp();
+    basePath = metaClient.getBasePath();
     fsView = new HoodieTableFileSystemView(metaClient,
         metaClient.getActiveTimeline().getCommitTimeline().filterCompletedInstants());
     roView = (TableFileSystemView.ReadOptimizedView) fsView;
@@ -69,7 +66,7 @@
   }
   private void refreshFsView(FileStatus[] statuses) {
-    metaClient = new HoodieTableMetaClient(HoodieTestUtils.fs.getConf(), basePath, true);
+    metaClient = new HoodieTableMetaClient(metaClient.getHadoopConf(), basePath, true);
     if (statuses != null) {
       fsView = new HoodieTableFileSystemView(metaClient,
           metaClient.getActiveTimeline().getCommitTimeline().filterCompletedInstants(),
@@ -184,7 +181,7 @@
     new File(basePath + "/.hoodie/" + commitTime4 + ".commit").createNewFile();
     // Now we list the entire partition
-    FileStatus[] statuses = HoodieTestUtils.fs.listStatus(new Path(fullPartitionPath));
+    FileStatus[] statuses = metaClient.getFs().listStatus(new Path(fullPartitionPath));
     assertEquals(11, statuses.length);
     refreshFsView(null);
@@ -285,7 +282,7 @@
     new File(basePath + "/.hoodie/" + commitTime4 + ".commit").createNewFile();
     // Now we list the entire partition
-    FileStatus[] statuses = HoodieTestUtils.fs.listStatus(new Path(fullPartitionPath));
+    FileStatus[] statuses = metaClient.getFs().listStatus(new Path(fullPartitionPath));
     assertEquals(7, statuses.length);
     refreshFsView(null);
@@ -359,7 +356,7 @@
     new File(basePath + "/.hoodie/" + commitTime4 + ".commit").createNewFile();
     // Now we list the entire partition
-    FileStatus[] statuses = HoodieTestUtils.fs.listStatus(new Path(fullPartitionPath));
+    FileStatus[] statuses = metaClient.getFs().listStatus(new Path(fullPartitionPath));
     assertEquals(9, statuses.length);
     refreshFsView(statuses);
@@ -430,7 +427,7 @@
     new File(basePath + "/.hoodie/" + commitTime4 + ".commit").createNewFile();
     // Now we list the entire partition
-    FileStatus[] statuses = HoodieTestUtils.fs.listStatus(new Path(fullPartitionPath));
+    FileStatus[] statuses = metaClient.getFs().listStatus(new Path(fullPartitionPath));
     assertEquals(7, statuses.length);
     refreshFsView(null);
@@ -492,7 +489,7 @@
     new File(basePath + "/.hoodie/" + commitTime4 + ".commit").createNewFile();
     // Now we list the entire partition
-    FileStatus[] statuses = HoodieTestUtils.fs.listStatus(new Path(fullPartitionPath));
+    FileStatus[] statuses = metaClient.getFs().listStatus(new Path(fullPartitionPath));
     assertEquals(10, statuses.length);
     refreshFsView(statuses);

@@ -39,7 +39,9 @@ public class InputFormatTestUtil {
   public static File prepareDataset(TemporaryFolder basePath, int numberOfFiles,
       String commitNumber) throws IOException {
     basePath.create();
-    HoodieTestUtils.init(basePath.getRoot().toString());
+    HoodieTestUtils
+        .init(FSUtils.getFs(basePath.getRoot().toString(), HoodieTestUtils.getDefaultHadoopConf()),
+            basePath.getRoot().toString());
     File partitionPath = basePath.newFolder("2016", "05", "01");
     for (int i = 0; i < numberOfFiles; i++) {
       File dataFile =
@@ -99,7 +101,9 @@
       int numberOfFiles, int numberOfRecords,
       String commitNumber) throws IOException {
     basePath.create();
-    HoodieTestUtils.init(basePath.getRoot().toString());
+    HoodieTestUtils
+        .init(FSUtils.getFs(basePath.getRoot().toString(), HoodieTestUtils.getDefaultHadoopConf()),
+            basePath.getRoot().toString());
     File partitionPath = basePath.newFolder("2016", "05", "01");
     AvroParquetWriter parquetWriter;
     for (int i = 0; i < numberOfFiles; i++) {

@@ -72,7 +72,6 @@ public class HoodieRealtimeRecordReaderTest {
     jobConf = new JobConf();
     fs = FSUtils
         .getFs(basePath.getRoot().getAbsolutePath(), HoodieTestUtils.getDefaultHadoopConf());
-    HoodieTestUtils.fs = fs;
   }
   @Rule
@@ -105,7 +104,7 @@ public class HoodieRealtimeRecordReaderTest {
     // initial commit
     Schema schema = HoodieAvroUtils.addMetadataFields(SchemaTestUtil.getEvolvedSchema());
     HoodieTestUtils
-        .initTableType(basePath.getRoot().getAbsolutePath(), HoodieTableType.MERGE_ON_READ);
+        .initTableType(fs, basePath.getRoot().getAbsolutePath(), HoodieTableType.MERGE_ON_READ);
     String commitTime = "100";
     File partitionDir = InputFormatTestUtil
         .prepareParquetDataset(basePath, schema, 1, 100, commitTime);
@@ -163,7 +162,7 @@ public class HoodieRealtimeRecordReaderTest {
     // initial commit
     Schema schema = HoodieAvroUtils.addMetadataFields(SchemaTestUtil.getComplexEvolvedSchema());
     HoodieTestUtils
-        .initTableType(basePath.getRoot().getAbsolutePath(), HoodieTableType.MERGE_ON_READ);
+        .initTableType(fs, basePath.getRoot().getAbsolutePath(), HoodieTableType.MERGE_ON_READ);
     String commitTime = "100";
     int numberOfRecords = 100;
     int numberOfLogRecords = numberOfRecords / 2;

@@ -27,7 +27,6 @@ import com.uber.hoodie.common.minicluster.HdfsTestService;
 import com.uber.hoodie.common.model.HoodieTestUtils;
 import com.uber.hoodie.common.table.HoodieTimeline;
 import com.uber.hoodie.common.table.timeline.HoodieActiveTimeline;
-import com.uber.hoodie.common.util.FSUtils;
 import java.io.IOException;
 import java.io.Serializable;
 import java.text.ParseException;
@@ -71,7 +70,6 @@ public class TestHDFSParquetImporter implements Serializable {
     dfs = dfsCluster.getFileSystem();
     dfsBasePath = dfs.getWorkingDirectory().toString();
     dfs.mkdirs(new Path(dfsBasePath));
-    FSUtils.setFs(dfs);
   }
   @AfterClass
@@ -79,7 +77,6 @@
     if (hdfsTestService != null) {
       hdfsTestService.stop();
     }
-    FSUtils.setFs(null);
   }
   /**

@@ -53,7 +53,7 @@ public class TestHoodieSnapshotCopier {
     outputPath = rootPath + "/output";
     fs = FSUtils.getFs(basePath, HoodieTestUtils.getDefaultHadoopConf());
-    HoodieTestUtils.init(basePath);
+    HoodieTestUtils.init(fs, basePath);
     // Start a local Spark job
     SparkConf conf = new SparkConf().setAppName("snapshot-test-job").setMaster("local[2]");
     jsc = new JavaSparkContext(conf);