Remove stateful fs member from HoodieTestUtils & FSUtils
committed by vinoth chandar
parent cf7f7aabb9
commit 21ce846f18
FSUtils.java
@@ -16,7 +16,6 @@
 
 package com.uber.hoodie.common.util;
 
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.uber.hoodie.common.model.HoodieLogFile;
 import com.uber.hoodie.common.model.HoodiePartitionMetadata;
@@ -57,15 +56,6 @@ public class FSUtils {
   private static final long MIN_CLEAN_TO_KEEP = 10;
   private static final long MIN_ROLLBACK_TO_KEEP = 10;
   private static final String HOODIE_ENV_PROPS_PREFIX = "HOODIE_ENV_";
-  private static FileSystem fs;
-
-  /**
-   * Only to be used for testing.
-   */
-  @VisibleForTesting
-  public static void setFs(FileSystem fs) {
-    FSUtils.fs = fs;
-  }
 
   public static Configuration prepareHadoopConf(Configuration conf) {
     conf.set("fs.hdfs.impl", org.apache.hadoop.hdfs.DistributedFileSystem.class.getName());
@@ -86,9 +76,6 @@ public class FSUtils {
 
 
   public static FileSystem getFs(String path, Configuration conf) {
-    if (fs != null) {
-      return fs;
-    }
     FileSystem fs;
     conf = prepareHadoopConf(conf);
     try {
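With the static member and the setFs() test hook gone, getFs resolves the FileSystem from the path and the supplied Configuration on every call, so no state leaks between callers. A minimal sketch of the resulting call pattern (the file:// path is illustrative, not from the patch):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

import com.uber.hoodie.common.util.FSUtils;

public class GetFsSketch {
  public static void main(String[] args) {
    // Each call site supplies its own Configuration; nothing is cached statically.
    Configuration conf = new Configuration();
    // getFs prepares the conf itself and resolves the impl from the path's scheme.
    FileSystem fs = FSUtils.getFs("file:///tmp/hoodie-example", conf);
    System.out.println(fs.getUri());
  }
}
```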
HdfsTestService.java
@@ -55,7 +55,6 @@ public class HdfsTestService {
   private MiniDFSCluster miniDfsCluster;
 
   public HdfsTestService() {
-    hadoopConf = HoodieTestUtils.getDefaultHadoopConf();
     workDir = Files.createTempDir().getAbsolutePath();
   }
 
@@ -66,10 +65,7 @@ public class HdfsTestService {
   public MiniDFSCluster start(boolean format) throws IOException {
     Preconditions
         .checkState(workDir != null, "The work dir must be set before starting cluster.");
-
-    if (hadoopConf == null) {
-      hadoopConf = HoodieTestUtils.getDefaultHadoopConf();
-    }
+    hadoopConf = HoodieTestUtils.getDefaultHadoopConf();
 
     // If clean, then remove the work dir so we can start fresh.
     String localDFSLocation = getDFSLocation(workDir);
@@ -91,8 +87,8 @@
   }
 
   public void stop() throws IOException {
     logger.info("HDFS Minicluster service being shut down.");
     miniDfsCluster.shutdown();
-    logger.info("HDFS Minicluster service shut down.");
     miniDfsCluster = null;
+    hadoopConf = null;
   }
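Reading this hunk together with the constructor change: start() now rebuilds the Hadoop conf unconditionally and stop() clears it, so a service instance can be restarted without carrying stale configuration. A usage sketch under that assumption (the try/finally shape and the path are illustrative):

```java
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDfsLifecycleSketch {
  public static void main(String[] args) throws Exception {
    // HdfsTestService is the class patched above; its package import is omitted here.
    HdfsTestService service = new HdfsTestService();
    MiniDFSCluster cluster = service.start(true); // format == true: remove work dir, start fresh
    try {
      FileSystem dfs = cluster.getFileSystem();   // handle into the minicluster
      dfs.mkdirs(new Path("/tmp/lifecycle-sketch"));
    } finally {
      service.stop(); // shuts the cluster down and nulls miniDfsCluster / hadoopConf
    }
  }
}
```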
HoodieTestUtils.java
@@ -70,27 +70,22 @@ import org.junit.rules.TemporaryFolder;
 
 public class HoodieTestUtils {
 
-  public static FileSystem fs;
   public static final String TEST_EXTENSION = ".test";
   public static final String RAW_TRIPS_TEST_NAME = "raw_trips";
   public static final int DEFAULT_TASK_PARTITIONID = 1;
   public static final String[] DEFAULT_PARTITION_PATHS = {"2016/03/15", "2015/03/16", "2015/03/17"};
   private static Random rand = new Random(46474747);
 
-  public static void resetFS(String basePath) {
-    HoodieTestUtils.fs = FSUtils.getFs(basePath, HoodieTestUtils.getDefaultHadoopConf());
-  }
-
   public static Configuration getDefaultHadoopConf() {
     return new Configuration();
   }
 
-  public static HoodieTableMetaClient init(String basePath) throws IOException {
-    fs = FSUtils.getFs(basePath, HoodieTestUtils.getDefaultHadoopConf());
-    return initTableType(basePath, HoodieTableType.COPY_ON_WRITE);
+  public static HoodieTableMetaClient init(FileSystem fs, String basePath) throws IOException {
+    return initTableType(fs, basePath, HoodieTableType.COPY_ON_WRITE);
   }
 
-  public static HoodieTableMetaClient initTableType(String basePath, HoodieTableType tableType)
+  public static HoodieTableMetaClient initTableType(FileSystem fs, String basePath,
+      HoodieTableType tableType)
       throws IOException {
     Properties properties = new Properties();
     properties.setProperty(HoodieTableConfig.HOODIE_TABLE_NAME_PROP_NAME, RAW_TRIPS_TEST_NAME);
@@ -105,7 +100,8 @@ public class HoodieTestUtils {
     TemporaryFolder folder = new TemporaryFolder();
     folder.create();
     String basePath = folder.getRoot().getAbsolutePath();
-    return HoodieTestUtils.init(basePath);
+    return HoodieTestUtils
+        .init(FSUtils.getFs(basePath, HoodieTestUtils.getDefaultHadoopConf()), basePath);
   }
 
   public static String makeNewCommitTime() {
@@ -143,7 +139,7 @@ public class HoodieTestUtils {
     return fileID;
   }
 
-  public static final String createNewLogFile(String basePath, String partitionPath,
+  public static final String createNewLogFile(FileSystem fs, String basePath, String partitionPath,
       String commitTime, String fileID, Optional<Integer> version) throws IOException {
     String folderPath = basePath + "/" + partitionPath + "/";
     boolean makeDir = fs.mkdirs(new Path(folderPath));
@@ -159,7 +155,8 @@
     return fileID;
   }
 
-  public static final void createCompactionCommitFiles(String basePath, String... commitTimes)
+  public static final void createCompactionCommitFiles(FileSystem fs, String basePath,
+      String... commitTimes)
       throws IOException {
     for (String commitTime : commitTimes) {
       boolean createFile = fs.createNewFile(new Path(
@@ -268,7 +265,7 @@ public class HoodieTestUtils {
     return deseralizedObject;
   }
 
-  public static void writeRecordsToLogFiles(String basePath, Schema schema,
+  public static void writeRecordsToLogFiles(FileSystem fs, String basePath, Schema schema,
       List<HoodieRecord> updatedRecords) {
     Map<HoodieRecordLocation, List<HoodieRecord>> groupedUpdated = updatedRecords.stream()
         .collect(Collectors.groupingBy(HoodieRecord::getCurrentLocation));
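Under the new signatures each test builds its FileSystem once and passes it through; subsequent filesystem access goes through the meta client rather than a shared static field. A sketch of that flow, assuming a local temp directory (directory name and table type here are illustrative):

```java
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import com.uber.hoodie.common.model.HoodieTableType;
import com.uber.hoodie.common.table.HoodieTableMetaClient;
import com.uber.hoodie.common.util.FSUtils;

public class ExplicitFsSetupSketch {
  public static void main(String[] args) throws Exception {
    // HoodieTestUtils import omitted; it is the test utility class patched above.
    String basePath = java.nio.file.Files.createTempDirectory("hoodie").toString();
    FileSystem fs = FSUtils.getFs(basePath, HoodieTestUtils.getDefaultHadoopConf());

    // The FileSystem travels as an argument, not as hidden static state.
    HoodieTableMetaClient metaClient =
        HoodieTestUtils.initTableType(fs, basePath, HoodieTableType.MERGE_ON_READ);

    // Later access hangs off the meta client, as the test diffs below show.
    FileStatus[] statuses = metaClient.getFs().listStatus(new Path(basePath));
    System.out.println(statuses.length);
  }
}
```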
HoodieTableMetaClientTest.java
@@ -36,7 +36,6 @@ import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.Text;
 import org.junit.Before;
 import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
 
 public class HoodieTableMetaClientTest {
 
@@ -45,10 +44,8 @@ public class HoodieTableMetaClientTest {
 
   @Before
   public void init() throws IOException {
-    TemporaryFolder folder = new TemporaryFolder();
-    folder.create();
-    this.basePath = folder.getRoot().getAbsolutePath();
-    metaClient = HoodieTestUtils.init(basePath);
+    metaClient = HoodieTestUtils.initOnTemp();
+    basePath = metaClient.getBasePath();
   }
 
   @Test
@@ -109,7 +106,7 @@ public class HoodieTableMetaClientTest {
   public void checkArchiveCommitTimeline() throws IOException {
     Path archiveLogPath = HoodieArchivedTimeline.getArchiveLogPath(metaClient.getArchivePath());
     SequenceFile.Writer writer = SequenceFile
-        .createWriter(HoodieTestUtils.fs.getConf(), SequenceFile.Writer.file(archiveLogPath),
+        .createWriter(metaClient.getHadoopConf(), SequenceFile.Writer.file(archiveLogPath),
            SequenceFile.Writer.keyClass(Text.class),
            SequenceFile.Writer.valueClass(Text.class));
 
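The archived-timeline check now takes its Configuration from the meta client instead of the removed HoodieTestUtils.fs. A compact sketch of writing one entry with such a conf (key and value contents are illustrative):

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;

public class ArchiveWriteSketch {
  // In the test, conf comes from metaClient.getHadoopConf().
  static void writeOne(Configuration conf, Path archiveLogPath) throws IOException {
    try (SequenceFile.Writer writer = SequenceFile.createWriter(conf,
        SequenceFile.Writer.file(archiveLogPath),
        SequenceFile.Writer.keyClass(Text.class),
        SequenceFile.Writer.valueClass(Text.class))) {
      writer.append(new Text("commitTime"), new Text("archived-metadata"));
    }
  }
}
```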
HoodieLogFormatTest.java
@@ -80,7 +80,6 @@ public class HoodieLogFormatTest {
   @AfterClass
   public static void tearDownClass() {
     MiniClusterUtil.shutdown();
-    HoodieTestUtils.resetFS(basePath);
   }
 
   @Before
@@ -91,8 +90,7 @@
     assertTrue(fs.mkdirs(new Path(folder.getRoot().getPath())));
     this.partitionPath = new Path(folder.getRoot().getPath());
     this.basePath = folder.getRoot().getParent();
-    HoodieTestUtils.fs = fs;
-    HoodieTestUtils.initTableType(basePath, HoodieTableType.MERGE_ON_READ);
+    HoodieTestUtils.initTableType(fs, basePath, HoodieTableType.MERGE_ON_READ);
   }
 
   @After
HoodieActiveTimelineTest.java
@@ -49,7 +49,7 @@ public class HoodieActiveTimelineTest {
 
   @After
   public void tearDown() throws Exception {
-    HoodieTestUtils.fs.delete(new Path(this.metaClient.getBasePath()), true);
+    metaClient.getFs().delete(new Path(this.metaClient.getBasePath()), true);
   }
 
   @Test
HoodieTableFileSystemViewTest.java
@@ -45,7 +45,6 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.junit.Before;
 import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
 
 @SuppressWarnings("ResultOfMethodCallIgnored")
 public class HoodieTableFileSystemViewTest {
@@ -58,10 +57,8 @@ public class HoodieTableFileSystemViewTest {
 
   @Before
   public void init() throws IOException {
-    TemporaryFolder folder = new TemporaryFolder();
-    folder.create();
-    this.basePath = folder.getRoot().getAbsolutePath();
-    metaClient = HoodieTestUtils.init(basePath);
+    metaClient = HoodieTestUtils.initOnTemp();
+    basePath = metaClient.getBasePath();
     fsView = new HoodieTableFileSystemView(metaClient,
         metaClient.getActiveTimeline().getCommitTimeline().filterCompletedInstants());
     roView = (TableFileSystemView.ReadOptimizedView) fsView;
@@ -69,7 +66,7 @@ public class HoodieTableFileSystemViewTest {
   }
 
   private void refreshFsView(FileStatus[] statuses) {
-    metaClient = new HoodieTableMetaClient(HoodieTestUtils.fs.getConf(), basePath, true);
+    metaClient = new HoodieTableMetaClient(metaClient.getHadoopConf(), basePath, true);
     if (statuses != null) {
       fsView = new HoodieTableFileSystemView(metaClient,
           metaClient.getActiveTimeline().getCommitTimeline().filterCompletedInstants(),
@@ -184,7 +181,7 @@ public class HoodieTableFileSystemViewTest {
     new File(basePath + "/.hoodie/" + commitTime4 + ".commit").createNewFile();
 
     // Now we list the entire partition
-    FileStatus[] statuses = HoodieTestUtils.fs.listStatus(new Path(fullPartitionPath));
+    FileStatus[] statuses = metaClient.getFs().listStatus(new Path(fullPartitionPath));
     assertEquals(11, statuses.length);
     refreshFsView(null);
 
@@ -285,7 +282,7 @@ public class HoodieTableFileSystemViewTest {
     new File(basePath + "/.hoodie/" + commitTime4 + ".commit").createNewFile();
 
     // Now we list the entire partition
-    FileStatus[] statuses = HoodieTestUtils.fs.listStatus(new Path(fullPartitionPath));
+    FileStatus[] statuses = metaClient.getFs().listStatus(new Path(fullPartitionPath));
     assertEquals(7, statuses.length);
 
     refreshFsView(null);
@@ -359,7 +356,7 @@ public class HoodieTableFileSystemViewTest {
     new File(basePath + "/.hoodie/" + commitTime4 + ".commit").createNewFile();
 
     // Now we list the entire partition
-    FileStatus[] statuses = HoodieTestUtils.fs.listStatus(new Path(fullPartitionPath));
+    FileStatus[] statuses = metaClient.getFs().listStatus(new Path(fullPartitionPath));
     assertEquals(9, statuses.length);
 
     refreshFsView(statuses);
@@ -430,7 +427,7 @@ public class HoodieTableFileSystemViewTest {
     new File(basePath + "/.hoodie/" + commitTime4 + ".commit").createNewFile();
 
     // Now we list the entire partition
-    FileStatus[] statuses = HoodieTestUtils.fs.listStatus(new Path(fullPartitionPath));
+    FileStatus[] statuses = metaClient.getFs().listStatus(new Path(fullPartitionPath));
     assertEquals(7, statuses.length);
 
     refreshFsView(null);
@@ -492,7 +489,7 @@ public class HoodieTableFileSystemViewTest {
     new File(basePath + "/.hoodie/" + commitTime4 + ".commit").createNewFile();
 
     // Now we list the entire partition
-    FileStatus[] statuses = HoodieTestUtils.fs.listStatus(new Path(fullPartitionPath));
+    FileStatus[] statuses = metaClient.getFs().listStatus(new Path(fullPartitionPath));
     assertEquals(10, statuses.length);
 
     refreshFsView(statuses);