HUDI-267 Refactor bad method names HoodieTestUtils#initTableType and HoodieTableMetaClient#initializePathAsHoodieDataset (#916)
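This is a pure rename: call sites keep the same arguments and return types, only the method names shorten. A minimal before/after sketch of the two renames (illustrative only; hadoopConf, basePath, and properties are assumed to be in scope, and imports are elided because package locations differ across Hudi versions of this era):

// Before this commit (sketch)
HoodieTableMetaClient metaClient =
    HoodieTestUtils.initTableType(hadoopConf, basePath, HoodieTableType.MERGE_ON_READ);
HoodieTableMetaClient metaClientFromProps =
    HoodieTableMetaClient.initializePathAsHoodieDataset(hadoopConf, basePath, properties);

// After this commit (sketch)
HoodieTableMetaClient metaClient =
    HoodieTestUtils.init(hadoopConf, basePath, HoodieTableType.MERGE_ON_READ);
HoodieTableMetaClient metaClientFromProps =
    HoodieTableMetaClient.initDatasetAndGetMetaClient(hadoopConf, basePath, properties);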
@@ -200,7 +200,7 @@ public abstract class HoodieClientTestHarness implements Serializable {
throw new IllegalStateException("The Spark context has not been initialized.");
}

-HoodieTestUtils.initTableType(jsc.hadoopConfiguration(), basePath, getTableType());
+HoodieTestUtils.init(jsc.hadoopConfiguration(), basePath, getTableType());
}

/**
@@ -515,7 +515,7 @@ public class TestCleaner extends TestHoodieClientBase {
HoodieCleaningPolicy.KEEP_LATEST_FILE_VERSIONS).retainFileVersions(1).build())
.build();

-HoodieTableMetaClient metaClient = HoodieTestUtils.initTableType(jsc.hadoopConfiguration(), basePath,
+HoodieTableMetaClient metaClient = HoodieTestUtils.init(jsc.hadoopConfiguration(), basePath,
HoodieTableType.MERGE_ON_READ);

// Make 3 files, one base file and 2 log files associated with base file
@@ -858,7 +858,7 @@ public class TestCleaner extends TestHoodieClientBase {
*/
public void testPendingCompactions(HoodieWriteConfig config, int expNumFilesDeleted,
int expNumFilesUnderCompactionDeleted) throws IOException {
-HoodieTableMetaClient metaClient = HoodieTestUtils.initTableType(jsc.hadoopConfiguration(), basePath,
+HoodieTableMetaClient metaClient = HoodieTestUtils.init(jsc.hadoopConfiguration(), basePath,
HoodieTableType.MERGE_ON_READ);
String[] instants = new String[]{"000", "001", "003", "005", "007", "009", "011", "013"};
String[] compactionInstants = new String[]{"002", "004", "006", "008", "010"};
@@ -53,7 +53,7 @@ public class TestCompactionAdminClient extends TestHoodieClientBase {
public void setUp() throws Exception {
initTempFolderAndPath();
initSparkContexts();
-metaClient = HoodieTestUtils.initTableType(HoodieTestUtils.getDefaultHadoopConf(), basePath, MERGE_ON_READ);
+metaClient = HoodieTestUtils.init(HoodieTestUtils.getDefaultHadoopConf(), basePath, MERGE_ON_READ);
client = new CompactionAdminClient(jsc, basePath);
}
@@ -62,7 +62,7 @@ public class TestHoodieCompactor extends HoodieClientTestHarness {
initTempFolderAndPath();
hadoopConf = HoodieTestUtils.getDefaultHadoopConf();
fs = FSUtils.getFs(basePath, hadoopConf);
-HoodieTestUtils.initTableType(hadoopConf, basePath, HoodieTableType.MERGE_ON_READ);
+HoodieTestUtils.init(hadoopConf, basePath, HoodieTableType.MERGE_ON_READ);
initTestDataGenerator();
}
@@ -96,7 +96,7 @@ public class TestHoodieCompactor extends HoodieClientTestHarness {

@Test(expected = HoodieNotSupportedException.class)
public void testCompactionOnCopyOnWriteFail() throws Exception {
-HoodieTestUtils.initTableType(hadoopConf, basePath, HoodieTableType.COPY_ON_WRITE);
+HoodieTestUtils.init(hadoopConf, basePath, HoodieTableType.COPY_ON_WRITE);
HoodieTableMetaClient metaClient = new HoodieTableMetaClient(jsc.hadoopConfiguration(), basePath);

HoodieTable table = HoodieTable.getHoodieTable(metaClient, getConfig(), jsc);
@@ -85,7 +85,7 @@ public class TestMergeOnReadTable extends HoodieClientTestHarness {
jsc.hadoopConfiguration().addResource(dfs.getConf());
initTempFolderAndPath();
dfs.mkdirs(new Path(basePath));
-HoodieTestUtils.initTableType(jsc.hadoopConfiguration(), basePath, HoodieTableType.MERGE_ON_READ);
+HoodieTestUtils.init(jsc.hadoopConfiguration(), basePath, HoodieTableType.MERGE_ON_READ);
initTestDataGenerator();
}
@@ -294,7 +294,7 @@ public class TestMergeOnReadTable extends HoodieClientTestHarness {
public void testCOWToMORConvertedDatasetRollback() throws Exception {

//Set TableType to COW
-HoodieTestUtils.initTableType(jsc.hadoopConfiguration(), basePath, HoodieTableType.COPY_ON_WRITE);
+HoodieTestUtils.init(jsc.hadoopConfiguration(), basePath, HoodieTableType.COPY_ON_WRITE);

HoodieWriteConfig cfg = getConfig(true);
try (HoodieWriteClient client = getWriteClient(cfg);) {
@@ -330,7 +330,7 @@ public class TestMergeOnReadTable extends HoodieClientTestHarness {
assertNoWriteErrors(statuses);

//Set TableType to MOR
-HoodieTestUtils.initTableType(jsc.hadoopConfiguration(), basePath, HoodieTableType.MERGE_ON_READ);
+HoodieTestUtils.init(jsc.hadoopConfiguration(), basePath, HoodieTableType.MERGE_ON_READ);

//rollback a COW commit when TableType is MOR
client.rollback(newCommitTime);
@@ -273,7 +273,7 @@ public class HoodieTableMetaClient implements Serializable {
properties.put(HoodieTableConfig.HOODIE_TABLE_NAME_PROP_NAME, tableName);
properties.put(HoodieTableConfig.HOODIE_TABLE_TYPE_PROP_NAME, type.name());
properties.put(HoodieTableConfig.HOODIE_ARCHIVELOG_FOLDER_PROP_NAME, archiveLogFolder);
-return HoodieTableMetaClient.initializePathAsHoodieDataset(hadoopConf, basePath, properties);
+return HoodieTableMetaClient.initDatasetAndGetMetaClient(hadoopConf, basePath, properties);
}

/**
@@ -287,7 +287,7 @@ public class HoodieTableMetaClient implements Serializable {
if (tableType == HoodieTableType.MERGE_ON_READ) {
properties.setProperty(HoodieTableConfig.HOODIE_PAYLOAD_CLASS_PROP_NAME, payloadClassName);
}
-return HoodieTableMetaClient.initializePathAsHoodieDataset(hadoopConf, basePath, properties);
+return HoodieTableMetaClient.initDatasetAndGetMetaClient(hadoopConf, basePath, properties);
}

/**
@@ -296,7 +296,7 @@ public class HoodieTableMetaClient implements Serializable {
*
* @return Instance of HoodieTableMetaClient
*/
-public static HoodieTableMetaClient initializePathAsHoodieDataset(Configuration hadoopConf,
+public static HoodieTableMetaClient initDatasetAndGetMetaClient(Configuration hadoopConf,
String basePath, Properties props) throws IOException {
log.info("Initializing " + basePath + " as hoodie dataset " + basePath);
Path basePathDir = new Path(basePath);
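For reference, the renamed static initializer is still driven by the same table properties that appear in the hunks above. A hedged usage sketch (hadoopConf, basePath, and the table name are assumed values; imports elided):

Properties properties = new Properties();
properties.put(HoodieTableConfig.HOODIE_TABLE_NAME_PROP_NAME, "test_table");  // assumed table name
properties.put(HoodieTableConfig.HOODIE_TABLE_TYPE_PROP_NAME, HoodieTableType.COPY_ON_WRITE.name());
HoodieTableMetaClient metaClient =
    HoodieTableMetaClient.initDatasetAndGetMetaClient(hadoopConf, basePath, properties);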
@@ -96,21 +96,21 @@ public class HoodieTestUtils {
}

public static HoodieTableMetaClient init(String basePath, HoodieTableType tableType) throws IOException {
-return initTableType(getDefaultHadoopConf(), basePath, tableType);
+return init(getDefaultHadoopConf(), basePath, tableType);
}

public static HoodieTableMetaClient init(Configuration hadoopConf, String basePath)
throws IOException {
-return initTableType(hadoopConf, basePath, HoodieTableType.COPY_ON_WRITE);
+return init(hadoopConf, basePath, HoodieTableType.COPY_ON_WRITE);
}

-public static HoodieTableMetaClient initTableType(Configuration hadoopConf, String basePath,
-HoodieTableType tableType) throws IOException {
+public static HoodieTableMetaClient init(Configuration hadoopConf, String basePath, HoodieTableType tableType)
+throws IOException {
Properties properties = new Properties();
properties.setProperty(HoodieTableConfig.HOODIE_TABLE_NAME_PROP_NAME, RAW_TRIPS_TEST_NAME);
properties.setProperty(HoodieTableConfig.HOODIE_TABLE_TYPE_PROP_NAME, tableType.name());
properties.setProperty(HoodieTableConfig.HOODIE_PAYLOAD_CLASS_PROP_NAME, HoodieAvroPayload.class.getName());
-return HoodieTableMetaClient.initializePathAsHoodieDataset(hadoopConf, basePath, properties);
+return HoodieTableMetaClient.initDatasetAndGetMetaClient(hadoopConf, basePath, properties);
}

public static String makeNewCommitTime() {
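After this change the test helper exposes a single overloaded init entry point instead of the mixed init/initTableType pair. A sketch of the three call shapes visible in the hunk above (basePath is an assumed temp-directory path; in practice a test would use only one of these per table):

// default Hadoop conf, explicit table type
HoodieTableMetaClient m1 = HoodieTestUtils.init(basePath, HoodieTableType.MERGE_ON_READ);
// explicit Hadoop conf, table type defaults to COPY_ON_WRITE
HoodieTableMetaClient m2 = HoodieTestUtils.init(HoodieTestUtils.getDefaultHadoopConf(), basePath);
// explicit Hadoop conf and table type
HoodieTableMetaClient m3 = HoodieTestUtils.init(HoodieTestUtils.getDefaultHadoopConf(), basePath, HoodieTableType.MERGE_ON_READ);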
@@ -113,7 +113,7 @@ public class HoodieLogFormatTest {
assertTrue(fs.mkdirs(new Path(folder.getRoot().getPath())));
this.partitionPath = new Path(folder.getRoot().getPath());
this.basePath = folder.getRoot().getParent();
-HoodieTestUtils.initTableType(MiniClusterUtil.configuration, basePath, HoodieTableType.MERGE_ON_READ);
+HoodieTestUtils.init(MiniClusterUtil.configuration, basePath, HoodieTableType.MERGE_ON_READ);
}

@After
@@ -65,7 +65,7 @@ public class TestCompactionUtils {

@Before
public void init() throws IOException {
-metaClient = HoodieTestUtils.initTableType(getDefaultHadoopConf(),
+metaClient = HoodieTestUtils.init(getDefaultHadoopConf(),
tmpFolder.getRoot().getAbsolutePath(), HoodieTableType.MERGE_ON_READ);
basePath = metaClient.getBasePath();
}
@@ -172,7 +172,7 @@ public class HoodieRealtimeRecordReaderTest {
public void testReader(boolean partitioned) throws Exception {
// initial commit
Schema schema = HoodieAvroUtils.addMetadataFields(SchemaTestUtil.getEvolvedSchema());
-HoodieTestUtils.initTableType(hadoopConf, basePath.getRoot().getAbsolutePath(),
+HoodieTestUtils.init(hadoopConf, basePath.getRoot().getAbsolutePath(),
HoodieTableType.MERGE_ON_READ);
String baseInstant = "100";
File partitionDir =
@@ -263,7 +263,7 @@ public class HoodieRealtimeRecordReaderTest {
public void testUnMergedReader() throws Exception {
// initial commit
Schema schema = HoodieAvroUtils.addMetadataFields(SchemaTestUtil.getEvolvedSchema());
-HoodieTestUtils.initTableType(hadoopConf, basePath.getRoot().getAbsolutePath(),
+HoodieTestUtils.init(hadoopConf, basePath.getRoot().getAbsolutePath(),
HoodieTableType.MERGE_ON_READ);
String commitTime = "100";
final int numRecords = 1000;
@@ -347,7 +347,7 @@ public class HoodieRealtimeRecordReaderTest {
public void testReaderWithNestedAndComplexSchema() throws Exception {
// initial commit
Schema schema = HoodieAvroUtils.addMetadataFields(SchemaTestUtil.getComplexEvolvedSchema());
-HoodieTestUtils.initTableType(hadoopConf, basePath.getRoot().getAbsolutePath(),
+HoodieTestUtils.init(hadoopConf, basePath.getRoot().getAbsolutePath(),
HoodieTableType.MERGE_ON_READ);
String commitTime = "100";
int numberOfRecords = 100;
@@ -489,7 +489,7 @@ public class HoodieRealtimeRecordReaderTest {
// initial commit
List<String> logFilePaths = new ArrayList<>();
Schema schema = HoodieAvroUtils.addMetadataFields(SchemaTestUtil.getSimpleSchema());
-HoodieTestUtils.initTableType(hadoopConf, basePath.getRoot().getAbsolutePath(),
+HoodieTestUtils.init(hadoopConf, basePath.getRoot().getAbsolutePath(),
HoodieTableType.MERGE_ON_READ);
String commitTime = "100";
int numberOfRecords = 100;
@@ -131,7 +131,7 @@ public class HDFSParquetImporter implements Serializable {
properties.put(HoodieTableConfig.HOODIE_TABLE_NAME_PROP_NAME, cfg.tableName);
properties.put(HoodieTableConfig.HOODIE_TABLE_TYPE_PROP_NAME, cfg.tableType);
HoodieTableMetaClient
-.initializePathAsHoodieDataset(jsc.hadoopConfiguration(), cfg.targetPath, properties);
+.initDatasetAndGetMetaClient(jsc.hadoopConfiguration(), cfg.targetPath, properties);

HoodieWriteClient client = UtilHelpers.createHoodieClient(jsc, cfg.targetPath, schemaStr,
cfg.parallelism, Option.empty(), props);