Use hadoopConf in HoodieTableMetaClient and related tests
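
Every hunk below makes the same call-site change: test setup that previously resolved a FileSystem and handed it to the table and test helpers now passes the Hadoop Configuration directly, keeping a FileSystem only where the test itself still needs one. A minimal sketch of the pattern, using only identifiers that appear in the diff (Hudi test-utility imports are omitted because the file headers are not part of this excerpt):

    // Shared setup in the @Before methods of the affected tests.
    Configuration hadoopConf = HoodieTestUtils.getDefaultHadoopConf();
    FileSystem fs = FSUtils.getFs(basePath, hadoopConf);

    // Old call shape: the helper took a FileSystem.
    // HoodieTestUtils.init(fs, basePath);

    // New call shape: the helper takes the Hadoop Configuration itself.
    HoodieTestUtils.init(hadoopConf, basePath);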
@@ -105,7 +105,7 @@ public class TestHoodieClientOnCopyOnWriteStorage implements Serializable {
     folder.create();
     basePath = folder.getRoot().getAbsolutePath();
     fs = FSUtils.getFs(basePath.toString(), jsc.hadoopConfiguration());
-    HoodieTestUtils.init(fs, basePath);
+    HoodieTestUtils.init(jsc.hadoopConfiguration(), basePath);
     dataGen = new HoodieTestDataGenerator();
   }
 
@@ -1258,7 +1258,7 @@ public class TestHoodieClientOnCopyOnWriteStorage implements Serializable {
         .retainFileVersions(1).build()).build();
 
     HoodieTableMetaClient metaClient = HoodieTestUtils
-        .initTableType(fs, basePath, HoodieTableType.MERGE_ON_READ);
+        .initTableType(jsc.hadoopConfiguration(), basePath, HoodieTableType.MERGE_ON_READ);
 
     // Make 3 files, one base file and 2 log files associated with base file
     String file1P0 = HoodieTestUtils.createNewDataFile(basePath, partitionPaths[0], "000");
@@ -103,9 +103,8 @@ public class TestMultiFS implements Serializable {
     HoodieTestDataGenerator dataGen = new HoodieTestDataGenerator();
 
     // Initialize table and filesystem
-    FileSystem hdfs = FSUtils.getFs(dfsBasePath, jsc.hadoopConfiguration());
     HoodieTableMetaClient
-        .initTableType(hdfs, dfsBasePath, HoodieTableType.valueOf(tableType), tableName,
+        .initTableType(jsc.hadoopConfiguration(), dfsBasePath, HoodieTableType.valueOf(tableType), tableName,
             HoodieAvroPayload.class.getName());
 
     //Create write client to write some records in
@@ -133,9 +132,8 @@ public class TestMultiFS implements Serializable {
     assertEquals("Should contain 100 records", readRecords.count(), records.size());
 
     // Write to local
-    FileSystem local = FSUtils.getFs(tablePath, jsc.hadoopConfiguration());
     HoodieTableMetaClient
-        .initTableType(local, tablePath, HoodieTableType.valueOf(tableType), tableName,
+        .initTableType(jsc.hadoopConfiguration(), tablePath, HoodieTableType.valueOf(tableType), tableName,
             HoodieAvroPayload.class.getName());
     HoodieWriteConfig localConfig = HoodieWriteConfig.newBuilder().withPath(tablePath)
         .withSchema(HoodieTestDataGenerator.TRIP_EXAMPLE_SCHEMA).withParallelism(2, 2)
@@ -48,7 +48,7 @@ public class TestUpdateMapFunction {
     TemporaryFolder folder = new TemporaryFolder();
     folder.create();
     this.basePath = folder.getRoot().getAbsolutePath();
-    HoodieTestUtils.init(FSUtils.getFs(basePath, HoodieTestUtils.getDefaultHadoopConf()), basePath);
+    HoodieTestUtils.init(HoodieTestUtils.getDefaultHadoopConf(), basePath);
   }
 
   @Test
@@ -112,7 +112,7 @@ public class TestHbaseIndex {
     basePath = folder.getRoot().getAbsolutePath();
     // Initialize table
     metaClient = HoodieTableMetaClient
-        .initTableType(utility.getTestFileSystem(), basePath, HoodieTableType.COPY_ON_WRITE,
+        .initTableType(utility.getConfiguration(), basePath, HoodieTableType.COPY_ON_WRITE,
            tableName, HoodieTableConfig.DEFAULT_PAYLOAD_CLASS);
   }
 
@@ -88,7 +88,7 @@ public class TestHoodieBloomIndex {
     folder.create();
     basePath = folder.getRoot().getAbsolutePath();
     fs = FSUtils.getFs(basePath, jsc.hadoopConfiguration());
-    HoodieTestUtils.init(fs, basePath);
+    HoodieTestUtils.init(jsc.hadoopConfiguration(), basePath);
     // We have some records to be tagged (two different partitions)
     schemaStr = IOUtils.toString(getClass().getResourceAsStream("/exampleSchema.txt"), "UTF-8");
     schema = HoodieAvroUtils.addMetadataFields(new Schema.Parser().parse(schemaStr));
@@ -39,6 +39,7 @@ import java.util.List;
 import java.util.stream.Collectors;
 import org.apache.avro.generic.GenericRecord;
 import org.apache.avro.generic.IndexedRecord;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.junit.Before;
@@ -49,14 +50,16 @@ public class TestHoodieCommitArchiveLog {
 
   private String basePath;
   private FileSystem fs;
+  private Configuration hadoopConf;
 
   @Before
   public void init() throws Exception {
     TemporaryFolder folder = new TemporaryFolder();
     folder.create();
     basePath = folder.getRoot().getAbsolutePath();
-    fs = FSUtils.getFs(basePath, HoodieTestUtils.getDefaultHadoopConf());
-    HoodieTestUtils.init(fs, basePath);
+    hadoopConf = HoodieTestUtils.getDefaultHadoopConf();
+    fs = FSUtils.getFs(basePath, hadoopConf);
+    HoodieTestUtils.init(hadoopConf, basePath);
   }
 
   @Test
@@ -76,7 +79,7 @@ public class TestHoodieCommitArchiveLog {
         .withSchema(HoodieTestDataGenerator.TRIP_EXAMPLE_SCHEMA).withParallelism(2, 2)
         .withCompactionConfig(HoodieCompactionConfig.newBuilder().archiveCommitsWith(2, 4).build())
         .forTable("test-trip-table").build();
-    HoodieTestUtils.init(fs, basePath);
+    HoodieTestUtils.init(hadoopConf, basePath);
     HoodieTestDataGenerator.createCommitFile(basePath, "100");
     HoodieTestDataGenerator.createCommitFile(basePath, "101");
     HoodieTestDataGenerator.createCommitFile(basePath, "102");
@@ -43,6 +43,7 @@ import java.io.File;
 import java.io.IOException;
 import java.util.List;
 import java.util.stream.Collectors;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.spark.api.java.JavaRDD;
 import org.apache.spark.api.java.JavaSparkContext;
@@ -58,6 +59,7 @@ public class TestHoodieCompactor {
   private HoodieCompactor compactor;
   private transient HoodieTestDataGenerator dataGen = null;
   private transient FileSystem fs;
+  private Configuration hadoopConf;
 
   @Before
   public void init() throws IOException {
@@ -68,8 +70,9 @@ public class TestHoodieCompactor {
     TemporaryFolder folder = new TemporaryFolder();
     folder.create();
     basePath = folder.getRoot().getAbsolutePath();
-    fs = FSUtils.getFs(basePath, HoodieTestUtils.getDefaultHadoopConf());
-    HoodieTestUtils.initTableType(fs, basePath, HoodieTableType.MERGE_ON_READ);
+    hadoopConf = HoodieTestUtils.getDefaultHadoopConf();
+    fs = FSUtils.getFs(basePath, hadoopConf);
+    HoodieTestUtils.initTableType(hadoopConf, basePath, HoodieTableType.MERGE_ON_READ);
 
     dataGen = new HoodieTestDataGenerator();
     compactor = new HoodieRealtimeTableCompactor();
@@ -102,7 +105,7 @@ public class TestHoodieCompactor {
 
   @Test(expected = IllegalArgumentException.class)
   public void testCompactionOnCopyOnWriteFail() throws Exception {
-    HoodieTestUtils.initTableType(fs, basePath, HoodieTableType.COPY_ON_WRITE);
+    HoodieTestUtils.initTableType(hadoopConf, basePath, HoodieTableType.COPY_ON_WRITE);
     HoodieTableMetaClient metaClient = new HoodieTableMetaClient(jsc.hadoopConfiguration(),
         basePath);
     HoodieTable table = HoodieTable.getHoodieTable(metaClient, getConfig());
@@ -76,7 +76,7 @@ public class TestCopyOnWriteTable {
     TemporaryFolder folder = new TemporaryFolder();
     folder.create();
     this.basePath = folder.getRoot().getAbsolutePath();
-    HoodieTestUtils.init(FSUtils.getFs(basePath, jsc.hadoopConfiguration()), basePath);
+    HoodieTestUtils.init(jsc.hadoopConfiguration(), basePath);
   }
 
   @Test
@@ -121,7 +121,7 @@ public class TestMergeOnReadTable {
     jsc.hadoopConfiguration().addResource(dfs.getConf());
 
     dfs.mkdirs(new Path(basePath));
-    HoodieTestUtils.initTableType(dfs, basePath, HoodieTableType.MERGE_ON_READ);
+    HoodieTestUtils.initTableType(jsc.hadoopConfiguration(), basePath, HoodieTableType.MERGE_ON_READ);
 
     sqlContext = new SQLContext(jsc); // SQLContext stuff
   }
@@ -346,7 +346,7 @@ public class TestMergeOnReadTable {
   public void testCOWToMORConvertedDatasetRollback() throws Exception {
 
     //Set TableType to COW
-    HoodieTestUtils.initTableType(dfs, basePath, HoodieTableType.COPY_ON_WRITE);
+    HoodieTestUtils.initTableType(jsc.hadoopConfiguration(), basePath, HoodieTableType.COPY_ON_WRITE);
 
     HoodieWriteConfig cfg = getConfig(true);
     HoodieWriteClient client = new HoodieWriteClient(jsc, cfg);
@@ -385,7 +385,7 @@ public class TestMergeOnReadTable {
     assertNoWriteErrors(statuses);
 
     //Set TableType to MOR
-    HoodieTestUtils.initTableType(dfs, basePath, HoodieTableType.MERGE_ON_READ);
+    HoodieTestUtils.initTableType(jsc.hadoopConfiguration(), basePath, HoodieTableType.MERGE_ON_READ);
 
     //rollback a COW commit when TableType is MOR
     client.rollback(newCommitTime);