[HUDI-2472] Enabling metadata table for TestHoodieIndex test case (#4045)
- Enabling the metadata table for testSimpleGlobalIndexTagLocationWhenShouldUpdatePartitionPath. This is more of a test issue.
This commit is contained in:
committed by
GitHub
parent
a2c91a7a9b
commit
7f3b89fad7
@@ -100,6 +100,12 @@ public class HoodieWriteableTestTable extends HoodieMetadataTestTable {
|
||||
FileCreateUtils.createPartitionMetaFile(basePath, partition);
|
||||
String fileName = baseFileName(currentInstantTime, fileId);
|
||||
|
||||
Path baseFilePath = new Path(Paths.get(basePath, partition, fileName).toString());
|
||||
if (this.fs.exists(baseFilePath)) {
|
||||
LOG.warn("Deleting the existing base file " + baseFilePath);
|
||||
this.fs.delete(baseFilePath, true);
|
||||
}
|
||||
|
||||
if (HoodieTableConfig.BASE_FILE_FORMAT.defaultValue().equals(HoodieFileFormat.PARQUET)) {
|
||||
HoodieAvroWriteSupport writeSupport = new HoodieAvroWriteSupport(
|
||||
new AvroSchemaConverter().convert(schema), schema, Option.of(filter));
|
||||
|
||||
@@ -25,6 +25,7 @@ import org.apache.hudi.common.model.EmptyHoodieRecordPayload;
|
||||
import org.apache.hudi.common.model.HoodieKey;
|
||||
import org.apache.hudi.common.model.HoodieRecord;
|
||||
import org.apache.hudi.common.model.HoodieTableType;
|
||||
import org.apache.hudi.common.model.WriteOperationType;
|
||||
import org.apache.hudi.common.table.HoodieTableMetaClient;
|
||||
import org.apache.hudi.common.table.view.FileSystemViewStorageConfig;
|
||||
import org.apache.hudi.common.table.view.FileSystemViewStorageType;
|
||||
@@ -39,6 +40,8 @@ import org.apache.hudi.config.HoodieStorageConfig;
|
||||
import org.apache.hudi.config.HoodieWriteConfig;
|
||||
import org.apache.hudi.index.HoodieIndex;
|
||||
import org.apache.hudi.index.HoodieIndex.IndexType;
|
||||
import org.apache.hudi.metadata.HoodieTableMetadataWriter;
|
||||
import org.apache.hudi.metadata.SparkHoodieBackedTableMetadataWriter;
|
||||
import org.apache.hudi.table.HoodieSparkTable;
|
||||
import org.apache.hudi.table.HoodieTable;
|
||||
import org.apache.hudi.testutils.Assertions;
|
||||
@@ -374,11 +377,18 @@ public class TestHoodieIndex extends HoodieClientTestHarness {
|
||||
.withGlobalSimpleIndexUpdatePartitionPath(true)
|
||||
.withBloomIndexUpdatePartitionPath(true)
|
||||
.build())
|
||||
.withMetadataConfig(HoodieMetadataConfig.newBuilder().enable(false).build()).build();
|
||||
.withMetadataConfig(
|
||||
HoodieMetadataConfig.newBuilder().enable(true).build())
|
||||
.build();
|
||||
writeClient = getHoodieWriteClient(config);
|
||||
index = writeClient.getIndex();
|
||||
|
||||
HoodieTable hoodieTable = HoodieSparkTable.create(config, context, metaClient);
|
||||
HoodieSparkWriteableTestTable testTable = HoodieSparkWriteableTestTable.of(hoodieTable, SCHEMA);
|
||||
HoodieTableMetadataWriter metadataWriter = SparkHoodieBackedTableMetadataWriter.create(
|
||||
writeClient.getEngineContext().getHadoopConf().get(), config, writeClient.getEngineContext());
|
||||
HoodieSparkWriteableTestTable testTable = HoodieSparkWriteableTestTable.of(hoodieTable.getMetaClient(),
|
||||
SCHEMA, metadataWriter);
|
||||
|
||||
final String p1 = "2016/01/31";
|
||||
final String p2 = "2016/02/28";
|
||||
|
||||
@@ -415,8 +425,14 @@ public class TestHoodieIndex extends HoodieClientTestHarness {
|
||||
new HoodieKey(incomingPayloadSamePartition.getRowKey(), incomingPayloadSamePartition.getPartitionPath()),
|
||||
incomingPayloadSamePartition);
|
||||
|
||||
final String file1P1C0 = UUID.randomUUID().toString();
|
||||
Map<String, List<Pair<String, Integer>>> c1PartitionToFilesNameLengthMap = new HashMap<>();
|
||||
c1PartitionToFilesNameLengthMap.put(p1, Collections.singletonList(Pair.of(file1P1C0, 100)));
|
||||
testTable.doWriteOperation("1000", WriteOperationType.INSERT, Arrays.asList(p1),
|
||||
c1PartitionToFilesNameLengthMap, false, false);
|
||||
|
||||
// We have some records to be tagged (two different partitions)
|
||||
testTable.addCommit("1000").getFileIdWithInserts(p1, originalRecord);
|
||||
testTable.withInserts(p1, file1P1C0, originalRecord);
|
||||
|
||||
// test against incoming record with a different partition
|
||||
JavaRDD<HoodieRecord> recordRDD = jsc.parallelize(Collections.singletonList(incomingRecord));
|
||||
|
||||
Reference in New Issue
Block a user