[HUDI-2798] Fix flink query operation fields (#4041)

Author: Danny Chan
Date: 2021-11-19 23:39:37 +08:00 (committed by GitHub)
Parent: 7a00f867ae
Commit: bf008762df

5 changed files with 77 additions and 39 deletions
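
In the Flink write pipeline every incoming record carries a change flag (insert/update/delete) that Hudi keeps in the _hoodie_operation metadata field when operation metadata is enabled. Before this patch, HoodieMergeHandle turned any record whose operation was DELETE into a delete even when the payload merge kept the old record, and the compactor never told its log scanner about the operation field. A minimal sketch of the helper the patch leans on (assumes hudi-common on the classpath; HoodieOperation.isDelete and getOperation() appear in the hunks below, the enum constants are standard):

    import org.apache.hudi.common.model.HoodieOperation;

    public class OperationDemo {
      public static void main(String[] args) {
        // DELETE marks a Flink changelog retraction; this static helper is
        // the same one the patch calls on hoodieRecord.getOperation().
        System.out.println(HoodieOperation.isDelete(HoodieOperation.DELETE)); // true
        System.out.println(HoodieOperation.isDelete(HoodieOperation.INSERT)); // false
      }
    }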

@@ -250,11 +250,17 @@ public class HoodieMergeHandle<T extends HoodieRecordPayload, I, K, O> extends H
         + ((ExternalSpillableMap) keyToNewRecords).getSizeOfFileOnDiskInBytes());
   }
 
-  private boolean writeUpdateRecord(HoodieRecord<T> hoodieRecord, Option<IndexedRecord> indexedRecord) {
+  private boolean writeUpdateRecord(HoodieRecord<T> hoodieRecord, GenericRecord oldRecord, Option<IndexedRecord> indexedRecord) {
+    boolean isDelete = false;
     if (indexedRecord.isPresent()) {
       updatedRecordsWritten++;
+      GenericRecord record = (GenericRecord) indexedRecord.get();
+      if (oldRecord != record) {
+        // the incoming record is chosen
+        isDelete = HoodieOperation.isDelete(hoodieRecord.getOperation());
+      }
     }
-    return writeRecord(hoodieRecord, indexedRecord);
+    return writeRecord(hoodieRecord, indexedRecord, isDelete);
   }
 
   protected void writeInsertRecord(HoodieRecord<T> hoodieRecord) throws IOException {
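
The oldRecord != record reference check above is how the handle detects which side won the merge: a payload that keeps the stored row returns the very same GenericRecord object it was given, so a different reference means the incoming record was chosen and its DELETE flag should be honored. A self-contained sketch of that trick with plain objects (illustrative names, not Hudi API):

    public class MergeChoiceDemo {
      // Stand-in for HoodieRecordPayload#combineAndGetUpdateValue: returns the
      // old object itself to keep it, or a new object for the incoming record.
      static Object combine(Object oldRec, Object incoming, boolean keepOld) {
        return keepOld ? oldRec : incoming;
      }

      public static void main(String[] args) {
        Object oldRec = new Object();
        Object incoming = new Object();
        System.out.println(combine(oldRec, incoming, true) != oldRec);  // false: old row kept, delete ignored
        System.out.println(combine(oldRec, incoming, false) != oldRec); // true: incoming chosen, delete applies
      }
    }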
@@ -264,12 +270,16 @@ public class HoodieMergeHandle<T extends HoodieRecordPayload, I, K, O> extends H
     if (insertRecord.isPresent() && insertRecord.get().equals(IGNORE_RECORD)) {
       return;
     }
-    if (writeRecord(hoodieRecord, insertRecord)) {
+    if (writeRecord(hoodieRecord, insertRecord, HoodieOperation.isDelete(hoodieRecord.getOperation()))) {
       insertRecordsWritten++;
     }
   }
 
   protected boolean writeRecord(HoodieRecord<T> hoodieRecord, Option<IndexedRecord> indexedRecord) {
+    return writeRecord(hoodieRecord, indexedRecord, false);
+  }
+
+  protected boolean writeRecord(HoodieRecord<T> hoodieRecord, Option<IndexedRecord> indexedRecord, boolean isDelete) {
     Option recordMetadata = hoodieRecord.getData().getMetadata();
     if (!partitionPath.equals(hoodieRecord.getPartitionPath())) {
       HoodieUpsertException failureEx = new HoodieUpsertException("mismatched partition path, record partition: "
@@ -277,11 +287,8 @@ public class HoodieMergeHandle<T extends HoodieRecordPayload, I, K, O> extends H
       writeStatus.markFailure(hoodieRecord, failureEx, recordMetadata);
       return false;
     }
-    if (HoodieOperation.isDelete(hoodieRecord.getOperation())) {
-      indexedRecord = Option.empty();
-    }
     try {
-      if (indexedRecord.isPresent()) {
+      if (indexedRecord.isPresent() && !isDelete) {
         // Convert GenericRecord to GenericRecord with hoodie commit metadata in schema
         IndexedRecord recordWithMetadataInSchema = rewriteRecord((GenericRecord) indexedRecord.get());
         fileWriter.writeAvroWithMetadata(recordWithMetadataInSchema, hoodieRecord);
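
The three removed lines blanked the record for any DELETE operation, so a merge that decided to keep the old row still produced a delete; the new guard skips the write only when isDelete is true, i.e. only when the incoming record won the merge. A self-contained sketch of the before/after guard (plain booleans, hypothetical helper names):

    public class WriteGuardDemo {
      // Pre-patch: a DELETE operation always suppressed the write.
      static boolean oldGuard(boolean merged, boolean deleteOp) {
        return merged && !deleteOp;
      }

      // Post-patch: DELETE only counts when the incoming record was chosen.
      static boolean newGuard(boolean merged, boolean deleteOp, boolean incomingChosen) {
        return merged && !(deleteOp && incomingChosen);
      }

      public static void main(String[] args) {
        // Merge kept the old row, but the incoming record carried DELETE:
        System.out.println(oldGuard(true, true));        // false -> row was wrongly dropped
        System.out.println(newGuard(true, true, false)); // true  -> row is preserved
      }
    }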
@@ -321,7 +328,7 @@ public class HoodieMergeHandle<T extends HoodieRecordPayload, I, K, O> extends H
     if (combinedAvroRecord.isPresent() && combinedAvroRecord.get().equals(IGNORE_RECORD)) {
       // If it is an IGNORE_RECORD, just copy the old record, and do not update the new record.
       copyOldRecord = true;
-    } else if (writeUpdateRecord(hoodieRecord, combinedAvroRecord)) {
+    } else if (writeUpdateRecord(hoodieRecord, oldRecord, combinedAvroRecord)) {
       /*
        * ONLY WHEN 1) we have an update for this key AND 2) We are able to successfully
        * write the combined new

@@ -179,6 +179,7 @@ public abstract class HoodieCompactor<T extends HoodieRecordPayload, I, K, O> im
         .withSpillableMapBasePath(config.getSpillableMapBasePath())
         .withDiskMapType(config.getCommonConfig().getSpillableDiskMapType())
         .withBitCaskDiskMapCompressionEnabled(config.getCommonConfig().isBitCaskDiskMapCompressionEnabled())
+        .withOperationField(config.allowOperationMetadataField())
         .build();
     if (!scanner.iterator().hasNext()) {
       scanner.close();
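
With withOperationField enabled the merged log record scanner keeps _hoodie_operation in the records it replays, so compaction sees the same delete flags the merge handle now honors. A hedged sketch of enabling that from the write config (the getter allowOperationMetadataField() is confirmed by the hunk above; the builder method name is an assumption about HoodieWriteConfig):

    import org.apache.hudi.config.HoodieWriteConfig;

    public class CompactorConfigDemo {
      public static void main(String[] args) {
        HoodieWriteConfig config = HoodieWriteConfig.newBuilder()
            .withPath("/tmp/hoodie_table")
            // assumed builder counterpart of allowOperationMetadataField()
            .withAllowOperationMetadataField(true)
            .build();
        System.out.println(config.allowOperationMetadataField()); // true
      }
    }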