HUDI-131 Zero File Listing in Compactor run
This commit is contained in:
committed by
vinoth chandar
parent
4074c5eb23
commit
99b0c72aa6
@@ -126,14 +126,13 @@ public class HoodieRealtimeTableCompactor implements HoodieCompactor {
|
||||
return Lists.<WriteStatus>newArrayList();
|
||||
}
|
||||
|
||||
Option<HoodieDataFile> oldDataFileOpt = hoodieCopyOnWriteTable.getROFileSystemView()
|
||||
.getDataFileOn(operation.getPartitionPath(), operation.getBaseInstantTime(), operation.getFileId());
|
||||
Option<HoodieDataFile> oldDataFileOpt = operation.getBaseFile();
|
||||
|
||||
// Compacting is very similar to applying updates to existing file
|
||||
Iterator<List<WriteStatus>> result;
|
||||
// If the dataFile is present, there is a base parquet file present, perform updates else perform inserts into a
|
||||
// new base parquet file.
|
||||
if (operation.getDataFilePath().isPresent()) {
|
||||
if (oldDataFileOpt.isPresent()) {
|
||||
result = hoodieCopyOnWriteTable
|
||||
.handleUpdate(commitTime, operation.getFileId(), scanner.getRecords(), oldDataFileOpt.get());
|
||||
} else {
|
||||
|
||||
@@ -110,6 +110,11 @@ public class CompactionOperation implements Serializable {
|
||||
return id;
|
||||
}
|
||||
|
||||
public Option<HoodieDataFile> getBaseFile() {
|
||||
//TODO: HUDI-130 - Paths returned in compaction plan need to be relative to base-path
|
||||
return dataFilePath.map(df -> new HoodieDataFile(df));
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert Avro generated Compaction operation to POJO for Spark RDD operation
|
||||
* @param operation Hoodie Compaction Operation
|
||||
|
||||
Reference in New Issue
Block a user