1
0

[HUDI-1557] Make Flink write pipeline write task scalable (#2506)

This is the #step 2 of RFC-24:
https://cwiki.apache.org/confluence/display/HUDI/RFC+-+24%3A+Hoodie+Flink+Writer+Proposal

This PR introduces a BucketAssigner that assigns a bucket ID (partition
path & file ID) to each stream record.

There is no longer any need to look up the index and partition the records
in the downstream pipeline: the write target location is decided before the
write, and each record's location is computed as soon as the BucketAssigner
receives it — so indexing is done in a streaming fashion.

Computing locations for a whole batch of records at once is
resource-consuming and puts pressure on the engine;
we should avoid that in a streaming system.
This commit is contained in:
Danny Chan
2021-02-06 22:03:52 +08:00
committed by GitHub
parent 291f92069e
commit 4c5b6923cc
30 changed files with 1435 additions and 393 deletions

View File

@@ -44,7 +44,13 @@ public class WorkloadStat implements Serializable {
}
/**
 * Accumulates {@code numUpdates} update records targeting the given location.
 *
 * <p>If this file group already has a recorded update count, the new count is
 * added on top of it, so repeated calls for the same file id accumulate rather
 * than overwrite each other.
 *
 * @param location   record location (file id + instant time) being updated
 * @param numUpdates number of update records to add for this location
 * @return the running total of updates tracked by this stat
 */
public long addUpdates(HoodieRecordLocation location, long numUpdates) {
  long accNumUpdates = 0;
  // Read the previously accumulated count (if any) BEFORE writing the new
  // entry; writing first would clobber the old count and make the read below
  // return the value just written, doubling numUpdates instead of accumulating.
  if (updateLocationToCount.containsKey(location.getFileId())) {
    accNumUpdates = updateLocationToCount.get(location.getFileId()).getRight();
  }
  updateLocationToCount.put(
      location.getFileId(),
      Pair.of(location.getInstantTime(), numUpdates + accNumUpdates));
  return this.numUpdates += numUpdates;
}

View File

@@ -19,6 +19,7 @@
package org.apache.hudi.table.action.commit;
import java.io.Serializable;
import java.util.Objects;
/**
* Helper class for a bucket's type (INSERT and UPDATE) and its file location.
@@ -29,6 +30,24 @@ public class BucketInfo implements Serializable {
String fileIdPrefix;
String partitionPath;
/**
 * Creates a {@code BucketInfo}.
 *
 * @param bucketType    the bucket's type (INSERT or UPDATE)
 * @param fileIdPrefix  prefix of the file id this bucket writes to
 * @param partitionPath partition path of the bucket
 */
public BucketInfo(BucketType bucketType, String fileIdPrefix, String partitionPath) {
  this.partitionPath = partitionPath;
  this.fileIdPrefix = fileIdPrefix;
  this.bucketType = bucketType;
}
/** Returns the bucket's type (INSERT or UPDATE). */
public BucketType getBucketType() {
  return this.bucketType;
}
/** Returns the file id prefix of this bucket. */
public String getFileIdPrefix() {
  return this.fileIdPrefix;
}
/** Returns the partition path of this bucket. */
public String getPartitionPath() {
  return this.partitionPath;
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder("BucketInfo {");
@@ -38,4 +57,23 @@ public class BucketInfo implements Serializable {
sb.append('}');
return sb.toString();
}
/**
 * Two {@code BucketInfo} instances are equal when the bucket type, file id
 * prefix and partition path all match.
 *
 * <p>Uses {@link java.util.Objects#equals(Object, Object)} for the string
 * fields so a {@code null} field compares safely instead of throwing NPE,
 * keeping {@code equals} consistent with {@link #hashCode()}, which is
 * already null-tolerant via {@code Objects.hash}.
 */
@Override
public boolean equals(Object o) {
  if (this == o) {
    return true;
  }
  if (o == null || getClass() != o.getClass()) {
    return false;
  }
  BucketInfo that = (BucketInfo) o;
  return bucketType == that.bucketType
      && Objects.equals(fileIdPrefix, that.fileIdPrefix)
      && Objects.equals(partitionPath, that.partitionPath);
}
/**
 * Hash code derived from bucket type, file id prefix and partition path;
 * value-identical to {@code Objects.hash(bucketType, fileIdPrefix, partitionPath)}.
 */
@Override
public int hashCode() {
  // Unrolled Objects.hash / Arrays.hashCode: seed 1, combine each field with 31.
  int result = 1;
  result = 31 * result + Objects.hashCode(bucketType);
  result = 31 * result + Objects.hashCode(fileIdPrefix);
  result = 31 * result + Objects.hashCode(partitionPath);
  return result;
}
}