[HUDI-1400] Replace Operation enum with WriteOperationType (#2259)

Author: wangxianghu
Date: 2020-11-19 13:40:04 +08:00
Committed by: GitHub
parent 4d05680038
commit a23230c8c2
6 changed files with 50 additions and 50 deletions
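In short, the integration test suite stops referencing the DeltaStreamer-local Operation enum and switches to the shared org.apache.hudi.common.model.WriteOperationType. A minimal before/after sketch of a call site (the enum names and imports come from the diff below; `writer` is a hypothetical stand-in for a caller such as HoodieTestSuiteWriter):

    // Before: enum nested in HoodieDeltaStreamer (hudi-utilities module)
    // import org.apache.hudi.utilities.deltastreamer.HoodieDeltaStreamer.Operation;
    // writer.upsert(Operation.UPSERT);

    // After: shared enum from hudi-common
    // import org.apache.hudi.common.model.WriteOperationType;
    // writer.upsert(WriteOperationType.UPSERT);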

HoodieDeltaStreamerWrapper.java

@@ -20,6 +20,7 @@ package org.apache.hudi.integ.testsuite;
 import org.apache.hudi.client.WriteStatus;
 import org.apache.hudi.common.model.HoodieRecord;
+import org.apache.hudi.common.model.WriteOperationType;
 import org.apache.hudi.common.util.collection.Pair;
 import org.apache.hudi.utilities.deltastreamer.DeltaSync;
 import org.apache.hudi.utilities.deltastreamer.HoodieDeltaStreamer;
@@ -38,30 +39,30 @@ public class HoodieDeltaStreamerWrapper extends HoodieDeltaStreamer {
     super(cfg, jssc);
   }
 
-  public JavaRDD<WriteStatus> upsert(Operation operation) throws Exception {
+  public JavaRDD<WriteStatus> upsert(WriteOperationType operation) throws Exception {
     cfg.operation = operation;
     return deltaSyncService.get().getDeltaSync().syncOnce().getRight();
   }
 
   public JavaRDD<WriteStatus> insert() throws Exception {
-    return upsert(Operation.INSERT);
+    return upsert(WriteOperationType.INSERT);
   }
 
   public JavaRDD<WriteStatus> bulkInsert() throws Exception {
-    return upsert(Operation.BULK_INSERT);
+    return upsert(WriteOperationType.BULK_INSERT);
   }
 
   public void scheduleCompact() throws Exception {
     // Since we don't support scheduleCompact() operation in delta-streamer, assume upsert without any data that will
     // trigger scheduling compaction
-    upsert(Operation.UPSERT);
+    upsert(WriteOperationType.UPSERT);
   }
 
   public JavaRDD<WriteStatus> compact() throws Exception {
     // Since we don't support compact() operation in delta-streamer, assume upsert without any data that will trigger
     // inline compaction
-    return upsert(Operation.UPSERT);
+    return upsert(WriteOperationType.UPSERT);
   }
 
   public Pair<SchemaProvider, Pair<String, JavaRDD<HoodieRecord>>> fetchSource() throws Exception {

HoodieTestSuiteWriter.java

@@ -25,6 +25,7 @@ import org.apache.hudi.client.SparkRDDWriteClient;
 import org.apache.hudi.client.WriteStatus;
 import org.apache.hudi.client.common.HoodieSparkEngineContext;
 import org.apache.hudi.common.model.HoodieRecord;
+import org.apache.hudi.common.model.WriteOperationType;
 import org.apache.hudi.common.table.timeline.HoodieActiveTimeline;
 import org.apache.hudi.common.util.Option;
 import org.apache.hudi.common.util.collection.Pair;
@@ -38,7 +39,6 @@ import org.apache.hudi.integ.testsuite.dag.nodes.DagNode;
 import org.apache.hudi.integ.testsuite.dag.nodes.RollbackNode;
 import org.apache.hudi.integ.testsuite.dag.nodes.ScheduleCompactNode;
 import org.apache.hudi.integ.testsuite.writer.DeltaWriteStats;
-import org.apache.hudi.utilities.deltastreamer.HoodieDeltaStreamer.Operation;
 import org.apache.hudi.utilities.schema.SchemaProvider;
 import org.apache.spark.api.java.JavaRDD;
 import org.apache.spark.api.java.JavaSparkContext;
@@ -126,7 +126,7 @@ public class HoodieTestSuiteWriter {
 
   public JavaRDD<WriteStatus> upsert(Option<String> instantTime) throws Exception {
     if (cfg.useDeltaStreamer) {
-      return deltaStreamerWrapper.upsert(Operation.UPSERT);
+      return deltaStreamerWrapper.upsert(WriteOperationType.UPSERT);
     } else {
       Pair<SchemaProvider, Pair<String, JavaRDD<HoodieRecord>>> nextBatch = fetchSource();
       lastCheckpoint = Option.of(nextBatch.getValue().getLeft());
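
For context, a hedged sketch of driving the wrapper after this change. Only the constructor arguments (cfg, jssc) and the method signatures are taken from the diff above; the configuration and Spark setup are assumed and omitted:

    import org.apache.hudi.client.WriteStatus;
    import org.apache.hudi.common.model.WriteOperationType;
    import org.apache.spark.api.java.JavaRDD;

    // cfg and jssc are assumed to be a configured HoodieDeltaStreamer config
    // and a JavaSparkContext, respectively (setup not shown).
    HoodieDeltaStreamerWrapper wrapper = new HoodieDeltaStreamerWrapper(cfg, jssc);

    // Each call now takes or delegates to the shared WriteOperationType enum.
    JavaRDD<WriteStatus> upserted = wrapper.upsert(WriteOperationType.UPSERT);
    JavaRDD<WriteStatus> inserted = wrapper.insert();     // delegates to upsert(WriteOperationType.INSERT)
    JavaRDD<WriteStatus> bulked = wrapper.bulkInsert();   // delegates to upsert(WriteOperationType.BULK_INSERT)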