diff --git a/hudi-client/src/main/java/org/apache/hudi/table/HoodieCopyOnWriteTable.java b/hudi-client/src/main/java/org/apache/hudi/table/HoodieCopyOnWriteTable.java
index 974d847c0..64b3bf0c0 100644
--- a/hudi-client/src/main/java/org/apache/hudi/table/HoodieCopyOnWriteTable.java
+++ b/hudi-client/src/main/java/org/apache/hudi/table/HoodieCopyOnWriteTable.java
@@ -31,7 +31,6 @@ import org.apache.hudi.client.utils.ParquetReaderIterator;
 import org.apache.hudi.common.model.HoodieBaseFile;
 import org.apache.hudi.common.model.HoodieKey;
 import org.apache.hudi.common.model.HoodieRecord;
-import org.apache.hudi.common.model.HoodieRecordLocation;
 import org.apache.hudi.common.model.HoodieRecordPayload;
 import org.apache.hudi.common.table.HoodieTableMetaClient;
 import org.apache.hudi.common.table.timeline.HoodieInstant;
@@ -66,7 +65,6 @@ import org.apache.spark.api.java.JavaRDD;
 import org.apache.spark.api.java.JavaSparkContext;
 
 import java.io.IOException;
-import java.io.Serializable;
 import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
@@ -75,7 +73,6 @@ import java.util.Map;
 /**
  * Implementation of a very heavily read-optimized Hoodie Table where, all data is stored in base files, with
  * zero read amplification.
- *
  * <p>
  * INSERTS - Produce new files, block aligned to desired size (or) Merge with the smallest existing file, to expand it
  * <p>
@@ -207,14 +204,11 @@ public class HoodieCopyOnWriteTable<T extends HoodieRecordPayload> extends Hoodi
     return new SavepointActionExecutor(jsc, config, this, instantToSavepoint, user, comment).execute();
   }
 
+  @Override
   public HoodieRestoreMetadata restore(JavaSparkContext jsc, String restoreInstantTime, String instantToRestore) {
     return new CopyOnWriteRestoreActionExecutor(jsc, config, this, restoreInstantTime, instantToRestore).execute();
   }
 
-  enum BucketType {
-    UPDATE, INSERT
-  }
-
   /**
    * Consumer that dequeues records from queue and sends to Merge Handle.
    */
@@ -240,61 +234,4 @@
     }
   }
 
-  /**
-   * Helper class for a small file's location and its actual size on disk.
-   */
-  static class SmallFile implements Serializable {
-
-    HoodieRecordLocation location;
-    long sizeBytes;
-
-    @Override
-    public String toString() {
-      final StringBuilder sb = new StringBuilder("SmallFile {");
-      sb.append("location=").append(location).append(", ");
-      sb.append("sizeBytes=").append(sizeBytes);
-      sb.append('}');
-      return sb.toString();
-    }
-  }
-
-  /**
-   * Helper class for an insert bucket along with the weight [0.0, 1.0] that defines the amount of incoming inserts that
-   * should be allocated to the bucket.
-   */
-  class InsertBucket implements Serializable {
-
-    int bucketNumber;
-    // fraction of total inserts, that should go into this bucket
-    double weight;
-
-    @Override
-    public String toString() {
-      final StringBuilder sb = new StringBuilder("WorkloadStat {");
-      sb.append("bucketNumber=").append(bucketNumber).append(", ");
-      sb.append("weight=").append(weight);
-      sb.append('}');
-      return sb.toString();
-    }
-  }
-
-  /**
-   * Helper class for a bucket's type (INSERT and UPDATE) and its file location.
-   */
-  class BucketInfo implements Serializable {
-
-    BucketType bucketType;
-    String fileIdPrefix;
-    String partitionPath;
-
-    @Override
-    public String toString() {
-      final StringBuilder sb = new StringBuilder("BucketInfo {");
-      sb.append("bucketType=").append(bucketType).append(", ");
-      sb.append("fileIdPrefix=").append(fileIdPrefix).append(", ");
-      sb.append("partitionPath=").append(partitionPath);
-      sb.append('}');
-      return sb.toString();
-    }
-  }
 }
diff --git a/hudi-client/src/main/java/org/apache/hudi/table/HoodieMergeOnReadTable.java b/hudi-client/src/main/java/org/apache/hudi/table/HoodieMergeOnReadTable.java
index 65981c2db..8496ea488 100644
--- a/hudi-client/src/main/java/org/apache/hudi/table/HoodieMergeOnReadTable.java
+++ b/hudi-client/src/main/java/org/apache/hudi/table/HoodieMergeOnReadTable.java
@@ -43,8 +43,6 @@ import org.apache.hudi.table.action.deltacommit.UpsertPreppedDeltaCommitActionEx
 import org.apache.hudi.table.action.compact.ScheduleCompactionActionExecutor;
 import org.apache.hudi.table.action.restore.MergeOnReadRestoreActionExecutor;
 import org.apache.hudi.table.action.rollback.MergeOnReadRollbackActionExecutor;
-import org.apache.log4j.LogManager;
-import org.apache.log4j.Logger;
 import org.apache.spark.api.java.JavaRDD;
 import org.apache.spark.api.java.JavaSparkContext;
 
@@ -69,8 +67,6 @@ import java.util.Map;
  */
 public class HoodieMergeOnReadTable<T extends HoodieRecordPayload> extends HoodieCopyOnWriteTable<T> {
 
-  private static final Logger LOG = LogManager.getLogger(HoodieMergeOnReadTable.class);
-
   HoodieMergeOnReadTable(HoodieWriteConfig config, Configuration hadoopConf, HoodieTableMetaClient metaClient) {
     super(config, hadoopConf, metaClient);
   }
@@ -137,6 +133,7 @@ public class HoodieMergeOnReadTable<T extends HoodieRecordPayload> extends Hoodi
     return new MergeOnReadRollbackActionExecutor(jsc, config, this, rollbackInstantTime,
         commitInstant, deleteInstants).execute();
   }
+  @Override
   public HoodieRestoreMetadata restore(JavaSparkContext jsc, String restoreInstantTime, String instantToRestore) {
     return new MergeOnReadRestoreActionExecutor(jsc, config, this, restoreInstantTime, instantToRestore).execute();
   }
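
For context on the restore(...) methods that gain @Override above, the sketch below shows the usual client-side call path of this era: callers go through HoodieWriteClient rather than invoking the table directly, and the client dispatches to the COW or MOR implementation based on the table type. This is an illustrative example, not part of this change; the base path and instant time are placeholders, and HoodieAvroPayload is used only as a convenient payload type.

// Illustrative only -- not part of this diff.
import org.apache.hudi.client.HoodieWriteClient;
import org.apache.hudi.common.model.HoodieAvroPayload;
import org.apache.hudi.config.HoodieWriteConfig;
import org.apache.spark.api.java.JavaSparkContext;

public class RestoreExample {
  public static void main(String[] args) {
    JavaSparkContext jsc = new JavaSparkContext("local[2]", "restore-example");
    HoodieWriteConfig config = HoodieWriteConfig.newBuilder()
        .withPath("/tmp/hoodie/sample-table") // placeholder base path
        .build();
    try (HoodieWriteClient<HoodieAvroPayload> client = new HoodieWriteClient<>(jsc, config)) {
      // Revert the table to its state as of this (placeholder) instant time;
      // internally this resolves to HoodieCopyOnWriteTable#restore or
      // HoodieMergeOnReadTable#restore depending on the table type.
      client.restoreToInstant("20200401010101");
    }
    jsc.stop();
  }
}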