1
0

[HUDI-2596] Make class names consistent in hudi-client (#4680)

This commit is contained in:
Raymond Xu
2022-01-27 17:05:08 -08:00
committed by GitHub
parent 4a9f826382
commit 0bd38f26ca
68 changed files with 216 additions and 175 deletions

View File

@@ -78,7 +78,7 @@ import java.util.stream.Collectors;
@SuppressWarnings("checkstyle:LineLength")
public class HoodieFlinkWriteClient<T extends HoodieRecordPayload> extends
-    AbstractHoodieWriteClient<T, List<HoodieRecord<T>>, List<HoodieKey>, List<WriteStatus>> {
+    BaseHoodieWriteClient<T, List<HoodieRecord<T>>, List<HoodieKey>, List<WriteStatus>> {
private static final Logger LOG = LoggerFactory.getLogger(HoodieFlinkWriteClient.class);

View File

@@ -132,7 +132,7 @@ public class FlinkHoodieBackedTableMetadataWriter extends HoodieBackedTableMetad
throw new HoodieMetadataException("Failed to commit metadata table records at instant " + instantTime);
}
});
-      // flink does not support auto-commit yet, also the auto commit logic is not complete as AbstractHoodieWriteClient now.
+      // flink does not support auto-commit yet, also the auto commit logic is not complete as BaseHoodieWriteClient now.
writeClient.commit(instantTime, statuses, Option.empty(), HoodieActiveTimeline.DELTA_COMMIT_ACTION, Collections.emptyMap());
// reload timeline

View File

@@ -43,7 +43,7 @@ import java.util.stream.Collectors;
@SuppressWarnings("checkstyle:LineLength")
public class FlinkDeleteHelper<R> extends
-    AbstractDeleteHelper<EmptyHoodieRecordPayload, List<HoodieRecord<EmptyHoodieRecordPayload>>, List<HoodieKey>, List<WriteStatus>, R> {
+    BaseDeleteHelper<EmptyHoodieRecordPayload, List<HoodieRecord<EmptyHoodieRecordPayload>>, List<HoodieKey>, List<WriteStatus>, R> {
private FlinkDeleteHelper() {
}

View File

@@ -45,7 +45,7 @@ import java.util.Iterator;
import scala.collection.immutable.List;
-public class FlinkMergeHelper<T extends HoodieRecordPayload> extends AbstractMergeHelper<T, List<HoodieRecord<T>>,
+public class FlinkMergeHelper<T extends HoodieRecordPayload> extends BaseMergeHelper<T, List<HoodieRecord<T>>,
List<HoodieKey>, List<WriteStatus>> {
private FlinkMergeHelper() {

View File

@@ -48,7 +48,7 @@ import java.util.stream.Collectors;
* <p>Computing the records batch locations all at a time is a pressure to the engine,
* we should avoid that in streaming system.
*/
-public class FlinkWriteHelper<T extends HoodieRecordPayload, R> extends AbstractWriteHelper<T, List<HoodieRecord<T>>,
+public class FlinkWriteHelper<T extends HoodieRecordPayload, R> extends BaseWriteHelper<T, List<HoodieRecord<T>>,
List<HoodieKey>, List<WriteStatus>, R> {
private FlinkWriteHelper() {

View File

@@ -29,7 +29,7 @@ import org.apache.hudi.table.HoodieTable;
/**
* Flink upgrade and downgrade helper.
*/
-public class FlinkUpgradeDowngradeHelper implements BaseUpgradeDowngradeHelper {
+public class FlinkUpgradeDowngradeHelper implements SupportsUpgradeDowngrade {
private static final FlinkUpgradeDowngradeHelper SINGLETON_INSTANCE =
new FlinkUpgradeDowngradeHelper();