1
0

[HUDI-1860] Add INSERT_OVERWRITE and INSERT_OVERWRITE_TABLE support to DeltaStreamer (#3184)

This commit is contained in:
Samrat
2021-07-20 07:19:43 +05:30
committed by GitHub
parent d5026e9a24
commit a086d255c8
8 changed files with 166 additions and 6 deletions

View File

@@ -673,8 +673,7 @@ public abstract class AbstractHoodieWriteClient<T extends HoodieRecordPayload, I
}
/**
* Provides a new commit time for a write operation (insert/update/delete).
*
* Provides a new commit time for a write operation (insert/update/delete/insert_overwrite/insert_overwrite_table) without specified action.
* @param instantTime Instant time to be generated
*/
public void startCommitWithTime(String instantTime) {
@@ -683,7 +682,7 @@ public abstract class AbstractHoodieWriteClient<T extends HoodieRecordPayload, I
}
/**
* Completes a new commit time for a write operation (insert/update/delete) with specified action.
* Completes a new commit time for a write operation (insert/update/delete/insert_overwrite/insert_overwrite_table) with specified action.
*/
public void startCommitWithTime(String instantTime, String actionType) {
HoodieTableMetaClient metaClient = createMetaClient(true);

View File

@@ -42,6 +42,8 @@ public class CommitUtils {
/**
* Gets the commit action type for given write operation and table type.
* Use this API when commit action type can differ not only on the basis of table type but also write operation type.
* For example, INSERT_OVERWRITE/INSERT_OVERWRITE_TABLE operations have REPLACE commit action type.
*/
public static String getCommitActionType(WriteOperationType operation, HoodieTableType tableType) {
if (operation == WriteOperationType.INSERT_OVERWRITE || operation == WriteOperationType.INSERT_OVERWRITE_TABLE) {
@@ -53,6 +55,8 @@ public class CommitUtils {
/**
* Gets the commit action type for given table type.
* Note: Use this API only when the commit action type is not dependent on the write operation type.
* See {@link CommitUtils#getCommitActionType(WriteOperationType, HoodieTableType)} for more details.
*/
public static String getCommitActionType(HoodieTableType tableType) {
switch (tableType) {

View File

@@ -53,6 +53,16 @@ public class HoodieDeltaStreamerWrapper extends HoodieDeltaStreamer {
return upsert(WriteOperationType.BULK_INSERT);
}
/**
 * Runs one delta-streamer write pass with the INSERT_OVERWRITE operation.
 * Delegates to {@code upsert(WriteOperationType)}, which drives the shared sync path
 * with the given operation type (mirrors the sibling {@code bulkInsert()} wrapper).
 *
 * @return the per-record write statuses produced by the sync
 * @throws Exception if the underlying sync fails
 */
public JavaRDD<WriteStatus> insertOverwrite() throws Exception {
  return upsert(WriteOperationType.INSERT_OVERWRITE);
}
/**
 * Runs one delta-streamer write pass with the INSERT_OVERWRITE_TABLE operation.
 * Delegates to {@code upsert(WriteOperationType)}, which drives the shared sync path
 * with the given operation type (mirrors the sibling {@code bulkInsert()} wrapper).
 *
 * @return the per-record write statuses produced by the sync
 * @throws Exception if the underlying sync fails
 */
public JavaRDD<WriteStatus> insertOverwriteTable() throws Exception {
  return upsert(WriteOperationType.INSERT_OVERWRITE_TABLE);
}
public void scheduleCompact() throws Exception {
// Since we don't support scheduleCompact() operation in delta-streamer, assume upsert without any data that will
// trigger scheduling compaction

View File

@@ -163,6 +163,26 @@ public class HoodieTestSuiteWriter implements Serializable {
}
}
/**
 * Writes the next fetched batch using INSERT_OVERWRITE, either through the
 * delta-streamer wrapper or directly via the write client.
 * Also records the batch's checkpoint in {@code lastCheckpoint} on the direct path.
 *
 * @param instantTime commit instant to write under; only consumed on the
 *                    non-delta-streamer path, where it must be present —
 *                    {@code instantTime.get()} is called unchecked
 *                    (NOTE(review): callers appear to always supply it — confirm)
 * @return the per-record write statuses of this batch
 * @throws Exception if fetching the source batch or writing fails
 */
public JavaRDD<WriteStatus> insertOverwrite(Option<String> instantTime) throws Exception {
  if (cfg.useDeltaStreamer) {
    return deltaStreamerWrapper.insertOverwrite();
  } else {
    Pair<SchemaProvider, Pair<String, JavaRDD<HoodieRecord>>> nextBatch = fetchSource();
    // nextBatch value = (checkpoint string, records); remember the checkpoint for later commits.
    lastCheckpoint = Option.of(nextBatch.getValue().getLeft());
    return writeClient.insertOverwrite(nextBatch.getRight().getRight(), instantTime.get()).getWriteStatuses();
  }
}
/**
 * Writes the next fetched batch using INSERT_OVERWRITE_TABLE, either through the
 * delta-streamer wrapper or directly via the write client.
 * Also records the batch's checkpoint in {@code lastCheckpoint} on the direct path.
 *
 * @param instantTime commit instant to write under; only consumed on the
 *                    non-delta-streamer path, where it must be present —
 *                    {@code instantTime.get()} is called unchecked
 *                    (NOTE(review): callers appear to always supply it — confirm)
 * @return the per-record write statuses of this batch
 * @throws Exception if fetching the source batch or writing fails
 */
public JavaRDD<WriteStatus> insertOverwriteTable(Option<String> instantTime) throws Exception {
  if (cfg.useDeltaStreamer) {
    return deltaStreamerWrapper.insertOverwriteTable();
  } else {
    Pair<SchemaProvider, Pair<String, JavaRDD<HoodieRecord>>> nextBatch = fetchSource();
    // nextBatch value = (checkpoint string, records); remember the checkpoint for later commits.
    lastCheckpoint = Option.of(nextBatch.getValue().getLeft());
    return writeClient.insertOverwriteTable(nextBatch.getRight().getRight(), instantTime.get()).getWriteStatuses();
  }
}
public JavaRDD<WriteStatus> bulkInsert(Option<String> instantTime) throws Exception {
if (cfg.useDeltaStreamer) {
return deltaStreamerWrapper.bulkInsert();

View File

@@ -0,0 +1,40 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hudi.integ.testsuite.dag.nodes;
import org.apache.hudi.client.WriteStatus;
import org.apache.hudi.common.util.Option;
import org.apache.hudi.integ.testsuite.HoodieTestSuiteWriter;
import org.apache.hudi.integ.testsuite.configuration.DeltaConfig.Config;
import org.apache.spark.api.java.JavaRDD;
/**
 * Test-suite DAG node that ingests a batch using the INSERT_OVERWRITE write operation.
 * Reuses the data-generation/ingest flow of {@link InsertNode}, overriding only the
 * write call issued against the {@link HoodieTestSuiteWriter}.
 */
public class InsertOverwriteNode extends InsertNode {
  public InsertOverwriteNode(Config config) {
    super(config);
  }
  /**
   * Performs the insert-overwrite write for this node's batch.
   *
   * @param hoodieTestSuiteWriter writer used to execute the operation
   * @param commitTime commit instant to write under
   * @return the per-record write statuses of the operation
   * @throws Exception if the write fails
   */
  @Override
  protected JavaRDD<WriteStatus> ingest(HoodieTestSuiteWriter hoodieTestSuiteWriter,
      Option<String> commitTime)
      throws Exception {
    log.info("Execute insert overwrite node {}", this.getName());
    return hoodieTestSuiteWriter.insertOverwrite(commitTime);
  }
}

View File

@@ -0,0 +1,40 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hudi.integ.testsuite.dag.nodes;
import org.apache.hudi.client.WriteStatus;
import org.apache.hudi.common.util.Option;
import org.apache.hudi.integ.testsuite.HoodieTestSuiteWriter;
import org.apache.hudi.integ.testsuite.configuration.DeltaConfig.Config;
import org.apache.spark.api.java.JavaRDD;
/**
 * Test-suite DAG node that ingests a batch using the INSERT_OVERWRITE_TABLE write operation.
 * Reuses the data-generation/ingest flow of {@link InsertNode}, overriding only the
 * write call issued against the {@link HoodieTestSuiteWriter}.
 */
public class InsertOverwriteTableNode extends InsertNode {
  public InsertOverwriteTableNode(Config config) {
    super(config);
  }
  /**
   * Performs the insert-overwrite-table write for this node's batch.
   *
   * @param hoodieTestSuiteWriter writer used to execute the operation
   * @param commitTime commit instant to write under
   * @return the per-record write statuses of the operation
   * @throws Exception if the write fails
   */
  @Override
  protected JavaRDD<WriteStatus> ingest(HoodieTestSuiteWriter hoodieTestSuiteWriter,
      Option<String> commitTime)
      throws Exception {
    log.info("Execute insert overwrite table node {}", this.getName());
    return hoodieTestSuiteWriter.insertOverwriteTable(commitTime);
  }
}

View File

@@ -32,10 +32,13 @@ import org.apache.hudi.common.fs.FSUtils;
import org.apache.hudi.common.model.HoodieCommitMetadata;
import org.apache.hudi.common.model.HoodieRecord;
import org.apache.hudi.common.model.HoodieRecordPayload;
import org.apache.hudi.common.model.HoodieTableType;
import org.apache.hudi.common.model.WriteOperationType;
import org.apache.hudi.common.table.HoodieTableMetaClient;
import org.apache.hudi.common.table.timeline.HoodieActiveTimeline;
import org.apache.hudi.common.table.timeline.HoodieInstant;
import org.apache.hudi.common.table.timeline.HoodieTimeline;
import org.apache.hudi.common.util.CommitUtils;
import org.apache.hudi.common.util.Option;
import org.apache.hudi.common.util.ReflectionUtils;
import org.apache.hudi.common.util.StringUtils;
@@ -82,6 +85,7 @@ import java.io.IOException;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.function.Function;
import java.util.HashMap;
import java.util.HashSet;
@@ -460,6 +464,12 @@ public class DeltaSync implements Serializable {
case BULK_INSERT:
writeStatusRDD = writeClient.bulkInsert(records, instantTime);
break;
case INSERT_OVERWRITE:
writeStatusRDD = writeClient.insertOverwrite(records, instantTime).getWriteStatuses();
break;
case INSERT_OVERWRITE_TABLE:
writeStatusRDD = writeClient.insertOverwriteTable(records, instantTime).getWriteStatuses();
break;
default:
throw new HoodieDeltaStreamerException("Unknown operation : " + cfg.operation);
}
@@ -480,8 +490,8 @@ public class DeltaSync implements Serializable {
LOG.warn("Some records failed to be merged but forcing commit since commitOnErrors set. Errors/Total="
+ totalErrorRecords + "/" + totalRecords);
}
boolean success = writeClient.commit(instantTime, writeStatusRDD, Option.of(checkpointCommitMetadata));
String commitActionType = CommitUtils.getCommitActionType(cfg.operation, HoodieTableType.valueOf(cfg.tableType));
boolean success = writeClient.commit(instantTime, writeStatusRDD, Option.of(checkpointCommitMetadata), commitActionType, Collections.emptyMap());
if (success) {
LOG.info("Commit " + instantTime + " successful!");
this.formatAdapter.getSource().onCommit(checkpointStr);
@@ -530,7 +540,10 @@ public class DeltaSync implements Serializable {
RuntimeException lastException = null;
while (retryNum <= maxRetries) {
try {
return writeClient.startCommit();
String instantTime = HoodieActiveTimeline.createNewInstantTime();
String commitActionType = CommitUtils.getCommitActionType(cfg.operation, HoodieTableType.valueOf(cfg.tableType));
writeClient.startCommitWithTime(instantTime, commitActionType);
return instantTime;
} catch (IllegalArgumentException ie) {
lastException = ie;
LOG.error("Got error trying to start a new commit. Retrying after sleeping for a sec", ie);

View File

@@ -1723,6 +1723,40 @@ public class TestHoodieDeltaStreamer extends UtilitiesTestBase {
}
}
@Test
// End-to-end delta-streamer run exercising the INSERT_OVERWRITE operation.
public void testInsertOverwrite() throws Exception {
  testDeltaStreamerWithSpecifiedOperation(dfsBasePath + "/insert_overwrite", WriteOperationType.INSERT_OVERWRITE);
}
@Test
// End-to-end delta-streamer run exercising the INSERT_OVERWRITE_TABLE operation.
public void testInsertOverwriteTable() throws Exception {
  testDeltaStreamerWithSpecifiedOperation(dfsBasePath + "/insert_overwrite_table", WriteOperationType.INSERT_OVERWRITE_TABLE);
}
// Shared driver: seeds a table via BULK_INSERT, then re-syncs with the given
// operation type and checks record/commit counts after each pass.
void testDeltaStreamerWithSpecifiedOperation(final String tableBasePath, WriteOperationType operationType) throws Exception {
  // Initial insert: seed 1000 records with a bulk insert (commit "00000").
  HoodieDeltaStreamer.Config cfg = TestHelpers.makeConfig(tableBasePath, WriteOperationType.BULK_INSERT);
  new HoodieDeltaStreamer(cfg, jsc).sync();
  TestHelpers.assertRecordCount(1000, tableBasePath + "/*/*.parquet", sqlContext);
  TestHelpers.assertDistanceCount(1000, tableBasePath + "/*/*.parquet", sqlContext);
  TestHelpers.assertCommitMetadata("00000", tableBasePath, dfs, 1);
  // setting the operationType
  cfg.operation = operationType;
  // No new data => no commits.
  cfg.sourceLimit = 0;
  new HoodieDeltaStreamer(cfg, jsc).sync();
  // Counts unchanged: still 1 commit, 1000 records.
  TestHelpers.assertRecordCount(1000, tableBasePath + "/*/*.parquet", sqlContext);
  TestHelpers.assertDistanceCount(1000, tableBasePath + "/*/*.parquet", sqlContext);
  TestHelpers.assertCommitMetadata("00000", tableBasePath, dfs, 1);
  // Second sync with data: the overwrite operation produces a second commit.
  cfg.sourceLimit = 1000;
  new HoodieDeltaStreamer(cfg, jsc).sync();
  // Expected 1950 total records after the overwrite pass — presumably the new batch
  // replaces data only in the partitions it touches, leaving part of the original
  // 1000 intact; exact split depends on the TestHelpers data generator — TODO confirm.
  TestHelpers.assertRecordCount(1950, tableBasePath + "/*/*.parquet", sqlContext);
  TestHelpers.assertDistanceCount(1950, tableBasePath + "/*/*.parquet", sqlContext);
  TestHelpers.assertCommitMetadata("00001", tableBasePath, dfs, 2);
}
/**
* UDF to calculate Haversine distance.
*/