Restore 0.8.0 config keys with deprecated annotation (#3506)
Co-authored-by: Sagar Sumit <sagarsumit09@gmail.com>
Co-authored-by: Vinoth Chandar <vinoth@apache.org>
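The test changes below switch to the renamed config constants (for example HoodieTableConfig.HOODIE_BASE_FILE_FORMAT_PROP becomes BASE_FILE_FORMAT and HoodieWriteConfig.KEYGENERATOR_CLASS becomes KEYGENERATOR_CLASS_NAME), while the 0.8.0 key names are kept on the config classes as deprecated aliases so existing pipelines keep working. The sketch below only illustrates that alias-and-fallback idea in plain Java; the class and method names (LegacyAwareConfig, getWithFallback) and the example key strings are hypothetical assumptions, not Hudi's actual ConfigProperty API.

import java.util.Properties;

// Minimal sketch of keeping a deprecated 0.8.0 key as an alias for a renamed key.
// All names and key strings here are illustrative assumptions, not Hudi APIs.
public class LegacyAwareConfig {

  // Key name used going forward (hypothetical example key).
  public static final String NEW_KEY = "example.base.file.format";

  // 0.8.0-era key kept only so that old configs keep resolving (hypothetical example key).
  @Deprecated
  public static final String DEPRECATED_KEY = "example.table.ro.file.format";

  // Prefer the new key, fall back to the deprecated alias, then to the default.
  public static String getWithFallback(Properties props, String defaultValue) {
    if (props.containsKey(NEW_KEY)) {
      return props.getProperty(NEW_KEY);
    }
    if (props.containsKey(DEPRECATED_KEY)) {
      return props.getProperty(DEPRECATED_KEY);
    }
    return defaultValue;
  }

  public static void main(String[] args) {
    Properties props = new Properties();
    props.setProperty(DEPRECATED_KEY, "PARQUET"); // written by a job still using the 0.8.0 key
    System.out.println(getWithFallback(props, "PARQUET")); // resolves to PARQUET via the alias
  }
}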
@@ -18,24 +18,6 @@

package org.apache.hudi.client.functional;

import static org.apache.hudi.common.testutils.HoodieTestDataGenerator.TRIP_EXAMPLE_SCHEMA;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hudi.client.HoodieWriteResult;
import org.apache.hudi.client.SparkRDDWriteClient;
import org.apache.hudi.client.WriteStatus;
@@ -81,6 +63,9 @@ import org.apache.hudi.metadata.SparkHoodieBackedTableMetadataWriter;
import org.apache.hudi.table.HoodieSparkTable;
import org.apache.hudi.table.HoodieTable;
import org.apache.hudi.testutils.HoodieClientTestHarness;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;
import org.apache.spark.api.java.JavaRDD;
@@ -93,6 +78,22 @@ import org.junit.jupiter.api.io.TempDir;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.EnumSource;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

import static org.apache.hudi.common.testutils.HoodieTestDataGenerator.TRIP_EXAMPLE_SCHEMA;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;

@Tag("functional")
public class TestHoodieBackedMetadata extends HoodieClientTestHarness {

@@ -800,8 +801,8 @@ public class TestHoodieBackedMetadata extends HoodieClientTestHarness {
assertEquals(writer.getMetadataReader().getUpdateTime().get(), beforeInflightActionTimestamp);

// Reader should sync to all the completed instants
HoodieTableMetadata metadata = HoodieTableMetadata.create(context, client.getConfig().getMetadataConfig(),
client.getConfig().getBasePath(), FileSystemViewStorageConfig.FILESYSTEM_VIEW_SPILLABLE_DIR.defaultValue());
HoodieTableMetadata metadata = HoodieTableMetadata.create(context, client.getConfig().getMetadataConfig(),
client.getConfig().getBasePath(), FileSystemViewStorageConfig.SPILLABLE_DIR.defaultValue());
assertEquals(((HoodieBackedTableMetadata)metadata).getReaderTime().get(), newCommitTime);

// Remove the inflight instance holding back table sync
@@ -813,8 +814,8 @@ public class TestHoodieBackedMetadata extends HoodieClientTestHarness {
assertEquals(writer.getMetadataReader().getUpdateTime().get(), newCommitTime);

// Reader should sync to all the completed instants
metadata = HoodieTableMetadata.create(context, client.getConfig().getMetadataConfig(),
client.getConfig().getBasePath(), FileSystemViewStorageConfig.FILESYSTEM_VIEW_SPILLABLE_DIR.defaultValue());
metadata = HoodieTableMetadata.create(context, client.getConfig().getMetadataConfig(),
client.getConfig().getBasePath(), FileSystemViewStorageConfig.SPILLABLE_DIR.defaultValue());
assertEquals(writer.getMetadataReader().getUpdateTime().get(), newCommitTime);
}

@@ -18,21 +18,18 @@

package org.apache.hudi.client.functional;

import org.apache.avro.generic.GenericRecord;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hudi.avro.model.HoodieCleanMetadata;
import org.apache.hudi.avro.model.HoodieClusteringPlan;
import org.apache.hudi.avro.model.HoodieRequestedReplaceMetadata;
import org.apache.hudi.avro.model.HoodieCleanMetadata;
import org.apache.hudi.client.AbstractHoodieWriteClient;
import org.apache.hudi.client.HoodieWriteResult;
import org.apache.hudi.client.SparkRDDWriteClient;
import org.apache.hudi.client.SparkTaskContextSupplier;
import org.apache.hudi.client.WriteStatus;
import org.apache.hudi.common.config.TypedProperties;
import org.apache.hudi.client.validator.SparkPreCommitValidator;
import org.apache.hudi.client.validator.SqlQueryEqualityPreCommitValidator;
import org.apache.hudi.client.validator.SqlQuerySingleResultPreCommitValidator;
import org.apache.hudi.common.config.TypedProperties;
import org.apache.hudi.common.engine.HoodieEngineContext;
import org.apache.hudi.common.fs.ConsistencyGuardConfig;
import org.apache.hudi.common.fs.FSUtils;
@@ -58,9 +55,9 @@ import org.apache.hudi.common.testutils.HoodieTestDataGenerator;
import org.apache.hudi.common.testutils.HoodieTestTable;
import org.apache.hudi.common.testutils.HoodieTestUtils;
import org.apache.hudi.common.testutils.RawTripTestPayload;
import org.apache.hudi.common.util.BaseFileUtils;
import org.apache.hudi.common.util.ClusteringUtils;
import org.apache.hudi.common.util.CollectionUtils;
import org.apache.hudi.common.util.BaseFileUtils;
import org.apache.hudi.common.util.FileIOUtils;
import org.apache.hudi.common.util.Option;
import org.apache.hudi.common.util.StringUtils;
@@ -93,6 +90,10 @@ import org.apache.hudi.table.marker.WriteMarkersFactory;
import org.apache.hudi.testutils.HoodieClientTestBase;
import org.apache.hudi.testutils.HoodieClientTestUtils;
import org.apache.hudi.testutils.HoodieSparkWriteableTestTable;

import org.apache.avro.generic.GenericRecord;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.Path;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;
import org.apache.spark.api.java.JavaRDD;
@@ -136,7 +137,7 @@ import static org.apache.hudi.common.testutils.HoodieTestDataGenerator.TRIP_EXAMPLE_SCHEMA;
import static org.apache.hudi.common.testutils.Transformations.randomSelectAsHoodieKeys;
import static org.apache.hudi.common.testutils.Transformations.recordsToRecordKeySet;
import static org.apache.hudi.config.HoodieClusteringConfig.ASYNC_CLUSTERING_ENABLE;
import static org.apache.hudi.config.HoodieClusteringConfig.CLUSTERING_EXECUTION_STRATEGY_CLASS;
import static org.apache.hudi.config.HoodieClusteringConfig.EXECUTION_STRATEGY_CLASS_NAME;
import static org.apache.hudi.testutils.Assertions.assertNoWriteErrors;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
@@ -2353,7 +2354,7 @@ public class TestHoodieClientOnCopyOnWriteStorage extends HoodieClientTestBase {

protected HoodieInstant createRequestedReplaceInstant(HoodieTableMetaClient metaClient, String clusterTime, List<FileSlice>[] fileSlices) throws IOException {
HoodieClusteringPlan clusteringPlan =
ClusteringUtils.createClusteringPlan(CLUSTERING_EXECUTION_STRATEGY_CLASS.defaultValue(), STRATEGY_PARAMS, fileSlices, Collections.emptyMap());
ClusteringUtils.createClusteringPlan(EXECUTION_STRATEGY_CLASS_NAME.defaultValue(), STRATEGY_PARAMS, fileSlices, Collections.emptyMap());

HoodieInstant clusteringInstant = new HoodieInstant(HoodieInstant.State.REQUESTED, HoodieTimeline.REPLACE_COMMIT_ACTION, clusterTime);
HoodieRequestedReplaceMetadata requestedReplaceMetadata = HoodieRequestedReplaceMetadata.newBuilder()

@@ -46,7 +46,7 @@ public class TestHBaseQPSResourceAllocator {

@Test
public void testsExplicitDefaultQPSResourceAllocator() {
HoodieWriteConfig config = getConfig(Option.of(HoodieHBaseIndexConfig.HBASE_INDEX_QPS_ALLOCATOR_CLASS.defaultValue()));
HoodieWriteConfig config = getConfig(Option.of(HoodieHBaseIndexConfig.QPS_ALLOCATOR_CLASS_NAME.defaultValue()));
SparkHoodieHBaseIndex index = new SparkHoodieHBaseIndex(config);
HBaseIndexQPSResourceAllocator hBaseIndexQPSResourceAllocator = index.createQPSResourceAllocator(config);
assertEquals(hBaseIndexQPSResourceAllocator.getClass().getName(),

@@ -53,7 +53,7 @@ public class TestCustomKeyGenerator extends KeyGeneratorTestUtilities {
properties.put(KeyGeneratorOptions.RECORDKEY_FIELD.key(), "_row_key");
}
if (useKeyGeneratorClassName) {
properties.put(HoodieWriteConfig.KEYGENERATOR_CLASS.key(), CustomKeyGenerator.class.getName());
properties.put(HoodieWriteConfig.KEYGENERATOR_CLASS_NAME.key(), CustomKeyGenerator.class.getName());
} else {
properties.put(HoodieWriteConfig.KEYGENERATOR_TYPE.key(), KeyGeneratorType.CUSTOM.name());
}
@@ -96,7 +96,7 @@ public class TestCustomKeyGenerator extends KeyGeneratorTestUtilities {
TypedProperties properties = new TypedProperties();
properties.put(KeyGeneratorOptions.PARTITIONPATH_FIELD.key(), "timestamp:simple");
if (useKeyGeneratorClassName) {
properties.put(HoodieWriteConfig.KEYGENERATOR_CLASS.key(), CustomKeyGenerator.class.getName());
properties.put(HoodieWriteConfig.KEYGENERATOR_CLASS_NAME.key(), CustomKeyGenerator.class.getName());
} else {
properties.put(HoodieWriteConfig.KEYGENERATOR_TYPE.key(), KeyGeneratorType.CUSTOM.name());
}

@@ -51,7 +51,7 @@ public class TestHoodieSparkKeyGeneratorFactory {

// set KeyGenerator class only
props = getCommonProps();
props.put(HoodieWriteConfig.KEYGENERATOR_CLASS.key(), SimpleKeyGenerator.class.getName());
props.put(HoodieWriteConfig.KEYGENERATOR_CLASS_NAME.key(), SimpleKeyGenerator.class.getName());
KeyGenerator keyGenerator2 = HoodieSparkKeyGeneratorFactory.createKeyGenerator(props);
Assertions.assertEquals(SimpleKeyGenerator.class.getName(), keyGenerator2.getClass().getName());

@@ -63,7 +63,7 @@ public class TestHoodieSparkKeyGeneratorFactory {

// set wrong class name
final TypedProperties props2 = getCommonProps();
props2.put(HoodieWriteConfig.KEYGENERATOR_CLASS.key(), TestHoodieSparkKeyGeneratorFactory.class.getName());
props2.put(HoodieWriteConfig.KEYGENERATOR_CLASS_NAME.key(), TestHoodieSparkKeyGeneratorFactory.class.getName());
assertThrows(IOException.class, () -> HoodieSparkKeyGeneratorFactory.createKeyGenerator(props2));

// set wrong keyGenerator type

@@ -45,7 +45,7 @@ import static org.junit.jupiter.api.Assertions.assertThrows;
*/
public class TestConsistencyGuard extends HoodieClientTestHarness {

private static final String BASE_FILE_EXTENSION = HoodieTableConfig.HOODIE_BASE_FILE_FORMAT_PROP.defaultValue().getFileExtension();
private static final String BASE_FILE_EXTENSION = HoodieTableConfig.BASE_FILE_FORMAT.defaultValue().getFileExtension();

// multiple parameters, uses Collection<Object[]>
public static List<Arguments> consistencyGuardType() {

@@ -128,7 +128,7 @@ public class TestHoodieMergeOnReadTable extends HoodieClientTestHarness {
dfs.mkdirs(new Path(basePath));

Properties properties = populateMetaFields ? new Properties() : getPropertiesForKeyGen();
properties.setProperty(HoodieTableConfig.HOODIE_BASE_FILE_FORMAT_PROP.key(), baseFileFormat.toString());
properties.setProperty(HoodieTableConfig.BASE_FILE_FORMAT.key(), baseFileFormat.toString());

metaClient = HoodieTestUtils.init(hadoopConf, basePath, HoodieTableType.MERGE_ON_READ, properties);
initTestDataGenerator();
@@ -140,7 +140,7 @@ public class TestHoodieMergeOnReadTable extends HoodieClientTestHarness {

@BeforeEach
public void init() throws IOException {
init(HoodieTableConfig.HOODIE_BASE_FILE_FORMAT_PROP.defaultValue(), true);
init(HoodieTableConfig.BASE_FILE_FORMAT.defaultValue(), true);
}

@AfterEach
@@ -165,7 +165,7 @@ public class TestHoodieMergeOnReadTable extends HoodieClientTestHarness {
@MethodSource("populateMetaFieldsParams")
public void testSimpleInsertAndUpdate(boolean populateMetaFields) throws Exception {
clean();
init(HoodieTableConfig.HOODIE_BASE_FILE_FORMAT_PROP.defaultValue(), populateMetaFields);
init(HoodieTableConfig.BASE_FILE_FORMAT.defaultValue(), populateMetaFields);

HoodieWriteConfig.Builder cfgBuilder = getConfigBuilder(true);
addConfigsForPopulateMetaFields(cfgBuilder, populateMetaFields);
@@ -266,7 +266,7 @@ public class TestHoodieMergeOnReadTable extends HoodieClientTestHarness {
@MethodSource("populateMetaFieldsAndPreserveMetadataParams")
public void testSimpleClusteringNoUpdates(boolean populateMetaFields, boolean preserveCommitMetadata) throws Exception {
clean();
init(HoodieTableConfig.HOODIE_BASE_FILE_FORMAT_PROP.defaultValue(), populateMetaFields);
init(HoodieTableConfig.BASE_FILE_FORMAT.defaultValue(), populateMetaFields);
testClustering(false, populateMetaFields, preserveCommitMetadata);
}

@@ -274,7 +274,7 @@ public class TestHoodieMergeOnReadTable extends HoodieClientTestHarness {
@MethodSource("populateMetaFieldsAndPreserveMetadataParams")
public void testSimpleClusteringWithUpdates(boolean populateMetaFields, boolean preserveCommitMetadata) throws Exception {
clean();
init(HoodieTableConfig.HOODIE_BASE_FILE_FORMAT_PROP.defaultValue(), populateMetaFields);
init(HoodieTableConfig.BASE_FILE_FORMAT.defaultValue(), populateMetaFields);
testClustering(true, populateMetaFields, preserveCommitMetadata);
}

@@ -478,7 +478,7 @@ public class TestHoodieMergeOnReadTable extends HoodieClientTestHarness {
@MethodSource("populateMetaFieldsParams")
public void testSimpleInsertUpdateAndDelete(boolean populateMetaFields) throws Exception {
clean();
init(HoodieTableConfig.HOODIE_BASE_FILE_FORMAT_PROP.defaultValue(), populateMetaFields);
init(HoodieTableConfig.BASE_FILE_FORMAT.defaultValue(), populateMetaFields);
HoodieWriteConfig.Builder cfgBuilder = getConfigBuilder(true);
addConfigsForPopulateMetaFields(cfgBuilder, populateMetaFields);
HoodieWriteConfig cfg = cfgBuilder.build();
@@ -777,7 +777,7 @@ public class TestHoodieMergeOnReadTable extends HoodieClientTestHarness {
@MethodSource("populateMetaFieldsParams")
public void testRollbackWithDeltaAndCompactionCommitUsingFileList(boolean populateMetaFields) throws Exception {
clean();
init(HoodieTableConfig.HOODIE_BASE_FILE_FORMAT_PROP.defaultValue(), populateMetaFields);
init(HoodieTableConfig.BASE_FILE_FORMAT.defaultValue(), populateMetaFields);
testRollbackWithDeltaAndCompactionCommit(false, populateMetaFields);
}

@@ -785,7 +785,7 @@ public class TestHoodieMergeOnReadTable extends HoodieClientTestHarness {
@MethodSource("populateMetaFieldsParams")
public void testRollbackWithDeltaAndCompactionCommitUsingMarkers(boolean populateMetaFields) throws Exception {
clean();
init(HoodieTableConfig.HOODIE_BASE_FILE_FORMAT_PROP.defaultValue(), populateMetaFields);
init(HoodieTableConfig.BASE_FILE_FORMAT.defaultValue(), populateMetaFields);
testRollbackWithDeltaAndCompactionCommit(true, populateMetaFields);
}

@@ -793,7 +793,7 @@ public class TestHoodieMergeOnReadTable extends HoodieClientTestHarness {
@MethodSource("populateMetaFieldsParams")
public void testMultiRollbackWithDeltaAndCompactionCommit(boolean populateMetaFields) throws Exception {
clean();
init(HoodieTableConfig.HOODIE_BASE_FILE_FORMAT_PROP.defaultValue(), populateMetaFields);
init(HoodieTableConfig.BASE_FILE_FORMAT.defaultValue(), populateMetaFields);
HoodieWriteConfig.Builder cfgBuilder = getConfigBuilder(false);
addConfigsForPopulateMetaFields(cfgBuilder, populateMetaFields);
HoodieWriteConfig cfg = cfgBuilder.build();
@@ -966,7 +966,7 @@ public class TestHoodieMergeOnReadTable extends HoodieClientTestHarness {
@MethodSource("populateMetaFieldsParams")
public void testUpsertPartitioner(boolean populateMetaFields) throws Exception {
clean();
init(HoodieTableConfig.HOODIE_BASE_FILE_FORMAT_PROP.defaultValue(), populateMetaFields);
init(HoodieTableConfig.BASE_FILE_FORMAT.defaultValue(), populateMetaFields);
HoodieWriteConfig.Builder cfgBuilder = getConfigBuilder(true);
addConfigsForPopulateMetaFields(cfgBuilder, populateMetaFields);
HoodieWriteConfig cfg = cfgBuilder.build();

@@ -28,6 +28,7 @@ import org.apache.hudi.common.table.HoodieTableConfig;
import org.apache.hudi.common.util.collection.Pair;
import org.apache.hudi.config.HoodieCompactionConfig;
import org.apache.hudi.config.HoodieWriteConfig;

import org.junit.jupiter.api.Test;

import java.text.SimpleDateFormat;
@@ -275,7 +276,7 @@ public class TestHoodieCompactionStrategy {
private final long size;

public TestHoodieBaseFile(long size) {
super("/tmp/XYXYXYXYXYYX_11_20180918020003" + HoodieTableConfig.HOODIE_BASE_FILE_FORMAT_PROP.defaultValue().getFileExtension());
super("/tmp/XYXYXYXYXYYX_11_20180918020003" + HoodieTableConfig.BASE_FILE_FORMAT.defaultValue().getFileExtension());
this.size = size;
}

@@ -18,15 +18,16 @@

package org.apache.hudi.table.action.rollback;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hudi.common.HoodieRollbackStat;
import org.apache.hudi.common.table.HoodieTableConfig;
import org.apache.hudi.common.table.log.block.HoodieLogBlock;
import org.apache.hudi.common.table.timeline.HoodieInstant;
import org.apache.hudi.common.table.timeline.HoodieTimeline;
import org.apache.hudi.common.util.CollectionUtils;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.junit.jupiter.api.Test;

import java.util.HashMap;
@@ -34,11 +35,11 @@ import java.util.Map;
import java.util.stream.Collectors;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertIterableEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;

public class TestRollbackUtils {
private static final String BASE_FILE_EXTENSION = HoodieTableConfig.HOODIE_BASE_FILE_FORMAT_PROP.defaultValue().getFileExtension();
private static final String BASE_FILE_EXTENSION = HoodieTableConfig.BASE_FILE_FORMAT.defaultValue().getFileExtension();

private FileStatus generateFileStatus(String filePath) {
Path dataFile1Path = new Path(filePath);

@@ -75,8 +75,8 @@ import java.util.function.Predicate;
import java.util.stream.Collectors;
import java.util.stream.Stream;

import static org.apache.hudi.common.table.HoodieTableConfig.HOODIE_BASE_FILE_FORMAT_PROP;
import static org.apache.hudi.common.table.HoodieTableConfig.HOODIE_TABLE_TYPE_PROP;
import static org.apache.hudi.common.table.HoodieTableConfig.BASE_FILE_FORMAT;
import static org.apache.hudi.common.table.HoodieTableConfig.TYPE;
import static org.apache.hudi.common.testutils.HoodieTestDataGenerator.DEFAULT_FIRST_PARTITION_PATH;
import static org.apache.hudi.common.testutils.HoodieTestDataGenerator.DEFAULT_SECOND_PARTITION_PATH;
import static org.apache.hudi.common.util.MarkerUtils.MARKERS_FILENAME_PREFIX;
@@ -145,7 +145,7 @@ public class TestUpgradeDowngrade extends HoodieClientTestBase {
// init config, table and client.
Map<String, String> params = new HashMap<>();
if (tableType == HoodieTableType.MERGE_ON_READ) {
params.put(HOODIE_TABLE_TYPE_PROP.key(), HoodieTableType.MERGE_ON_READ.name());
params.put(TYPE.key(), HoodieTableType.MERGE_ON_READ.name());
metaClient = HoodieTestUtils.init(hadoopConf, basePath, HoodieTableType.MERGE_ON_READ);
}
HoodieWriteConfig cfg = getConfigBuilder().withAutoCommit(false).withRollbackUsingMarkers(false).withProps(params).build();
@@ -206,7 +206,7 @@ public class TestUpgradeDowngrade extends HoodieClientTestBase {
Map<String, String> params = new HashMap<>();
addNewTableParamsToProps(params);
if (tableType == HoodieTableType.MERGE_ON_READ) {
params.put(HOODIE_TABLE_TYPE_PROP.key(), HoodieTableType.MERGE_ON_READ.name());
params.put(TYPE.key(), HoodieTableType.MERGE_ON_READ.name());
metaClient = HoodieTestUtils.init(hadoopConf, basePath, HoodieTableType.MERGE_ON_READ);
}
HoodieWriteConfig cfg = getConfigBuilder().withAutoCommit(false).withRollbackUsingMarkers(false).withProps(params).build();
@@ -233,8 +233,8 @@ public class TestUpgradeDowngrade extends HoodieClientTestBase {
private void addNewTableParamsToProps(Map<String, String> params) {
params.put(KeyGeneratorOptions.RECORDKEY_FIELD.key(), "uuid");
params.put(KeyGeneratorOptions.PARTITIONPATH_FIELD.key(), "partition_path");
params.put(HoodieTableConfig.HOODIE_TABLE_NAME_PROP.key(), metaClient.getTableConfig().getTableName());
params.put(HOODIE_BASE_FILE_FORMAT_PROP.key(), HOODIE_BASE_FILE_FORMAT_PROP.defaultValue().name());
params.put(HoodieTableConfig.NAME.key(), metaClient.getTableConfig().getTableName());
params.put(BASE_FILE_FORMAT.key(), BASE_FILE_FORMAT.defaultValue().name());
}

private void doInsert(SparkRDDWriteClient client) {
@@ -248,11 +248,11 @@ public class TestUpgradeDowngrade extends HoodieClientTestBase {

private void downgradeTableConfigsFromTwoToOne(HoodieWriteConfig cfg) throws IOException {
Properties properties = new Properties(cfg.getProps());
properties.remove(HoodieTableConfig.HOODIE_TABLE_RECORDKEY_FIELDS.key());
properties.remove(HoodieTableConfig.HOODIE_TABLE_PARTITION_FIELDS_PROP.key());
properties.remove(HoodieTableConfig.HOODIE_TABLE_NAME_PROP.key());
properties.remove(HOODIE_BASE_FILE_FORMAT_PROP.key());
properties.setProperty(HoodieTableConfig.HOODIE_TABLE_VERSION_PROP.key(), "1");
properties.remove(HoodieTableConfig.RECORDKEY_FIELDS.key());
properties.remove(HoodieTableConfig.PARTITION_FIELDS.key());
properties.remove(HoodieTableConfig.NAME.key());
properties.remove(BASE_FILE_FORMAT.key());
properties.setProperty(HoodieTableConfig.VERSION.key(), "1");

metaClient = HoodieTestUtils.init(hadoopConf, basePath, getTableType(), properties);
// set hoodie.table.version to 1 in hoodie.properties file
@@ -265,7 +265,7 @@ public class TestUpgradeDowngrade extends HoodieClientTestBase {
assertEquals(tableConfig.getPartitionFieldProp(), originalProps.getProperty(KeyGeneratorOptions.PARTITIONPATH_FIELD.key()));
assertEquals(tableConfig.getRecordKeyFieldProp(), originalProps.getProperty(KeyGeneratorOptions.RECORDKEY_FIELD.key()));
assertEquals(tableConfig.getTableName(), cfg.getTableName());
assertEquals(tableConfig.getBaseFileFormat().name(), originalProps.getProperty(HOODIE_BASE_FILE_FORMAT_PROP.key()));
assertEquals(tableConfig.getBaseFileFormat().name(), originalProps.getProperty(BASE_FILE_FORMAT.key()));
}

@ParameterizedTest(name = TEST_NAME_WITH_DOWNGRADE_PARAMS)
@@ -278,7 +278,7 @@ public class TestUpgradeDowngrade extends HoodieClientTestBase {
addNewTableParamsToProps(params);
}
if (tableType == HoodieTableType.MERGE_ON_READ) {
params.put(HOODIE_TABLE_TYPE_PROP.key(), HoodieTableType.MERGE_ON_READ.name());
params.put(TYPE.key(), HoodieTableType.MERGE_ON_READ.name());
metaClient = HoodieTestUtils.init(hadoopConf, basePath, HoodieTableType.MERGE_ON_READ);
}
HoodieWriteConfig cfg = getConfigBuilder().withAutoCommit(false).withRollbackUsingMarkers(true)
@@ -288,10 +288,10 @@ public class TestUpgradeDowngrade extends HoodieClientTestBase {
if (fromVersion == HoodieTableVersion.TWO) {
// set table configs
HoodieTableConfig tableConfig = metaClient.getTableConfig();
tableConfig.setValue(HoodieTableConfig.HOODIE_TABLE_NAME_PROP, cfg.getTableName());
tableConfig.setValue(HoodieTableConfig.HOODIE_TABLE_PARTITION_FIELDS_PROP, cfg.getString(KeyGeneratorOptions.PARTITIONPATH_FIELD.key()));
tableConfig.setValue(HoodieTableConfig.HOODIE_TABLE_RECORDKEY_FIELDS, cfg.getString(KeyGeneratorOptions.RECORDKEY_FIELD.key()));
tableConfig.setValue(HOODIE_BASE_FILE_FORMAT_PROP, cfg.getString(HOODIE_BASE_FILE_FORMAT_PROP));
tableConfig.setValue(HoodieTableConfig.NAME, cfg.getTableName());
tableConfig.setValue(HoodieTableConfig.PARTITION_FIELDS, cfg.getString(KeyGeneratorOptions.PARTITIONPATH_FIELD.key()));
tableConfig.setValue(HoodieTableConfig.RECORDKEY_FIELDS, cfg.getString(KeyGeneratorOptions.RECORDKEY_FIELD.key()));
tableConfig.setValue(BASE_FILE_FORMAT, cfg.getString(BASE_FILE_FORMAT));
}

// prepare data. Make 2 commits, in which 2nd is not committed.
@@ -433,7 +433,7 @@ public class TestUpgradeDowngrade extends HoodieClientTestBase {
private List<HoodieRecord> triggerCommit(String newCommitTime, HoodieTableType tableType, boolean enableMarkedBasedRollback) {
Map<String, String> params = new HashMap<>();
if (tableType == HoodieTableType.MERGE_ON_READ) {
params.put(HOODIE_TABLE_TYPE_PROP.key(), HoodieTableType.MERGE_ON_READ.name());
params.put(TYPE.key(), HoodieTableType.MERGE_ON_READ.name());
}
HoodieWriteConfig cfg = getConfigBuilder().withAutoCommit(false).withRollbackUsingMarkers(enableMarkedBasedRollback).withProps(params).build();
SparkRDDWriteClient client = getHoodieWriteClient(cfg);
@@ -571,6 +571,6 @@ public class TestUpgradeDowngrade extends HoodieClientTestBase {
HoodieConfig hoodieConfig = HoodieConfig.create(fsDataInputStream);
fsDataInputStream.close();
assertEquals(Integer.toString(expectedVersion.versionCode()), hoodieConfig
.getString(HoodieTableConfig.HOODIE_TABLE_VERSION_PROP));
.getString(HoodieTableConfig.VERSION));
}
}

@@ -17,13 +17,6 @@

package org.apache.hudi.testutils;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hudi.client.HoodieReadClient;
import org.apache.hudi.client.SparkRDDWriteClient;
import org.apache.hudi.client.SparkTaskContextSupplier;
@@ -46,6 +39,14 @@ import org.apache.hudi.config.HoodieWriteConfig;
import org.apache.hudi.index.HoodieIndex;
import org.apache.hudi.keygen.SimpleKeyGenerator;
import org.apache.hudi.table.WorkloadStat;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;
import org.apache.spark.api.java.JavaRDD;
@@ -54,7 +55,6 @@ import org.apache.spark.sql.SQLContext;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.TestInfo;
import scala.Tuple2;

import java.io.IOException;
import java.io.Serializable;
@@ -64,13 +64,15 @@ import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import scala.Tuple2;

/**
 * The test harness for resource initialization and cleanup.
 */
public abstract class HoodieClientTestHarness extends HoodieCommonTestHarness implements Serializable {

private static final Logger LOG = LogManager.getLogger(HoodieClientTestHarness.class);


private String testMethodName;
protected transient JavaSparkContext jsc = null;
protected transient HoodieSparkEngineContext context = null;
@@ -232,12 +234,12 @@ public abstract class HoodieClientTestHarness extends HoodieCommonTestHarness implements Serializable {

protected Properties getPropertiesForKeyGen() {
Properties properties = new Properties();
properties.put(HoodieTableConfig.HOODIE_POPULATE_META_FIELDS.key(), "false");
properties.put("hoodie.datasource.write.recordkey.field","_row_key");
properties.put("hoodie.datasource.write.partitionpath.field","partition_path");
properties.put(HoodieTableConfig.HOODIE_TABLE_RECORDKEY_FIELDS.key(), "_row_key");
properties.put(HoodieTableConfig.HOODIE_TABLE_PARTITION_FIELDS_PROP.key(), "partition_path");
properties.put(HoodieTableConfig.HOODIE_TABLE_KEY_GENERATOR_CLASS.key(), SimpleKeyGenerator.class.getName());
properties.put(HoodieTableConfig.POPULATE_META_FIELDS.key(), "false");
properties.put("hoodie.datasource.write.recordkey.field", "_row_key");
properties.put("hoodie.datasource.write.partitionpath.field", "partition_path");
properties.put(HoodieTableConfig.RECORDKEY_FIELDS.key(), "_row_key");
properties.put(HoodieTableConfig.PARTITION_FIELDS.key(), "partition_path");
properties.put(HoodieTableConfig.KEY_GENERATOR_CLASS_NAME.key(), SimpleKeyGenerator.class.getName());
return properties;
}

@@ -121,9 +121,9 @@ public class HoodieClientTestUtils {
getLatestFileIDsToFullPath(basePath, commitTimeline, Arrays.asList(commitInstant));
LOG.info("Path :" + paths.values());
Dataset<Row> unFilteredRows = null;
if (HoodieTableConfig.HOODIE_BASE_FILE_FORMAT_PROP.defaultValue().equals(HoodieFileFormat.PARQUET)) {
if (HoodieTableConfig.BASE_FILE_FORMAT.defaultValue().equals(HoodieFileFormat.PARQUET)) {
unFilteredRows = sqlContext.read().parquet(paths.values().toArray(new String[paths.size()]));
} else if (HoodieTableConfig.HOODIE_BASE_FILE_FORMAT_PROP.defaultValue().equals(HoodieFileFormat.ORC)) {
} else if (HoodieTableConfig.BASE_FILE_FORMAT.defaultValue().equals(HoodieFileFormat.ORC)) {
unFilteredRows = sqlContext.read().orc(paths.values().toArray(new String[paths.size()]));
}
if (unFilteredRows != null) {