[HUDI-2150] Rename/Restructure configs for better modularity (#6061)
- Move clean-related configuration to HoodieCleanConfig
- Move archival-related configuration to HoodieArchivalConfig
- Move hoodie.compaction.payload.class to HoodiePayloadConfig
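For orientation, a minimal sketch of the before/after builder usage; the builder calls are taken from the tests touched below, and the table path is a placeholder:

  // Before this change: clean and archival settings hung off HoodieCompactionConfig.
  HoodieWriteConfig before = HoodieWriteConfig.newBuilder().withPath("/tmp/hoodie_table") // placeholder path
      .withCompactionConfig(HoodieCompactionConfig.newBuilder()
          .withCleanerPolicy(HoodieCleaningPolicy.KEEP_LATEST_COMMITS).retainCommits(1)
          .archiveCommitsWith(2, 3).build())
      .build();

  // After this change: clean settings move to HoodieCleanConfig, archival settings to HoodieArchivalConfig,
  // and HoodieCompactionConfig keeps only compaction-specific options.
  HoodieWriteConfig after = HoodieWriteConfig.newBuilder().withPath("/tmp/hoodie_table") // placeholder path
      .withCleanConfig(HoodieCleanConfig.newBuilder()
          .withCleanerPolicy(HoodieCleaningPolicy.KEEP_LATEST_COMMITS).retainCommits(1).build())
      .withArchivalConfig(HoodieArchivalConfig.newBuilder().archiveCommitsWith(2, 3).build())
      .withCompactionConfig(HoodieCompactionConfig.newBuilder().withInlineCompaction(false).build())
      .build();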
@@ -38,7 +38,7 @@ import org.apache.hudi.common.testutils.HoodieMetadataTestTable;
 import org.apache.hudi.common.testutils.HoodieTestDataGenerator;
 import org.apache.hudi.common.testutils.HoodieTestTable;
 import org.apache.hudi.common.util.collection.Pair;
-import org.apache.hudi.config.HoodieCompactionConfig;
+import org.apache.hudi.config.HoodieCleanConfig;
 import org.apache.hudi.config.HoodieIndexConfig;
 import org.apache.hudi.config.HoodieWriteConfig;
 import org.apache.hudi.exception.HoodieRollbackException;
@@ -80,7 +80,7 @@ public class TestClientRollback extends HoodieClientTestBase {
 */
 @Test
 public void testSavepointAndRollback() throws Exception {
-HoodieWriteConfig cfg = getConfigBuilder().withCompactionConfig(HoodieCompactionConfig.newBuilder()
+HoodieWriteConfig cfg = getConfigBuilder().withCleanConfig(HoodieCleanConfig.newBuilder()
 .withCleanerPolicy(HoodieCleaningPolicy.KEEP_LATEST_COMMITS).retainCommits(1).build()).build();
 try (SparkRDDWriteClient client = getHoodieWriteClient(cfg)) {
 HoodieTestDataGenerator.writePartitionMetadataDeprecated(fs, HoodieTestDataGenerator.DEFAULT_PARTITION_PATHS, basePath);
@@ -214,7 +214,7 @@ public class TestClientRollback extends HoodieClientTestBase {

 HoodieWriteConfig config = HoodieWriteConfig.newBuilder().withPath(basePath)
 .withRollbackUsingMarkers(false)
-.withCompactionConfig(HoodieCompactionConfig.newBuilder()
+.withCleanConfig(HoodieCleanConfig.newBuilder()
 .withFailedWritesCleaningPolicy(HoodieFailedWritesCleaningPolicy.LAZY).build())
 .withIndexConfig(HoodieIndexConfig.newBuilder().withIndexType(HoodieIndex.IndexType.INMEMORY).build()).build();

@@ -329,7 +329,7 @@ public class TestClientRollback extends HoodieClientTestBase {
 .enable(enableMetadataTable)
 .build()
 )
-.withCompactionConfig(HoodieCompactionConfig.newBuilder()
+.withCleanConfig(HoodieCleanConfig.newBuilder()
 .withFailedWritesCleaningPolicy(HoodieFailedWritesCleaningPolicy.LAZY).build())
 .withIndexConfig(HoodieIndexConfig.newBuilder().withIndexType(HoodieIndex.IndexType.INMEMORY).build()).build();

@@ -436,7 +436,7 @@ public class TestClientRollback extends HoodieClientTestBase {
 // Set Failed Writes rollback to LAZY
 HoodieWriteConfig config = HoodieWriteConfig.newBuilder().withPath(basePath)
 .withIndexConfig(HoodieIndexConfig.newBuilder().withIndexType(HoodieIndex.IndexType.INMEMORY).build())
-.withCompactionConfig(HoodieCompactionConfig.newBuilder()
+.withCleanConfig(HoodieCleanConfig.newBuilder()
 .withFailedWritesCleaningPolicy(HoodieFailedWritesCleaningPolicy.LAZY).build()).build();

 HoodieTableMetadataWriter metadataWriter = SparkHoodieBackedTableMetadataWriter.create(hadoopConf, config, context);
@@ -530,7 +530,7 @@ public class TestClientRollback extends HoodieClientTestBase {
 .enable(enableMetadataTable)
 .build()
 )
-.withCompactionConfig(HoodieCompactionConfig.newBuilder()
+.withCleanConfig(HoodieCleanConfig.newBuilder()
 .withFailedWritesCleaningPolicy(HoodieFailedWritesCleaningPolicy.LAZY).build())
 .withIndexConfig(HoodieIndexConfig.newBuilder().withIndexType(HoodieIndex.IndexType.INMEMORY).build()).build();

@@ -33,6 +33,8 @@ import org.apache.hudi.common.table.view.FileSystemViewStorageType;
 import org.apache.hudi.common.testutils.HoodieTestUtils;
 import org.apache.hudi.common.util.Option;
 import org.apache.hudi.config.HoodieClusteringConfig;
+import org.apache.hudi.config.HoodieCleanConfig;
+import org.apache.hudi.config.HoodieArchivalConfig;
 import org.apache.hudi.config.HoodieCompactionConfig;
 import org.apache.hudi.config.HoodieLockConfig;
 import org.apache.hudi.config.HoodieWriteConfig;
@@ -100,9 +102,11 @@ public class TestHoodieClientMultiWriter extends HoodieClientTestBase {
 properties.setProperty(FILESYSTEM_LOCK_PATH_PROP_KEY, basePath + "/.hoodie/.locks");
 properties.setProperty(LockConfiguration.LOCK_ACQUIRE_WAIT_TIMEOUT_MS_PROP_KEY, "3000");
 HoodieWriteConfig writeConfig = getConfigBuilder()
-.withCompactionConfig(HoodieCompactionConfig.newBuilder()
+.withCleanConfig(HoodieCleanConfig.newBuilder()
 .withFailedWritesCleaningPolicy(HoodieFailedWritesCleaningPolicy.LAZY)
-.withAutoArchive(false).withAutoClean(false).build())
+.withAutoClean(false).build())
+.withArchivalConfig(HoodieArchivalConfig.newBuilder()
+.withAutoArchive(false).build())
 .withWriteConcurrencyMode(WriteConcurrencyMode.OPTIMISTIC_CONCURRENCY_CONTROL)
 // Timeline-server-based markers are not used for multi-writer tests
 .withMarkersType(MarkerType.DIRECT.name())
@@ -192,9 +196,11 @@ public class TestHoodieClientMultiWriter extends HoodieClientTestBase {
 properties.setProperty(LockConfiguration.LOCK_ACQUIRE_CLIENT_NUM_RETRIES_PROP_KEY, "20");

 HoodieWriteConfig cfg = getConfigBuilder()
+.withCleanConfig(HoodieCleanConfig.newBuilder()
+.withFailedWritesCleaningPolicy(HoodieFailedWritesCleaningPolicy.LAZY)
+.build())
 .withCompactionConfig(HoodieCompactionConfig.newBuilder()
 .withInlineCompaction(false)
-.withFailedWritesCleaningPolicy(HoodieFailedWritesCleaningPolicy.LAZY)
 .withMaxNumDeltaCommitsBeforeCompaction(2)
 .build())
 .withWriteConcurrencyMode(WriteConcurrencyMode.OPTIMISTIC_CONCURRENCY_CONTROL)
@@ -265,9 +271,12 @@ public class TestHoodieClientMultiWriter extends HoodieClientTestBase {
 properties.setProperty(LockConfiguration.LOCK_ACQUIRE_WAIT_TIMEOUT_MS_PROP_KEY, "3000");
 // Disabling embedded timeline server, it doesn't work with multiwriter
 HoodieWriteConfig.Builder writeConfigBuilder = getConfigBuilder()
-.withCompactionConfig(HoodieCompactionConfig.newBuilder().withAutoClean(false)
-.withInlineCompaction(false).withAsyncClean(true)
-.withFailedWritesCleaningPolicy(HoodieFailedWritesCleaningPolicy.LAZY)
+.withCleanConfig(HoodieCleanConfig.newBuilder()
+.withAutoClean(false)
+.withAsyncClean(true)
+.withFailedWritesCleaningPolicy(HoodieFailedWritesCleaningPolicy.LAZY).build())
+.withCompactionConfig(HoodieCompactionConfig.newBuilder()
+.withInlineCompaction(false)
 .withMaxNumDeltaCommitsBeforeCompaction(2).build())
 .withEmbeddedTimelineServerEnabled(false)
 // Timeline-server-based markers are not used for multi-writer tests
@@ -402,7 +411,8 @@ public class TestHoodieClientMultiWriter extends HoodieClientTestBase {
 properties.setProperty(FILESYSTEM_LOCK_PATH_PROP_KEY, basePath + "/.hoodie/.locks");
 properties.setProperty(LockConfiguration.LOCK_ACQUIRE_WAIT_TIMEOUT_MS_PROP_KEY, "3000");
 HoodieWriteConfig.Builder writeConfigBuilder = getConfigBuilder()
-.withCompactionConfig(HoodieCompactionConfig.newBuilder().withFailedWritesCleaningPolicy(HoodieFailedWritesCleaningPolicy.LAZY)
+.withCleanConfig(HoodieCleanConfig.newBuilder()
+.withFailedWritesCleaningPolicy(HoodieFailedWritesCleaningPolicy.LAZY)
 .withAutoClean(false).build())
 .withWriteConcurrencyMode(WriteConcurrencyMode.OPTIMISTIC_CONCURRENCY_CONTROL)
 // Timeline-server-based markers are not used for multi-writer tests
@@ -453,7 +463,8 @@ public class TestHoodieClientMultiWriter extends HoodieClientTestBase {
 properties.setProperty(LockConfiguration.LOCK_ACQUIRE_WAIT_TIMEOUT_MS_PROP_KEY, "3000");
 properties.setProperty(LockConfiguration.LOCK_ACQUIRE_CLIENT_NUM_RETRIES_PROP_KEY, "100");
 HoodieWriteConfig.Builder writeConfigBuilder = getConfigBuilder()
-.withCompactionConfig(HoodieCompactionConfig.newBuilder().withFailedWritesCleaningPolicy(HoodieFailedWritesCleaningPolicy.LAZY)
+.withCleanConfig(HoodieCleanConfig.newBuilder()
+.withFailedWritesCleaningPolicy(HoodieFailedWritesCleaningPolicy.LAZY)
 .withAutoClean(false).build())
 .withWriteConcurrencyMode(WriteConcurrencyMode.OPTIMISTIC_CONCURRENCY_CONTROL)
 // Timeline-server-based markers are not used for multi-writer tests
@@ -536,7 +547,8 @@ public class TestHoodieClientMultiWriter extends HoodieClientTestBase {
 properties.setProperty(LockConfiguration.LOCK_ACQUIRE_WAIT_TIMEOUT_MS_PROP_KEY, "3000");
 properties.setProperty(LockConfiguration.LOCK_ACQUIRE_CLIENT_NUM_RETRIES_PROP_KEY, "100");
 HoodieWriteConfig.Builder writeConfigBuilder = getConfigBuilder()
-.withCompactionConfig(HoodieCompactionConfig.newBuilder().withFailedWritesCleaningPolicy(HoodieFailedWritesCleaningPolicy.LAZY)
+.withCleanConfig(HoodieCleanConfig.newBuilder()
+.withFailedWritesCleaningPolicy(HoodieFailedWritesCleaningPolicy.LAZY)
 .withAutoClean(false).build())
 .withWriteConcurrencyMode(WriteConcurrencyMode.OPTIMISTIC_CONCURRENCY_CONTROL)
 // Timeline-server-based markers are not used for multi-writer tests

@@ -82,11 +82,13 @@ import org.apache.hudi.common.util.collection.ExternalSpillableMap;
 import org.apache.hudi.common.util.hash.ColumnIndexID;
 import org.apache.hudi.common.util.hash.PartitionIndexID;
 import org.apache.hudi.config.HoodieClusteringConfig;
+import org.apache.hudi.config.HoodieCleanConfig;
+import org.apache.hudi.config.HoodieArchivalConfig;
 import org.apache.hudi.config.HoodieCompactionConfig;
-import org.apache.hudi.config.HoodieIndexConfig;
 import org.apache.hudi.config.HoodieLockConfig;
-import org.apache.hudi.config.HoodieStorageConfig;
+import org.apache.hudi.config.HoodieIndexConfig;
 import org.apache.hudi.config.HoodieWriteConfig;
+import org.apache.hudi.config.HoodieStorageConfig;
 import org.apache.hudi.exception.HoodieMetadataException;
 import org.apache.hudi.index.HoodieIndex;
 import org.apache.hudi.io.storage.HoodieHFileReader;
@@ -476,7 +478,13 @@ public class TestHoodieBackedMetadata extends TestHoodieMetadataBase {
 .archiveCommitsWith(3, 4)
 .retainCommits(1)
 .build())
-.withCompactionConfig(HoodieCompactionConfig.newBuilder().archiveCommitsWith(2, 3).retainCommits(1).build()).build();
+.withCleanConfig(HoodieCleanConfig.newBuilder()
+.retainCommits(1)
+.build())
+.withArchivalConfig(HoodieArchivalConfig.newBuilder()
+.archiveCommitsWith(2, 3)
+.build())
+.build();
 initWriteConfigAndMetatableWriter(writeConfig, true);

 AtomicInteger commitTime = new AtomicInteger(1);
@@ -637,8 +645,9 @@ public class TestHoodieBackedMetadata extends TestHoodieMetadataBase {
 initPath();
 int maxCommits = 1;
 HoodieWriteConfig cfg = getConfigBuilder(TRIP_EXAMPLE_SCHEMA, HoodieIndex.IndexType.BLOOM, HoodieFailedWritesCleaningPolicy.EAGER)
-.withCompactionConfig(HoodieCompactionConfig.newBuilder()
-.withCleanerPolicy(HoodieCleaningPolicy.KEEP_LATEST_COMMITS).retainCommits(maxCommits).build())
+.withCleanConfig(HoodieCleanConfig.newBuilder()
+.withCleanerPolicy(HoodieCleaningPolicy.KEEP_LATEST_COMMITS).retainCommits(maxCommits)
+.build())
 .withParallelism(1, 1).withBulkInsertParallelism(1).withFinalizeWriteParallelism(1).withDeleteParallelism(1)
 .withConsistencyGuardConfig(ConsistencyGuardConfig.newBuilder().withConsistencyCheckEnabled(true).build())
 .withMetadataConfig(HoodieMetadataConfig.newBuilder().enable(true).build())
@@ -1172,8 +1181,15 @@ public class TestHoodieBackedMetadata extends TestHoodieMetadataBase {
 .withMaxNumDeltaCommitsBeforeCompaction(maxDeltaCommitsBeforeCompaction)
 .withPopulateMetaFields(populateMateFields)
 .build())
-.withCompactionConfig(HoodieCompactionConfig.newBuilder().archiveCommitsWith(minArchiveCommitsDataset, minArchiveCommitsDataset + 1)
-.retainCommits(1).retainFileVersions(1).withAutoClean(false).withAsyncClean(true).build())
+.withCleanConfig(HoodieCleanConfig.newBuilder()
+.retainCommits(1)
+.retainFileVersions(1)
+.withAutoClean(false)
+.withAsyncClean(true)
+.build())
+.withArchivalConfig(HoodieArchivalConfig.newBuilder()
+.archiveCommitsWith(minArchiveCommitsDataset, minArchiveCommitsDataset + 1)
+.build())
 .build();

 initWriteConfigAndMetatableWriter(writeConfig, true);
@@ -1399,10 +1415,13 @@ public class TestHoodieBackedMetadata extends TestHoodieMetadataBase {
 // disable small file handling so that every insert goes to a new file group.
 HoodieWriteConfig writeConfig = getWriteConfigBuilder(true, true, false)
 .withRollbackUsingMarkers(false)
+.withCleanConfig(HoodieCleanConfig.newBuilder()
+.withFailedWritesCleaningPolicy(HoodieFailedWritesCleaningPolicy.EAGER)
+.withAutoClean(false).retainCommits(1).retainFileVersions(1)
+.build())
 .withCompactionConfig(HoodieCompactionConfig.newBuilder().compactionSmallFileSize(0)
 .withInlineCompaction(false).withMaxNumDeltaCommitsBeforeCompaction(1)
-.withFailedWritesCleaningPolicy(HoodieFailedWritesCleaningPolicy.EAGER)
-.withAutoClean(false).retainCommits(1).retainFileVersions(1).build())
+.build())
 .withMetadataConfig(HoodieMetadataConfig.newBuilder()
 .enable(true)
 .withMetadataIndexColumnStats(true)
@@ -1612,7 +1631,7 @@ public class TestHoodieBackedMetadata extends TestHoodieMetadataBase {
 properties.setProperty(LockConfiguration.LOCK_ACQUIRE_WAIT_TIMEOUT_MS_PROP_KEY, "1000");
 properties.setProperty(LockConfiguration.LOCK_ACQUIRE_CLIENT_NUM_RETRIES_PROP_KEY, "20");
 HoodieWriteConfig writeConfig = getWriteConfigBuilder(true, true, false)
-.withCompactionConfig(HoodieCompactionConfig.newBuilder()
+.withCleanConfig(HoodieCleanConfig.newBuilder()
 .withFailedWritesCleaningPolicy(HoodieFailedWritesCleaningPolicy.LAZY).withAutoClean(false).build())
 .withWriteConcurrencyMode(WriteConcurrencyMode.OPTIMISTIC_CONCURRENCY_CONTROL)
 .withLockConfig(HoodieLockConfig.newBuilder().withLockProvider(InProcessLockProvider.class).build())
@@ -1676,8 +1695,9 @@ public class TestHoodieBackedMetadata extends TestHoodieMetadataBase {
 properties.setProperty(LockConfiguration.LOCK_ACQUIRE_WAIT_TIMEOUT_MS_PROP_KEY, "3000");

 HoodieWriteConfig writeConfig = getWriteConfigBuilder(true, true, false)
-.withCompactionConfig(HoodieCompactionConfig.newBuilder()
-.withFailedWritesCleaningPolicy(HoodieFailedWritesCleaningPolicy.LAZY).withAutoClean(true).retainCommits(4).build())
+.withCleanConfig(HoodieCleanConfig.newBuilder()
+.withFailedWritesCleaningPolicy(HoodieFailedWritesCleaningPolicy.LAZY).withAutoClean(true).retainCommits(4)
+.build())
 .withAutoCommit(false)
 .withWriteConcurrencyMode(WriteConcurrencyMode.OPTIMISTIC_CONCURRENCY_CONTROL)
 .withLockConfig(HoodieLockConfig.newBuilder().withLockProvider(InProcessLockProvider.class).build())
@@ -1853,9 +1873,12 @@ public class TestHoodieBackedMetadata extends TestHoodieMetadataBase {
 .withMetadataConfig(HoodieMetadataConfig.newBuilder().enable(true)
 .archiveCommitsWith(40, 60).retainCommits(1)
 .withMaxNumDeltaCommitsBeforeCompaction(maxDeltaCommitsBeforeCompaction).build())
-.withCompactionConfig(HoodieCompactionConfig.newBuilder().archiveCommitsWith(2, 4)
+.withCleanConfig(HoodieCleanConfig.newBuilder()
 .withFailedWritesCleaningPolicy(HoodieFailedWritesCleaningPolicy.NEVER)
-.retainCommits(1).retainFileVersions(1).withAutoClean(true).withAsyncClean(false).build())
+.retainCommits(1).retainFileVersions(1).withAutoClean(true).withAsyncClean(false)
+.build())
+.withArchivalConfig(HoodieArchivalConfig.newBuilder()
+.archiveCommitsWith(2, 4).build())
 .build();

 List<HoodieRecord> records;
@@ -2006,8 +2029,8 @@ public class TestHoodieBackedMetadata extends TestHoodieMetadataBase {
 properties.setProperty(LockConfiguration.LOCK_ACQUIRE_CLIENT_NUM_RETRIES_PROP_KEY, "3");
 properties.setProperty(LockConfiguration.LOCK_ACQUIRE_WAIT_TIMEOUT_MS_PROP_KEY, "3000");
 HoodieWriteConfig writeConfig = getWriteConfigBuilder(false, true, false)
-.withCompactionConfig(HoodieCompactionConfig.newBuilder()
-.withFailedWritesCleaningPolicy(HoodieFailedWritesCleaningPolicy.LAZY).withAutoClean(false).build())
+.withCleanConfig(HoodieCleanConfig.newBuilder()
+.withFailedWritesCleaningPolicy(HoodieFailedWritesCleaningPolicy.LAZY).withAutoClean(false).build())
 .withWriteConcurrencyMode(WriteConcurrencyMode.OPTIMISTIC_CONCURRENCY_CONTROL)
 .withLockConfig(HoodieLockConfig.newBuilder().withLockProvider(InProcessLockProvider.class).build())
 .withProperties(properties)
@@ -2034,10 +2057,8 @@ public class TestHoodieBackedMetadata extends TestHoodieMetadataBase {

 // set hoodie.table.version to 2 in hoodie.properties file
 changeTableVersion(HoodieTableVersion.TWO);
-writeConfig = getWriteConfigBuilder(true, true, false)
-.withRollbackUsingMarkers(false)
-.withCompactionConfig(HoodieCompactionConfig.newBuilder()
-.withFailedWritesCleaningPolicy(HoodieFailedWritesCleaningPolicy.LAZY).withAutoClean(false).build())
+writeConfig = getWriteConfigBuilder(true, true, false).withRollbackUsingMarkers(false).withCleanConfig(HoodieCleanConfig.newBuilder()
+.withFailedWritesCleaningPolicy(HoodieFailedWritesCleaningPolicy.LAZY).withAutoClean(false).build())
 .withWriteConcurrencyMode(WriteConcurrencyMode.OPTIMISTIC_CONCURRENCY_CONTROL)
 .withLockConfig(HoodieLockConfig.newBuilder().withLockProvider(InProcessLockProvider.class).build())
 .withProperties(properties)
@@ -2119,7 +2140,7 @@ public class TestHoodieBackedMetadata extends TestHoodieMetadataBase {

 int maxCommits = 1;
 HoodieWriteConfig cfg = getConfigBuilder(TRIP_EXAMPLE_SCHEMA, HoodieIndex.IndexType.BLOOM, HoodieFailedWritesCleaningPolicy.EAGER)
-.withCompactionConfig(HoodieCompactionConfig.newBuilder()
+.withCleanConfig(HoodieCleanConfig.newBuilder()
 .withCleanerPolicy(HoodieCleaningPolicy.KEEP_LATEST_COMMITS).retainCommits(maxCommits).build())
 .withParallelism(1, 1).withBulkInsertParallelism(1).withFinalizeWriteParallelism(1).withDeleteParallelism(1)
 .withConsistencyGuardConfig(ConsistencyGuardConfig.newBuilder().withConsistencyCheckEnabled(true).build())
@@ -2285,13 +2306,13 @@ public class TestHoodieBackedMetadata extends TestHoodieMetadataBase {

 private HoodieWriteConfig getSmallInsertWriteConfig(int insertSplitSize, String schemaStr, long smallFileSize, boolean mergeAllowDuplicateInserts) {
 HoodieWriteConfig.Builder builder = getConfigBuilder(schemaStr, HoodieIndex.IndexType.BLOOM, HoodieFailedWritesCleaningPolicy.EAGER);
-return builder
-.withCompactionConfig(
+return builder.withCompactionConfig(
 HoodieCompactionConfig.newBuilder()
 .compactionSmallFileSize(smallFileSize)
 // Set rollback to LAZY so no inflights are deleted
-.withFailedWritesCleaningPolicy(HoodieFailedWritesCleaningPolicy.LAZY)
 .insertSplitSize(insertSplitSize).build())
+.withCleanConfig(HoodieCleanConfig.newBuilder()
+.withFailedWritesCleaningPolicy(HoodieFailedWritesCleaningPolicy.LAZY).build())
 .withStorageConfig(
 HoodieStorageConfig.newBuilder()
 .hfileMaxFileSize(dataGen.getEstimatedFileSizeInBytes(200))
@@ -2307,8 +2328,8 @@ public class TestHoodieBackedMetadata extends TestHoodieMetadataBase {
 .withTimelineLayoutVersion(TimelineLayoutVersion.CURR_VERSION)
 .withWriteStatusClass(MetadataMergeWriteStatus.class)
 .withConsistencyGuardConfig(ConsistencyGuardConfig.newBuilder().withConsistencyCheckEnabled(true).build())
-.withCompactionConfig(HoodieCompactionConfig.newBuilder().withFailedWritesCleaningPolicy(cleaningPolicy)
-.compactionSmallFileSize(1024 * 1024).build())
+.withCleanConfig(HoodieCleanConfig.newBuilder().withFailedWritesCleaningPolicy(cleaningPolicy).build())
+.withCompactionConfig(HoodieCompactionConfig.newBuilder().compactionSmallFileSize(1024 * 1024).build())
 .withStorageConfig(HoodieStorageConfig.newBuilder().hfileMaxFileSize(1024 * 1024).parquetMaxFileSize(1024 * 1024).orcMaxFileSize(1024 * 1024).build())
 .forTable("test-trip-table")
 .withIndexConfig(HoodieIndexConfig.newBuilder().withIndexType(indexType).build())

@@ -71,12 +71,13 @@ import org.apache.hudi.common.util.MarkerUtils;
 import org.apache.hudi.common.util.Option;
 import org.apache.hudi.common.util.StringUtils;
 import org.apache.hudi.common.util.collection.Pair;
-import org.apache.hudi.config.HoodieClusteringConfig;
 import org.apache.hudi.config.HoodieCompactionConfig;
 import org.apache.hudi.config.HoodieIndexConfig;
-import org.apache.hudi.config.HoodiePreCommitValidatorConfig;
+import org.apache.hudi.config.HoodieClusteringConfig;
 import org.apache.hudi.config.HoodieStorageConfig;
 import org.apache.hudi.config.HoodieWriteConfig;
+import org.apache.hudi.config.HoodiePreCommitValidatorConfig;
+import org.apache.hudi.config.HoodieCleanConfig;
 import org.apache.hudi.data.HoodieJavaRDD;
 import org.apache.hudi.exception.HoodieCommitException;
 import org.apache.hudi.exception.HoodieCorruptedDataException;
@@ -360,7 +361,7 @@ public class TestHoodieClientOnCopyOnWriteStorage extends HoodieClientTestBase {
 .withPrecommitValidatorSingleResultSqlQueries(COUNT_SQL_QUERY_FOR_VALIDATION + "#" + 500)
 .build();
 HoodieWriteConfig config = getConfigBuilder()
-.withCompactionConfig(HoodieCompactionConfig.newBuilder().withFailedWritesCleaningPolicy(HoodieFailedWritesCleaningPolicy.NEVER).build())
+.withCleanConfig(HoodieCleanConfig.newBuilder().withFailedWritesCleaningPolicy(HoodieFailedWritesCleaningPolicy.NEVER).build())
 .withPreCommitValidatorConfig(validatorConfig)
 .build();

@@ -386,7 +387,7 @@ public class TestHoodieClientOnCopyOnWriteStorage extends HoodieClientTestBase {
 .withPrecommitValidatorSingleResultSqlQueries(COUNT_SQL_QUERY_FOR_VALIDATION + "#" + numRecords)
 .build();
 config = getConfigBuilder()
-.withCompactionConfig(HoodieCompactionConfig.newBuilder().withFailedWritesCleaningPolicy(HoodieFailedWritesCleaningPolicy.NEVER).build())
+.withCleanConfig(HoodieCleanConfig.newBuilder().withFailedWritesCleaningPolicy(HoodieFailedWritesCleaningPolicy.NEVER).build())
 .withPreCommitValidatorConfig(validatorConfig)
 .build();
 String instant2 = HoodieActiveTimeline.createNewInstantTime();
@@ -921,7 +922,7 @@ public class TestHoodieClientOnCopyOnWriteStorage extends HoodieClientTestBase {
 .setTimelineLayoutVersion(VERSION_0)
 .initTable(metaClient.getHadoopConf(), metaClient.getBasePath());
 // Set rollback to LAZY so no inflights are deleted
-hoodieWriteConfig.getProps().put(HoodieCompactionConfig.FAILED_WRITES_CLEANER_POLICY.key(),
+hoodieWriteConfig.getProps().put(HoodieCleanConfig.FAILED_WRITES_CLEANER_POLICY.key(),
 HoodieFailedWritesCleaningPolicy.LAZY.name());
 SparkRDDWriteClient client = getHoodieWriteClient(hoodieWriteConfig);

@@ -2606,17 +2607,16 @@ public class TestHoodieClientOnCopyOnWriteStorage extends HoodieClientTestBase {
 if (!populateMetaFields) {
 builder.withIndexConfig(HoodieIndexConfig.newBuilder().withIndexType(IndexType.SIMPLE).build());
 }
-return builder
-.withCompactionConfig(
-HoodieCompactionConfig.newBuilder()
-.compactionSmallFileSize(smallFileSize)
-// Set rollback to LAZY so no inflights are deleted
-.withFailedWritesCleaningPolicy(HoodieFailedWritesCleaningPolicy.LAZY)
-.insertSplitSize(insertSplitSize).build())
-.withStorageConfig(
-HoodieStorageConfig.newBuilder()
-.hfileMaxFileSize(dataGen.getEstimatedFileSizeInBytes(200))
-.parquetMaxFileSize(dataGen.getEstimatedFileSizeInBytes(200)).build())
+return builder.withCompactionConfig(HoodieCompactionConfig.newBuilder()
+.compactionSmallFileSize(smallFileSize)
+// Set rollback to LAZY so no inflights are deleted
+.insertSplitSize(insertSplitSize).build())
+.withCleanConfig(HoodieCleanConfig.newBuilder()
+.withFailedWritesCleaningPolicy(HoodieFailedWritesCleaningPolicy.LAZY)
+.build())
+.withStorageConfig(HoodieStorageConfig.newBuilder()
+.hfileMaxFileSize(dataGen.getEstimatedFileSizeInBytes(200))
+.parquetMaxFileSize(dataGen.getEstimatedFileSizeInBytes(200)).build())
 .withMergeAllowDuplicateOnInserts(mergeAllowDuplicateInserts)
 .withProps(props)
 .build();
@@ -2636,7 +2636,7 @@ public class TestHoodieClientOnCopyOnWriteStorage extends HoodieClientTestBase {
 private HoodieWriteConfig getParallelWritingWriteConfig(HoodieFailedWritesCleaningPolicy cleaningPolicy, boolean populateMetaFields) {
 return getConfigBuilder()
 .withEmbeddedTimelineServerEnabled(false)
-.withCompactionConfig(HoodieCompactionConfig.newBuilder()
+.withCleanConfig(HoodieCleanConfig.newBuilder()
 .withFailedWritesCleaningPolicy(cleaningPolicy)
 .withAutoClean(false).build())
 .withTimelineLayoutVersion(1)

@@ -36,9 +36,11 @@ import org.apache.hudi.common.table.view.FileSystemViewStorageConfig;
 import org.apache.hudi.common.testutils.HoodieMetadataTestTable;
 import org.apache.hudi.common.testutils.HoodieTestTable;
 import org.apache.hudi.common.util.Option;
-import org.apache.hudi.config.HoodieCompactionConfig;
+import org.apache.hudi.config.HoodieCleanConfig;
+import org.apache.hudi.config.HoodieArchivalConfig;
 import org.apache.hudi.config.HoodieIndexConfig;
 import org.apache.hudi.config.HoodieStorageConfig;
+import org.apache.hudi.config.HoodieCompactionConfig;
 import org.apache.hudi.config.HoodieWriteConfig;
 import org.apache.hudi.config.metrics.HoodieMetricsConfig;
 import org.apache.hudi.config.metrics.HoodieMetricsGraphiteConfig;
@@ -338,9 +340,11 @@ public class TestHoodieMetadataBase extends HoodieClientTestHarness {
 .withParallelism(2, 2).withDeleteParallelism(2).withRollbackParallelism(2).withFinalizeWriteParallelism(2)
 .withAutoCommit(autoCommit)
 .withCompactionConfig(HoodieCompactionConfig.newBuilder().compactionSmallFileSize(1024 * 1024 * 1024)
-.withInlineCompaction(false).withMaxNumDeltaCommitsBeforeCompaction(1)
+.withInlineCompaction(false).withMaxNumDeltaCommitsBeforeCompaction(1).build())
+.withCleanConfig(HoodieCleanConfig.newBuilder()
 .withFailedWritesCleaningPolicy(policy)
-.withAutoClean(false).retainCommits(1).retainFileVersions(1).build())
+.withAutoClean(false).retainCommits(1).retainFileVersions(1)
+.build())
 .withStorageConfig(HoodieStorageConfig.newBuilder().hfileMaxFileSize(1024 * 1024 * 1024).build())
 .withEmbeddedTimelineServerEnabled(true).forTable("test-trip-table")
 .withFileSystemViewConfig(new FileSystemViewStorageConfig.Builder()
@@ -390,16 +394,20 @@ public class TestHoodieMetadataBase extends HoodieClientTestHarness {
 .withPath(HoodieTableMetadata.getMetadataTableBasePath(writeConfig.getBasePath()))
 .withSchema(HoodieMetadataRecord.getClassSchema().toString())
 .forTable(writeConfig.getTableName() + METADATA_TABLE_NAME_SUFFIX)
-.withCompactionConfig(HoodieCompactionConfig.newBuilder()
-// we will trigger cleaning manually, to control the instant times
+.withCleanConfig(HoodieCleanConfig.newBuilder()
 .withAsyncClean(writeConfig.isMetadataAsyncClean())
+// we will trigger cleaning manually, to control the instant times
 .withAutoClean(false)
 .withCleanerParallelism(parallelism)
 .withCleanerPolicy(HoodieCleaningPolicy.KEEP_LATEST_COMMITS)
 .withFailedWritesCleaningPolicy(HoodieFailedWritesCleaningPolicy.LAZY)
 .retainCommits(writeConfig.getMetadataCleanerCommitsRetained())
-.archiveCommitsWith(minCommitsToKeep, maxCommitsToKeep)
-// we will trigger compaction manually, to control the instant times
+.build())
+// we will trigger archival manually, to control the instant times
+.withArchivalConfig(HoodieArchivalConfig.newBuilder()
+.archiveCommitsWith(minCommitsToKeep, maxCommitsToKeep).build())
+// we will trigger compaction manually, to control the instant times
+.withCompactionConfig(HoodieCompactionConfig.newBuilder()
+.withInlineCompaction(false)
+.withMaxNumDeltaCommitsBeforeCompaction(writeConfig.getMetadataCompactDeltaCommitMax()).build())
 .withParallelism(parallelism, parallelism)

@@ -26,7 +26,8 @@ import org.apache.hudi.common.testutils.FileCreateUtils;
 import org.apache.hudi.common.testutils.HoodieMetadataTestTable;
 import org.apache.hudi.common.testutils.HoodieTestDataGenerator;
 import org.apache.hudi.common.testutils.HoodieTestTable;
-import org.apache.hudi.config.HoodieCompactionConfig;
+import org.apache.hudi.config.HoodieArchivalConfig;
+import org.apache.hudi.config.HoodieCleanConfig;
 import org.apache.hudi.config.HoodieWriteConfig;

 import org.apache.log4j.LogManager;
@@ -275,7 +276,10 @@ public class TestHoodieMetadataBootstrap extends TestHoodieMetadataBase {
 private HoodieWriteConfig getWriteConfig(int minArchivalCommits, int maxArchivalCommits) throws Exception {
 return HoodieWriteConfig.newBuilder().withPath(basePath)
 .withSchema(HoodieTestDataGenerator.TRIP_EXAMPLE_SCHEMA).withParallelism(2, 2)
-.withCompactionConfig(HoodieCompactionConfig.newBuilder().retainCommits(1).archiveCommitsWith(minArchivalCommits, maxArchivalCommits).build())
+.withCleanConfig(HoodieCleanConfig.newBuilder()
+.retainCommits(1).build())
+.withArchivalConfig(HoodieArchivalConfig.newBuilder()
+.archiveCommitsWith(minArchivalCommits, maxArchivalCommits).build())
 .withFileSystemViewConfig(FileSystemViewStorageConfig.newBuilder()
 .withRemoteServerPort(timelineServicePort).build())
 .withMetadataConfig(HoodieMetadataConfig.newBuilder().enable(true).build())

@@ -32,6 +32,8 @@ import org.apache.hudi.common.table.HoodieTableMetaClient;
 import org.apache.hudi.common.testutils.HoodieTestDataGenerator;
 import org.apache.hudi.common.testutils.HoodieTestUtils;
 import org.apache.hudi.common.util.Option;
+import org.apache.hudi.config.HoodieCleanConfig;
+import org.apache.hudi.config.HoodieArchivalConfig;
 import org.apache.hudi.config.HoodieCompactionConfig;
 import org.apache.hudi.config.HoodieHBaseIndexConfig;
 import org.apache.hudi.config.HoodieIndexConfig;
@@ -477,9 +479,9 @@ public class TestSparkHoodieHBaseIndex extends SparkClientFunctionalTestHarness
 public void testHbaseTagLocationForArchivedCommits() throws Exception {
 // Load to memory
 Map<String, String> params = new HashMap<String, String>();
-params.put(HoodieCompactionConfig.CLEANER_COMMITS_RETAINED.key(), "1");
-params.put(HoodieCompactionConfig.MAX_COMMITS_TO_KEEP.key(), "3");
-params.put(HoodieCompactionConfig.MIN_COMMITS_TO_KEEP.key(), "2");
+params.put(HoodieCleanConfig.CLEANER_COMMITS_RETAINED.key(), "1");
+params.put(HoodieArchivalConfig.MAX_COMMITS_TO_KEEP.key(), "3");
+params.put(HoodieArchivalConfig.MIN_COMMITS_TO_KEEP.key(), "2");
 HoodieWriteConfig config = getConfigBuilder(100, false, false).withProps(params).build();

 SparkHoodieHBaseIndex index = new SparkHoodieHBaseIndex(config);

@@ -45,6 +45,8 @@ import org.apache.hudi.common.util.FileIOUtils;
 import org.apache.hudi.common.util.Option;
 import org.apache.hudi.common.util.collection.Pair;
 import org.apache.hudi.config.HoodieCompactionConfig;
+import org.apache.hudi.config.HoodieArchivalConfig;
+import org.apache.hudi.config.HoodieCleanConfig;
 import org.apache.hudi.config.HoodieLockConfig;
 import org.apache.hudi.config.HoodieWriteConfig;
 import org.apache.hudi.exception.HoodieException;
@@ -181,13 +183,14 @@ public class TestHoodieTimelineArchiver extends HoodieClientTestHarness {
 init(tableType);
 HoodieWriteConfig writeConfig = HoodieWriteConfig.newBuilder().withPath(basePath)
 .withSchema(HoodieTestDataGenerator.TRIP_EXAMPLE_SCHEMA).withParallelism(2, 2)
-.withCompactionConfig(HoodieCompactionConfig.newBuilder().retainCommits(1).archiveCommitsWith(minArchivalCommits, maxArchivalCommits)
-.withMaxNumDeltaCommitsBeforeCompaction(maxDeltaCommits)
+.withCleanConfig(HoodieCleanConfig.newBuilder().retainCommits(1).withFailedWritesCleaningPolicy(failedWritesCleaningPolicy).build())
+.withArchivalConfig(HoodieArchivalConfig.newBuilder()
 .withArchiveMergeEnable(enableArchiveMerge)
 .withArchiveMergeFilesBatchSize(archiveFilesBatch)
 .withArchiveMergeSmallFileLimit(size)
-.withFailedWritesCleaningPolicy(failedWritesCleaningPolicy)
-.build())
+.archiveCommitsWith(minArchivalCommits, maxArchivalCommits).build())
+.withCompactionConfig(HoodieCompactionConfig.newBuilder()
+.withMaxNumDeltaCommitsBeforeCompaction(maxDeltaCommits).build())
 .withFileSystemViewConfig(FileSystemViewStorageConfig.newBuilder()
 .withRemoteServerPort(timelineServicePort).build())
 .withMetadataConfig(HoodieMetadataConfig.newBuilder().enable(enableMetadata)
@@ -566,7 +569,8 @@ public class TestHoodieTimelineArchiver extends HoodieClientTestHarness {
 init();
 HoodieWriteConfig cfg = HoodieWriteConfig.newBuilder().withPath(basePath)
 .withSchema(HoodieTestDataGenerator.TRIP_EXAMPLE_SCHEMA).withParallelism(2, 2).forTable("test-trip-table")
-.withCompactionConfig(HoodieCompactionConfig.newBuilder().retainCommits(1).archiveCommitsWith(2, 5).build())
+.withArchivalConfig(HoodieArchivalConfig.newBuilder().archiveCommitsWith(2, 5).build())
+.withCleanConfig(HoodieCleanConfig.newBuilder().retainCommits(1).build())
 .withFileSystemViewConfig(FileSystemViewStorageConfig.newBuilder()
 .withRemoteServerPort(timelineServicePort).build())
 .withMetadataConfig(HoodieMetadataConfig.newBuilder().enable(enableMetadataTable).build())
@@ -716,7 +720,8 @@ public class TestHoodieTimelineArchiver extends HoodieClientTestHarness {
 HoodieWriteConfig cfg =
 HoodieWriteConfig.newBuilder().withPath(basePath).withSchema(HoodieTestDataGenerator.TRIP_EXAMPLE_SCHEMA)
 .withParallelism(2, 2).forTable("test-trip-table")
-.withCompactionConfig(HoodieCompactionConfig.newBuilder().retainCommits(1).archiveCommitsWith(2, 3).build())
+.withCleanConfig(HoodieCleanConfig.newBuilder().retainCommits(1).build())
+.withArchivalConfig(HoodieArchivalConfig.newBuilder().archiveCommitsWith(2, 3).build())
 .withFileSystemViewConfig(FileSystemViewStorageConfig.newBuilder()
 .withRemoteServerPort(timelineServicePort).build())
 .withMetadataConfig(HoodieMetadataConfig.newBuilder().enable(enableMetadataTable).build())
@@ -881,9 +886,9 @@ public class TestHoodieTimelineArchiver extends HoodieClientTestHarness {
 HoodieWriteConfig cfg =
 HoodieWriteConfig.newBuilder().withPath(basePath).withSchema(HoodieTestDataGenerator.TRIP_EXAMPLE_SCHEMA)
 .withParallelism(2, 2).forTable("test-trip-table")
-.withCompactionConfig(HoodieCompactionConfig.newBuilder().retainCommits(1).archiveCommitsWith(minInstantsToKeep, maxInstantsToKeep).build())
-.withFileSystemViewConfig(FileSystemViewStorageConfig.newBuilder()
-.withRemoteServerPort(timelineServicePort).build())
+.withCleanConfig(HoodieCleanConfig.newBuilder().retainCommits(1).build())
+.withArchivalConfig(HoodieArchivalConfig.newBuilder().archiveCommitsWith(minInstantsToKeep, maxInstantsToKeep).build())
+.withFileSystemViewConfig(FileSystemViewStorageConfig.newBuilder().withRemoteServerPort(timelineServicePort).build())
 .withMetadataConfig(HoodieMetadataConfig.newBuilder().enable(enableMetadataTable).build())
 .build();
 metaClient = HoodieTableMetaClient.reload(metaClient);
@@ -940,7 +945,8 @@ public class TestHoodieTimelineArchiver extends HoodieClientTestHarness {
 HoodieWriteConfig cfg =
 HoodieWriteConfig.newBuilder().withPath(basePath).withSchema(HoodieTestDataGenerator.TRIP_EXAMPLE_SCHEMA)
 .withParallelism(2, 2).forTable("test-trip-table")
-.withCompactionConfig(HoodieCompactionConfig.newBuilder().retainCommits(1).archiveCommitsWith(2, 3).build())
+.withCleanConfig(HoodieCleanConfig.newBuilder().retainCommits(1).build())
+.withArchivalConfig(HoodieArchivalConfig.newBuilder().archiveCommitsWith(2, 3).build())
 .withFileSystemViewConfig(FileSystemViewStorageConfig.newBuilder()
 .withRemoteServerPort(timelineServicePort).build())
 .withMetadataConfig(HoodieMetadataConfig.newBuilder().enable(enableMetadataTable).build())
@@ -1146,8 +1152,8 @@ public class TestHoodieTimelineArchiver extends HoodieClientTestHarness {
 // Test configs where metadata table has more aggressive archival configs than the compaction config
 HoodieWriteConfig writeConfig = HoodieWriteConfig.newBuilder().withPath(basePath)
 .withSchema(HoodieTestDataGenerator.TRIP_EXAMPLE_SCHEMA).withParallelism(2, 2)
-.withCompactionConfig(HoodieCompactionConfig.newBuilder()
-.retainCommits(1).archiveCommitsWith(2, 4).build())
+.withArchivalConfig(HoodieArchivalConfig.newBuilder().archiveCommitsWith(2, 4).build())
+.withCleanConfig(HoodieCleanConfig.newBuilder().retainCommits(1).build())
 .withFileSystemViewConfig(FileSystemViewStorageConfig.newBuilder()
 .withRemoteServerPort(timelineServicePort).build())
 .withMetadataConfig(HoodieMetadataConfig.newBuilder().enable(true)

@@ -70,6 +70,7 @@ import org.apache.hudi.common.util.CompactionUtils;
 import org.apache.hudi.common.util.Option;
 import org.apache.hudi.common.util.StringUtils;
 import org.apache.hudi.common.util.collection.Pair;
+import org.apache.hudi.config.HoodieCleanConfig;
 import org.apache.hudi.config.HoodieCompactionConfig;
 import org.apache.hudi.config.HoodieWriteConfig;
 import org.apache.hudi.exception.HoodieIOException;
@@ -261,11 +262,15 @@ public class TestCleaner extends HoodieClientTestBase {
 HoodieWriteConfig writeConfig = getConfigBuilder()
 .withFileSystemViewConfig(new FileSystemViewStorageConfig.Builder()
 .withEnableBackupForRemoteFileSystemView(false).build())
-.withCompactionConfig(HoodieCompactionConfig.newBuilder().compactionSmallFileSize(1024 * 1024 * 1024)
-.withInlineCompaction(false).withMaxNumDeltaCommitsBeforeCompaction(1)
+.withCleanConfig(HoodieCleanConfig.newBuilder()
 .withFailedWritesCleaningPolicy(HoodieFailedWritesCleaningPolicy.EAGER)
 .allowMultipleCleans(false)
-.withAutoClean(false).retainCommits(1).retainFileVersions(1).build())
+.withAutoClean(false).retainCommits(1).retainFileVersions(1)
+.build())
+.withCompactionConfig(HoodieCompactionConfig.newBuilder().compactionSmallFileSize(1024 * 1024 * 1024)
+.withInlineCompaction(false).withMaxNumDeltaCommitsBeforeCompaction(1)
+.build())
 .withEmbeddedTimelineServerEnabled(false).build();

 int index = 0;
@@ -334,8 +339,9 @@ public class TestCleaner extends HoodieClientTestBase {
 throws Exception {
 int maxVersions = 2; // keep upto 2 versions for each file
 HoodieWriteConfig cfg = getConfigBuilder()
-.withCompactionConfig(HoodieCompactionConfig.newBuilder()
-.withCleanerPolicy(HoodieCleaningPolicy.KEEP_LATEST_FILE_VERSIONS).retainFileVersions(maxVersions).build())
+.withCleanConfig(HoodieCleanConfig.newBuilder()
+.withCleanerPolicy(HoodieCleaningPolicy.KEEP_LATEST_FILE_VERSIONS)
+.retainFileVersions(maxVersions).build())
 .withParallelism(1, 1).withBulkInsertParallelism(1).withFinalizeWriteParallelism(1).withDeleteParallelism(1)
 .withConsistencyGuardConfig(ConsistencyGuardConfig.newBuilder().withConsistencyCheckEnabled(true).build())
 .build();
@@ -503,7 +509,7 @@ public class TestCleaner extends HoodieClientTestBase {
 throws Exception {
 int maxCommits = 3; // keep upto 3 commits from the past
 HoodieWriteConfig cfg = getConfigBuilder()
-.withCompactionConfig(HoodieCompactionConfig.newBuilder()
+.withCleanConfig(HoodieCleanConfig.newBuilder()
 .withCleanerPolicy(HoodieCleaningPolicy.KEEP_LATEST_COMMITS).retainCommits(maxCommits).build())
 .withParallelism(1, 1).withBulkInsertParallelism(1).withFinalizeWriteParallelism(1).withDeleteParallelism(1)
 .withConsistencyGuardConfig(ConsistencyGuardConfig.newBuilder().withConsistencyCheckEnabled(true).build())
@@ -579,7 +585,7 @@ public class TestCleaner extends HoodieClientTestBase {
 HoodieWriteConfig cfg = getConfigBuilder()
 .withAutoCommit(false)
 .withHeartbeatIntervalInMs(3000)
-.withCompactionConfig(HoodieCompactionConfig.newBuilder()
+.withCleanConfig(HoodieCleanConfig.newBuilder()
 .withFailedWritesCleaningPolicy(HoodieFailedWritesCleaningPolicy.LAZY)
 .withCleanerPolicy(HoodieCleaningPolicy.KEEP_LATEST_COMMITS).retainCommits(maxCommits).build())
 .withParallelism(1, 1).withBulkInsertParallelism(1).withFinalizeWriteParallelism(1).withDeleteParallelism(1)
@@ -732,8 +738,8 @@ public class TestCleaner extends HoodieClientTestBase {
 HoodieWriteConfig.newBuilder()
 .withPath(basePath)
 .withMetadataConfig(HoodieMetadataConfig.newBuilder().withAssumeDatePartitioning(true).build())
-.withCompactionConfig(HoodieCompactionConfig.newBuilder()
-.withCleanerPolicy(HoodieCleaningPolicy.KEEP_LATEST_COMMITS).build())
+.withCleanConfig(HoodieCleanConfig.newBuilder()
+.withCleanerPolicy(HoodieCleaningPolicy.KEEP_LATEST_COMMITS).build())
 .build();
 metaClient = HoodieTableMetaClient.reload(metaClient);

@@ -787,8 +793,9 @@ public class TestCleaner extends HoodieClientTestBase {
 .withMetadataConfig(HoodieMetadataConfig.newBuilder()
 .withMaxNumDeltaCommitsBeforeCompaction(1)
 .withAssumeDatePartitioning(true).build())
-.withCompactionConfig(HoodieCompactionConfig.newBuilder()
-.withCleanerPolicy(HoodieCleaningPolicy.KEEP_LATEST_COMMITS).retainCommits(2).build())
+.withCleanConfig(HoodieCleanConfig.newBuilder()
+.withCleanerPolicy(HoodieCleaningPolicy.KEEP_LATEST_COMMITS)
+.retainCommits(2).build())
 .build();

 HoodieTableMetadataWriter metadataWriter = SparkHoodieBackedTableMetadataWriter.create(hadoopConf, config, context);
@@ -1140,7 +1147,7 @@ public class TestCleaner extends HoodieClientTestBase {
 public void testCleaningWithZeroPartitionPaths() throws Exception {
 HoodieWriteConfig config = HoodieWriteConfig.newBuilder().withPath(basePath)
 .withMetadataConfig(HoodieMetadataConfig.newBuilder().withAssumeDatePartitioning(true).build())
-.withCompactionConfig(HoodieCompactionConfig.newBuilder()
+.withCleanConfig(HoodieCleanConfig.newBuilder()
 .withCleanerPolicy(HoodieCleaningPolicy.KEEP_LATEST_COMMITS).retainCommits(2).build())
 .build();

@@ -1164,7 +1171,7 @@ public class TestCleaner extends HoodieClientTestBase {
 public void testKeepLatestCommitsWithPendingCompactions() throws Exception {
 HoodieWriteConfig config = HoodieWriteConfig.newBuilder().withPath(basePath)
 .withMetadataConfig(HoodieMetadataConfig.newBuilder().withAssumeDatePartitioning(true).build())
-.withCompactionConfig(HoodieCompactionConfig.newBuilder()
+.withCleanConfig(HoodieCleanConfig.newBuilder()
 .withCleanerPolicy(HoodieCleaningPolicy.KEEP_LATEST_COMMITS).retainCommits(2).build())
 .build();
 // Deletions:
@@ -1188,7 +1195,7 @@ public class TestCleaner extends HoodieClientTestBase {
 HoodieWriteConfig config =
 HoodieWriteConfig.newBuilder().withPath(basePath)
 .withMetadataConfig(HoodieMetadataConfig.newBuilder().withAssumeDatePartitioning(true).build())
-.withCompactionConfig(HoodieCompactionConfig.newBuilder()
+.withCleanConfig(HoodieCleanConfig.newBuilder()
 .withCleanerPolicy(HoodieCleaningPolicy.KEEP_LATEST_FILE_VERSIONS).retainFileVersions(2).build())
 .build();
 // Deletions:
@@ -1212,8 +1219,8 @@ public class TestCleaner extends HoodieClientTestBase {
 HoodieWriteConfig.newBuilder()
 .withPath(basePath)
 .withMetadataConfig(HoodieMetadataConfig.newBuilder().withAssumeDatePartitioning(true).build())
-.withCompactionConfig(HoodieCompactionConfig.newBuilder()
-.withCleanerPolicy(HoodieCleaningPolicy.KEEP_LATEST_FILE_VERSIONS).retainFileVersions(1).build())
+.withCleanConfig(HoodieCleanConfig.newBuilder()
+.withCleanerPolicy(HoodieCleaningPolicy.KEEP_LATEST_FILE_VERSIONS).retainFileVersions(1).build())
 .build();

 String commitTime = makeNewCommitTime(1, "%09d");
@@ -1241,7 +1248,7 @@ public class TestCleaner extends HoodieClientTestBase {
 .withMetadataConfig(HoodieMetadataConfig.newBuilder()
 .withMaxNumDeltaCommitsBeforeCompaction(1)
 .withAssumeDatePartitioning(true).build())
-.withCompactionConfig(HoodieCompactionConfig.newBuilder()
+.withCleanConfig(HoodieCleanConfig.newBuilder()
 .withCleanerPolicy(HoodieCleaningPolicy.KEEP_LATEST_COMMITS).retainCommits(2).build())
 .build();

@@ -1317,7 +1324,7 @@ public class TestCleaner extends HoodieClientTestBase {
 HoodieWriteConfig cfg = getConfigBuilder()
 .withAutoCommit(false)
 .withHeartbeatIntervalInMs(3000)
-.withCompactionConfig(HoodieCompactionConfig.newBuilder()
+.withCleanConfig(HoodieCleanConfig.newBuilder()
 .withFailedWritesCleaningPolicy(HoodieFailedWritesCleaningPolicy.LAZY)
 .withCleanerPolicy(HoodieCleaningPolicy.KEEP_LATEST_FILE_VERSIONS).retainFileVersions(maxVersions).build())
 .withParallelism(1, 1).withBulkInsertParallelism(1).withFinalizeWriteParallelism(1).withDeleteParallelism(1)

@@ -37,7 +37,7 @@ import org.apache.hudi.common.testutils.HoodieTestUtils;
 import org.apache.hudi.common.util.CollectionUtils;
 import org.apache.hudi.common.util.Option;
 import org.apache.hudi.common.util.collection.Pair;
-import org.apache.hudi.config.HoodieCompactionConfig;
+import org.apache.hudi.config.HoodieCleanConfig;
 import org.apache.hudi.config.HoodieWriteConfig;
 import org.apache.hudi.metadata.HoodieTableMetadataWriter;
 import org.apache.hudi.metadata.SparkHoodieBackedTableMetadataWriter;
@@ -77,14 +77,14 @@ public class TestCleanPlanExecutor extends TestCleaner {
 @Test
 public void testInvalidCleaningTriggerStrategy() {
 HoodieWriteConfig config = HoodieWriteConfig.newBuilder().withPath(basePath)
-.withMetadataConfig(HoodieMetadataConfig.newBuilder().withAssumeDatePartitioning(true).enable(false).build())
-.withCompactionConfig(HoodieCompactionConfig.newBuilder()
-.withIncrementalCleaningMode(true)
-.withFailedWritesCleaningPolicy(HoodieFailedWritesCleaningPolicy.EAGER)
-.withCleanBootstrapBaseFileEnabled(true)
-.withCleanerPolicy(HoodieCleaningPolicy.KEEP_LATEST_COMMITS).retainCommits(2)
-.withCleaningTriggerStrategy("invalid_strategy").build())
-.build();
+.withMetadataConfig(HoodieMetadataConfig.newBuilder().withAssumeDatePartitioning(true).enable(false).build())
+.withCleanConfig(HoodieCleanConfig.newBuilder()
+.withIncrementalCleaningMode(true)
+.withFailedWritesCleaningPolicy(HoodieFailedWritesCleaningPolicy.EAGER)
+.withCleanBootstrapBaseFileEnabled(true)
+.withCleanerPolicy(HoodieCleaningPolicy.KEEP_LATEST_COMMITS).retainCommits(2)
+.withCleaningTriggerStrategy("invalid_strategy").build())
+.withMetadataConfig(HoodieMetadataConfig.newBuilder().withAssumeDatePartitioning(true).enable(false).build()).build();
 Exception e = assertThrows(IllegalArgumentException.class, () -> runCleaner(config, true), "should fail when invalid trigger strategy is provided!");
 assertTrue(e.getMessage().contains("No enum constant org.apache.hudi.table.action.clean.CleaningTriggerStrategy.invalid_strategy"));
 }
@@ -108,18 +108,15 @@ public class TestCleanPlanExecutor extends TestCleaner {
 boolean simulateFailureRetry, boolean simulateMetadataFailure,
 boolean enableIncrementalClean, boolean enableBootstrapSourceClean) throws Exception {
 HoodieWriteConfig config = HoodieWriteConfig.newBuilder().withPath(basePath)
-.withMetadataConfig(
-HoodieMetadataConfig.newBuilder()
-.withAssumeDatePartitioning(true)
-.build())
-.withCompactionConfig(HoodieCompactionConfig.newBuilder()
+.withMetadataConfig(HoodieMetadataConfig.newBuilder().withAssumeDatePartitioning(true).build())
+.withCleanConfig(HoodieCleanConfig.newBuilder()
 .withIncrementalCleaningMode(enableIncrementalClean)
 .withFailedWritesCleaningPolicy(HoodieFailedWritesCleaningPolicy.EAGER)
 .withCleanBootstrapBaseFileEnabled(enableBootstrapSourceClean)
 .withCleanerPolicy(HoodieCleaningPolicy.KEEP_LATEST_COMMITS)
 .retainCommits(2)
-.withMaxCommitsBeforeCleaning(2).build())
-.build();
+.withMaxCommitsBeforeCleaning(2)
+.build()).build();

 HoodieTestTable testTable = HoodieTestTable.of(metaClient);
 String p0 = "2020/01/01";
@@ -274,7 +271,7 @@ public class TestCleanPlanExecutor extends TestCleaner {
 HoodieWriteConfig config =
 HoodieWriteConfig.newBuilder().withPath(basePath)
 .withMetadataConfig(HoodieMetadataConfig.newBuilder().withAssumeDatePartitioning(true).build())
-.withCompactionConfig(HoodieCompactionConfig.newBuilder()
+.withCleanConfig(HoodieCleanConfig.newBuilder()
 .withCleanerPolicy(HoodieCleaningPolicy.KEEP_LATEST_FILE_VERSIONS).retainFileVersions(1).build())
 .build();

@@ -353,7 +350,7 @@ public class TestCleanPlanExecutor extends TestCleaner {
 HoodieWriteConfig config =
 HoodieWriteConfig.newBuilder().withPath(basePath)
 .withMetadataConfig(HoodieMetadataConfig.newBuilder().withAssumeDatePartitioning(true).build())
-.withCompactionConfig(HoodieCompactionConfig.newBuilder()
+.withCleanConfig(HoodieCleanConfig.newBuilder()
 .withCleanBootstrapBaseFileEnabled(true)
 .withCleanerParallelism(1)
 .withCleanerPolicy(HoodieCleaningPolicy.KEEP_LATEST_FILE_VERSIONS).retainFileVersions(1).build())
@@ -453,18 +450,15 @@ public class TestCleanPlanExecutor extends TestCleaner {
 @Test
 public void testKeepLatestFileVersionsMOR() throws Exception {

-HoodieWriteConfig config =
-HoodieWriteConfig.newBuilder().withPath(basePath)
-.withMetadataConfig(
-HoodieMetadataConfig.newBuilder()
-.withAssumeDatePartitioning(true)
-// Column Stats Index is disabled, since these tests construct tables which are
-// not valid (empty commit metadata, invalid parquet files)
-.withMetadataIndexColumnStats(false)
-.build())
-.withCompactionConfig(HoodieCompactionConfig.newBuilder()
-.withCleanerPolicy(HoodieCleaningPolicy.KEEP_LATEST_FILE_VERSIONS).retainFileVersions(1).build())
-.build();
+HoodieWriteConfig config = HoodieWriteConfig.newBuilder().withPath(basePath)
+.withMetadataConfig(HoodieMetadataConfig.newBuilder().withAssumeDatePartitioning(true)
+// Column Stats Index is disabled, since these tests construct tables which are
+// not valid (empty commit metadata, invalid parquet files)
+.withMetadataIndexColumnStats(false)
+.build())
+.withCleanConfig(HoodieCleanConfig.newBuilder()
+.withCleanerPolicy(HoodieCleaningPolicy.KEEP_LATEST_FILE_VERSIONS).retainFileVersions(1)
+.build()).build();

 HoodieTableMetaClient metaClient = HoodieTestUtils.init(hadoopConf, basePath, HoodieTableType.MERGE_ON_READ);
 HoodieTestTable testTable = HoodieTestTable.of(metaClient);
@@ -497,18 +491,14 @@ public class TestCleanPlanExecutor extends TestCleaner {
 @Test
 public void testKeepLatestCommitsMOR() throws Exception {

-HoodieWriteConfig config =
-HoodieWriteConfig.newBuilder().withPath(basePath)
-.withMetadataConfig(
-HoodieMetadataConfig.newBuilder()
-.withAssumeDatePartitioning(true)
+HoodieWriteConfig config = HoodieWriteConfig.newBuilder().withPath(basePath)
+.withMetadataConfig(HoodieMetadataConfig.newBuilder().withAssumeDatePartitioning(true)
 // Column Stats Index is disabled, since these tests construct tables which are
 // not valid (empty commit metadata, invalid parquet files)
-.withMetadataIndexColumnStats(false)
-.build())
-.withCompactionConfig(HoodieCompactionConfig.newBuilder()
-.withCleanerPolicy(HoodieCleaningPolicy.KEEP_LATEST_COMMITS).retainCommits(1).build())
-.build();
+.withMetadataIndexColumnStats(false).build())
+.withCleanConfig(HoodieCleanConfig.newBuilder()
+.withCleanerPolicy(HoodieCleaningPolicy.KEEP_LATEST_COMMITS).retainCommits(1).build())
+.build();

 HoodieTableMetaClient metaClient = HoodieTestUtils.init(hadoopConf, basePath, HoodieTableType.MERGE_ON_READ);
 HoodieTestTable testTable = HoodieTestTable.of(metaClient);
@@ -552,11 +542,12 @@ public class TestCleanPlanExecutor extends TestCleaner {
 boolean enableIncrementalClean, boolean enableBootstrapSourceClean) throws Exception {
 HoodieWriteConfig config = HoodieWriteConfig.newBuilder().withPath(basePath)
 .withMetadataConfig(HoodieMetadataConfig.newBuilder().withAssumeDatePartitioning(true).build())
-.withCompactionConfig(HoodieCompactionConfig.newBuilder()
+.withCleanConfig(HoodieCleanConfig.newBuilder()
 .withIncrementalCleaningMode(enableIncrementalClean)
 .withFailedWritesCleaningPolicy(HoodieFailedWritesCleaningPolicy.EAGER)
 .withCleanBootstrapBaseFileEnabled(enableBootstrapSourceClean)
-.withCleanerPolicy(HoodieCleaningPolicy.KEEP_LATEST_BY_HOURS).cleanerNumHoursRetained(2).build())
+.withCleanerPolicy(HoodieCleaningPolicy.KEEP_LATEST_BY_HOURS).cleanerNumHoursRetained(2)
+.build())
 .build();

 HoodieTestTable testTable = HoodieTestTable.of(metaClient);

@@ -27,7 +27,8 @@ import org.apache.hudi.common.table.timeline.HoodieActiveTimeline;
 import org.apache.hudi.common.table.timeline.HoodieTimeline;
 import org.apache.hudi.common.testutils.HoodieTestDataGenerator;
 import org.apache.hudi.common.util.Option;
-import org.apache.hudi.config.HoodieCompactionConfig;
+import org.apache.hudi.config.HoodieArchivalConfig;
+import org.apache.hudi.config.HoodieCleanConfig;
 import org.apache.hudi.config.HoodieWriteConfig;
 import org.apache.hudi.testutils.SparkClientFunctionalTestHarness;

@@ -54,8 +55,9 @@ public class TestHoodieSparkCopyOnWriteTableArchiveWithReplace extends SparkClie
 public void testDeletePartitionAndArchive(boolean metadataEnabled) throws IOException {
 HoodieTableMetaClient metaClient = getHoodieMetaClient(HoodieTableType.COPY_ON_WRITE);
 HoodieWriteConfig writeConfig = getConfigBuilder(true)
-.withCompactionConfig(HoodieCompactionConfig.newBuilder().archiveCommitsWith(2, 3).retainCommits(1).build())
-.withMetadataConfig(HoodieMetadataConfig.newBuilder().enable(metadataEnabled).build())
+.withCleanConfig(HoodieCleanConfig.newBuilder().retainCommits(1).build())
+.withArchivalConfig(HoodieArchivalConfig.newBuilder().archiveCommitsWith(2, 3).build())
+.withMetadataConfig(HoodieMetadataConfig.newBuilder().enable(metadataEnabled).build())
 .build();
 try (SparkRDDWriteClient client = getHoodieWriteClient(writeConfig);
 HoodieTestDataGenerator dataGen = new HoodieTestDataGenerator(DEFAULT_PARTITION_PATHS)) {

@@ -43,6 +43,7 @@ import org.apache.hudi.common.testutils.HoodieTestDataGenerator;
 import org.apache.hudi.common.testutils.HoodieTestTable;
 import org.apache.hudi.common.util.Option;
 import org.apache.hudi.common.util.collection.Pair;
+import org.apache.hudi.config.HoodieCleanConfig;
 import org.apache.hudi.config.HoodieCompactionConfig;
 import org.apache.hudi.config.HoodieStorageConfig;
 import org.apache.hudi.config.HoodieWriteConfig;
@@ -556,7 +557,7 @@ public class TestHoodieSparkMergeOnReadTableRollback extends SparkClientFunction

 // trigger clean. creating a new client with aggresive cleaner configs so that clean will kick in immediately.
 cfgBuilder = getConfigBuilder(false)
-.withCompactionConfig(HoodieCompactionConfig.newBuilder().retainCommits(1).build())
+.withCleanConfig(HoodieCleanConfig.newBuilder().retainCommits(1).build())
 // Timeline-server-based markers are not used for multi-rollback tests
 .withMarkersType(MarkerType.DIRECT.name());
 addConfigsForPopulateMetaFields(cfgBuilder, populateMetaFields);
@@ -977,10 +978,13 @@ public class TestHoodieSparkMergeOnReadTableRollback extends SparkClientFunction

 private HoodieWriteConfig getWriteConfig(boolean autoCommit, boolean rollbackUsingMarkers) {
 HoodieWriteConfig.Builder cfgBuilder = getConfigBuilder(autoCommit).withRollbackUsingMarkers(rollbackUsingMarkers)
+.withCleanConfig(HoodieCleanConfig.newBuilder()
+.withAutoClean(false)
+.withFailedWritesCleaningPolicy(HoodieFailedWritesCleaningPolicy.LAZY)
+.build())
 .withCompactionConfig(HoodieCompactionConfig.newBuilder().compactionSmallFileSize(1024 * 1024 * 1024L)
 .withInlineCompaction(false).withMaxNumDeltaCommitsBeforeCompaction(3)
-.withAutoClean(false)
-.withFailedWritesCleaningPolicy(HoodieFailedWritesCleaningPolicy.LAZY).build());
+.build());
 return cfgBuilder.build();
 }

@@ -38,10 +38,11 @@ import org.apache.hudi.common.table.view.SyncableFileSystemView;
 import org.apache.hudi.common.testutils.HoodieTestDataGenerator;
 import org.apache.hudi.common.testutils.RawTripTestPayload;
 import org.apache.hudi.common.util.Option;
-import org.apache.hudi.config.HoodieCompactionConfig;
-import org.apache.hudi.config.HoodieIndexConfig;
-import org.apache.hudi.config.HoodieStorageConfig;
+import org.apache.hudi.config.HoodieCleanConfig;
 import org.apache.hudi.config.HoodieWriteConfig;
+import org.apache.hudi.config.HoodieCompactionConfig;
+import org.apache.hudi.config.HoodieStorageConfig;
+import org.apache.hudi.config.HoodieIndexConfig;
 import org.apache.hudi.index.HoodieIndex;
 import org.apache.hudi.index.HoodieIndex.IndexType;
 import org.apache.hudi.index.SparkHoodieIndexFactory;
@@ -146,8 +147,8 @@ public class HoodieClientTestBase extends HoodieClientTestHarness {
 .withTimelineLayoutVersion(TimelineLayoutVersion.CURR_VERSION)
 .withWriteStatusClass(MetadataMergeWriteStatus.class)
 .withConsistencyGuardConfig(ConsistencyGuardConfig.newBuilder().withConsistencyCheckEnabled(true).build())
-.withCompactionConfig(HoodieCompactionConfig.newBuilder().withFailedWritesCleaningPolicy(cleaningPolicy)
-.compactionSmallFileSize(1024 * 1024).build())
+.withCleanConfig(HoodieCleanConfig.newBuilder().withFailedWritesCleaningPolicy(cleaningPolicy).build())
+.withCompactionConfig(HoodieCompactionConfig.newBuilder().compactionSmallFileSize(1024 * 1024).build())
 .withStorageConfig(HoodieStorageConfig.newBuilder().hfileMaxFileSize(1024 * 1024).parquetMaxFileSize(1024 * 1024).orcMaxFileSize(1024 * 1024).build())
 .forTable("test-trip-table")
 .withIndexConfig(HoodieIndexConfig.newBuilder().withIndexType(indexType).build())