
[HUDI-3135] Make delete partitions lazy to be executed by the cleaner (#4489)

As of now, delete partitions ensures that all file groups in a partition are deleted, but the partition itself is not removed. As a result, listing all partitions may still return the deleted partitions, even though no data is served because all their file groups are gone. This patch fixes that by letting the cleaner take care of deleting a partition once all file groups pertaining to it have been deleted.

- Fixed CleanPlanActionExecutor to return meta info about the list of partitions to be deleted. If there are no valid file groups for a partition, the clean planner includes that partition in the list (a simplified sketch of the flow follows this list).
- Fixed the HoodieCleanerPlan avro schema to include the list of partitions to be deleted
- CleanActionExecutor is fixed to delete partitions, if any, as per the clean plan
- The same info is added to HoodieCleanMetadata
- Metadata table, when applying clean metadata, will check for partitions to be deleted and update the "all_partitions" record for the deleted partitions.
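
To make the plan/execute split above concrete, here is a minimal, self-contained Java sketch. PartitionView, CleanPlan, and the print statements are hypothetical stand-ins for illustration only, not the actual Hudi classes touched by this commit.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Illustration of the lazy partition-delete flow: the planner marks empty partitions,
// the executor deletes planned files and then drops the marked partition directories.
public class LazyPartitionDeleteSketch {

  static class PartitionView {
    final List<String> filesToClean;   // files the cleaner will delete
    final int remainingFileGroups;     // valid file groups left after cleaning
    PartitionView(List<String> filesToClean, int remainingFileGroups) {
      this.filesToClean = filesToClean;
      this.remainingFileGroups = remainingFileGroups;
    }
  }

  static class CleanPlan {
    final Map<String, List<String>> filesToDeletePerPartition = new HashMap<>();
    final List<String> partitionsToBeDeleted = new ArrayList<>();
  }

  // Planning: a partition with no valid file groups is marked for deletion,
  // mirroring how the clean planner now emits partitionsToBeDeleted.
  static CleanPlan plan(Map<String, PartitionView> partitions) {
    CleanPlan plan = new CleanPlan();
    partitions.forEach((path, view) -> {
      plan.filesToDeletePerPartition.put(path, view.filesToClean);
      if (view.remainingFileGroups == 0) {
        plan.partitionsToBeDeleted.add(path);
      }
    });
    return plan;
  }

  // Execution: delete the planned files first, then drop the now-empty partition dirs.
  static void execute(CleanPlan plan) {
    plan.filesToDeletePerPartition.forEach((partition, files) ->
        files.forEach(f -> System.out.println("delete file " + partition + "/" + f)));
    plan.partitionsToBeDeleted.forEach(p -> System.out.println("delete partition dir " + p));
  }

  public static void main(String[] args) {
    Map<String, PartitionView> partitions = new HashMap<>();
    partitions.put("2021/10/01", new PartitionView(List.of("f1.parquet", "f2.parquet"), 0));
    partitions.put("2021/10/02", new PartitionView(List.of("old_f3.parquet"), 1));
    execute(plan(partitions));
  }
}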

Co-authored-by: sivabalan <n.siva.b@gmail.com>
ForwardXu authored on 2022-03-31 15:35:39 +08:00, committed by GitHub
parent 3cdb590e15 · commit 80011df995
20 changed files with 306 additions and 76 deletions

View File

@@ -0,0 +1,35 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hudi.exception;

/**
 * <p>
 * Exception thrown for any higher level errors when doing delete partitions.
 * </p>
 */
public class HoodieDeletePartitionException extends HoodieException {

  public HoodieDeletePartitionException(String msg, Throwable e) {
    super(msg, e);
  }

  public HoodieDeletePartitionException(String msg) {
    super(msg);
  }
}

View File

@@ -76,7 +76,8 @@ public class CleanActionExecutor<T extends HoodieRecordPayload, I, K, O> extends
       Path deletePath = new Path(deletePathStr);
       LOG.debug("Working on delete path :" + deletePath);
       try {
-        boolean deleteResult = fs.delete(deletePath, false);
+        boolean isDirectory = fs.isDirectory(deletePath);
+        boolean deleteResult = fs.delete(deletePath, isDirectory);
         if (deleteResult) {
           LOG.debug("Cleaned file at path :" + deletePath);
         }
@@ -137,6 +138,8 @@ public class CleanActionExecutor<T extends HoodieRecordPayload, I, K, O> extends
         .flatMap(x -> x.getValue().stream().map(y -> new ImmutablePair<>(x.getKey(),
             new CleanFileInfo(y.getFilePath(), y.getIsBootstrapBaseFile()))));
+    List<String> partitionsToBeDeleted = cleanerPlan.getPartitionsToBeDeleted() != null ? cleanerPlan.getPartitionsToBeDeleted() : new ArrayList<>();
     Stream<ImmutablePair<String, PartitionCleanStat>> partitionCleanStats =
         context.mapPartitionsToPairAndReduceByKey(filesToBeDeletedPerPartition,
             iterator -> deleteFilesFunc(iterator, table), PartitionCleanStat::merge, cleanerParallelism);
@@ -144,6 +147,14 @@ public class CleanActionExecutor<T extends HoodieRecordPayload, I, K, O> extends
     Map<String, PartitionCleanStat> partitionCleanStatsMap = partitionCleanStats
         .collect(Collectors.toMap(Pair::getKey, Pair::getValue));
+    partitionsToBeDeleted.forEach(entry -> {
+      try {
+        deleteFileAndGetResult(table.getMetaClient().getFs(), table.getMetaClient().getBasePath() + "/" + entry);
+      } catch (IOException e) {
+        LOG.warn("Partition deletion failed " + entry);
+      }
+    });
     // Return PartitionCleanStat for each partition passed.
     return cleanerPlan.getFilePathsToBeDeletedPerPartition().keySet().stream().map(partitionPath -> {
       PartitionCleanStat partitionCleanStat = partitionCleanStatsMap.containsKey(partitionPath)
@@ -162,6 +173,7 @@ public class CleanActionExecutor<T extends HoodieRecordPayload, I, K, O> extends
           .withDeleteBootstrapBasePathPatterns(partitionCleanStat.getDeleteBootstrapBasePathPatterns())
           .withSuccessfulDeleteBootstrapBaseFiles(partitionCleanStat.getSuccessfulDeleteBootstrapBaseFiles())
           .withFailedDeleteBootstrapBaseFiles(partitionCleanStat.getFailedDeleteBootstrapBaseFiles())
+          .isPartitionDeleted(partitionsToBeDeleted.contains(partitionPath))
           .build();
     }).collect(Collectors.toList());
   }

View File

@@ -22,6 +22,7 @@ import org.apache.hudi.avro.model.HoodieActionInstant;
 import org.apache.hudi.avro.model.HoodieCleanFileInfo;
 import org.apache.hudi.avro.model.HoodieCleanerPlan;
 import org.apache.hudi.common.engine.HoodieEngineContext;
+import org.apache.hudi.common.model.CleanFileInfo;
 import org.apache.hudi.common.model.HoodieCleaningPolicy;
 import org.apache.hudi.common.model.HoodieRecordPayload;
 import org.apache.hudi.common.table.timeline.HoodieInstant;
@@ -108,15 +109,22 @@ public class CleanPlanActionExecutor<T extends HoodieRecordPayload, I, K, O> ext
       context.setJobStatus(this.getClass().getSimpleName(), "Generating list of file slices to be cleaned");
-      Map<String, List<HoodieCleanFileInfo>> cleanOps = context
+      Map<String, Pair<Boolean, List<CleanFileInfo>>> cleanOpsWithPartitionMeta = context
           .map(partitionsToClean, partitionPathToClean -> Pair.of(partitionPathToClean, planner.getDeletePaths(partitionPathToClean)), cleanerParallelism)
           .stream()
-          .collect(Collectors.toMap(Pair::getKey, y -> CleanerUtils.convertToHoodieCleanFileInfoList(y.getValue())));
+          .collect(Collectors.toMap(Pair::getKey, Pair::getValue));
+
+      Map<String, List<HoodieCleanFileInfo>> cleanOps = cleanOpsWithPartitionMeta.entrySet().stream()
+          .collect(Collectors.toMap(Map.Entry::getKey,
+              e -> CleanerUtils.convertToHoodieCleanFileInfoList(e.getValue().getValue())));
+
+      List<String> partitionsToDelete = cleanOpsWithPartitionMeta.entrySet().stream().filter(entry -> entry.getValue().getKey()).map(Map.Entry::getKey)
+          .collect(Collectors.toList());
       return new HoodieCleanerPlan(earliestInstant
           .map(x -> new HoodieActionInstant(x.getTimestamp(), x.getAction(), x.getState().name())).orElse(null),
           config.getCleanerPolicy().name(), CollectionUtils.createImmutableMap(),
-          CleanPlanner.LATEST_CLEAN_PLAN_VERSION, cleanOps);
+          CleanPlanner.LATEST_CLEAN_PLAN_VERSION, cleanOps, partitionsToDelete);
     } catch (IOException e) {
       throw new HoodieIOException("Failed to schedule clean operation", e);
     }

View File

@@ -214,7 +214,7 @@ public class CleanPlanner<T extends HoodieRecordPayload, I, K, O> implements Ser
    * policy is useful, if you are simply interested in querying the table, and you don't want too many versions for a
    * single file (i.e run it with versionsRetained = 1)
    */
-  private List<CleanFileInfo> getFilesToCleanKeepingLatestVersions(String partitionPath) {
+  private Pair<Boolean, List<CleanFileInfo>> getFilesToCleanKeepingLatestVersions(String partitionPath) {
     LOG.info("Cleaning " + partitionPath + ", retaining latest " + config.getCleanerFileVersionsRetained()
         + " file versions. ");
     List<CleanFileInfo> deletePaths = new ArrayList<>();
@@ -226,7 +226,7 @@ public class CleanPlanner<T extends HoodieRecordPayload, I, K, O> implements Ser
     // In this scenario, we will assume that once replaced a file group automatically becomes eligible for cleaning completely
     // In other words, the file versions only apply to the active file groups.
     deletePaths.addAll(getReplacedFilesEligibleToClean(savepointedFiles, partitionPath, Option.empty()));
+    boolean toDeletePartition = false;
     List<HoodieFileGroup> fileGroups = fileSystemView.getAllFileGroups(partitionPath).collect(Collectors.toList());
     for (HoodieFileGroup fileGroup : fileGroups) {
       int keepVersions = config.getCleanerFileVersionsRetained();
@@ -254,10 +254,14 @@ public class CleanPlanner<T extends HoodieRecordPayload, I, K, O> implements Ser
         deletePaths.addAll(getCleanFileInfoForSlice(nextSlice));
       }
     }
-    return deletePaths;
+    // if there are no valid file groups for the partition, mark it to be deleted
+    if (fileGroups.isEmpty()) {
+      toDeletePartition = true;
+    }
+    return Pair.of(toDeletePartition, deletePaths);
   }
 
-  private List<CleanFileInfo> getFilesToCleanKeepingLatestCommits(String partitionPath) {
+  private Pair<Boolean, List<CleanFileInfo>> getFilesToCleanKeepingLatestCommits(String partitionPath) {
     return getFilesToCleanKeepingLatestCommits(partitionPath, config.getCleanerCommitsRetained(), HoodieCleaningPolicy.KEEP_LATEST_COMMITS);
   }
@@ -275,7 +279,7 @@ public class CleanPlanner<T extends HoodieRecordPayload, I, K, O> implements Ser
    * <p>
    * This policy is the default.
    */
-  private List<CleanFileInfo> getFilesToCleanKeepingLatestCommits(String partitionPath, int commitsRetained, HoodieCleaningPolicy policy) {
+  private Pair<Boolean, List<CleanFileInfo>> getFilesToCleanKeepingLatestCommits(String partitionPath, int commitsRetained, HoodieCleaningPolicy policy) {
     LOG.info("Cleaning " + partitionPath + ", retaining latest " + commitsRetained + " commits. ");
     List<CleanFileInfo> deletePaths = new ArrayList<>();
@@ -285,6 +289,7 @@ public class CleanPlanner<T extends HoodieRecordPayload, I, K, O> implements Ser
         .collect(Collectors.toList());
     // determine if we have enough commits, to start cleaning.
+    boolean toDeletePartition = false;
     if (commitTimeline.countInstants() > commitsRetained) {
       Option<HoodieInstant> earliestCommitToRetainOption = getEarliestCommitToRetain();
       HoodieInstant earliestCommitToRetain = earliestCommitToRetainOption.get();
@@ -350,8 +355,12 @@ public class CleanPlanner<T extends HoodieRecordPayload, I, K, O> implements Ser
           }
         }
       }
+      // if there are no valid file groups for the partition, mark it to be deleted
+      if (fileGroups.isEmpty()) {
+        toDeletePartition = true;
+      }
     }
-    return deletePaths;
+    return Pair.of(toDeletePartition, deletePaths);
   }
 
   /**
@@ -362,10 +371,10 @@ public class CleanPlanner<T extends HoodieRecordPayload, I, K, O> implements Ser
    * @param partitionPath partition path to check
    * @return list of files to clean
    */
-  private List<CleanFileInfo> getFilesToCleanKeepingLatestHours(String partitionPath) {
+  private Pair<Boolean, List<CleanFileInfo>> getFilesToCleanKeepingLatestHours(String partitionPath) {
     return getFilesToCleanKeepingLatestCommits(partitionPath, 0, HoodieCleaningPolicy.KEEP_LATEST_BY_HOURS);
   }
 
   private List<CleanFileInfo> getReplacedFilesEligibleToClean(List<String> savepointedFiles, String partitionPath, Option<HoodieInstant> earliestCommitToRetain) {
     final Stream<HoodieFileGroup> replacedGroups;
     if (earliestCommitToRetain.isPresent()) {
@@ -416,9 +425,9 @@ public class CleanPlanner<T extends HoodieRecordPayload, I, K, O> implements Ser
   /**
    * Returns files to be cleaned for the given partitionPath based on cleaning policy.
    */
-  public List<CleanFileInfo> getDeletePaths(String partitionPath) {
+  public Pair<Boolean, List<CleanFileInfo>> getDeletePaths(String partitionPath) {
     HoodieCleaningPolicy policy = config.getCleanerPolicy();
-    List<CleanFileInfo> deletePaths;
+    Pair<Boolean, List<CleanFileInfo>> deletePaths;
     if (policy == HoodieCleaningPolicy.KEEP_LATEST_COMMITS) {
       deletePaths = getFilesToCleanKeepingLatestCommits(partitionPath);
     } else if (policy == HoodieCleaningPolicy.KEEP_LATEST_FILE_VERSIONS) {
@@ -428,8 +437,10 @@ public class CleanPlanner<T extends HoodieRecordPayload, I, K, O> implements Ser
     } else {
       throw new IllegalArgumentException("Unknown cleaning policy : " + policy.name());
     }
-    LOG.info(deletePaths.size() + " patterns used to delete in partition path:" + partitionPath);
+    LOG.info(deletePaths.getValue().size() + " patterns used to delete in partition path:" + partitionPath);
+    if (deletePaths.getKey()) {
+      LOG.info("Partition " + partitionPath + " to be deleted");
+    }
     return deletePaths;
   }

View File

@@ -259,7 +259,7 @@ public class TestMetadataConversionUtils extends HoodieCommonTestHarness {
   private void createCleanMetadata(String instantTime) throws IOException {
     HoodieCleanerPlan cleanerPlan = new HoodieCleanerPlan(new HoodieActionInstant("", "", ""), "", new HashMap<>(),
-        CleanPlanV2MigrationHandler.VERSION, new HashMap<>());
+        CleanPlanV2MigrationHandler.VERSION, new HashMap<>(), new ArrayList<>());
     HoodieCleanStat cleanStats = new HoodieCleanStat(
         HoodieCleaningPolicy.KEEP_LATEST_FILE_VERSIONS,
         HoodieTestUtils.DEFAULT_PARTITION_PATHS[new Random().nextInt(HoodieTestUtils.DEFAULT_PARTITION_PATHS.length)],

View File

@@ -18,24 +18,32 @@
 package org.apache.hudi.table.action.commit;
 
+import java.time.Duration;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hudi.avro.model.HoodieRequestedReplaceMetadata;
 import org.apache.hudi.client.WriteStatus;
 import org.apache.hudi.common.data.HoodieData;
 import org.apache.hudi.common.engine.HoodieEngineContext;
 import org.apache.hudi.common.model.HoodieRecordPayload;
 import org.apache.hudi.common.model.WriteOperationType;
+import org.apache.hudi.common.table.timeline.HoodieInstant;
+import org.apache.hudi.common.table.timeline.TimelineMetadataUtils;
 import org.apache.hudi.common.util.HoodieTimer;
 import org.apache.hudi.common.util.collection.Pair;
 import org.apache.hudi.config.HoodieWriteConfig;
 import org.apache.hudi.data.HoodieJavaPairRDD;
+import org.apache.hudi.exception.HoodieDeletePartitionException;
 import org.apache.hudi.table.HoodieTable;
 import org.apache.hudi.table.WorkloadProfile;
 import org.apache.hudi.table.WorkloadStat;
 import org.apache.hudi.table.action.HoodieWriteMetadata;
 
-import java.time.Duration;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
+import static org.apache.hudi.common.table.timeline.HoodieInstant.State.REQUESTED;
+import static org.apache.hudi.common.table.timeline.HoodieTimeline.REPLACE_COMMIT_ACTION;
 
 public class SparkDeletePartitionCommitActionExecutor<T extends HoodieRecordPayload<T>>
     extends SparkInsertOverwriteCommitActionExecutor<T> {
@@ -50,16 +58,35 @@ public class SparkDeletePartitionCommitActionExecutor<T extends HoodieRecordPayl
   @Override
   public HoodieWriteMetadata<HoodieData<WriteStatus>> execute() {
-    HoodieTimer timer = new HoodieTimer().startTimer();
-    context.setJobStatus(this.getClass().getSimpleName(), "Gather all file ids from all deleting partitions.");
-    Map<String, List<String>> partitionToReplaceFileIds = HoodieJavaPairRDD.getJavaPairRDD(context.parallelize(partitions).distinct()
-        .mapToPair(partitionPath -> Pair.of(partitionPath, getAllExistingFileIds(partitionPath)))).collectAsMap();
-    HoodieWriteMetadata<HoodieData<WriteStatus>> result = new HoodieWriteMetadata<>();
-    result.setPartitionToReplaceFileIds(partitionToReplaceFileIds);
-    result.setIndexUpdateDuration(Duration.ofMillis(timer.endTimer()));
-    result.setWriteStatuses(context.emptyHoodieData());
-    this.saveWorkloadProfileMetadataToInflight(new WorkloadProfile(Pair.of(new HashMap<>(), new WorkloadStat())), instantTime);
-    this.commitOnAutoCommit(result);
-    return result;
+    try {
+      HoodieTimer timer = new HoodieTimer().startTimer();
+      context.setJobStatus(this.getClass().getSimpleName(), "Gather all file ids from all deleting partitions.");
+      Map<String, List<String>> partitionToReplaceFileIds =
+          HoodieJavaPairRDD.getJavaPairRDD(context.parallelize(partitions).distinct()
+              .mapToPair(partitionPath -> Pair.of(partitionPath, getAllExistingFileIds(partitionPath)))).collectAsMap();
+      HoodieWriteMetadata<HoodieData<WriteStatus>> result = new HoodieWriteMetadata<>();
+      result.setPartitionToReplaceFileIds(partitionToReplaceFileIds);
+      result.setIndexUpdateDuration(Duration.ofMillis(timer.endTimer()));
+      result.setWriteStatuses(context.emptyHoodieData());
+
+      // created requested
+      HoodieInstant dropPartitionsInstant = new HoodieInstant(REQUESTED, REPLACE_COMMIT_ACTION, instantTime);
+      if (!table.getMetaClient().getFs().exists(new Path(table.getMetaClient().getMetaPath(),
+          dropPartitionsInstant.getFileName()))) {
+        HoodieRequestedReplaceMetadata requestedReplaceMetadata = HoodieRequestedReplaceMetadata.newBuilder()
+            .setOperationType(WriteOperationType.DELETE_PARTITION.name())
+            .setExtraMetadata(extraMetadata.orElse(Collections.emptyMap()))
+            .build();
+        table.getMetaClient().getActiveTimeline().saveToPendingReplaceCommit(dropPartitionsInstant,
+            TimelineMetadataUtils.serializeRequestedReplaceMetadata(requestedReplaceMetadata));
+      }
+
+      this.saveWorkloadProfileMetadataToInflight(new WorkloadProfile(Pair.of(new HashMap<>(), new WorkloadStat())),
+          instantTime);
+      this.commitOnAutoCommit(result);
+      return result;
+    } catch (Exception e) {
+      throw new HoodieDeletePartitionException("Failed to drop partitions for commit time " + instantTime, e);
+    }
   }
 }

View File

@@ -18,7 +18,17 @@
 package org.apache.hudi.client.functional;
 
+import org.apache.avro.Schema;
+import org.apache.avro.generic.GenericRecord;
+import org.apache.avro.generic.IndexedRecord;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.io.hfile.CacheConfig;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.util.Time;
 import org.apache.hudi.avro.HoodieAvroUtils;
+import org.apache.hudi.avro.model.HoodieCleanMetadata;
 import org.apache.hudi.avro.model.HoodieMetadataRecord;
 import org.apache.hudi.client.SparkRDDWriteClient;
 import org.apache.hudi.client.WriteStatus;
@@ -32,6 +42,7 @@ import org.apache.hudi.common.fs.FSUtils;
 import org.apache.hudi.common.metrics.Registry;
 import org.apache.hudi.common.model.FileSlice;
 import org.apache.hudi.common.model.HoodieBaseFile;
+import org.apache.hudi.common.model.HoodieCleaningPolicy;
 import org.apache.hudi.common.model.HoodieCommitMetadata;
 import org.apache.hudi.common.model.HoodieFailedWritesCleaningPolicy;
 import org.apache.hudi.common.model.HoodieFileFormat;
@@ -89,16 +100,6 @@ import org.apache.hudi.table.action.HoodieWriteMetadata;
 import org.apache.hudi.table.upgrade.SparkUpgradeDowngradeHelper;
 import org.apache.hudi.table.upgrade.UpgradeDowngrade;
 import org.apache.hudi.testutils.MetadataMergeWriteStatus;
-import org.apache.avro.Schema;
-import org.apache.avro.generic.GenericRecord;
-import org.apache.avro.generic.IndexedRecord;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.util.Time;
 import org.apache.log4j.LogManager;
 import org.apache.log4j.Logger;
 import org.apache.parquet.avro.AvroSchemaConverter;
@@ -1727,6 +1728,61 @@ public class TestHoodieBackedMetadata extends TestHoodieMetadataBase {
     }
   }
 
+  @Test
+  public void testDeletePartitions() throws Exception {
+    init(HoodieTableType.COPY_ON_WRITE);
+    int maxCommits = 1;
+    HoodieWriteConfig cfg = getConfigBuilder(TRIP_EXAMPLE_SCHEMA, HoodieIndex.IndexType.BLOOM, HoodieFailedWritesCleaningPolicy.EAGER)
+        .withCompactionConfig(HoodieCompactionConfig.newBuilder()
+            .withCleanerPolicy(HoodieCleaningPolicy.KEEP_LATEST_COMMITS).retainCommits(maxCommits).build())
+        .withParallelism(1, 1).withBulkInsertParallelism(1).withFinalizeWriteParallelism(1).withDeleteParallelism(1)
+        .withConsistencyGuardConfig(ConsistencyGuardConfig.newBuilder().withConsistencyCheckEnabled(true).build())
+        .withMetadataConfig(HoodieMetadataConfig.newBuilder().enable(true).build())
+        .build();
+    try (SparkRDDWriteClient client = getHoodieWriteClient(cfg)) {
+      String newCommitTime = HoodieActiveTimeline.createNewInstantTime();
+      client.startCommitWithTime(newCommitTime);
+      List<HoodieRecord> records = dataGen.generateInserts(newCommitTime, 10);
+      List<HoodieRecord> upsertRecords = new ArrayList<>();
+      for (HoodieRecord entry : records) {
+        if (entry.getPartitionPath().equals(HoodieTestDataGenerator.DEFAULT_FIRST_PARTITION_PATH)
+            || entry.getPartitionPath().equals(HoodieTestDataGenerator.DEFAULT_SECOND_PARTITION_PATH)) {
+          upsertRecords.add(entry);
+        }
+      }
+      List<WriteStatus> writeStatuses = client.upsert(jsc.parallelize(upsertRecords, 1), newCommitTime).collect();
+      assertNoWriteErrors(writeStatuses);
+      validateMetadata(client);
+
+      // delete partitions
+      newCommitTime = HoodieActiveTimeline.createNewInstantTime(5000);
+      client.startCommitWithTime(newCommitTime);
+      client.deletePartitions(singletonList(HoodieTestDataGenerator.DEFAULT_FIRST_PARTITION_PATH), newCommitTime);
+      validateMetadata(client);
+
+      // add 1 more commit
+      newCommitTime = HoodieActiveTimeline.createNewInstantTime(5000);
+      client.startCommitWithTime(newCommitTime);
+      records = dataGen.generateInserts(newCommitTime, 10);
+      upsertRecords = new ArrayList<>();
+      for (HoodieRecord entry : records) {
+        if (entry.getPartitionPath().equals(HoodieTestDataGenerator.DEFAULT_SECOND_PARTITION_PATH)) {
+          upsertRecords.add(entry);
+        }
+      }
+      writeStatuses = client.upsert(jsc.parallelize(upsertRecords, 1), newCommitTime).collect();
+      assertNoWriteErrors(writeStatuses);
+
+      // trigger clean which will actually trigger deletion of the partition
+      newCommitTime = HoodieActiveTimeline.createNewInstantTime(5000);
+      HoodieCleanMetadata cleanMetadata = client.clean(newCommitTime);
+      validateMetadata(client);
+      assertEquals(1, metadata(client).getAllPartitionPaths().size());
+    }
+  }
+
   /**
    * Test various error scenarios.
    */

View File

@@ -79,7 +79,7 @@ public class TestHoodieSparkCopyOnWriteTableArchiveWithReplace extends SparkClie
       client.startCommitWithTime(instantTime4, HoodieActiveTimeline.REPLACE_COMMIT_ACTION);
       client.deletePartitions(Arrays.asList(DEFAULT_FIRST_PARTITION_PATH, DEFAULT_SECOND_PARTITION_PATH), instantTime4);
-      // 2nd write batch; 4 commits for the 3rd partition; the 3rd commit to trigger archiving the replace commit
+      // 2nd write batch; 4 commits for the 4th partition; the 4th commit to trigger archiving the replace commit
       for (int i = 5; i < 9; i++) {
         String instantTime = HoodieActiveTimeline.createNewInstantTime(i * 1000);
         client.startCommitWithTime(instantTime);
@@ -97,7 +97,7 @@ public class TestHoodieSparkCopyOnWriteTableArchiveWithReplace extends SparkClie
       // verify records
       final HoodieTimeline timeline2 = metaClient.getCommitTimeline().filterCompletedInstants();
       assertEquals(5, countRecordsOptionallySince(jsc(), basePath(), sqlContext(), timeline2, Option.empty()),
-          "should only have the 4 records from the 3rd partition.");
+          "should only have the 5 records from the 3rd partition.");
     }
   }
 }

View File

@@ -687,7 +687,7 @@ public abstract class HoodieClientTestHarness extends HoodieCommonTestHarness im
   public HoodieInstant createCleanMetadata(String instantTime, boolean inflightOnly, boolean isEmpty) throws IOException {
     HoodieCleanerPlan cleanerPlan = new HoodieCleanerPlan(new HoodieActionInstant("", "", ""), "", new HashMap<>(),
-        CleanPlanV2MigrationHandler.VERSION, new HashMap<>());
+        CleanPlanV2MigrationHandler.VERSION, new HashMap<>(), new ArrayList<>());
     if (inflightOnly) {
       HoodieTestTable.of(metaClient).addInflightClean(instantTime, cleanerPlan);
     } else {

View File

@@ -24,6 +24,7 @@
     {"name": "policy", "type": "string"},
     {"name": "deletePathPatterns", "type": {"type": "array", "items": "string"}},
     {"name": "successDeleteFiles", "type": {"type": "array", "items": "string"}},
-    {"name": "failedDeleteFiles", "type": {"type": "array", "items": "string"}}
+    {"name": "failedDeleteFiles", "type": {"type": "array", "items": "string"}},
+    {"name": "isPartitionDeleted", "type":["null", "boolean"], "default": null }
   ]
 }

View File

@@ -92,6 +92,14 @@
           }
         }}],
       "default" : null
+    },
+    {
+      "name": "partitionsToBeDeleted",
+      "doc": "partitions to be deleted",
+      "type":["null",
+         { "type":"array", "items":"string"}
+      ],
+      "default": null
     }
   ]
 }

View File

@@ -47,19 +47,22 @@ public class HoodieCleanStat implements Serializable {
   private final List<String> failedDeleteBootstrapBaseFiles;
   // Earliest commit that was retained in this clean
   private final String earliestCommitToRetain;
+  // set to true if partition is deleted
+  private final boolean isPartitionDeleted;
 
   public HoodieCleanStat(HoodieCleaningPolicy policy, String partitionPath, List<String> deletePathPatterns,
       List<String> successDeleteFiles, List<String> failedDeleteFiles, String earliestCommitToRetain) {
     this(policy, partitionPath, deletePathPatterns, successDeleteFiles, failedDeleteFiles, earliestCommitToRetain,
         CollectionUtils.createImmutableList(), CollectionUtils.createImmutableList(),
-        CollectionUtils.createImmutableList());
+        CollectionUtils.createImmutableList(), false);
   }
 
   public HoodieCleanStat(HoodieCleaningPolicy policy, String partitionPath, List<String> deletePathPatterns,
       List<String> successDeleteFiles, List<String> failedDeleteFiles,
       String earliestCommitToRetain, List<String> deleteBootstrapBasePathPatterns,
       List<String> successDeleteBootstrapBaseFiles,
-      List<String> failedDeleteBootstrapBaseFiles) {
+      List<String> failedDeleteBootstrapBaseFiles,
+      boolean isPartitionDeleted) {
     this.policy = policy;
     this.partitionPath = partitionPath;
     this.deletePathPatterns = deletePathPatterns;
@@ -69,6 +72,7 @@ public class HoodieCleanStat implements Serializable {
     this.deleteBootstrapBasePathPatterns = deleteBootstrapBasePathPatterns;
     this.successDeleteBootstrapBaseFiles = successDeleteBootstrapBaseFiles;
     this.failedDeleteBootstrapBaseFiles = failedDeleteBootstrapBaseFiles;
+    this.isPartitionDeleted = isPartitionDeleted;
   }
 
   public HoodieCleaningPolicy getPolicy() {
@@ -107,6 +111,10 @@ public class HoodieCleanStat implements Serializable {
     return earliestCommitToRetain;
   }
 
+  public boolean isPartitionDeleted() {
+    return isPartitionDeleted;
+  }
+
   public static HoodieCleanStat.Builder newBuilder() {
     return new Builder();
   }
@@ -125,6 +133,7 @@ public class HoodieCleanStat implements Serializable {
     private List<String> deleteBootstrapBasePathPatterns;
     private List<String> successDeleteBootstrapBaseFiles;
     private List<String> failedDeleteBootstrapBaseFiles;
+    private boolean isPartitionDeleted;
 
     public Builder withPolicy(HoodieCleaningPolicy policy) {
       this.policy = policy;
@@ -172,10 +181,15 @@ public class HoodieCleanStat implements Serializable {
       return this;
     }
 
+    public Builder isPartitionDeleted(boolean isPartitionDeleted) {
+      this.isPartitionDeleted = isPartitionDeleted;
+      return this;
+    }
+
     public HoodieCleanStat build() {
       return new HoodieCleanStat(policy, partitionPath, deletePathPatterns, successDeleteFiles, failedDeleteFiles,
           earliestCommitToRetain, deleteBootstrapBasePathPatterns, successDeleteBootstrapBaseFiles,
-          failedDeleteBootstrapBaseFiles);
+          failedDeleteBootstrapBaseFiles, isPartitionDeleted);
     }
   }
@@ -190,7 +204,8 @@ public class HoodieCleanStat implements Serializable {
         + ", earliestCommitToRetain='" + earliestCommitToRetain
         + ", deleteBootstrapBasePathPatterns=" + deleteBootstrapBasePathPatterns
         + ", successDeleteBootstrapBaseFiles=" + successDeleteBootstrapBaseFiles
-        + ", failedDeleteBootstrapBaseFiles=" + failedDeleteBootstrapBaseFiles + '\''
+        + ", failedDeleteBootstrapBaseFiles=" + failedDeleteBootstrapBaseFiles
+        + ", isPartitionDeleted=" + isPartitionDeleted + '\''
         + '}';
   }
 }

View File

@@ -18,6 +18,7 @@
 package org.apache.hudi.common.table.timeline.versioning.clean;
 
+import java.util.ArrayList;
 import java.util.HashMap;
 
 import org.apache.hudi.avro.model.HoodieCleanerPlan;
 import org.apache.hudi.common.table.HoodieTableMetaClient;
@@ -61,6 +62,6 @@ public class CleanPlanV1MigrationHandler extends AbstractMigratorBase<HoodieClea
           .collect(Collectors.toList()));
     }).collect(Collectors.toMap(Pair::getKey, Pair::getValue));
     return new HoodieCleanerPlan(plan.getEarliestInstantToRetain(), plan.getPolicy(), filesPerPartition, VERSION,
-        new HashMap<>());
+        new HashMap<>(), new ArrayList<>());
   }
 }

View File

@@ -27,6 +27,7 @@ import org.apache.hudi.common.util.collection.Pair;
 
 import org.apache.hadoop.fs.Path;
 
+import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -53,7 +54,7 @@ public class CleanPlanV2MigrationHandler extends AbstractMigratorBase<HoodieClea
             new Path(FSUtils.getPartitionPath(metaClient.getBasePath(), e.getKey()), v).toString(), false))
         .collect(Collectors.toList()))).collect(Collectors.toMap(Pair::getKey, Pair::getValue));
     return new HoodieCleanerPlan(plan.getEarliestInstantToRetain(), plan.getPolicy(), new HashMap<>(), VERSION,
-        filePathsPerPartition);
+        filePathsPerPartition, new ArrayList<>());
   }
 
   @Override

View File

@@ -64,13 +64,13 @@ public class CleanerUtils {
     for (HoodieCleanStat stat : cleanStats) {
       HoodieCleanPartitionMetadata metadata =
           new HoodieCleanPartitionMetadata(stat.getPartitionPath(), stat.getPolicy().name(),
-              stat.getDeletePathPatterns(), stat.getSuccessDeleteFiles(), stat.getFailedDeleteFiles());
+              stat.getDeletePathPatterns(), stat.getSuccessDeleteFiles(), stat.getFailedDeleteFiles(), stat.isPartitionDeleted());
       partitionMetadataMap.put(stat.getPartitionPath(), metadata);
 
       if ((null != stat.getDeleteBootstrapBasePathPatterns())
           && (!stat.getDeleteBootstrapBasePathPatterns().isEmpty())) {
         HoodieCleanPartitionMetadata bootstrapMetadata = new HoodieCleanPartitionMetadata(stat.getPartitionPath(),
             stat.getPolicy().name(), stat.getDeleteBootstrapBasePathPatterns(), stat.getSuccessDeleteBootstrapBaseFiles(),
-            stat.getFailedDeleteBootstrapBaseFiles());
+            stat.getFailedDeleteBootstrapBaseFiles(), stat.isPartitionDeleted());
         partitionBootstrapMetadataMap.put(stat.getPartitionPath(), bootstrapMetadata);
       }
       totalDeleted += stat.getSuccessDeleteFiles().size();

View File

@@ -18,6 +18,13 @@
 package org.apache.hudi.metadata;
 
+import org.apache.avro.Schema;
+import org.apache.avro.generic.GenericRecord;
+import org.apache.avro.generic.IndexedRecord;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hudi.avro.model.HoodieMetadataBloomFilter;
 import org.apache.hudi.avro.model.HoodieMetadataColumnStats;
 import org.apache.hudi.avro.model.HoodieMetadataFileInfo;
@@ -35,14 +42,6 @@ import org.apache.hudi.common.util.hash.PartitionIndexID;
 import org.apache.hudi.exception.HoodieMetadataException;
 import org.apache.hudi.io.storage.HoodieHFileReader;
 
-import org.apache.avro.Schema;
-import org.apache.avro.generic.GenericRecord;
-import org.apache.avro.generic.IndexedRecord;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.Arrays;
@@ -222,8 +221,17 @@ public class HoodieMetadataPayload implements HoodieRecordPayload<HoodieMetadata
    * @param partitions The list of partitions
    */
   public static HoodieRecord<HoodieMetadataPayload> createPartitionListRecord(List<String> partitions) {
+    return createPartitionListRecord(partitions, false);
+  }
+
+  /**
+   * Create and return a {@code HoodieMetadataPayload} to save list of partitions.
+   *
+   * @param partitions The list of partitions
+   */
+  public static HoodieRecord<HoodieMetadataPayload> createPartitionListRecord(List<String> partitions, boolean isDeleted) {
     Map<String, HoodieMetadataFileInfo> fileInfo = new HashMap<>();
-    partitions.forEach(partition -> fileInfo.put(getPartition(partition), new HoodieMetadataFileInfo(0L, false)));
+    partitions.forEach(partition -> fileInfo.put(getPartition(partition), new HoodieMetadataFileInfo(0L, isDeleted)));
 
     HoodieKey key = new HoodieKey(RECORDKEY_PARTITION_LIST, MetadataPartitionType.FILES.getPartitionPath());
     HoodieMetadataPayload payload = new HoodieMetadataPayload(key.getRecordKey(), METADATA_TYPE_PARTITION_LIST,

View File

@@ -18,6 +18,11 @@
 package org.apache.hudi.metadata;
 
+import org.apache.avro.Schema;
+import org.apache.avro.generic.GenericRecord;
+import org.apache.avro.generic.IndexedRecord;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hudi.avro.model.HoodieCleanMetadata;
 import org.apache.hudi.avro.model.HoodieMetadataColumnStats;
 import org.apache.hudi.avro.model.HoodieRestoreMetadata;
@@ -53,17 +58,10 @@ import org.apache.hudi.common.util.collection.Pair;
 import org.apache.hudi.exception.HoodieMetadataException;
 import org.apache.hudi.io.storage.HoodieFileReader;
 import org.apache.hudi.io.storage.HoodieFileReaderFactory;
-import org.apache.avro.Schema;
-import org.apache.avro.generic.GenericRecord;
-import org.apache.avro.generic.IndexedRecord;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
 import org.apache.log4j.LogManager;
 import org.apache.log4j.Logger;
 
 import javax.annotation.Nonnull;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
@@ -162,10 +160,10 @@ public class HoodieTableMetadataUtil {
                                                                String instantTime) {
     List<HoodieRecord> records = new ArrayList<>(commitMetadata.getPartitionToWriteStats().size());
 
-    // Add record bearing partitions list
-    ArrayList<String> partitionsList = new ArrayList<>(commitMetadata.getPartitionToWriteStats().keySet());
-    records.add(HoodieMetadataPayload.createPartitionListRecord(partitionsList));
+    // Add record bearing added partitions list
+    ArrayList<String> partitionsAdded = new ArrayList<>(commitMetadata.getPartitionToWriteStats().keySet());
+    records.add(HoodieMetadataPayload.createPartitionListRecord(partitionsAdded));
 
     // Update files listing records for each individual partition
     List<HoodieRecord<HoodieMetadataPayload>> updatedPartitionFilesRecords =
@@ -318,6 +316,7 @@ public class HoodieTableMetadataUtil {
                                                               String instantTime) {
     List<HoodieRecord> records = new LinkedList<>();
     int[] fileDeleteCount = {0};
+    List<String> deletedPartitions = new ArrayList<>();
     cleanMetadata.getPartitionMetadata().forEach((partitionName, partitionMetadata) -> {
       final String partition = getPartition(partitionName);
       // Files deleted from a partition
@@ -327,8 +326,16 @@ public class HoodieTableMetadataUtil {
       records.add(record);
       fileDeleteCount[0] += deletedFiles.size();
+      boolean isPartitionDeleted = partitionMetadata.getIsPartitionDeleted();
+      if (isPartitionDeleted) {
+        deletedPartitions.add(partitionName);
+      }
     });
 
+    if (!deletedPartitions.isEmpty()) {
+      // if there are partitions to be deleted, add them to delete list
+      records.add(HoodieMetadataPayload.createPartitionListRecord(deletedPartitions, true));
+    }
     LOG.info("Updating at " + instantTime + " from Clean. #partitions_updated=" + records.size()
         + ", #files_deleted=" + fileDeleteCount[0]);
     return records;

View File

@@ -291,7 +291,7 @@ public class HoodieTestTable {
   public HoodieTestTable addClean(String instantTime) throws IOException {
     HoodieCleanerPlan cleanerPlan = new HoodieCleanerPlan(new HoodieActionInstant(EMPTY_STRING, EMPTY_STRING, EMPTY_STRING), EMPTY_STRING, new HashMap<>(),
-        CleanPlanV2MigrationHandler.VERSION, new HashMap<>());
+        CleanPlanV2MigrationHandler.VERSION, new HashMap<>(), new ArrayList<>());
     HoodieCleanStat cleanStats = new HoodieCleanStat(
         HoodieCleaningPolicy.KEEP_LATEST_FILE_VERSIONS,
         HoodieTestUtils.DEFAULT_PARTITION_PATHS[RANDOM.nextInt(HoodieTestUtils.DEFAULT_PARTITION_PATHS.length)],
@@ -305,7 +305,7 @@ public class HoodieTestTable {
   public Pair<HoodieCleanerPlan, HoodieCleanMetadata> getHoodieCleanMetadata(String commitTime, HoodieTestTableState testTableState) {
     HoodieCleanerPlan cleanerPlan = new HoodieCleanerPlan(new HoodieActionInstant(commitTime, CLEAN_ACTION, EMPTY_STRING), EMPTY_STRING, new HashMap<>(),
-        CleanPlanV2MigrationHandler.VERSION, new HashMap<>());
+        CleanPlanV2MigrationHandler.VERSION, new HashMap<>(), new ArrayList<>());
     List<HoodieCleanStat> cleanStats = new ArrayList<>();
     for (Map.Entry<String, List<String>> entry : testTableState.getPartitionToFileIdMapForCleaner(commitTime).entrySet()) {
       cleanStats.add(new HoodieCleanStat(HoodieCleaningPolicy.KEEP_LATEST_FILE_VERSIONS,

View File

@@ -45,8 +45,11 @@ import org.apache.hudi.sync.common.HoodieSyncConfig
 import org.apache.hudi.sync.common.util.SyncUtilHelpers
 import org.apache.hudi.table.BulkInsertPartitioner
 import org.apache.log4j.LogManager
+import org.apache.spark.SPARK_VERSION
 import org.apache.spark.api.java.JavaSparkContext
 import org.apache.spark.rdd.RDD
+import org.apache.spark.sql.internal.{SQLConf, StaticSQLConf}
+import org.apache.spark.sql.types.StructType
 import org.apache.spark.sql._
 import org.apache.spark.sql.internal.StaticSQLConf
 import org.apache.spark.sql.types.StructType

View File

@@ -47,6 +47,9 @@ class TestAlterTableDropPartition extends TestHoodieSqlBase {
     checkExceptionContain(s"alter table $tableName drop partition (dt='2021-10-01')")(
       s"$tableName is a non-partitioned table that is not allowed to drop partition")
+
+    // show partitions
+    checkAnswer(s"show partitions $tableName")(Seq.empty: _*)
   }
 
   test("Purge drop non-partitioned table") {
@@ -71,6 +74,9 @@ class TestAlterTableDropPartition extends TestHoodieSqlBase {
     checkExceptionContain(s"alter table $tableName drop partition (dt='2021-10-01') purge")(
       s"$tableName is a non-partitioned table that is not allowed to drop partition")
+
+    // show partitions
+    checkAnswer(s"show partitions $tableName")(Seq.empty: _*)
   }
 
   Seq(false, true).foreach { urlencode =>
@@ -113,6 +119,13 @@ class TestAlterTableDropPartition extends TestHoodieSqlBase {
       }
       checkAnswer(s"select dt from $tableName")(Seq(s"2021/10/02"))
       assertResult(true)(existsPath(s"${tmp.getCanonicalPath}/$tableName/$partitionPath"))
+
+      // show partitions
+      if (urlencode) {
+        checkAnswer(s"show partitions $tableName")(Seq(PartitionPathEncodeUtils.escapePathName("2021/10/02")))
+      } else {
+        checkAnswer(s"show partitions $tableName")(Seq("2021/10/02"))
+      }
     }
   }
 }
@@ -157,6 +170,13 @@ class TestAlterTableDropPartition extends TestHoodieSqlBase {
       }
       checkAnswer(s"select dt from $tableName")(Seq(s"2021/10/02"))
       assertResult(false)(existsPath(s"${tmp.getCanonicalPath}/$tableName/$partitionPath"))
+
+      // show partitions
+      if (urlencode) {
+        checkAnswer(s"show partitions $tableName")(Seq(PartitionPathEncodeUtils.escapePathName("2021/10/02")))
+      } else {
+        checkAnswer(s"show partitions $tableName")(Seq("2021/10/02"))
+      }
     }
   }
 }
@@ -189,7 +209,10 @@ class TestAlterTableDropPartition extends TestHoodieSqlBase {
     // drop 2021-10-01 partition
     spark.sql(s"alter table $tableName drop partition (dt='2021-10-01')")
-    checkAnswer(s"select id, name, ts, dt from $tableName") (Seq(2, "l4", "v1", "2021-10-02"))
+    checkAnswer(s"select id, name, ts, dt from $tableName")(Seq(2, "l4", "v1", "2021-10-02"))
+
+    // show partitions
+    checkAnswer(s"show partitions $tableName")(Seq("dt=2021-10-02"))
   }
 
   Seq(false, true).foreach { hiveStyle =>
@@ -232,6 +255,13 @@ class TestAlterTableDropPartition extends TestHoodieSqlBase {
       checkAnswer(s"select id, name, ts, year, month, day from $tableName")(
         Seq(2, "l4", "v1", "2021", "10", "02")
       )
+
+      // show partitions
+      if (hiveStyle) {
+        checkAnswer(s"show partitions $tableName")(Seq("year=2021/month=10/day=02"))
+      } else {
+        checkAnswer(s"show partitions $tableName")(Seq("2021/10/02"))
+      }
     }
   }
 }
@@ -274,6 +304,13 @@ class TestAlterTableDropPartition extends TestHoodieSqlBase {
       )
       assertResult(false)(existsPath(
         s"${tmp.getCanonicalPath}/$tableName/year=2021/month=10/day=01"))
+
+      // show partitions
+      if (hiveStyle) {
+        checkAnswer(s"show partitions $tableName")(Seq("year=2021/month=10/day=02"))
+      } else {
+        checkAnswer(s"show partitions $tableName")(Seq("2021/10/02"))
+      }
     }
   }
 }