[HUDI-629]: Replace Guava's Hashing with an equivalent in NumericUtils.java (#1350)
* [HUDI-629]: Replace Guava's Hashing with an equivalent in NumericUtils.java
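In short, call sites that hashed record keys with Guava now go through a small JDK MessageDigest-based helper added to NumericUtils, and Guava's Preconditions checks are replaced by Hudi's own ValidationUtils. A minimal before/after sketch of the hashing change (hypothetical class and variable names; both calls appear at the call sites further down in this diff):

    import java.nio.charset.StandardCharsets;
    import com.google.common.hash.Hashing;
    import org.apache.hudi.common.util.NumericUtils;

    // Minimal sketch of the swap made at each hashing call site.
    class HashSwapSketch {
      static long before(String key) {
        // Guava call removed by this commit
        return Hashing.md5().hashString(key, StandardCharsets.UTF_8).asLong();
      }

      static long after(String key) {
        // JDK MessageDigest via the new helper added in this commit
        return NumericUtils.getMessageDigestHash("MD5", key);
      }
    }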
@@ -118,11 +118,6 @@
<artifactId>metrics-core</artifactId>
</dependency>

<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
</dependency>

<dependency>
<groupId>com.beust</groupId>
<artifactId>jcommander</artifactId>

@@ -36,13 +36,13 @@ import org.apache.hudi.common.util.AvroUtils;
import org.apache.hudi.common.util.CompactionUtils;
import org.apache.hudi.common.util.FSUtils;
import org.apache.hudi.common.util.Option;
import org.apache.hudi.common.util.ValidationUtils;
import org.apache.hudi.common.util.collection.Pair;
import org.apache.hudi.config.HoodieWriteConfig;
import org.apache.hudi.exception.HoodieException;
import org.apache.hudi.exception.HoodieIOException;
import org.apache.hudi.table.compact.OperationResult;

import com.google.common.base.Preconditions;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.log4j.LogManager;
@@ -138,7 +138,7 @@ public class CompactionAdminClient extends AbstractHoodieClient {
// TODO: Add a rollback instant but for compaction
HoodieInstant instant = new HoodieInstant(State.REQUESTED, COMPACTION_ACTION, compactionInstant);
boolean deleted = metaClient.getFs().delete(new Path(metaClient.getMetaPath(), instant.getFileName()), false);
Preconditions.checkArgument(deleted, "Unable to delete compaction instant.");
ValidationUtils.checkArgument(deleted, "Unable to delete compaction instant.");
}
return res;
}
@@ -247,7 +247,7 @@ public class CompactionAdminClient extends AbstractHoodieClient {
List<HoodieLogFile> logFilesToBeMoved =
merged.getLogFiles().filter(lf -> lf.getLogVersion() > maxVersion).collect(Collectors.toList());
return logFilesToBeMoved.stream().map(lf -> {
Preconditions.checkArgument(lf.getLogVersion() - maxVersion > 0, "Expect new log version to be sane");
ValidationUtils.checkArgument(lf.getLogVersion() - maxVersion > 0, "Expect new log version to be sane");
HoodieLogFile newLogFile = new HoodieLogFile(new Path(lf.getPath().getParent(),
FSUtils.makeLogFileName(lf.getFileId(), "." + FSUtils.getFileExtensionFromLog(lf.getPath()),
compactionInstant, lf.getLogVersion() - maxVersion, HoodieLogFormat.UNKNOWN_WRITE_TOKEN)));
@@ -266,9 +266,9 @@ public class CompactionAdminClient extends AbstractHoodieClient {
protected static void renameLogFile(HoodieTableMetaClient metaClient, HoodieLogFile oldLogFile,
HoodieLogFile newLogFile) throws IOException {
FileStatus[] statuses = metaClient.getFs().listStatus(oldLogFile.getPath());
Preconditions.checkArgument(statuses.length == 1, "Only one status must be present");
Preconditions.checkArgument(statuses[0].isFile(), "Source File must exist");
Preconditions.checkArgument(oldLogFile.getPath().getParent().equals(newLogFile.getPath().getParent()),
ValidationUtils.checkArgument(statuses.length == 1, "Only one status must be present");
ValidationUtils.checkArgument(statuses[0].isFile(), "Source File must exist");
ValidationUtils.checkArgument(oldLogFile.getPath().getParent().equals(newLogFile.getPath().getParent()),
"Log file must only be moved within the parent directory");
metaClient.getFs().rename(oldLogFile.getPath(), newLogFile.getPath());
}
@@ -300,9 +300,9 @@ public class CompactionAdminClient extends AbstractHoodieClient {
new Path(FSUtils.getPartitionPath(metaClient.getBasePath(), operation.getPartitionPath()),
new Path(operation.getDataFileName().get())))
.getPath().toString();
Preconditions.checkArgument(df.isPresent(),
ValidationUtils.checkArgument(df.isPresent(),
"Data File must be present. File Slice was : " + fs + ", operation :" + operation);
Preconditions.checkArgument(df.get().getPath().equals(expPath),
ValidationUtils.checkArgument(df.get().getPath().equals(expPath),
"Base Path in operation is specified as " + expPath + " but got path " + df.get().getPath());
}
Set<HoodieLogFile> logFilesInFileSlice = fs.getLogFiles().collect(Collectors.toSet());
@@ -310,7 +310,7 @@ public class CompactionAdminClient extends AbstractHoodieClient {
try {
FileStatus[] fileStatuses = metaClient.getFs().listStatus(new Path(
FSUtils.getPartitionPath(metaClient.getBasePath(), operation.getPartitionPath()), new Path(dp)));
Preconditions.checkArgument(fileStatuses.length == 1, "Expect only 1 file-status");
ValidationUtils.checkArgument(fileStatuses.length == 1, "Expect only 1 file-status");
return new HoodieLogFile(fileStatuses[0]);
} catch (FileNotFoundException fe) {
throw new CompactionValidationException(fe.getMessage());
@@ -320,12 +320,12 @@ public class CompactionAdminClient extends AbstractHoodieClient {
}).collect(Collectors.toSet());
Set<HoodieLogFile> missing = logFilesInCompactionOp.stream().filter(lf -> !logFilesInFileSlice.contains(lf))
.collect(Collectors.toSet());
Preconditions.checkArgument(missing.isEmpty(),
ValidationUtils.checkArgument(missing.isEmpty(),
"All log files specified in compaction operation is not present. Missing :" + missing + ", Exp :"
+ logFilesInCompactionOp + ", Got :" + logFilesInFileSlice);
Set<HoodieLogFile> diff = logFilesInFileSlice.stream().filter(lf -> !logFilesInCompactionOp.contains(lf))
.collect(Collectors.toSet());
Preconditions.checkArgument(diff.stream().allMatch(lf -> lf.getBaseCommitTime().equals(compactionInstant)),
ValidationUtils.checkArgument(diff.stream().allMatch(lf -> lf.getBaseCommitTime().equals(compactionInstant)),
"There are some log-files which are neither specified in compaction plan "
+ "nor present after compaction request instant. Some of these :" + diff);
} else {
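The Preconditions-to-ValidationUtils swap above repeats throughout the rest of this diff. ValidationUtils itself is not part of the diff; the following is a hedged sketch of the signatures these call sites rely on (the method bodies are an assumption, not the actual Hudi source):

    // Assumed shape of org.apache.hudi.common.util.ValidationUtils; only the signatures are taken from this diff.
    public class ValidationUtils {
      public static void checkArgument(boolean expression) {
        if (!expression) {
          throw new IllegalArgumentException();
        }
      }

      public static void checkArgument(boolean expression, String message) {
        if (!expression) {
          throw new IllegalArgumentException(message);
        }
      }

      public static void checkState(boolean expression, String message) {
        if (!expression) {
          throw new IllegalStateException(message);
        }
      }
    }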

@@ -31,13 +31,13 @@ import org.apache.hudi.common.table.timeline.HoodieInstant.State;
import org.apache.hudi.common.util.AvroUtils;
import org.apache.hudi.common.util.CleanerUtils;
import org.apache.hudi.common.util.Option;
import org.apache.hudi.common.util.ValidationUtils;
import org.apache.hudi.config.HoodieWriteConfig;
import org.apache.hudi.exception.HoodieIOException;
import org.apache.hudi.metrics.HoodieMetrics;
import org.apache.hudi.table.HoodieTable;

import com.codahale.metrics.Timer;
import com.google.common.base.Preconditions;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;
import org.apache.spark.api.java.JavaSparkContext;
@@ -147,7 +147,7 @@ public class HoodieCleanClient<T extends HoodieRecordPayload> extends AbstractHo

private HoodieCleanMetadata runClean(HoodieTable<T> table, HoodieInstant cleanInstant,
HoodieCleanerPlan cleanerPlan) {
Preconditions.checkArgument(
ValidationUtils.checkArgument(
cleanInstant.getState().equals(State.REQUESTED) || cleanInstant.getState().equals(State.INFLIGHT));

try {

@@ -43,6 +43,7 @@ import org.apache.hudi.common.util.AvroUtils;
import org.apache.hudi.common.util.CompactionUtils;
import org.apache.hudi.common.util.FSUtils;
import org.apache.hudi.common.util.Option;
import org.apache.hudi.common.util.ValidationUtils;
import org.apache.hudi.config.HoodieCompactionConfig;
import org.apache.hudi.config.HoodieWriteConfig;
import org.apache.hudi.exception.HoodieCommitException;
@@ -62,7 +63,6 @@ import org.apache.hudi.table.WorkloadProfile;
import org.apache.hudi.table.WorkloadStat;

import com.codahale.metrics.Timer;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;
@@ -588,7 +588,7 @@ public class HoodieWriteClient<T extends HoodieRecordPayload> extends AbstractHo
}

// Cannot allow savepoint time on a commit that could have been cleaned
Preconditions.checkArgument(
ValidationUtils.checkArgument(
HoodieTimeline.compareTimestamps(commitTime, lastCommitRetained, HoodieTimeline.GREATER_OR_EQUAL),
"Could not savepoint commit " + commitTime + " as this is beyond the lookup window " + lastCommitRetained);

@@ -704,8 +704,8 @@ public class HoodieWriteClient<T extends HoodieRecordPayload> extends AbstractHo
// Make sure the rollback was successful
Option<HoodieInstant> lastInstant =
activeTimeline.reload().getCommitsAndCompactionTimeline().filterCompletedAndCompactionInstants().lastInstant();
Preconditions.checkArgument(lastInstant.isPresent());
Preconditions.checkArgument(lastInstant.get().getTimestamp().equals(savepointTime),
ValidationUtils.checkArgument(lastInstant.isPresent());
ValidationUtils.checkArgument(lastInstant.get().getTimestamp().equals(savepointTime),
savepointTime + "is not the last commit after rolling back " + commitsToRollback + ", last commit was "
+ lastInstant.get().getTimestamp());
return true;
@@ -876,7 +876,7 @@ public class HoodieWriteClient<T extends HoodieRecordPayload> extends AbstractHo
HoodieTableMetaClient metaClient = createMetaClient(true);
// if there are pending compactions, their instantTime must not be greater than that of this instant time
metaClient.getActiveTimeline().filterPendingCompactionTimeline().lastInstant().ifPresent(latestPending ->
Preconditions.checkArgument(
ValidationUtils.checkArgument(
HoodieTimeline.compareTimestamps(latestPending.getTimestamp(), instantTime, HoodieTimeline.LESSER),
"Latest pending compaction instant time must be earlier than this instant time. Latest Compaction :"
+ latestPending + ", Ingesting at " + instantTime));
@@ -909,7 +909,7 @@ public class HoodieWriteClient<T extends HoodieRecordPayload> extends AbstractHo
HoodieTableMetaClient metaClient = createMetaClient(true);
// if there are inflight writes, their instantTime must not be less than that of compaction instant time
metaClient.getCommitsTimeline().filterPendingExcludingCompaction().firstInstant().ifPresent(earliestInflight -> {
Preconditions.checkArgument(
ValidationUtils.checkArgument(
HoodieTimeline.compareTimestamps(earliestInflight.getTimestamp(), instantTime, HoodieTimeline.GREATER),
"Earliest write inflight instant time must be later than compaction time. Earliest :" + earliestInflight
+ ", Compaction scheduled at " + instantTime);
@@ -919,7 +919,7 @@ public class HoodieWriteClient<T extends HoodieRecordPayload> extends AbstractHo
.getActiveTimeline().getCommitsAndCompactionTimeline().getInstants().filter(instant -> HoodieTimeline
.compareTimestamps(instant.getTimestamp(), instantTime, HoodieTimeline.GREATER_OR_EQUAL))
.collect(Collectors.toList());
Preconditions.checkArgument(conflictingInstants.isEmpty(),
ValidationUtils.checkArgument(conflictingInstants.isEmpty(),
"Following instants have timestamps >= compactionInstant (" + instantTime + ") Instants :"
+ conflictingInstants);
HoodieTable<T> table = HoodieTable.getHoodieTable(metaClient, config, jsc);

@@ -20,11 +20,10 @@ package org.apache.hudi.config;

import org.apache.hudi.common.model.HoodieCleaningPolicy;
import org.apache.hudi.common.model.OverwriteWithLatestAvroPayload;
import org.apache.hudi.common.util.ValidationUtils;
import org.apache.hudi.table.compact.strategy.CompactionStrategy;
import org.apache.hudi.table.compact.strategy.LogFileSizeBasedCompactionStrategy;

import com.google.common.base.Preconditions;

import javax.annotation.concurrent.Immutable;

import java.io.File;
@@ -121,12 +120,9 @@ public class HoodieCompactionConfig extends DefaultHoodieConfig {
private final Properties props = new Properties();

public Builder fromFile(File propertiesFile) throws IOException {
FileReader reader = new FileReader(propertiesFile);
try {
try (FileReader reader = new FileReader(propertiesFile)) {
this.props.load(reader);
return this;
} finally {
reader.close();
}
}

@@ -292,8 +288,8 @@ public class HoodieCompactionConfig extends DefaultHoodieConfig {
int maxInstantsToKeep = Integer.parseInt(props.getProperty(HoodieCompactionConfig.MAX_COMMITS_TO_KEEP_PROP));
int cleanerCommitsRetained =
Integer.parseInt(props.getProperty(HoodieCompactionConfig.CLEANER_COMMITS_RETAINED_PROP));
Preconditions.checkArgument(maxInstantsToKeep > minInstantsToKeep);
Preconditions.checkArgument(minInstantsToKeep > cleanerCommitsRetained,
ValidationUtils.checkArgument(maxInstantsToKeep > minInstantsToKeep);
ValidationUtils.checkArgument(minInstantsToKeep > cleanerCommitsRetained,
String.format(
"Increase %s=%d to be greater than %s=%d. Otherwise, there is risk of incremental pull "
+ "missing data from few instants.",

@@ -96,7 +96,6 @@ public class HoodieWriteConfig extends DefaultHoodieConfig {
private static final String MAX_CONSISTENCY_CHECKS_PROP = "hoodie.consistency.check.max_checks";
private static int DEFAULT_MAX_CONSISTENCY_CHECKS = 7;

private ConsistencyGuardConfig consistencyGuardConfig;

// Hoodie Write Client transparently rewrites File System View config when embedded mode is enabled

@@ -18,14 +18,13 @@

package org.apache.hudi.index.bloom;

import org.apache.hudi.common.util.NumericUtils;
import org.apache.hudi.common.util.collection.Pair;

import com.google.common.hash.Hashing;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;
import org.apache.spark.Partitioner;

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
@@ -144,7 +143,7 @@ public class BucketizedBloomCheckPartitioner extends Partitioner {
@Override
public int getPartition(Object key) {
final Pair<String, String> parts = (Pair<String, String>) key;
final long hashOfKey = Hashing.md5().hashString(parts.getRight(), StandardCharsets.UTF_8).asLong();
final long hashOfKey = NumericUtils.getMessageDigestHash("MD5", parts.getRight());
final List<Integer> candidatePartitions = fileGroupToPartitions.get(parts.getLeft());
final int idx = (int) Math.floorMod(hashOfKey, candidatePartitions.size());
assert idx >= 0;

@@ -37,6 +37,7 @@ import org.apache.hudi.common.table.timeline.HoodieActiveTimeline;
import org.apache.hudi.common.table.timeline.HoodieInstant;
import org.apache.hudi.common.table.timeline.HoodieInstant.State;
import org.apache.hudi.common.util.FSUtils;
import org.apache.hudi.common.util.NumericUtils;
import org.apache.hudi.common.util.Option;
import org.apache.hudi.common.util.collection.Pair;
import org.apache.hudi.common.util.queue.BoundedInMemoryExecutor;
@@ -52,7 +53,6 @@ import org.apache.hudi.execution.SparkBoundedInMemoryExecutor;
import org.apache.hudi.io.HoodieCreateHandle;
import org.apache.hudi.io.HoodieMergeHandle;

import com.google.common.hash.Hashing;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.generic.IndexedRecord;
import org.apache.hadoop.fs.FileSystem;
@@ -72,7 +72,6 @@ import org.apache.spark.api.java.function.PairFlatMapFunction;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.Serializable;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
@@ -732,8 +731,7 @@ public class HoodieCopyOnWriteTable<T extends HoodieRecordPayload> extends Hoodi
// pick the target bucket to use based on the weights.
double totalWeight = 0.0;
final long totalInserts = Math.max(1, globalStat.getNumInserts());
final long hashOfKey =
Hashing.md5().hashString(keyLocation._1().getRecordKey(), StandardCharsets.UTF_8).asLong();
final long hashOfKey = NumericUtils.getMessageDigestHash("MD5", keyLocation._1().getRecordKey());
final double r = 1.0 * Math.floorMod(hashOfKey, totalInserts) / totalInserts;
for (InsertBucket insertBucket : targetBuckets) {
totalWeight += insertBucket.weight;

@@ -34,6 +34,7 @@ import org.apache.hudi.common.table.timeline.HoodieActiveTimeline;
import org.apache.hudi.common.table.timeline.HoodieInstant;
import org.apache.hudi.common.util.FSUtils;
import org.apache.hudi.common.util.Option;
import org.apache.hudi.common.util.ValidationUtils;
import org.apache.hudi.config.HoodieWriteConfig;
import org.apache.hudi.exception.HoodieCompactionException;
import org.apache.hudi.exception.HoodieIOException;
@@ -42,7 +43,6 @@ import org.apache.hudi.execution.MergeOnReadLazyInsertIterable;
import org.apache.hudi.io.HoodieAppendHandle;
import org.apache.hudi.table.compact.HoodieMergeOnReadTableCompactor;

import com.google.common.base.Preconditions;
import org.apache.hudi.table.rollback.RollbackHelper;
import org.apache.hudi.table.rollback.RollbackRequest;
import org.apache.log4j.LogManager;
@@ -422,7 +422,7 @@ public class HoodieMergeOnReadTable<T extends HoodieRecordPayload> extends Hoodi

private List<RollbackRequest> generateAppendRollbackBlocksAction(String partitionPath, HoodieInstant rollbackInstant,
HoodieCommitMetadata commitMetadata) {
Preconditions.checkArgument(rollbackInstant.getAction().equals(HoodieTimeline.DELTA_COMMIT_ACTION));
ValidationUtils.checkArgument(rollbackInstant.getAction().equals(HoodieTimeline.DELTA_COMMIT_ACTION));

// wStat.getPrevCommit() might not give the right commit time in the following
// scenario : If a compaction was scheduled, the new commitTime associated with the requested compaction will be
@@ -439,9 +439,9 @@ public class HoodieMergeOnReadTable<T extends HoodieRecordPayload> extends Hoodi

if (validForRollback) {
// For sanity, log instant time can never be less than base-commit on which we are rolling back
Preconditions
ValidationUtils
.checkArgument(HoodieTimeline.compareTimestamps(fileIdToBaseCommitTimeForLogMap.get(wStat.getFileId()),
rollbackInstant.getTimestamp(), HoodieTimeline.LESSER_OR_EQUAL));
rollbackInstant.getTimestamp(), HoodieTimeline.LESSER_OR_EQUAL));
}

return validForRollback && HoodieTimeline.compareTimestamps(fileIdToBaseCommitTimeForLogMap.get(

@@ -35,14 +35,13 @@ import org.apache.hudi.common.util.CompactionUtils;
import org.apache.hudi.common.util.FSUtils;
import org.apache.hudi.common.util.HoodieAvroUtils;
import org.apache.hudi.common.util.Option;
import org.apache.hudi.common.util.ValidationUtils;
import org.apache.hudi.common.util.collection.Pair;
import org.apache.hudi.config.HoodieWriteConfig;
import org.apache.hudi.table.compact.strategy.CompactionStrategy;
import org.apache.hudi.table.HoodieCopyOnWriteTable;
import org.apache.hudi.table.HoodieTable;

import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import org.apache.avro.Schema;
import org.apache.hadoop.fs.FileSystem;
@@ -56,6 +55,7 @@ import org.apache.spark.util.AccumulatorV2;
import org.apache.spark.util.LongAccumulator;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
@@ -125,7 +125,7 @@ public class HoodieMergeOnReadTableCompactor implements HoodieCompactor {
config.getCompactionReverseLogReadEnabled(), config.getMaxDFSStreamBufferSize(),
config.getSpillableMapBasePath());
if (!scanner.iterator().hasNext()) {
return Lists.<WriteStatus>newArrayList();
return new ArrayList<>();
}

Option<HoodieBaseFile> oldDataFileOpt =
@@ -169,7 +169,7 @@ public class HoodieMergeOnReadTableCompactor implements HoodieCompactor {
jsc.sc().register(totalLogFiles);
jsc.sc().register(totalFileSlices);

Preconditions.checkArgument(hoodieTable.getMetaClient().getTableType() == HoodieTableType.MERGE_ON_READ,
ValidationUtils.checkArgument(hoodieTable.getMetaClient().getTableType() == HoodieTableType.MERGE_ON_READ,
"Can only compact table of type " + HoodieTableType.MERGE_ON_READ + " and not "
+ hoodieTable.getMetaClient().getTableType().name());

@@ -214,7 +214,7 @@ public class HoodieMergeOnReadTableCompactor implements HoodieCompactor {
// compactions only
HoodieCompactionPlan compactionPlan = config.getCompactionStrategy().generateCompactionPlan(config, operations,
CompactionUtils.getAllPendingCompactionPlans(metaClient).stream().map(Pair::getValue).collect(toList()));
Preconditions.checkArgument(
ValidationUtils.checkArgument(
compactionPlan.getOperations().stream().noneMatch(
op -> fgIdsInPendingCompactions.contains(new HoodieFileGroupId(op.getPartitionPath(), op.getFileId()))),
"Bad Compaction Plan. FileId MUST NOT have multiple pending compactions. "

@@ -28,10 +28,10 @@ import org.apache.hudi.common.table.log.block.HoodieCommandBlock.HoodieCommandBl
import org.apache.hudi.common.table.log.block.HoodieLogBlock.HeaderMetadataType;
import org.apache.hudi.common.table.timeline.HoodieInstant;
import org.apache.hudi.common.util.FSUtils;
import org.apache.hudi.common.util.ValidationUtils;
import org.apache.hudi.config.HoodieWriteConfig;
import org.apache.hudi.exception.HoodieRollbackException;

import com.google.common.base.Preconditions;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.PathFilter;
@@ -147,7 +147,7 @@ public class RollbackHelper implements Serializable {
* @return Merged HoodieRollbackStat
*/
private HoodieRollbackStat mergeRollbackStat(HoodieRollbackStat stat1, HoodieRollbackStat stat2) {
Preconditions.checkArgument(stat1.getPartitionPath().equals(stat2.getPartitionPath()));
ValidationUtils.checkArgument(stat1.getPartitionPath().equals(stat2.getPartitionPath()));
final List<String> successDeleteFiles = new ArrayList<>();
final List<String> failedDeleteFiles = new ArrayList<>();
final Map<FileStatus, Long> commandBlocksCount = new HashMap<>();

@@ -37,7 +37,7 @@ public class TestHoodieWriteConfig {
@Test
public void testPropertyLoading() throws IOException {
Builder builder = HoodieWriteConfig.newBuilder().withPath("/tmp");
Map<String, String> params = new HashMap<>();
Map<String, String> params = new HashMap<>(3);
params.put(HoodieCompactionConfig.CLEANER_COMMITS_RETAINED_PROP, "1");
params.put(HoodieCompactionConfig.MAX_COMMITS_TO_KEEP_PROP, "5");
params.put(HoodieCompactionConfig.MIN_COMMITS_TO_KEEP_PROP, "2");

@@ -18,7 +18,7 @@

package org.apache.hudi.common.model;

import com.google.common.base.Preconditions;
import org.apache.hudi.common.util.ValidationUtils;

import java.io.Serializable;
import java.util.Objects;
@@ -37,8 +37,8 @@ public class TimelineLayoutVersion implements Serializable, Comparable<TimelineL
private Integer version;

public TimelineLayoutVersion(Integer version) {
Preconditions.checkArgument(version <= CURR_VERSION);
Preconditions.checkArgument(version >= VERSION_0);
ValidationUtils.checkArgument(version <= CURR_VERSION);
ValidationUtils.checkArgument(version >= VERSION_0);
this.version = version;
}

@@ -26,14 +26,14 @@ import org.apache.hudi.common.table.timeline.HoodieActiveTimeline;
import org.apache.hudi.common.table.timeline.HoodieArchivedTimeline;
import org.apache.hudi.common.table.timeline.HoodieInstant;
import org.apache.hudi.common.util.ConsistencyGuardConfig;
import org.apache.hudi.common.util.FSUtils;
import org.apache.hudi.common.util.FailSafeConsistencyGuard;
import org.apache.hudi.common.util.FSUtils;
import org.apache.hudi.common.util.NoOpConsistencyGuard;
import org.apache.hudi.common.util.Option;
import org.apache.hudi.common.util.ValidationUtils;
import org.apache.hudi.exception.TableNotFoundException;
import org.apache.hudi.exception.HoodieException;

import com.google.common.base.Preconditions;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
@@ -119,7 +119,7 @@ public class HoodieTableMetaClient implements Serializable {
Option<TimelineLayoutVersion> tableConfigVersion = tableConfig.getTimelineLayoutVersion();
if (layoutVersion.isPresent() && tableConfigVersion.isPresent()) {
// Ensure layout version passed in config is not lower than the one seen in hoodie.properties
Preconditions.checkArgument(layoutVersion.get().compareTo(tableConfigVersion.get()) >= 0,
ValidationUtils.checkArgument(layoutVersion.get().compareTo(tableConfigVersion.get()) >= 0,
"Layout Version defined in hoodie properties has higher version (" + tableConfigVersion.get()
+ ") than the one passed in config (" + layoutVersion.get() + ")");
}
@@ -233,7 +233,7 @@ public class HoodieTableMetaClient implements Serializable {
public HoodieWrapperFileSystem getFs() {
if (fs == null) {
FileSystem fileSystem = FSUtils.getFs(metaPath, hadoopConf.newCopy());
Preconditions.checkArgument(!(fileSystem instanceof HoodieWrapperFileSystem),
ValidationUtils.checkArgument(!(fileSystem instanceof HoodieWrapperFileSystem),
"File System not expected to be that of HoodieWrapperFileSystem");
fs = new HoodieWrapperFileSystem(fileSystem,
consistencyGuardConfig.isConsistencyCheckEnabled()

@@ -28,11 +28,11 @@ import org.apache.hudi.common.table.log.block.HoodieLogBlock.HeaderMetadataType;
import org.apache.hudi.common.table.log.block.HoodieLogBlock.HoodieLogBlockType;
import org.apache.hudi.common.util.FSUtils;
import org.apache.hudi.common.util.Option;
import org.apache.hudi.common.util.ValidationUtils;
import org.apache.hudi.exception.CorruptedLogFileException;
import org.apache.hudi.exception.HoodieIOException;
import org.apache.hudi.exception.HoodieNotSupportedException;

import com.google.common.base.Preconditions;
import org.apache.avro.Schema;
import org.apache.hadoop.fs.BufferedFSInputStream;
import org.apache.hadoop.fs.FSDataInputStream;
@@ -61,7 +61,6 @@ class HoodieLogFileReader implements HoodieLogFormat.Reader {
private final HoodieLogFile logFile;
private static final byte[] MAGIC_BUFFER = new byte[6];
private final Schema readerSchema;
private HoodieLogFormat.LogFormatVersion nextBlockVersion;
private boolean readBlockLazily;
private long reverseLogFilePosition;
private long lastReverseLogFilePosition;
@@ -145,13 +144,13 @@ class HoodieLogFileReader implements HoodieLogFormat.Reader {
}

// 2. Read the version for this log format
this.nextBlockVersion = readVersion();
HoodieLogFormat.LogFormatVersion nextBlockVersion = readVersion();

// 3. Read the block type for a log block
if (nextBlockVersion.getVersion() != HoodieLogFormatVersion.DEFAULT_VERSION) {
type = inputStream.readInt();

Preconditions.checkArgument(type < HoodieLogBlockType.values().length, "Invalid block byte type found " + type);
ValidationUtils.checkArgument(type < HoodieLogBlockType.values().length, "Invalid block byte type found " + type);
blockType = HoodieLogBlockType.values()[type];
}

@@ -23,9 +23,9 @@ import org.apache.hudi.common.table.HoodieTimeline;
import org.apache.hudi.common.table.timeline.HoodieInstant.State;
import org.apache.hudi.common.util.FileIOUtils;
import org.apache.hudi.common.util.Option;
import org.apache.hudi.common.util.ValidationUtils;
import org.apache.hudi.exception.HoodieIOException;

import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableSet;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
@@ -140,7 +140,7 @@ public class HoodieActiveTimeline extends HoodieDefaultTimeline {

public void saveAsComplete(HoodieInstant instant, Option<byte[]> data) {
LOG.info("Marking instant complete " + instant);
Preconditions.checkArgument(instant.isInflight(),
ValidationUtils.checkArgument(instant.isInflight(),
"Could not mark an already completed instant as complete again " + instant);
transitionState(instant, HoodieTimeline.getCompletedInstant(instant), data);
LOG.info("Completed " + instant);
@@ -155,18 +155,18 @@ public class HoodieActiveTimeline extends HoodieDefaultTimeline {
}

public void deleteInflight(HoodieInstant instant) {
Preconditions.checkArgument(instant.isInflight());
ValidationUtils.checkArgument(instant.isInflight());
deleteInstantFile(instant);
}

public void deletePending(HoodieInstant instant) {
Preconditions.checkArgument(!instant.isCompleted());
ValidationUtils.checkArgument(!instant.isCompleted());
deleteInstantFile(instant);
}

public void deleteCompactionRequested(HoodieInstant instant) {
Preconditions.checkArgument(instant.isRequested());
Preconditions.checkArgument(instant.getAction().equals(HoodieTimeline.COMPACTION_ACTION));
ValidationUtils.checkArgument(instant.isRequested());
ValidationUtils.checkArgument(instant.getAction().equals(HoodieTimeline.COMPACTION_ACTION));
deleteInstantFile(instant);
}

@@ -222,8 +222,8 @@ public class HoodieActiveTimeline extends HoodieDefaultTimeline {
* @return requested instant
*/
public HoodieInstant revertCompactionInflightToRequested(HoodieInstant inflightInstant) {
Preconditions.checkArgument(inflightInstant.getAction().equals(HoodieTimeline.COMPACTION_ACTION));
Preconditions.checkArgument(inflightInstant.isInflight());
ValidationUtils.checkArgument(inflightInstant.getAction().equals(HoodieTimeline.COMPACTION_ACTION));
ValidationUtils.checkArgument(inflightInstant.isInflight());
HoodieInstant requestedInstant =
new HoodieInstant(State.REQUESTED, COMPACTION_ACTION, inflightInstant.getTimestamp());
if (metaClient.getTimelineLayoutVersion().isNullVersion()) {
@@ -242,8 +242,8 @@ public class HoodieActiveTimeline extends HoodieDefaultTimeline {
* @return inflight instant
*/
public HoodieInstant transitionCompactionRequestedToInflight(HoodieInstant requestedInstant) {
Preconditions.checkArgument(requestedInstant.getAction().equals(HoodieTimeline.COMPACTION_ACTION));
Preconditions.checkArgument(requestedInstant.isRequested());
ValidationUtils.checkArgument(requestedInstant.getAction().equals(HoodieTimeline.COMPACTION_ACTION));
ValidationUtils.checkArgument(requestedInstant.isRequested());
HoodieInstant inflightInstant =
new HoodieInstant(State.INFLIGHT, COMPACTION_ACTION, requestedInstant.getTimestamp());
transitionState(requestedInstant, inflightInstant, Option.empty());
@@ -258,8 +258,8 @@ public class HoodieActiveTimeline extends HoodieDefaultTimeline {
* @return commit instant
*/
public HoodieInstant transitionCompactionInflightToComplete(HoodieInstant inflightInstant, Option<byte[]> data) {
Preconditions.checkArgument(inflightInstant.getAction().equals(HoodieTimeline.COMPACTION_ACTION));
Preconditions.checkArgument(inflightInstant.isInflight());
ValidationUtils.checkArgument(inflightInstant.getAction().equals(HoodieTimeline.COMPACTION_ACTION));
ValidationUtils.checkArgument(inflightInstant.isInflight());
HoodieInstant commitInstant = new HoodieInstant(State.COMPLETED, COMMIT_ACTION, inflightInstant.getTimestamp());
transitionState(inflightInstant, commitInstant, data);
return commitInstant;
@@ -283,8 +283,8 @@ public class HoodieActiveTimeline extends HoodieDefaultTimeline {
* @return commit instant
*/
public HoodieInstant transitionCleanInflightToComplete(HoodieInstant inflightInstant, Option<byte[]> data) {
Preconditions.checkArgument(inflightInstant.getAction().equals(HoodieTimeline.CLEAN_ACTION));
Preconditions.checkArgument(inflightInstant.isInflight());
ValidationUtils.checkArgument(inflightInstant.getAction().equals(HoodieTimeline.CLEAN_ACTION));
ValidationUtils.checkArgument(inflightInstant.isInflight());
HoodieInstant commitInstant = new HoodieInstant(State.COMPLETED, CLEAN_ACTION, inflightInstant.getTimestamp());
// Then write to timeline
transitionState(inflightInstant, commitInstant, data);
@@ -299,15 +299,15 @@ public class HoodieActiveTimeline extends HoodieDefaultTimeline {
* @return commit instant
*/
public HoodieInstant transitionCleanRequestedToInflight(HoodieInstant requestedInstant, Option<byte[]> data) {
Preconditions.checkArgument(requestedInstant.getAction().equals(HoodieTimeline.CLEAN_ACTION));
Preconditions.checkArgument(requestedInstant.isRequested());
ValidationUtils.checkArgument(requestedInstant.getAction().equals(HoodieTimeline.CLEAN_ACTION));
ValidationUtils.checkArgument(requestedInstant.isRequested());
HoodieInstant inflight = new HoodieInstant(State.INFLIGHT, CLEAN_ACTION, requestedInstant.getTimestamp());
transitionState(requestedInstant, inflight, data);
return inflight;
}

private void transitionState(HoodieInstant fromInstant, HoodieInstant toInstant, Option<byte[]> data) {
Preconditions.checkArgument(fromInstant.getTimestamp().equals(toInstant.getTimestamp()));
ValidationUtils.checkArgument(fromInstant.getTimestamp().equals(toInstant.getTimestamp()));
try {
if (metaClient.getTimelineLayoutVersion().isNullVersion()) {
// Re-create the .inflight file by opening a new file and write the commit metadata in
@@ -321,7 +321,7 @@ public class HoodieActiveTimeline extends HoodieDefaultTimeline {
} else {
// Ensures old state exists in timeline
LOG.info("Checking for file exists ?" + new Path(metaClient.getMetaPath(), fromInstant.getFileName()));
Preconditions.checkArgument(metaClient.getFs().exists(new Path(metaClient.getMetaPath(),
ValidationUtils.checkArgument(metaClient.getFs().exists(new Path(metaClient.getMetaPath(),
fromInstant.getFileName())));
// Use Write Once to create Target File
createImmutableFileInPath(new Path(metaClient.getMetaPath(), toInstant.getFileName()), data);
@@ -333,7 +333,7 @@ public class HoodieActiveTimeline extends HoodieDefaultTimeline {
}

private void revertCompleteToInflight(HoodieInstant completed, HoodieInstant inflight) {
Preconditions.checkArgument(completed.getTimestamp().equals(inflight.getTimestamp()));
ValidationUtils.checkArgument(completed.getTimestamp().equals(inflight.getTimestamp()));
Path inFlightCommitFilePath = new Path(metaClient.getMetaPath(), inflight.getFileName());
Path commitFilePath = new Path(metaClient.getMetaPath(), completed.getFileName());
try {
@@ -359,7 +359,7 @@ public class HoodieActiveTimeline extends HoodieDefaultTimeline {
}

boolean success = metaClient.getFs().delete(commitFilePath, false);
Preconditions.checkArgument(success, "State Reverting failed");
ValidationUtils.checkArgument(success, "State Reverting failed");
}
} catch (IOException e) {
throw new HoodieIOException("Could not complete revert " + completed, e);
@@ -368,7 +368,7 @@ public class HoodieActiveTimeline extends HoodieDefaultTimeline {

public void transitionRequestedToInflight(HoodieInstant requested, Option<byte[]> content) {
HoodieInstant inflight = new HoodieInstant(State.INFLIGHT, requested.getAction(), requested.getTimestamp());
Preconditions.checkArgument(requested.isRequested(), "Instant " + requested + " in wrong state");
ValidationUtils.checkArgument(requested.isRequested(), "Instant " + requested + " in wrong state");
transitionState(requested, inflight, content);
}

@@ -377,15 +377,15 @@ public class HoodieActiveTimeline extends HoodieDefaultTimeline {
}

public void saveToCompactionRequested(HoodieInstant instant, Option<byte[]> content, boolean overwrite) {
Preconditions.checkArgument(instant.getAction().equals(HoodieTimeline.COMPACTION_ACTION));
ValidationUtils.checkArgument(instant.getAction().equals(HoodieTimeline.COMPACTION_ACTION));
// Write workload to auxiliary folder
createFileInAuxiliaryFolder(instant, content);
createFileInMetaPath(instant.getFileName(), content, overwrite);
}

public void saveToCleanRequested(HoodieInstant instant, Option<byte[]> content) {
Preconditions.checkArgument(instant.getAction().equals(HoodieTimeline.CLEAN_ACTION));
Preconditions.checkArgument(instant.getState().equals(State.REQUESTED));
ValidationUtils.checkArgument(instant.getAction().equals(HoodieTimeline.CLEAN_ACTION));
ValidationUtils.checkArgument(instant.getState().equals(State.REQUESTED));
// Plan is stored in meta path
createFileInMetaPath(instant.getFileName(), content, false);
}

@@ -32,10 +32,10 @@ import org.apache.hudi.common.util.CompactionUtils;
import org.apache.hudi.common.util.FSUtils;
import org.apache.hudi.common.util.HoodieTimer;
import org.apache.hudi.common.util.Option;
import org.apache.hudi.common.util.ValidationUtils;
import org.apache.hudi.common.util.collection.Pair;
import org.apache.hudi.exception.HoodieIOException;

import com.google.common.base.Preconditions;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.log4j.LogManager;
@@ -116,10 +116,9 @@ public abstract class AbstractTableFileSystemView implements SyncableFileSystemV
long fgBuildTimeTakenMs = timer.endTimer();
timer.startTimer();
// Group by partition for efficient updates for both InMemory and DiskBased stuctures.
fileGroups.stream().collect(Collectors.groupingBy(HoodieFileGroup::getPartitionPath)).entrySet().forEach(entry -> {
String partition = entry.getKey();
fileGroups.stream().collect(Collectors.groupingBy(HoodieFileGroup::getPartitionPath)).forEach((partition, value) -> {
if (!isPartitionAvailableInStore(partition)) {
storePartitionView(partition, entry.getValue());
storePartitionView(partition, value);
}
});
long storePartitionsTs = timer.endTimer();
@@ -209,7 +208,7 @@ public abstract class AbstractTableFileSystemView implements SyncableFileSystemV
*/
private void ensurePartitionLoadedCorrectly(String partition) {

Preconditions.checkArgument(!isClosed(), "View is already closed");
ValidationUtils.checkArgument(!isClosed(), "View is already closed");

// ensure we list files only once even in the face of concurrency
addedPartitions.computeIfAbsent(partition, (partitionPathStr) -> {
@@ -397,11 +396,9 @@ public abstract class AbstractTableFileSystemView implements SyncableFileSystemV
public final Stream<HoodieBaseFile> getLatestBaseFilesInRange(List<String> commitsToReturn) {
try {
readLock.lock();
return fetchAllStoredFileGroups().map(fileGroup -> {
return Option.fromJavaOptional(
fileGroup.getAllBaseFiles().filter(baseFile -> commitsToReturn.contains(baseFile.getCommitTime())
&& !isBaseFileDueToPendingCompaction(baseFile)).findFirst());
}).filter(Option::isPresent).map(Option::get);
return fetchAllStoredFileGroups().map(fileGroup -> Option.fromJavaOptional(
fileGroup.getAllBaseFiles().filter(baseFile -> commitsToReturn.contains(baseFile.getCommitTime())
&& !isBaseFileDueToPendingCompaction(baseFile)).findFirst())).filter(Option::isPresent).map(Option::get);
} finally {
readLock.unlock();
}
@@ -443,7 +440,7 @@ public abstract class AbstractTableFileSystemView implements SyncableFileSystemV
String partitionPath = formatPartitionKey(partitionStr);
ensurePartitionLoadedCorrectly(partitionPath);
Option<FileSlice> fs = fetchLatestFileSlice(partitionPath, fileId);
return fs.map(f -> filterBaseFileAfterPendingCompaction(f));
return fs.map(this::filterBaseFileAfterPendingCompaction);
} finally {
readLock.unlock();
}
@@ -480,7 +477,7 @@ public abstract class AbstractTableFileSystemView implements SyncableFileSystemV
ensurePartitionLoadedCorrectly(partitionPath);
Stream<FileSlice> fileSliceStream = fetchLatestFileSlicesBeforeOrOn(partitionPath, maxCommitTime);
if (includeFileSlicesInPendingCompaction) {
return fileSliceStream.map(fs -> filterBaseFileAfterPendingCompaction(fs));
return fileSliceStream.map(this::filterBaseFileAfterPendingCompaction);
} else {
return fileSliceStream.filter(fs -> !isPendingCompactionScheduledForFileId(fs.getFileGroupId()));
}
@@ -815,7 +812,7 @@ public abstract class AbstractTableFileSystemView implements SyncableFileSystemV
/**
* Return Only Commits and Compaction timeline for building file-groups.
*
* @return
* @return {@code HoodieTimeline}
*/
public HoodieTimeline getVisibleCommitsAndCompactionTimeline() {
return visibleCommitsAndCompactionTimeline;

@@ -18,10 +18,9 @@

package org.apache.hudi.common.table.view;

import org.apache.hudi.common.util.ValidationUtils;
import org.apache.hudi.config.DefaultHoodieConfig;

import com.google.common.base.Preconditions;

import java.io.File;
import java.io.FileReader;
import java.io.IOException;
@@ -192,7 +191,7 @@ public class FileSystemViewStorageConfig extends DefaultHoodieConfig {
// Validations
FileSystemViewStorageType.valueOf(props.getProperty(FILESYSTEM_VIEW_STORAGE_TYPE));
FileSystemViewStorageType.valueOf(props.getProperty(FILESYSTEM_SECONDARY_VIEW_STORAGE_TYPE));
Preconditions.checkArgument(Integer.parseInt(props.getProperty(FILESYSTEM_VIEW_REMOTE_PORT)) > 0);
ValidationUtils.checkArgument(Integer.parseInt(props.getProperty(FILESYSTEM_VIEW_REMOTE_PORT)) > 0);
return new FileSystemViewStorageConfig(props);
}
}

@@ -25,15 +25,16 @@ import org.apache.hudi.common.table.HoodieTableMetaClient;
import org.apache.hudi.common.table.HoodieTimeline;
import org.apache.hudi.common.table.TableFileSystemView;
import org.apache.hudi.common.util.Option;
import org.apache.hudi.common.util.ValidationUtils;
import org.apache.hudi.common.util.collection.Pair;

import com.google.common.base.Preconditions;
import org.apache.hadoop.fs.FileStatus;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
@@ -134,15 +135,14 @@ public class HoodieTableFileSystemView extends IncrementalTimelineSyncFileSystem
@Override
protected void resetPendingCompactionOperations(Stream<Pair<String, CompactionOperation>> operations) {
// Build fileId to Pending Compaction Instants
this.fgIdToPendingCompaction = createFileIdToPendingCompactionMap(operations.map(entry -> {
return Pair.of(entry.getValue().getFileGroupId(), Pair.of(entry.getKey(), entry.getValue()));
}).collect(Collectors.toMap(Pair::getKey, Pair::getValue)));
this.fgIdToPendingCompaction = createFileIdToPendingCompactionMap(operations.map(entry ->
Pair.of(entry.getValue().getFileGroupId(), Pair.of(entry.getKey(), entry.getValue()))).collect(Collectors.toMap(Pair::getKey, Pair::getValue)));
}

@Override
protected void addPendingCompactionOperations(Stream<Pair<String, CompactionOperation>> operations) {
operations.forEach(opInstantPair -> {
Preconditions.checkArgument(!fgIdToPendingCompaction.containsKey(opInstantPair.getValue().getFileGroupId()),
ValidationUtils.checkArgument(!fgIdToPendingCompaction.containsKey(opInstantPair.getValue().getFileGroupId()),
"Duplicate FileGroupId found in pending compaction operations. FgId :"
+ opInstantPair.getValue().getFileGroupId());
fgIdToPendingCompaction.put(opInstantPair.getValue().getFileGroupId(),
@@ -153,7 +153,7 @@ public class HoodieTableFileSystemView extends IncrementalTimelineSyncFileSystem
@Override
protected void removePendingCompactionOperations(Stream<Pair<String, CompactionOperation>> operations) {
operations.forEach(opInstantPair -> {
Preconditions.checkArgument(fgIdToPendingCompaction.containsKey(opInstantPair.getValue().getFileGroupId()),
ValidationUtils.checkArgument(fgIdToPendingCompaction.containsKey(opInstantPair.getValue().getFileGroupId()),
"Trying to remove a FileGroupId which is not found in pending compaction operations. FgId :"
+ opInstantPair.getValue().getFileGroupId());
fgIdToPendingCompaction.remove(opInstantPair.getValue().getFileGroupId());
@@ -166,8 +166,7 @@ public class HoodieTableFileSystemView extends IncrementalTimelineSyncFileSystem
*/
@Override
Stream<HoodieFileGroup> fetchAllStoredFileGroups(String partition) {
final List<HoodieFileGroup> fileGroups = new ArrayList<>();
fileGroups.addAll(partitionToFileGroupsMap.get(partition));
final List<HoodieFileGroup> fileGroups = new ArrayList<>(partitionToFileGroupsMap.get(partition));
return fileGroups.stream();
}

@@ -200,9 +199,7 @@ public class HoodieTableFileSystemView extends IncrementalTimelineSyncFileSystem

@Override
public Stream<HoodieFileGroup> fetchAllStoredFileGroups() {
return partitionToFileGroupsMap.values().stream().flatMap(fg -> {
return fg.stream();
});
return partitionToFileGroupsMap.values().stream().flatMap(Collection::stream);
}

@Override

@@ -34,12 +34,12 @@ import org.apache.hudi.common.table.timeline.dto.InstantDTO;
import org.apache.hudi.common.table.timeline.dto.TimelineDTO;
import org.apache.hudi.common.util.Option;
import org.apache.hudi.common.util.StringUtils;
import org.apache.hudi.common.util.ValidationUtils;
import org.apache.hudi.common.util.collection.Pair;
import org.apache.hudi.exception.HoodieRemoteException;

import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Preconditions;
import org.apache.http.client.fluent.Request;
import org.apache.http.client.fluent.Response;
import org.apache.http.client.utils.URIBuilder;
@@ -134,14 +134,12 @@ public class RemoteHoodieTableFileSystemView implements SyncableFileSystemView,

private <T> T executeRequest(String requestPath, Map<String, String> queryParameters, TypeReference reference,
RequestMethod method) throws IOException {
Preconditions.checkArgument(!closed, "View already closed");
ValidationUtils.checkArgument(!closed, "View already closed");

URIBuilder builder =
new URIBuilder().setHost(serverHost).setPort(serverPort).setPath(requestPath).setScheme("http");

queryParameters.entrySet().stream().forEach(entry -> {
builder.addParameter(entry.getKey(), entry.getValue());
});
queryParameters.forEach(builder::addParameter);

// Adding mandatory parameters - Last instants affecting file-slice
timeline.lastInstant().ifPresent(instant -> builder.addParameter(LAST_INSTANT_TS, instant.getTimestamp()));
@@ -149,7 +147,7 @@ public class RemoteHoodieTableFileSystemView implements SyncableFileSystemView,

String url = builder.toString();
LOG.info("Sending request : (" + url + ")");
Response response = null;
Response response;
int timeout = 1000 * 300; // 5 min timeout
switch (method) {
case GET:
@@ -197,7 +195,7 @@ public class RemoteHoodieTableFileSystemView implements SyncableFileSystemView,
Map<String, String> paramsMap = new HashMap<>();
paramsMap.put(BASEPATH_PARAM, basePath);
paramsMap.put(PARTITION_PARAM, partitionPath);
Preconditions.checkArgument(paramNames.length == paramVals.length);
ValidationUtils.checkArgument(paramNames.length == paramVals.length);
for (int i = 0; i < paramNames.length; i++) {
paramsMap.put(paramNames[i], paramVals[i]);
}

@@ -29,9 +29,9 @@ import org.apache.hudi.common.table.HoodieTimeline;
import org.apache.hudi.common.util.Option;
import org.apache.hudi.common.util.RocksDBDAO;
import org.apache.hudi.common.util.RocksDBSchemaHelper;
import org.apache.hudi.common.util.ValidationUtils;
import org.apache.hudi.common.util.collection.Pair;

import com.google.common.base.Preconditions;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.log4j.LogManager;
@@ -110,7 +110,7 @@ public class RocksDbBasedFileSystemView extends IncrementalTimelineSyncFileSyste
protected void addPendingCompactionOperations(Stream<Pair<String, CompactionOperation>> operations) {
rocksDB.writeBatch(batch ->
operations.forEach(opInstantPair -> {
Preconditions.checkArgument(!isPendingCompactionScheduledForFileId(opInstantPair.getValue().getFileGroupId()),
ValidationUtils.checkArgument(!isPendingCompactionScheduledForFileId(opInstantPair.getValue().getFileGroupId()),
"Duplicate FileGroupId found in pending compaction operations. FgId :"
+ opInstantPair.getValue().getFileGroupId());
rocksDB.putInBatch(batch, schemaHelper.getColFamilyForPendingCompaction(),
@@ -123,7 +123,7 @@ public class RocksDbBasedFileSystemView extends IncrementalTimelineSyncFileSyste
void removePendingCompactionOperations(Stream<Pair<String, CompactionOperation>> operations) {
rocksDB.writeBatch(batch ->
operations.forEach(opInstantPair -> {
Preconditions.checkArgument(
ValidationUtils.checkArgument(
getPendingCompactionOperationWithInstant(opInstantPair.getValue().getFileGroupId()) != null,
"Trying to remove a FileGroupId which is not found in pending compaction operations. FgId :"
+ opInstantPair.getValue().getFileGroupId());

@@ -28,7 +28,6 @@ import org.apache.hudi.avro.model.HoodieSavepointMetadata;
import org.apache.hudi.avro.model.HoodieSavepointPartitionMetadata;
import org.apache.hudi.common.HoodieRollbackStat;

import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
import org.apache.avro.file.DataFileReader;
import org.apache.avro.file.DataFileWriter;
@@ -145,7 +144,7 @@ public class AvroUtils {
throws IOException {
DatumReader<T> reader = new SpecificDatumReader<>(clazz);
FileReader<T> fileReader = DataFileReader.openReader(new SeekableByteArrayInput(bytes), reader);
Preconditions.checkArgument(fileReader.hasNext(), "Could not deserialize metadata of type " + clazz);
ValidationUtils.checkArgument(fileReader.hasNext(), "Could not deserialize metadata of type " + clazz);
return fileReader.next();
}
}

@@ -28,7 +28,6 @@ import org.apache.hudi.exception.HoodieException;
import org.apache.hudi.exception.HoodieIOException;
import org.apache.hudi.exception.InvalidHoodiePathException;

import com.google.common.base.Preconditions;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
@@ -115,11 +114,11 @@ public class FSUtils {
}

public static String translateMarkerToDataPath(String basePath, String markerPath, String instantTs) {
Preconditions.checkArgument(markerPath.endsWith(HoodieTableMetaClient.MARKER_EXTN));
ValidationUtils.checkArgument(markerPath.endsWith(HoodieTableMetaClient.MARKER_EXTN));
String markerRootPath = Path.getPathWithoutSchemeAndAuthority(
new Path(String.format("%s/%s/%s", basePath, HoodieTableMetaClient.TEMPFOLDER_NAME, instantTs))).toString();
int begin = markerPath.indexOf(markerRootPath);
Preconditions.checkArgument(begin >= 0,
ValidationUtils.checkArgument(begin >= 0,
"Not in marker dir. Marker Path=" + markerPath + ", Expected Marker Root=" + markerRootPath);
String rPath = markerPath.substring(begin + markerRootPath.length() + 1);
return String.format("%s/%s%s", basePath, rPath.replace(HoodieTableMetaClient.MARKER_EXTN, ""),

@@ -18,7 +18,6 @@

package org.apache.hudi.common.util;

import com.google.common.base.Preconditions;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -47,7 +46,7 @@ public class FailSafeConsistencyGuard implements ConsistencyGuard {
public FailSafeConsistencyGuard(FileSystem fs, ConsistencyGuardConfig consistencyGuardConfig) {
this.fs = fs;
this.consistencyGuardConfig = consistencyGuardConfig;
Preconditions.checkArgument(consistencyGuardConfig.isConsistencyCheckEnabled());
ValidationUtils.checkArgument(consistencyGuardConfig.isConsistencyCheckEnabled());
}

@Override
@@ -115,8 +114,8 @@ public class FailSafeConsistencyGuard implements ConsistencyGuard {
*
* @param filePath File Path
* @param visibility Visibility
* @return
* @throws IOException
* @return true (if file visible in Path), false (otherwise)
* @throws IOException -
*/
private boolean checkFileVisibility(Path filePath, FileVisibility visibility) throws IOException {
try {

@@ -18,6 +18,13 @@
|
||||
|
||||
package org.apache.hudi.common.util;
|
||||
|
||||
import org.apache.hudi.exception.HoodieException;
|
||||
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.security.MessageDigest;
|
||||
import java.security.NoSuchAlgorithmException;
|
||||
import java.util.Objects;
|
||||
|
||||
/**
|
||||
 * A utility class for numeric operations.
|
||||
*/
|
||||
@@ -31,4 +38,27 @@ public class NumericUtils {
|
||||
String pre = "KMGTPE".charAt(exp - 1) + "";
|
||||
return String.format("%.1f %sB", bytes / Math.pow(1024, exp), pre);
|
||||
}
|
||||
|
||||
public static long getMessageDigestHash(final String algorithmName, final String string) {
|
||||
MessageDigest md;
|
||||
try {
|
||||
md = MessageDigest.getInstance(algorithmName);
|
||||
} catch (NoSuchAlgorithmException e) {
|
||||
throw new HoodieException(e);
|
||||
}
|
||||
return asLong(Objects.requireNonNull(md).digest(string.getBytes(StandardCharsets.UTF_8)));
|
||||
}
|
||||
|
||||
public static long asLong(byte[] bytes) {
|
||||
ValidationUtils.checkState(bytes.length >= 8, "HashCode#asLong() requires >= 8 bytes.");
|
||||
return padToLong(bytes);
|
||||
}
|
||||
|
||||
public static long padToLong(byte[] bytes) {
|
||||
long retVal = (bytes[0] & 0xFF);
|
||||
for (int i = 1; i < Math.min(bytes.length, 8); i++) {
|
||||
retVal |= (bytes[i] & 0xFFL) << (i * 8);
|
||||
}
|
||||
return retVal;
|
||||
}
|
||||
}
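A minimal usage sketch of the new helper above (not part of the commit; the wrapper class and the sample string are illustrative). getMessageDigestHash folds the first eight digest bytes into a long, low byte first, via padToLong, mirroring the HashCode#asLong() contract named in the checkState message, so it can stand in for the removed Guava Hashing call.

import org.apache.hudi.common.util.NumericUtils;

public class NumericUtilsUsageSketch {
  public static void main(String[] args) {
    // MD5 the UTF-8 bytes of the string, then fold the first 8 digest bytes
    // into a long in little-endian order (exactly what asLong()/padToLong() do above).
    long bucketKey = NumericUtils.getMessageDigestHash("MD5", "2020/03/01/part-0001");
    System.out.println(bucketKey);
  }
}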
|
||||
|
||||
@@ -22,7 +22,6 @@ import org.apache.hudi.common.util.collection.Pair;
|
||||
import org.apache.hudi.exception.HoodieException;
|
||||
import org.apache.hudi.exception.HoodieIOException;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
import org.apache.log4j.LogManager;
|
||||
import org.apache.log4j.Logger;
|
||||
import org.rocksdb.AbstractImmutableNativeReference;
|
||||
@@ -105,7 +104,7 @@ public class RocksDBDAO {
|
||||
FileIOUtils.mkdir(new File(rocksDBBasePath));
|
||||
rocksDB = RocksDB.open(dbOptions, rocksDBBasePath, managedColumnFamilies, managedHandles);
|
||||
|
||||
Preconditions.checkArgument(managedHandles.size() == managedColumnFamilies.size(),
|
||||
ValidationUtils.checkArgument(managedHandles.size() == managedColumnFamilies.size(),
|
||||
"Unexpected number of handles are returned");
|
||||
for (int index = 0; index < managedHandles.size(); index++) {
|
||||
ColumnFamilyHandle handle = managedHandles.get(index);
|
||||
@@ -113,7 +112,7 @@ public class RocksDBDAO {
|
||||
String familyNameFromHandle = new String(handle.getName());
|
||||
String familyNameFromDescriptor = new String(descriptor.getName());
|
||||
|
||||
Preconditions.checkArgument(familyNameFromDescriptor.equals(familyNameFromHandle),
|
||||
ValidationUtils.checkArgument(familyNameFromDescriptor.equals(familyNameFromHandle),
|
||||
"Family Handles not in order with descriptors");
|
||||
managedHandlesMap.put(familyNameFromHandle, handle);
|
||||
managedDescriptorMap.put(familyNameFromDescriptor, descriptor);
|
||||
@@ -297,7 +296,7 @@ public class RocksDBDAO {
|
||||
* @param <T> Type of object stored.
|
||||
*/
|
||||
public <T extends Serializable> T get(String columnFamilyName, String key) {
|
||||
Preconditions.checkArgument(!closed);
|
||||
ValidationUtils.checkArgument(!closed);
|
||||
try {
|
||||
byte[] val = getRocksDB().get(managedHandlesMap.get(columnFamilyName), key.getBytes());
|
||||
return val == null ? null : SerializationUtils.deserialize(val);
|
||||
@@ -314,7 +313,7 @@ public class RocksDBDAO {
|
||||
* @param <T> Type of object stored.
|
||||
*/
|
||||
public <K extends Serializable, T extends Serializable> T get(String columnFamilyName, K key) {
|
||||
Preconditions.checkArgument(!closed);
|
||||
ValidationUtils.checkArgument(!closed);
|
||||
try {
|
||||
byte[] val = getRocksDB().get(managedHandlesMap.get(columnFamilyName), SerializationUtils.serialize(key));
|
||||
return val == null ? null : SerializationUtils.deserialize(val);
|
||||
@@ -331,7 +330,7 @@ public class RocksDBDAO {
|
||||
* @param <T> Type of value stored
|
||||
*/
|
||||
public <T extends Serializable> Stream<Pair<String, T>> prefixSearch(String columnFamilyName, String prefix) {
|
||||
Preconditions.checkArgument(!closed);
|
||||
ValidationUtils.checkArgument(!closed);
|
||||
final HoodieTimer timer = new HoodieTimer();
|
||||
timer.startTimer();
|
||||
long timeTakenMicro = 0;
|
||||
@@ -360,7 +359,7 @@ public class RocksDBDAO {
|
||||
* @param <T> Type of value stored
|
||||
*/
|
||||
public <T extends Serializable> void prefixDelete(String columnFamilyName, String prefix) {
|
||||
Preconditions.checkArgument(!closed);
|
||||
ValidationUtils.checkArgument(!closed);
|
||||
LOG.info("Prefix DELETE (query=" + prefix + ") on " + columnFamilyName);
|
||||
final RocksIterator it = getRocksDB().newIterator(managedHandlesMap.get(columnFamilyName));
|
||||
it.seek(prefix.getBytes());
|
||||
@@ -396,7 +395,7 @@ public class RocksDBDAO {
|
||||
* @param columnFamilyName Column family name
|
||||
*/
|
||||
public void addColumnFamily(String columnFamilyName) {
|
||||
Preconditions.checkArgument(!closed);
|
||||
ValidationUtils.checkArgument(!closed);
|
||||
|
||||
managedDescriptorMap.computeIfAbsent(columnFamilyName, colFamilyName -> {
|
||||
try {
|
||||
@@ -416,7 +415,7 @@ public class RocksDBDAO {
|
||||
* @param columnFamilyName Column Family Name
|
||||
*/
|
||||
public void dropColumnFamily(String columnFamilyName) {
|
||||
Preconditions.checkArgument(!closed);
|
||||
ValidationUtils.checkArgument(!closed);
|
||||
|
||||
managedDescriptorMap.computeIfPresent(columnFamilyName, (colFamilyName, descriptor) -> {
|
||||
ColumnFamilyHandle handle = managedHandlesMap.get(colFamilyName);
|
||||
|
||||
@@ -0,0 +1,70 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hudi.common.util;
|
||||
|
||||
/*
|
||||
 * Simple utility to test validation conditions (replaces Guava's Preconditions)
|
||||
*/
|
||||
public class ValidationUtils {
|
||||
|
||||
/**
|
||||
* Ensures the truth of an expression.
|
||||
*/
|
||||
public static void checkArgument(final boolean expression) {
|
||||
if (!expression) {
|
||||
throw new IllegalArgumentException();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Ensures the truth of an expression, throwing the custom errorMessage otherwise.
|
||||
*/
|
||||
public static void checkArgument(final boolean expression, final String errorMessage) {
|
||||
if (!expression) {
|
||||
throw new IllegalArgumentException(errorMessage);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Ensures the truth of an expression involving the state of the calling instance, but not
|
||||
* involving any parameters to the calling method.
|
||||
*
|
||||
* @param expression a boolean expression
|
||||
* @throws IllegalStateException if {@code expression} is false
|
||||
*/
|
||||
public static void checkState(final boolean expression) {
|
||||
if (!expression) {
|
||||
throw new IllegalStateException();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Ensures the truth of an expression involving the state of the calling instance, but not
|
||||
* involving any parameters to the calling method.
|
||||
*
|
||||
* @param expression a boolean expression
|
||||
* @param errorMessage - error message
|
||||
* @throws IllegalStateException if {@code expression} is false
|
||||
*/
|
||||
public static void checkState(final boolean expression, String errorMessage) {
|
||||
if (!expression) {
|
||||
throw new IllegalStateException(errorMessage);
|
||||
}
|
||||
}
|
||||
}
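A minimal sketch of the intended drop-in usage (illustrative values, not from the commit): checkArgument throws IllegalArgumentException and checkState throws IllegalStateException, matching the semantics of the Guava Preconditions calls being swapped out throughout this change.

import org.apache.hudi.common.util.ValidationUtils;

public class ValidationUtilsUsageSketch {
  public static void main(String[] args) {
    int parallelism = 4;
    // Caller-supplied value is bad -> IllegalArgumentException, like Preconditions.checkArgument
    ValidationUtils.checkArgument(parallelism > 0, "parallelism must be positive");

    boolean initialized = true;
    // Object is in the wrong state -> IllegalStateException, like Preconditions.checkState
    ValidationUtils.checkState(initialized, "client must be initialized before use");
    System.out.println("checks passed");
  }
}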
|
||||
@@ -21,9 +21,9 @@ package org.apache.hudi.common.util.queue;
|
||||
import org.apache.hudi.common.util.DefaultSizeEstimator;
|
||||
import org.apache.hudi.common.util.Option;
|
||||
import org.apache.hudi.common.util.SizeEstimator;
|
||||
import org.apache.hudi.common.util.ValidationUtils;
|
||||
import org.apache.hudi.exception.HoodieException;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
import org.apache.log4j.LogManager;
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
@@ -262,7 +262,7 @@ public class BoundedInMemoryQueue<I, O> implements Iterable<O> {
|
||||
|
||||
@Override
|
||||
public O next() {
|
||||
Preconditions.checkState(hasNext() && this.nextRecord != null);
|
||||
ValidationUtils.checkState(hasNext() && this.nextRecord != null);
|
||||
final O ret = this.nextRecord;
|
||||
this.nextRecord = null;
|
||||
return ret;
|
||||
|
||||
@@ -19,10 +19,9 @@
|
||||
package org.apache.hudi.common.versioning;
|
||||
|
||||
import org.apache.hudi.common.table.HoodieTableMetaClient;
|
||||
import org.apache.hudi.common.util.ValidationUtils;
|
||||
import org.apache.hudi.common.util.collection.Pair;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.stream.Collectors;
|
||||
@@ -75,8 +74,8 @@ public class MetadataMigrator<T> {
|
||||
* @return Metadata conforming to the target version
|
||||
*/
|
||||
public T migrateToVersion(T metadata, int metadataVersion, int targetVersion) {
|
||||
Preconditions.checkArgument(targetVersion >= oldestVersion);
|
||||
Preconditions.checkArgument(targetVersion <= latestVersion);
|
||||
ValidationUtils.checkArgument(targetVersion >= oldestVersion);
|
||||
ValidationUtils.checkArgument(targetVersion <= latestVersion);
|
||||
if (metadataVersion == targetVersion) {
|
||||
return metadata;
|
||||
} else if (metadataVersion > targetVersion) {
|
||||
|
||||
@@ -22,10 +22,10 @@ import org.apache.hudi.avro.model.HoodieCleanMetadata;
|
||||
import org.apache.hudi.avro.model.HoodieCleanPartitionMetadata;
|
||||
import org.apache.hudi.common.table.HoodieTableMetaClient;
|
||||
import org.apache.hudi.common.util.FSUtils;
|
||||
import org.apache.hudi.common.util.ValidationUtils;
|
||||
import org.apache.hudi.common.util.collection.Pair;
|
||||
import org.apache.hudi.common.versioning.AbstractMigratorBase;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
|
||||
import java.util.Map;
|
||||
@@ -52,7 +52,7 @@ public class CleanV1MigrationHandler extends AbstractMigratorBase<HoodieCleanMet
|
||||
|
||||
@Override
|
||||
public HoodieCleanMetadata downgradeFrom(HoodieCleanMetadata input) {
|
||||
Preconditions.checkArgument(input.getVersion() == 2,
|
||||
ValidationUtils.checkArgument(input.getVersion() == 2,
|
||||
"Input version is " + input.getVersion() + ". Must be 2");
|
||||
final Path basePath = new Path(metaClient.getBasePath());
|
||||
|
||||
@@ -81,16 +81,13 @@ public class CleanV1MigrationHandler extends AbstractMigratorBase<HoodieCleanMet
|
||||
return Pair.of(partitionPath, cleanPartitionMetadata);
|
||||
}).collect(Collectors.toMap(Pair::getKey, Pair::getValue));
|
||||
|
||||
HoodieCleanMetadata metadata = HoodieCleanMetadata.newBuilder()
|
||||
return HoodieCleanMetadata.newBuilder()
|
||||
.setEarliestCommitToRetain(input.getEarliestCommitToRetain())
|
||||
.setStartCleanTime(input.getStartCleanTime())
|
||||
.setTimeTakenInMillis(input.getTimeTakenInMillis())
|
||||
.setTotalFilesDeleted(input.getTotalFilesDeleted())
|
||||
.setPartitionMetadata(partitionMetadataMap)
|
||||
.setVersion(getManagedVersion()).build();
|
||||
|
||||
return metadata;
|
||||
|
||||
}
|
||||
|
||||
private static String convertToV1Path(Path basePath, String partitionPath, String fileName) {
|
||||
|
||||
@@ -21,10 +21,10 @@ package org.apache.hudi.common.versioning.clean;
|
||||
import org.apache.hudi.avro.model.HoodieCleanMetadata;
|
||||
import org.apache.hudi.avro.model.HoodieCleanPartitionMetadata;
|
||||
import org.apache.hudi.common.table.HoodieTableMetaClient;
|
||||
import org.apache.hudi.common.util.ValidationUtils;
|
||||
import org.apache.hudi.common.util.collection.Pair;
|
||||
import org.apache.hudi.common.versioning.AbstractMigratorBase;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
|
||||
import java.util.List;
|
||||
@@ -46,7 +46,7 @@ public class CleanV2MigrationHandler extends AbstractMigratorBase<HoodieCleanMet
|
||||
|
||||
@Override
|
||||
public HoodieCleanMetadata upgradeFrom(HoodieCleanMetadata input) {
|
||||
Preconditions.checkArgument(input.getVersion() == 1,
|
||||
ValidationUtils.checkArgument(input.getVersion() == 1,
|
||||
"Input version is " + input.getVersion() + ". Must be 1");
|
||||
HoodieCleanMetadata metadata = new HoodieCleanMetadata();
|
||||
metadata.setEarliestCommitToRetain(input.getEarliestCommitToRetain());
|
||||
|
||||
@@ -22,9 +22,9 @@ import org.apache.hudi.avro.model.HoodieCompactionOperation;
|
||||
import org.apache.hudi.avro.model.HoodieCompactionPlan;
|
||||
import org.apache.hudi.common.table.HoodieTableMetaClient;
|
||||
import org.apache.hudi.common.util.FSUtils;
|
||||
import org.apache.hudi.common.util.ValidationUtils;
|
||||
import org.apache.hudi.common.versioning.AbstractMigratorBase;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
|
||||
import java.util.ArrayList;
|
||||
@@ -54,19 +54,18 @@ public class CompactionV1MigrationHandler extends AbstractMigratorBase<HoodieCom
|
||||
|
||||
@Override
|
||||
public HoodieCompactionPlan downgradeFrom(HoodieCompactionPlan input) {
|
||||
Preconditions.checkArgument(input.getVersion() == 2, "Input version is " + input.getVersion() + ". Must be 2");
|
||||
ValidationUtils.checkArgument(input.getVersion() == 2, "Input version is " + input.getVersion() + ". Must be 2");
|
||||
HoodieCompactionPlan compactionPlan = new HoodieCompactionPlan();
|
||||
final Path basePath = new Path(metaClient.getBasePath());
|
||||
List<HoodieCompactionOperation> v1CompactionOperationList = new ArrayList<>();
|
||||
if (null != input.getOperations()) {
|
||||
v1CompactionOperationList = input.getOperations().stream().map(inp -> {
|
||||
return HoodieCompactionOperation.newBuilder().setBaseInstantTime(inp.getBaseInstantTime())
|
||||
v1CompactionOperationList = input.getOperations().stream().map(inp ->
|
||||
HoodieCompactionOperation.newBuilder().setBaseInstantTime(inp.getBaseInstantTime())
|
||||
.setFileId(inp.getFileId()).setPartitionPath(inp.getPartitionPath()).setMetrics(inp.getMetrics())
|
||||
.setDataFilePath(convertToV1Path(basePath, inp.getPartitionPath(), inp.getDataFilePath()))
|
||||
.setDeltaFilePaths(inp.getDeltaFilePaths().stream()
|
||||
.map(s -> convertToV1Path(basePath, inp.getPartitionPath(), s)).collect(Collectors.toList()))
|
||||
.build();
|
||||
}).collect(Collectors.toList());
|
||||
.build()).collect(Collectors.toList());
|
||||
}
|
||||
compactionPlan.setOperations(v1CompactionOperationList);
|
||||
compactionPlan.setExtraMetadata(input.getExtraMetadata());
|
||||
|
||||
@@ -21,9 +21,9 @@ package org.apache.hudi.common.versioning.compaction;
|
||||
import org.apache.hudi.avro.model.HoodieCompactionOperation;
|
||||
import org.apache.hudi.avro.model.HoodieCompactionPlan;
|
||||
import org.apache.hudi.common.table.HoodieTableMetaClient;
|
||||
import org.apache.hudi.common.util.ValidationUtils;
|
||||
import org.apache.hudi.common.versioning.AbstractMigratorBase;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
|
||||
import java.util.ArrayList;
|
||||
@@ -48,17 +48,16 @@ public class CompactionV2MigrationHandler extends AbstractMigratorBase<HoodieCom
|
||||
|
||||
@Override
|
||||
public HoodieCompactionPlan upgradeFrom(HoodieCompactionPlan input) {
|
||||
Preconditions.checkArgument(input.getVersion() == 1, "Input version is " + input.getVersion() + ". Must be 1");
|
||||
ValidationUtils.checkArgument(input.getVersion() == 1, "Input version is " + input.getVersion() + ". Must be 1");
|
||||
HoodieCompactionPlan compactionPlan = new HoodieCompactionPlan();
|
||||
List<HoodieCompactionOperation> v2CompactionOperationList = new ArrayList<>();
|
||||
if (null != input.getOperations()) {
|
||||
v2CompactionOperationList = input.getOperations().stream().map(inp -> {
|
||||
return HoodieCompactionOperation.newBuilder().setBaseInstantTime(inp.getBaseInstantTime())
|
||||
v2CompactionOperationList = input.getOperations().stream().map(inp ->
|
||||
HoodieCompactionOperation.newBuilder().setBaseInstantTime(inp.getBaseInstantTime())
|
||||
.setFileId(inp.getFileId()).setPartitionPath(inp.getPartitionPath()).setMetrics(inp.getMetrics())
|
||||
.setDataFilePath(new Path(inp.getDataFilePath()).getName()).setDeltaFilePaths(
|
||||
inp.getDeltaFilePaths().stream().map(s -> new Path(s).getName()).collect(Collectors.toList()))
|
||||
.build();
|
||||
}).collect(Collectors.toList());
|
||||
.build()).collect(Collectors.toList());
|
||||
}
|
||||
compactionPlan.setOperations(v2CompactionOperationList);
|
||||
compactionPlan.setExtraMetadata(input.getExtraMetadata());
|
||||
|
||||
@@ -21,7 +21,6 @@ package org.apache.hudi.common.minicluster;
|
||||
import org.apache.hudi.common.model.HoodieTestUtils;
|
||||
import org.apache.hudi.common.util.FileIOUtils;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
import com.google.common.io.Files;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
@@ -33,6 +32,7 @@ import org.apache.log4j.Logger;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.util.Objects;
|
||||
import java.net.ServerSocket;
|
||||
|
||||
/**
|
||||
@@ -70,7 +70,7 @@ public class HdfsTestService {
|
||||
}
|
||||
|
||||
public MiniDFSCluster start(boolean format) throws IOException {
|
||||
Preconditions.checkState(workDir != null, "The work dir must be set before starting cluster.");
|
||||
Objects.requireNonNull(workDir, "The work dir must be set before starting cluster.");
|
||||
hadoopConf = HoodieTestUtils.getDefaultHadoopConf();
|
||||
|
||||
// If clean, then remove the work dir so we can start fresh.
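One behavioural note on the swap above (an observation, not stated in the commit): when workDir is unset, Objects.requireNonNull throws NullPointerException where Preconditions.checkState threw IllegalStateException. A tiny sketch of the new check in isolation:

import java.util.Objects;

class WorkDirCheckSketch {
  static void requireWorkDir(String workDir) {
    // Throws NullPointerException (not IllegalStateException) when workDir is null.
    Objects.requireNonNull(workDir, "The work dir must be set before starting cluster.");
  }
}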
|
||||
|
||||
@@ -18,7 +18,6 @@
|
||||
|
||||
package org.apache.hudi.common.minicluster;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
import com.google.common.io.Files;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.FileUtil;
|
||||
@@ -36,6 +35,7 @@ import java.io.OutputStream;
|
||||
import java.io.Reader;
|
||||
import java.net.InetSocketAddress;
|
||||
import java.net.Socket;
|
||||
import java.util.Objects;
|
||||
|
||||
/**
|
||||
* A Zookeeper minicluster service implementation.
|
||||
@@ -85,7 +85,7 @@ public class ZookeeperTestService {
|
||||
}
|
||||
|
||||
public ZooKeeperServer start() throws IOException, InterruptedException {
|
||||
Preconditions.checkState(workDir != null, "The localBaseFsLocation must be set before starting cluster.");
|
||||
Objects.requireNonNull(workDir, "The localBaseFsLocation must be set before starting cluster.");
|
||||
|
||||
setupTestEnv();
|
||||
stop();
|
||||
@@ -171,13 +171,10 @@ public class ZookeeperTestService {
|
||||
long start = System.currentTimeMillis();
|
||||
while (true) {
|
||||
try {
|
||||
Socket sock = new Socket("localhost", port);
|
||||
try {
|
||||
try (Socket sock = new Socket("localhost", port)) {
|
||||
OutputStream outstream = sock.getOutputStream();
|
||||
outstream.write("stat".getBytes());
|
||||
outstream.flush();
|
||||
} finally {
|
||||
sock.close();
|
||||
}
|
||||
} catch (IOException e) {
|
||||
return true;
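A minimal sketch of the pattern adopted above (host and port are illustrative): try-with-resources closes the socket automatically on exit, so the explicit finally { sock.close(); } block is no longer needed, and an IOException is still read as "server already down".

import java.io.IOException;
import java.net.Socket;
import java.nio.charset.StandardCharsets;

class ZkProbeSketch {
  static boolean serverDown(int port) {
    try (Socket sock = new Socket("localhost", port)) {   // closed automatically on exit
      sock.getOutputStream().write("stat".getBytes(StandardCharsets.UTF_8));
      sock.getOutputStream().flush();
      return false;
    } catch (IOException e) {
      // Connection failure is taken to mean the server is already stopped.
      return true;
    }
  }
}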
|
||||
|
||||
@@ -57,8 +57,8 @@ import org.junit.runners.Parameterized;
|
||||
import java.io.IOException;
|
||||
import java.io.UncheckedIOException;
|
||||
import java.net.URISyntaxException;
|
||||
import java.util.Arrays;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
|
||||
@@ -44,10 +44,10 @@ import org.apache.hudi.common.util.CleanerUtils;
|
||||
import org.apache.hudi.common.util.CompactionUtils;
|
||||
import org.apache.hudi.common.util.FSUtils;
|
||||
import org.apache.hudi.common.util.Option;
|
||||
import org.apache.hudi.common.util.ValidationUtils;
|
||||
import org.apache.hudi.common.util.collection.Pair;
|
||||
import org.apache.hudi.exception.HoodieException;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
import com.google.common.collect.ImmutableMap;
|
||||
import com.google.common.collect.Iterators;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
@@ -201,7 +201,7 @@ public class TestIncrementalFSViewSync extends HoodieCommonTestHarness {
|
||||
view1.sync();
|
||||
Map<String, List<String>> instantsToFiles;
|
||||
|
||||
/**
|
||||
/*
|
||||
* Case where incremental syncing is catching up on more than one ingestion at a time
|
||||
*/
|
||||
// Run 1 ingestion on MOR table (1 delta commits). View1 is now sync up to this point
|
||||
@@ -222,7 +222,7 @@ public class TestIncrementalFSViewSync extends HoodieCommonTestHarness {
|
||||
view3.sync();
|
||||
areViewsConsistent(view1, view2, partitions.size() * fileIdsPerPartition.size());
|
||||
|
||||
/**
|
||||
/*
|
||||
* Case where a compaction is scheduled and then unscheduled
|
||||
*/
|
||||
scheduleCompaction(view2, "15");
|
||||
@@ -233,7 +233,7 @@ public class TestIncrementalFSViewSync extends HoodieCommonTestHarness {
|
||||
getFileSystemView(new HoodieTableMetaClient(metaClient.getHadoopConf(), metaClient.getBasePath()));
|
||||
view4.sync();
|
||||
|
||||
/**
|
||||
/*
|
||||
* Case where a compaction is scheduled, 2 ingestion happens and then a compaction happens
|
||||
*/
|
||||
scheduleCompaction(view2, "16");
|
||||
@@ -247,7 +247,7 @@ public class TestIncrementalFSViewSync extends HoodieCommonTestHarness {
|
||||
getFileSystemView(new HoodieTableMetaClient(metaClient.getHadoopConf(), metaClient.getBasePath()));
|
||||
view5.sync();
|
||||
|
||||
/**
|
||||
/*
|
||||
* Case where a clean happened and then rounds of ingestion and compaction happened
|
||||
*/
|
||||
testCleans(view2, Collections.singletonList("19"),
|
||||
@@ -266,7 +266,7 @@ public class TestIncrementalFSViewSync extends HoodieCommonTestHarness {
|
||||
getFileSystemView(new HoodieTableMetaClient(metaClient.getHadoopConf(), metaClient.getBasePath()));
|
||||
view6.sync();
|
||||
|
||||
/**
|
||||
/*
|
||||
* Case where multiple restores and ingestions happened
|
||||
*/
|
||||
testRestore(view2, Collections.singletonList("25"), true, new HashMap<>(), Collections.singletonList("24"), "29", true);
|
||||
@@ -528,7 +528,7 @@ public class TestIncrementalFSViewSync extends HoodieCommonTestHarness {
|
||||
String newBaseInstant) throws IOException {
|
||||
HoodieInstant instant = new HoodieInstant(State.REQUESTED, COMPACTION_ACTION, compactionInstantTime);
|
||||
boolean deleted = metaClient.getFs().delete(new Path(metaClient.getMetaPath(), instant.getFileName()), false);
|
||||
Preconditions.checkArgument(deleted, "Unable to delete compaction instant.");
|
||||
ValidationUtils.checkArgument(deleted, "Unable to delete compaction instant.");
|
||||
|
||||
view.sync();
|
||||
Assert.assertEquals(newLastInstant, view.getLastInstant().get().getTimestamp());
|
||||
@@ -719,7 +719,7 @@ public class TestIncrementalFSViewSync extends HoodieCommonTestHarness {
|
||||
metaClient.getActiveTimeline().createNewInstant(inflightInstant);
|
||||
metaClient.getActiveTimeline().saveAsComplete(inflightInstant,
|
||||
Option.of(metadata.toJsonString().getBytes(StandardCharsets.UTF_8)));
|
||||
/**
|
||||
/*
|
||||
// Delete pending compaction if present
|
||||
metaClient.getFs().delete(new Path(metaClient.getMetaPath(),
|
||||
new HoodieInstant(State.REQUESTED, HoodieTimeline.COMPACTION_ACTION, instant).getFileName()));
|
||||
|
||||
@@ -20,7 +20,10 @@ package org.apache.hudi.common.util;
|
||||
|
||||
import org.junit.Test;
|
||||
|
||||
import java.util.Arrays;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertNotEquals;
|
||||
|
||||
/**
|
||||
* Tests numeric utils.
|
||||
@@ -37,6 +40,29 @@ public class TestNumericUtils {
|
||||
assertEquals("27.0 GB", NumericUtils.humanReadableByteCount(28991029248L));
|
||||
assertEquals("1.7 TB", NumericUtils.humanReadableByteCount(1855425871872L));
|
||||
assertEquals("8.0 EB", NumericUtils.humanReadableByteCount(9223372036854775807L));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGetMessageDigestHash() {
|
||||
assertEquals(6808551913422584641L, NumericUtils.getMessageDigestHash("MD5", "This is a string"));
|
||||
assertEquals(2549749777095932358L, NumericUtils.getMessageDigestHash("MD5", "This is a test string"));
|
||||
assertNotEquals(1L, NumericUtils.getMessageDigestHash("MD5", "This"));
|
||||
assertNotEquals(6808551913422584641L, NumericUtils.getMessageDigestHash("SHA-256", "This is a string"));
|
||||
}
|
||||
|
||||
private static byte[] byteArrayWithNum(int size, int num) {
|
||||
byte[] bytez = new byte[size];
|
||||
Arrays.fill(bytez, (byte) num);
|
||||
return bytez;
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testPadToLong() {
|
||||
assertEquals(0x0000000099999999L, NumericUtils.padToLong(byteArrayWithNum(4, 0x99)));
|
||||
assertEquals(0x0000999999999999L, NumericUtils.padToLong(byteArrayWithNum(6, 0x99)));
|
||||
assertEquals(0x9999999999999999L, NumericUtils.padToLong(byteArrayWithNum(8, 0x99)));
|
||||
assertEquals(0x1111111111111111L, NumericUtils.padToLong(byteArrayWithNum(8, 0x11)));
|
||||
assertEquals(0x0000000011111111L, NumericUtils.padToLong(byteArrayWithNum(4, 0x11)));
|
||||
assertEquals(0x0000181818181818L, NumericUtils.padToLong(byteArrayWithNum(6, 0x18)));
|
||||
}
|
||||
}
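A worked check of the first expectation above (plain arithmetic, no assumptions): padToLong folds bytes low-to-high, so four 0x99 bytes fill only the low 32 bits and the unused high bytes stay zero.

class PadToLongWorkedExample {
  public static void main(String[] args) {
    // byteArrayWithNum(4, 0x99) produces {0x99, 0x99, 0x99, 0x99}; fold them low byte first.
    long folded = 0x99L | (0x99L << 8) | (0x99L << 16) | (0x99L << 24);
    System.out.println(folded == 0x0000000099999999L);  // true, matching testPadToLong()
  }
}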
|
||||
|
||||
@@ -27,12 +27,12 @@ import org.apache.hudi.common.table.timeline.HoodieInstant;
|
||||
import org.apache.hudi.common.table.view.HoodieTableFileSystemView;
|
||||
import org.apache.hudi.common.util.FSUtils;
|
||||
import org.apache.hudi.common.util.Option;
|
||||
import org.apache.hudi.common.util.ValidationUtils;
|
||||
import org.apache.hudi.exception.HoodieException;
|
||||
import org.apache.hudi.exception.HoodieIOException;
|
||||
import org.apache.hudi.hadoop.HoodieParquetInputFormat;
|
||||
import org.apache.hudi.hadoop.UseFileSplitsFromInputFormat;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
import com.google.common.collect.Sets;
|
||||
import org.apache.hadoop.conf.Configurable;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
@@ -244,7 +244,7 @@ public class HoodieParquetRealtimeInputFormat extends HoodieParquetInputFormat i
|
||||
LOG.info("Creating record reader with readCols :" + jobConf.get(ColumnProjectionUtils.READ_COLUMN_NAMES_CONF_STR)
|
||||
+ ", Ids :" + jobConf.get(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR));
|
||||
// sanity check
|
||||
Preconditions.checkArgument(split instanceof HoodieRealtimeFileSplit,
|
||||
ValidationUtils.checkArgument(split instanceof HoodieRealtimeFileSplit,
|
||||
"HoodieRealtimeRecordReader can only work on HoodieRealtimeFileSplit and not with " + split);
|
||||
|
||||
return new HoodieRealtimeRecordReader((HoodieRealtimeFileSplit) split, jobConf,
|
||||
|
||||
@@ -55,11 +55,6 @@
|
||||
<artifactId>parquet-avro</artifactId>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>com.google.guava</groupId>
|
||||
<artifactId>guava</artifactId>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>joda-time</groupId>
|
||||
<artifactId>joda-time</artifactId>
|
||||
|
||||
@@ -28,12 +28,12 @@ import org.apache.hudi.common.table.HoodieTimeline;
|
||||
import org.apache.hudi.common.table.timeline.HoodieInstant;
|
||||
import org.apache.hudi.common.util.FSUtils;
|
||||
import org.apache.hudi.common.util.Option;
|
||||
import org.apache.hudi.common.util.ValidationUtils;
|
||||
import org.apache.hudi.common.util.collection.Pair;
|
||||
import org.apache.hudi.exception.HoodieIOException;
|
||||
import org.apache.hudi.exception.InvalidTableException;
|
||||
import org.apache.hudi.hive.util.SchemaUtil;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.hive.conf.HiveConf;
|
||||
@@ -179,7 +179,7 @@ public class HoodieHiveClient {
|
||||
*/
|
||||
private String getPartitionClause(String partition) {
|
||||
List<String> partitionValues = partitionValueExtractor.extractPartitionValuesInPath(partition);
|
||||
Preconditions.checkArgument(syncConfig.partitionFields.size() == partitionValues.size(),
|
||||
ValidationUtils.checkArgument(syncConfig.partitionFields.size() == partitionValues.size(),
|
||||
"Partition key parts " + syncConfig.partitionFields + " does not match with partition values " + partitionValues
|
||||
+ ". Check partition strategy. ");
|
||||
List<String> partBuilder = new ArrayList<>();
|
||||
|
||||
@@ -18,7 +18,7 @@
|
||||
|
||||
package org.apache.hudi.hive;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
import org.apache.hudi.common.util.ValidationUtils;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
@@ -35,7 +35,7 @@ public class MultiPartKeysValueExtractor implements PartitionValueExtractor {
|
||||
return Arrays.stream(splits).map(s -> {
|
||||
if (s.contains("=")) {
|
||||
String[] moreSplit = s.split("=");
|
||||
Preconditions.checkArgument(moreSplit.length == 2, "Partition Field (" + s + ") not in expected format");
|
||||
ValidationUtils.checkArgument(moreSplit.length == 2, "Partition Field (" + s + ") not in expected format");
|
||||
return moreSplit[1];
|
||||
}
|
||||
return s;
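An illustrative call showing what the extractor does with Hive-style key=value folders and why the new length check matters. The class and method names come from the hunks above; the partition path and the no-arg construction are assumptions for illustration.

import java.util.List;
import org.apache.hudi.hive.MultiPartKeysValueExtractor;

class PartitionExtractorSketch {
  public static void main(String[] args) {
    MultiPartKeysValueExtractor extractor = new MultiPartKeysValueExtractor();
    // Hive-style folders: strip the "key=" prefix from each level.
    List<String> values = extractor.extractPartitionValuesInPath("year=2020/month=03/day=01");
    System.out.println(values);  // [2020, 03, 01]
    // A malformed token like "month=03=07" splits into three parts, so the
    // ValidationUtils.checkArgument(moreSplit.length == 2, ...) above rejects it.
  }
}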
|
||||
|
||||
@@ -44,7 +44,7 @@ public class SchemaDifference {
|
||||
this.tableSchema = tableSchema;
|
||||
this.deleteColumns = Collections.unmodifiableList(deleteColumns);
|
||||
this.updateColumnTypes = Collections.unmodifiableMap(updateColumnTypes);
|
||||
this.addColumnTypes = Collections.unmodifiableMap(addColumnTypes);
|
||||
this.addColumnTypes = Collections.unmodifiableMap(addColumnTypes);
|
||||
}
|
||||
|
||||
public List<String> getDeleteColumns() {
|
||||
|
||||
@@ -21,8 +21,6 @@ package org.apache.hudi.hive.util;
|
||||
import org.apache.hudi.common.model.HoodieTestUtils;
|
||||
import org.apache.hudi.common.util.FileIOUtils;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
import com.google.common.collect.Maps;
|
||||
import com.google.common.io.Files;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
@@ -53,7 +51,9 @@ import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.net.InetSocketAddress;
|
||||
import java.net.SocketException;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Executors;
|
||||
|
||||
@@ -73,7 +73,7 @@ public class HiveTestService {
|
||||
private int serverPort = 9999;
|
||||
private boolean clean = true;
|
||||
|
||||
private Map<String, String> sysProps = Maps.newHashMap();
|
||||
private Map<String, String> sysProps = new HashMap<>();
|
||||
private ExecutorService executorService;
|
||||
private TServer tServer;
|
||||
private HiveServer2 hiveServer;
|
||||
@@ -87,7 +87,7 @@ public class HiveTestService {
|
||||
}
|
||||
|
||||
public HiveServer2 start() throws IOException {
|
||||
Preconditions.checkState(workDir != null, "The work dir must be set before starting cluster.");
|
||||
Objects.requireNonNull(workDir, "The work dir must be set before starting cluster.");
|
||||
|
||||
if (hadoopConf == null) {
|
||||
hadoopConf = HoodieTestUtils.getDefaultHadoopConf();
|
||||
|
||||
@@ -114,13 +114,6 @@
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>com.google.guava</groupId>
|
||||
<artifactId>guava</artifactId>
|
||||
<version>20.0</version>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>junit</groupId>
|
||||
<artifactId>junit</artifactId>
|
||||
|
||||
@@ -28,13 +28,13 @@ import org.apache.hudi.common.table.timeline.dto.InstantDTO;
|
||||
import org.apache.hudi.common.table.timeline.dto.TimelineDTO;
|
||||
import org.apache.hudi.common.table.view.FileSystemViewManager;
|
||||
import org.apache.hudi.common.table.view.RemoteHoodieTableFileSystemView;
|
||||
import org.apache.hudi.common.util.ValidationUtils;
|
||||
import org.apache.hudi.timeline.service.handlers.BaseFileHandler;
|
||||
import org.apache.hudi.timeline.service.handlers.FileSliceHandler;
|
||||
import org.apache.hudi.timeline.service.handlers.TimelineHandler;
|
||||
|
||||
import com.fasterxml.jackson.core.JsonProcessingException;
|
||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||
import com.google.common.base.Preconditions;
|
||||
import io.javalin.Context;
|
||||
import io.javalin.Handler;
|
||||
import io.javalin.Javalin;
|
||||
@@ -339,7 +339,7 @@ public class FileSystemViewHandler {
|
||||
+ " but server has the following timeline "
|
||||
+ viewManager.getFileSystemView(context.queryParam(RemoteHoodieTableFileSystemView.BASEPATH_PARAM))
|
||||
.getTimeline().getInstants().collect(Collectors.toList());
|
||||
Preconditions.checkArgument(!isLocalViewBehind(context), errMsg);
|
||||
ValidationUtils.checkArgument(!isLocalViewBehind(context), errMsg);
|
||||
long endFinalCheck = System.currentTimeMillis();
|
||||
finalCheckTimeTaken = endFinalCheck - beginFinalCheck;
|
||||
}
|
||||
|
||||
@@ -20,12 +20,12 @@ package org.apache.hudi.utilities;
|
||||
|
||||
import com.beust.jcommander.JCommander;
|
||||
import com.beust.jcommander.Parameter;
|
||||
import com.google.common.base.Preconditions;
|
||||
import io.javalin.Javalin;
|
||||
import org.apache.http.HttpResponse;
|
||||
import org.apache.http.client.methods.HttpGet;
|
||||
import org.apache.http.impl.client.CloseableHttpClient;
|
||||
import org.apache.http.impl.client.HttpClientBuilder;
|
||||
import org.apache.hudi.common.util.ValidationUtils;
|
||||
import org.apache.spark.api.java.JavaSparkContext;
|
||||
|
||||
import java.io.BufferedReader;
|
||||
@@ -87,7 +87,7 @@ public class HoodieWithTimelineServer implements Serializable {
|
||||
IntStream.range(0, cfg.numPartitions).forEach(i -> messages.add("Hello World"));
|
||||
List<String> gotMessages = jsc.parallelize(messages).map(msg -> sendRequest(driverHost, cfg.serverPort)).collect();
|
||||
System.out.println("Got Messages :" + gotMessages);
|
||||
Preconditions.checkArgument(gotMessages.equals(messages), "Got expected reply from Server");
|
||||
ValidationUtils.checkArgument(gotMessages.equals(messages), "Got expected reply from Server");
|
||||
}
|
||||
|
||||
public String sendRequest(String driverHost, int port) {
|
||||
|
||||
@@ -26,6 +26,7 @@ import org.apache.hudi.common.util.DFSPropertiesConfiguration;
|
||||
import org.apache.hudi.common.util.Option;
|
||||
import org.apache.hudi.common.util.ReflectionUtils;
|
||||
import org.apache.hudi.common.util.TypedProperties;
|
||||
import org.apache.hudi.common.util.ValidationUtils;
|
||||
import org.apache.hudi.config.HoodieCompactionConfig;
|
||||
import org.apache.hudi.config.HoodieIndexConfig;
|
||||
import org.apache.hudi.config.HoodieWriteConfig;
|
||||
@@ -36,7 +37,6 @@ import org.apache.hudi.utilities.schema.SchemaProvider;
|
||||
import org.apache.hudi.utilities.sources.Source;
|
||||
import org.apache.hudi.utilities.transform.Transformer;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
import org.apache.hadoop.fs.FSDataInputStream;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
@@ -137,7 +137,7 @@ public class UtilHelpers {
|
||||
TypedProperties properties = new TypedProperties();
|
||||
props.forEach(x -> {
|
||||
String[] kv = x.split("=");
|
||||
Preconditions.checkArgument(kv.length == 2);
|
||||
ValidationUtils.checkArgument(kv.length == 2);
|
||||
properties.setProperty(kv[0], kv[1]);
|
||||
});
|
||||
return properties;
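Illustrative of what the loop above expects (the property name is only an example): each element must be a single key=value pair, and anything else now trips ValidationUtils.checkArgument(kv.length == 2).

class PropsSplitSketch {
  public static void main(String[] args) {
    String good = "hoodie.upsert.shuffle.parallelism=2";
    String bad  = "hoodie.upsert.shuffle.parallelism";
    System.out.println(good.split("=").length);  // 2 -> accepted by the checkArgument above
    System.out.println(bad.split("=").length);   // 1 -> rejected with IllegalArgumentException
  }
}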
|
||||
|
||||
@@ -20,6 +20,7 @@ package org.apache.hudi.utilities.deltastreamer;
|
||||
|
||||
import org.apache.hudi.AvroConversionUtils;
|
||||
import org.apache.hudi.DataSourceUtils;
|
||||
import org.apache.hudi.common.util.ValidationUtils;
|
||||
import org.apache.hudi.client.HoodieWriteClient;
|
||||
import org.apache.hudi.keygen.KeyGenerator;
|
||||
import org.apache.hudi.client.WriteStatus;
|
||||
@@ -49,7 +50,6 @@ import org.apache.hudi.utilities.sources.InputBatch;
|
||||
import org.apache.hudi.utilities.transform.Transformer;
|
||||
|
||||
import com.codahale.metrics.Timer;
|
||||
import com.google.common.base.Preconditions;
|
||||
import org.apache.avro.Schema;
|
||||
import org.apache.avro.generic.GenericRecord;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
@@ -502,10 +502,10 @@ public class DeltaSync implements Serializable {
|
||||
HoodieWriteConfig config = builder.build();
|
||||
|
||||
// Validate what deltastreamer assumes of write-config to be really safe
|
||||
Preconditions.checkArgument(config.isInlineCompaction() == cfg.isInlineCompactionEnabled());
|
||||
Preconditions.checkArgument(!config.shouldAutoCommit());
|
||||
Preconditions.checkArgument(config.shouldCombineBeforeInsert() == cfg.filterDupes);
|
||||
Preconditions.checkArgument(config.shouldCombineBeforeUpsert());
|
||||
ValidationUtils.checkArgument(config.isInlineCompaction() == cfg.isInlineCompactionEnabled());
|
||||
ValidationUtils.checkArgument(!config.shouldAutoCommit());
|
||||
ValidationUtils.checkArgument(config.shouldCombineBeforeInsert() == cfg.filterDupes);
|
||||
ValidationUtils.checkArgument(config.shouldCombineBeforeUpsert());
|
||||
|
||||
return config;
|
||||
}
|
||||
|
||||
@@ -29,6 +29,7 @@ import org.apache.hudi.common.util.CompactionUtils;
|
||||
import org.apache.hudi.common.util.FSUtils;
|
||||
import org.apache.hudi.common.util.Option;
|
||||
import org.apache.hudi.common.util.TypedProperties;
|
||||
import org.apache.hudi.common.util.ValidationUtils;
|
||||
import org.apache.hudi.common.util.collection.Pair;
|
||||
import org.apache.hudi.exception.HoodieException;
|
||||
import org.apache.hudi.exception.HoodieIOException;
|
||||
@@ -41,7 +42,6 @@ import com.beust.jcommander.IStringConverter;
|
||||
import com.beust.jcommander.JCommander;
|
||||
import com.beust.jcommander.Parameter;
|
||||
import com.beust.jcommander.ParameterException;
|
||||
import com.google.common.base.Preconditions;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
@@ -353,7 +353,7 @@ public class HoodieDeltaStreamer implements Serializable {
|
||||
new HoodieTableMetaClient(new Configuration(fs.getConf()), cfg.targetBasePath, false);
|
||||
tableType = meta.getTableType();
|
||||
// This will guarantee there is no surprise with table type
|
||||
Preconditions.checkArgument(tableType.equals(HoodieTableType.valueOf(cfg.tableType)),
|
||||
ValidationUtils.checkArgument(tableType.equals(HoodieTableType.valueOf(cfg.tableType)),
|
||||
"Hoodie table is of type " + tableType + " but passed in CLI argument is " + cfg.tableType);
|
||||
} else {
|
||||
tableType = HoodieTableType.valueOf(cfg.tableType);
|
||||
|
||||
@@ -22,12 +22,14 @@ import org.apache.hudi.common.table.HoodieTableMetaClient;
|
||||
import org.apache.hudi.common.table.HoodieTimeline;
|
||||
import org.apache.hudi.common.table.timeline.HoodieInstant;
|
||||
import org.apache.hudi.common.util.Option;
|
||||
import org.apache.hudi.common.util.ValidationUtils;
|
||||
import org.apache.hudi.common.util.collection.Pair;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
import org.apache.spark.api.java.JavaSparkContext;
|
||||
import org.apache.spark.sql.Row;
|
||||
|
||||
import java.util.Objects;
|
||||
|
||||
public class IncrSourceHelper {
|
||||
|
||||
/**
|
||||
@@ -37,7 +39,7 @@ public class IncrSourceHelper {
|
||||
*/
|
||||
private static String getStrictlyLowerTimestamp(String timestamp) {
|
||||
long ts = Long.parseLong(timestamp);
|
||||
Preconditions.checkArgument(ts > 0, "Timestamp must be positive");
|
||||
ValidationUtils.checkArgument(ts > 0, "Timestamp must be positive");
|
||||
long lower = ts - 1;
|
||||
return "" + lower;
|
||||
}
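A quick illustration of the helper above (instant value made up): it parses the commit instant as a number and subtracts one, producing a strictly lower, exclusive bound for the incremental pull.

class LowerTimestampSketch {
  public static void main(String[] args) {
    long ts = Long.parseLong("20200301120000");
    System.out.println(ts - 1);  // 20200301119999, the strictly-lower exclusive bound
  }
}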
|
||||
@@ -54,7 +56,7 @@ public class IncrSourceHelper {
|
||||
*/
|
||||
public static Pair<String, String> calculateBeginAndEndInstants(JavaSparkContext jssc, String srcBasePath,
|
||||
int numInstantsPerFetch, Option<String> beginInstant, boolean readLatestOnMissingBeginInstant) {
|
||||
Preconditions.checkArgument(numInstantsPerFetch > 0,
|
||||
ValidationUtils.checkArgument(numInstantsPerFetch > 0,
|
||||
"Make sure the config hoodie.deltastreamer.source.hoodieincr.num_instants is set to a positive value");
|
||||
HoodieTableMetaClient srcMetaClient = new HoodieTableMetaClient(jssc.hadoopConfiguration(), srcBasePath, true);
|
||||
|
||||
@@ -85,11 +87,11 @@ public class IncrSourceHelper {
|
||||
* @param endInstant end instant of the batch
|
||||
*/
|
||||
public static void validateInstantTime(Row row, String instantTime, String sinceInstant, String endInstant) {
|
||||
Preconditions.checkNotNull(instantTime);
|
||||
Preconditions.checkArgument(HoodieTimeline.compareTimestamps(instantTime, sinceInstant, HoodieTimeline.GREATER),
|
||||
Objects.requireNonNull(instantTime);
|
||||
ValidationUtils.checkArgument(HoodieTimeline.compareTimestamps(instantTime, sinceInstant, HoodieTimeline.GREATER),
|
||||
"Instant time(_hoodie_commit_time) in row (" + row + ") was : " + instantTime + "but expected to be between "
|
||||
+ sinceInstant + "(excl) - " + endInstant + "(incl)");
|
||||
Preconditions.checkArgument(
|
||||
ValidationUtils.checkArgument(
|
||||
HoodieTimeline.compareTimestamps(instantTime, endInstant, HoodieTimeline.LESSER_OR_EQUAL),
|
||||
"Instant time(_hoodie_commit_time) in row (" + row + ") was : " + instantTime + "but expected to be between "
|
||||
+ sinceInstant + "(excl) - " + endInstant + "(incl)");
|
||||
|
||||