Clean up calls to HoodieTimeline.compareTimestamps
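
This change replaces instance-qualified comparisons such as table.getActiveTimeline().compareTimestamps(...) and commitTimeline.compareTimestamps(...) with direct static calls to HoodieTimeline.compareTimestamps(...), since the comparison itself needs no timeline state. Below is a minimal, self-contained sketch of the call shape the diff standardizes on; the demo class, its operator constants, and the lexicographic comparison body are illustrative stand-ins, not Hoodie's actual implementation.

// Illustrative sketch only: demo constants and a lexicographic comparison
// standing in for HoodieTimeline's operator constants and semantics.
public final class TimelineCompareDemo {

  static final String GREATER = ">";
  static final String GREATER_OR_EQUAL = ">=";
  static final String LESSER = "<";

  // Mirrors the three-argument shape used at the call sites in this diff:
  // compareTimestamps(commit1, commit2, operator).
  static boolean compareTimestamps(String commit1, String commit2, String operator) {
    int cmp = commit1.compareTo(commit2);
    switch (operator) {
      case GREATER:
        return cmp > 0;
      case GREATER_OR_EQUAL:
        return cmp >= 0;
      case LESSER:
        return cmp < 0;
      default:
        throw new IllegalArgumentException("Unknown operator: " + operator);
    }
  }

  public static void main(String[] args) {
    // A static call needs no timeline instance, which is what lets the call
    // sites below drop the table.getActiveTimeline() / commitTimeline lookup.
    System.out.println(compareTimestamps("002", "001", GREATER));          // true
    System.out.println(compareTimestamps("001", "001", GREATER_OR_EQUAL)); // true
    System.out.println(compareTimestamps("000", "001", LESSER));           // true
  }
}

Because commit timestamps are compared as strings in every call site shown here, plain String.compareTo suffices for the demo; the real method's semantics may differ.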
@@ -470,7 +470,7 @@ public class HoodieWriteClient<T extends HoodieRecordPayload> implements Seriali
     }

     // Cannot allow savepoint time on a commit that could have been cleaned
-    Preconditions.checkArgument(table.getActiveTimeline()
+    Preconditions.checkArgument(HoodieTimeline
         .compareTimestamps(commitTime, lastCommitRetained, HoodieTimeline.GREATER_OR_EQUAL),
         "Could not savepoint commit " + commitTime + " as this is beyond the lookup window "
             + lastCommitRetained);
@@ -187,8 +187,9 @@ public class HoodieCleaner<T extends HoodieRecordPayload<T>> {
     }

     // Always keep the last commit
-    if (commitTimeline
-        .compareTimestamps(earliestCommitToRetain.getTimestamp(), fileCommitTime,
+    if (HoodieTimeline.compareTimestamps(
+        earliestCommitToRetain.getTimestamp(),
+        fileCommitTime,
         HoodieTimeline.GREATER)) {
       // this is a commit, that should be cleaned.
       deletePaths.add(String
@@ -217,7 +218,7 @@ public class HoodieCleaner<T extends HoodieRecordPayload<T>> {
       HoodieInstant commitTime) {
     for (HoodieDataFile file : fileList) {
       String fileCommitTime = FSUtils.getCommitTime(file.getFileName());
-      if (commitTimeline.compareTimestamps(commitTime.getTimestamp(), fileCommitTime,
+      if (HoodieTimeline.compareTimestamps(commitTime.getTimestamp(), fileCommitTime,
           HoodieTimeline.GREATER)) {
         // fileList is sorted on the reverse, so the first commit we find <= commitTime is the one we want
         return fileCommitTime;
@@ -35,9 +35,7 @@ import org.apache.log4j.Logger;
 import org.apache.spark.SparkConf;
 import org.apache.spark.api.java.JavaRDD;
 import org.apache.spark.api.java.JavaSparkContext;

 import java.util.List;
 import java.util.Properties;

 /**
  * Driver program that uses the Hoodie client with synthetic workload, and performs basic
@@ -175,7 +175,7 @@ public class TestMergeOnReadTable {
     HoodieReadClient readClient = new HoodieReadClient(jsc, basePath, sqlContext);
     assertEquals("Expecting a single commit.", 1, readClient.listCommitsSince("000").size());
     String latestCompactionCommitTime = readClient.latestCommit();
-    assertTrue(metaClient.getActiveTimeline()
+    assertTrue(HoodieTimeline
         .compareTimestamps("000", latestCompactionCommitTime, HoodieTimeline.LESSER));
     assertEquals("Must contain 200 records", 200, readClient.readSince("000").count());
   }
@@ -183,8 +183,8 @@ public class TestHoodieCompactor {
     table = HoodieTable.getHoodieTable(metaClient, config);
     HoodieActiveTimeline timeline = metaClient.getActiveTimeline();

-    assertTrue("Compaction commit should be > than last insert", timeline
-        .compareTimestamps(timeline.lastInstant().get().getTimestamp(), newCommitTime,
+    assertTrue("Compaction commit should be > than last insert",
+        HoodieTimeline.compareTimestamps(timeline.lastInstant().get().getTimestamp(), newCommitTime,
         HoodieTimeline.GREATER));

     for (String partitionPath : dataGen.getPartitionPaths()) {
@@ -186,9 +186,8 @@ public class TestCopyOnWriteTable {
     for (File file : new File(basePath + "/2016/01/31").listFiles()) {
       if (file.getName().endsWith(".parquet")) {
         if (FSUtils.getFileId(file.getName())
-            .equals(FSUtils.getFileId(parquetFile.getName())) && metadata
-            .getActiveTimeline().getCommitTimeline()
-            .compareTimestamps(FSUtils.getCommitTime(file.getName()),
+            .equals(FSUtils.getFileId(parquetFile.getName())) &&
+            HoodieTimeline.compareTimestamps(FSUtils.getCommitTime(file.getName()),
             FSUtils.getCommitTime(parquetFile.getName()), HoodieTimeline.GREATER)) {
           updatedParquetFile = file;
           break;