[MINOR] Fix wrong javadoc and refactor some naming issues (#2156)
This commit is contained in:
@@ -268,7 +268,7 @@ public abstract class AbstractHoodieWriteClient<T extends HoodieRecordPayload, I
|
||||
*
|
||||
* @param preppedRecords Prepared HoodieRecords to upsert
|
||||
* @param instantTime Instant time of the commit
|
||||
* @return JavaRDD[WriteStatus] - RDD of WriteStatus to inspect errors and counts
|
||||
* @return Collection of WriteStatus to inspect errors and counts
|
||||
*/
|
||||
public abstract O upsertPreppedRecords(I preppedRecords, final String instantTime);
|
||||
|
||||
@@ -280,7 +280,7 @@ public abstract class AbstractHoodieWriteClient<T extends HoodieRecordPayload, I
|
||||
*
|
||||
* @param records HoodieRecords to insert
|
||||
* @param instantTime Instant time of the commit
|
||||
* @return JavaRDD[WriteStatus] - RDD of WriteStatus to inspect errors and counts
|
||||
* @return Collection of WriteStatus to inspect errors and counts
|
||||
*/
|
||||
public abstract O insert(I records, final String instantTime);
|
||||
|
||||
@@ -293,7 +293,7 @@ public abstract class AbstractHoodieWriteClient<T extends HoodieRecordPayload, I
|
||||
*
|
||||
* @param preppedRecords HoodieRecords to insert
|
||||
* @param instantTime Instant time of the commit
|
||||
* @return JavaRDD[WriteStatus] - RDD of WriteStatus to inspect errors and counts
|
||||
* @return Collection of WriteStatus to inspect errors and counts
|
||||
*/
|
||||
public abstract O insertPreppedRecords(I preppedRecords, final String instantTime);
|
||||
|
||||
@@ -306,7 +306,7 @@ public abstract class AbstractHoodieWriteClient<T extends HoodieRecordPayload, I
|
||||
*
|
||||
* @param records HoodieRecords to insert
|
||||
* @param instantTime Instant time of the commit
|
||||
* @return JavaRDD[WriteStatus] - RDD of WriteStatus to inspect errors and counts
|
||||
* @return Collection of WriteStatus to inspect errors and counts
|
||||
*/
|
||||
public abstract O bulkInsert(I records, final String instantTime);
|
||||
|
||||
@@ -323,7 +323,7 @@ public abstract class AbstractHoodieWriteClient<T extends HoodieRecordPayload, I
|
||||
* @param instantTime Instant time of the commit
|
||||
* @param userDefinedBulkInsertPartitioner If specified then it will be used to partition input records before they are inserted
|
||||
* into hoodie.
|
||||
* @return JavaRDD[WriteStatus] - RDD of WriteStatus to inspect errors and counts
|
||||
* @return Collection of WriteStatus to inspect errors and counts
|
||||
*/
|
||||
public abstract O bulkInsert(I records, final String instantTime,
|
||||
Option<BulkInsertPartitioner<I>> userDefinedBulkInsertPartitioner);
|
||||
@@ -343,7 +343,7 @@ public abstract class AbstractHoodieWriteClient<T extends HoodieRecordPayload, I
|
||||
* @param instantTime Instant time of the commit
|
||||
* @param bulkInsertPartitioner If specified then it will be used to partition input records before they are inserted
|
||||
* into hoodie.
|
||||
* @return JavaRDD[WriteStatus] - RDD of WriteStatus to inspect errors and counts
|
||||
* @return Collection of WriteStatus to inspect errors and counts
|
||||
*/
|
||||
public abstract O bulkInsertPreppedRecords(I preppedRecords, final String instantTime,
|
||||
Option<BulkInsertPartitioner<I>> bulkInsertPartitioner);
|
||||
@@ -354,7 +354,7 @@ public abstract class AbstractHoodieWriteClient<T extends HoodieRecordPayload, I
|
||||
*
|
||||
* @param keys {@link List} of {@link HoodieKey}s to be deleted
|
||||
* @param instantTime Commit time handle
|
||||
* @return JavaRDD[WriteStatus] - RDD of WriteStatus to inspect errors and counts
|
||||
* @return Collection of WriteStatus to inspect errors and counts
|
||||
*/
|
||||
public abstract O delete(K keys, final String instantTime);
|
||||
|
||||
@@ -653,7 +653,7 @@ public abstract class AbstractHoodieWriteClient<T extends HoodieRecordPayload, I
|
||||
* Performs Compaction for the workload stored in instant-time.
|
||||
*
|
||||
* @param compactionInstantTime Compaction Instant Time
|
||||
* @return RDD of WriteStatus to inspect errors and counts
|
||||
* @return Collection of WriteStatus to inspect errors and counts
|
||||
*/
|
||||
public O compact(String compactionInstantTime) {
|
||||
return compact(compactionInstantTime, config.shouldAutoCommit());
|
||||
@@ -663,7 +663,7 @@ public abstract class AbstractHoodieWriteClient<T extends HoodieRecordPayload, I
|
||||
* Commit a compaction operation. Allow passing additional meta-data to be stored in commit instant file.
|
||||
*
|
||||
* @param compactionInstantTime Compaction Instant Time
|
||||
* @param writeStatuses RDD of WriteStatus to inspect errors and counts
|
||||
* @param writeStatuses Collection of WriteStatus to inspect errors and counts
|
||||
* @param extraMetadata Extra Metadata to be stored
|
||||
*/
|
||||
public abstract void commitCompaction(String compactionInstantTime, O writeStatuses,
|
||||
@@ -710,7 +710,7 @@ public abstract class AbstractHoodieWriteClient<T extends HoodieRecordPayload, I
|
||||
* Ensures compaction instant is in expected state and performs Compaction for the workload stored in instant-time.
|
||||
*
|
||||
* @param compactionInstantTime Compaction Instant Time
|
||||
* @return RDD of Write Status
|
||||
* @return Collection of Write Status
|
||||
*/
|
||||
protected abstract O compact(String compactionInstantTime, boolean shouldComplete);
|
||||
|
||||
|
||||
@@ -45,7 +45,7 @@ public abstract class FullRecordBootstrapDataProvider<I> implements Serializable
|
||||
}
|
||||
|
||||
/**
|
||||
* Generates a list of input partition and files and returns a RDD representing source.
|
||||
 * Generates a list of input partitions and files and returns a collection representing the source.
|
||||
* @param tableName Hudi Table Name
|
||||
* @param sourceBasePath Source Base Path
|
||||
* @param partitionPaths Partition Paths
|
||||
|
||||
@@ -62,7 +62,7 @@ public abstract class HoodieIndex<T extends HoodieRecordPayload, I, K, O> implem
|
||||
* TODO(vc): We may need to propagate the record as well in a WriteStatus class
|
||||
*/
|
||||
@PublicAPIMethod(maturity = ApiMaturityLevel.STABLE)
|
||||
public abstract O updateLocation(O writeStatusRDD, HoodieEngineContext context,
|
||||
public abstract O updateLocation(O writeStatuses, HoodieEngineContext context,
|
||||
HoodieTable<T, I, K, O> hoodieTable) throws HoodieIndexException;
|
||||
|
||||
/**
|
||||
|
||||
@@ -73,7 +73,7 @@ public class HoodieIndexUtils {
|
||||
public static HoodieRecord getTaggedRecord(HoodieRecord inputRecord, Option<HoodieRecordLocation> location) {
|
||||
HoodieRecord record = inputRecord;
|
||||
if (location.isPresent()) {
|
||||
// When you have a record in multiple files in the same partition, then rowKeyRecordPairRDD
|
||||
// When you have a record in multiple files in the same partition, then <row key, record> collection
|
||||
// will have 2 entries with the same exact in memory copy of the HoodieRecord and the 2
|
||||
// separate filenames that the record is found in. This will result in setting
|
||||
// currentLocation 2 times and it will fail the second time. So creating a new in memory
|
||||
|
||||
@@ -35,7 +35,7 @@ public interface BulkInsertPartitioner<I> {
|
||||
I repartitionRecords(I records, int outputSparkPartitions);
|
||||
|
||||
/**
|
||||
* @return {@code true} if the records within a RDD partition are sorted; {@code false} otherwise.
|
||||
* @return {@code true} if the records within a partition are sorted; {@code false} otherwise.
|
||||
*/
|
||||
boolean arePartitionRecordsSorted();
|
||||
}
|
||||
|
||||
@@ -32,7 +32,7 @@ import java.time.Instant;
|
||||
public abstract class AbstractWriteHelper<T extends HoodieRecordPayload, I, K, O, R> {
|
||||
|
||||
public HoodieWriteMetadata<O> write(String instantTime,
|
||||
I inputRecordsRDD,
|
||||
I inputRecords,
|
||||
HoodieEngineContext context,
|
||||
HoodieTable<T, I, K, O> table,
|
||||
boolean shouldCombine,
|
||||
@@ -42,7 +42,7 @@ public abstract class AbstractWriteHelper<T extends HoodieRecordPayload, I, K, O
|
||||
try {
|
||||
// De-dupe/merge if needed
|
||||
I dedupedRecords =
|
||||
combineOnCondition(shouldCombine, inputRecordsRDD, shuffleParallelism, table);
|
||||
combineOnCondition(shouldCombine, inputRecords, shuffleParallelism, table);
|
||||
|
||||
Instant lookupBegin = Instant.now();
|
||||
I taggedRecords = dedupedRecords;
|
||||
@@ -79,7 +79,7 @@ public abstract class AbstractWriteHelper<T extends HoodieRecordPayload, I, K, O
|
||||
*
|
||||
* @param records hoodieRecords to deduplicate
|
||||
* @param parallelism parallelism or partitions to be used while reducing/deduplicating
|
||||
* @return RDD of HoodieRecord already be deduplicated
|
||||
 * @return Collection of HoodieRecord that have already been deduplicated
|
||||
*/
|
||||
public I deduplicateRecords(
|
||||
I records, HoodieTable<T, I, K, O> table, int parallelism) {
|
||||
|
||||
Reference in New Issue
Block a user