1
0

[HUDI-379] Refactor the codes based on new JavadocStyle code style rule (#1079)

This commit is contained in:
lamber-ken
2019-12-06 12:59:28 +08:00
committed by leesf
parent c06d89b648
commit 2745b7552f
137 changed files with 434 additions and 433 deletions

View File

@@ -61,7 +61,7 @@ import java.util.stream.Collectors;
import static org.apache.hudi.common.table.HoodieTimeline.COMPACTION_ACTION;
/**
* Client to perform admin operations related to compaction
* Client to perform admin operations related to compaction.
*/
public class CompactionAdminClient extends AbstractHoodieClient {
@@ -214,7 +214,7 @@ public class CompactionAdminClient extends AbstractHoodieClient {
}
/**
* Construction Compaction Plan from compaction instant
* Construction Compaction Plan from compaction instant.
*/
private static HoodieCompactionPlan getCompactionPlan(HoodieTableMetaClient metaClient, String compactionInstant)
throws IOException {
@@ -273,7 +273,7 @@ public class CompactionAdminClient extends AbstractHoodieClient {
}
/**
* Check if a compaction operation is valid
* Check if a compaction operation is valid.
*
* @param metaClient Hoodie Table Meta client
* @param compactionInstant Compaction Instant
@@ -342,7 +342,7 @@ public class CompactionAdminClient extends AbstractHoodieClient {
}
/**
* Execute Renaming operation
* Execute Renaming operation.
*
* @param metaClient HoodieTable MetaClient
* @param renameActions List of rename operations
@@ -484,7 +484,7 @@ public class CompactionAdminClient extends AbstractHoodieClient {
}
/**
* Holds Operation result for Renaming
* Holds Operation result for Renaming.
*/
public static class RenameOpResult extends OperationResult<RenameInfo> {
@@ -505,7 +505,7 @@ public class CompactionAdminClient extends AbstractHoodieClient {
}
/**
* Holds Operation result for Renaming
* Holds Operation result for Renaming.
*/
public static class ValidationOpResult extends OperationResult<CompactionOperation> {

View File

@@ -103,7 +103,7 @@ public class HoodieCleanClient<T extends HoodieRecordPayload> extends AbstractHo
}
/**
* Creates a Cleaner plan if there are files to be cleaned and stores them in instant file
* Creates a Cleaner plan if there are files to be cleaned and stores them in instant file.
*
* @param startCleanTime Cleaner Instant Time
* @return Cleaner Plan if generated
@@ -133,7 +133,7 @@ public class HoodieCleanClient<T extends HoodieRecordPayload> extends AbstractHo
}
/**
* Executes the Cleaner plan stored in the instant metadata
* Executes the Cleaner plan stored in the instant metadata.
*
* @param table Hoodie Table
* @param cleanInstantTs Cleaner Instant Timestamp

View File

@@ -145,7 +145,7 @@ public class HoodieReadClient<T extends HoodieRecordPayload> extends AbstractHoo
}
/**
* Given a bunch of hoodie keys, fetches all the individual records out as a data frame
* Given a bunch of hoodie keys, fetches all the individual records out as a data frame.
*
* @return a dataframe
*/

View File

@@ -159,7 +159,7 @@ public class HoodieWriteClient<T extends HoodieRecordPayload> extends AbstractHo
}
/**
* Upserts a bunch of new records into the Hoodie table, at the supplied commitTime
* Upserts a bunch of new records into the Hoodie table, at the supplied commitTime.
*/
public JavaRDD<WriteStatus> upsert(JavaRDD<HoodieRecord<T>> records, final String commitTime) {
HoodieTable<T> table = getTableAndInitCtx(OperationType.UPSERT);
@@ -505,14 +505,14 @@ public class HoodieWriteClient<T extends HoodieRecordPayload> extends AbstractHo
}
/**
* Commit changes performed at the given commitTime marker
* Commit changes performed at the given commitTime marker.
*/
public boolean commit(String commitTime, JavaRDD<WriteStatus> writeStatuses) {
return commit(commitTime, writeStatuses, Option.empty());
}
/**
* Commit changes performed at the given commitTime marker
* Commit changes performed at the given commitTime marker.
*/
public boolean commit(String commitTime, JavaRDD<WriteStatus> writeStatuses,
Option<Map<String, String>> extraMetadata) {
@@ -988,7 +988,7 @@ public class HoodieWriteClient<T extends HoodieRecordPayload> extends AbstractHo
}
/**
* Provides a new commit time for a write operation (insert/update)
* Provides a new commit time for a write operation (insert/update).
*/
public String startCommit() {
// NOTE : Need to ensure that rollback is done before a new commit is started
@@ -1027,7 +1027,7 @@ public class HoodieWriteClient<T extends HoodieRecordPayload> extends AbstractHo
}
/**
* Schedules a new compaction instant
* Schedules a new compaction instant.
*/
public Option<String> scheduleCompaction(Option<Map<String, String>> extraMetadata) throws IOException {
String instantTime = HoodieActiveTimeline.createNewCommitTime();
@@ -1037,7 +1037,7 @@ public class HoodieWriteClient<T extends HoodieRecordPayload> extends AbstractHo
}
/**
* Schedules a new compaction instant with passed-in instant time
* Schedules a new compaction instant with passed-in instant time.
*
* @param instantTime Compaction Instant Time
* @param extraMetadata Extra Metadata to be stored
@@ -1074,7 +1074,7 @@ public class HoodieWriteClient<T extends HoodieRecordPayload> extends AbstractHo
}
/**
* Performs Compaction for the workload stored in instant-time
* Performs Compaction for the workload stored in instant-time.
*
* @param compactionInstantTime Compaction Instant Time
*/
@@ -1141,7 +1141,7 @@ public class HoodieWriteClient<T extends HoodieRecordPayload> extends AbstractHo
}
/**
* Cleanup all inflight commits
* Cleanup all inflight commits.
*/
private void rollbackInflightCommits() {
HoodieTable<T> table = HoodieTable.getHoodieTable(createMetaClient(true), config, jsc);
@@ -1197,7 +1197,7 @@ public class HoodieWriteClient<T extends HoodieRecordPayload> extends AbstractHo
*/
/**
* Ensures compaction instant is in expected state and performs Compaction for the workload stored in instant-time
* Ensures compaction instant is in expected state and performs Compaction for the workload stored in instant-time.
*
* @param compactionInstantTime Compaction Instant Time
*/
@@ -1226,7 +1226,7 @@ public class HoodieWriteClient<T extends HoodieRecordPayload> extends AbstractHo
}
/**
* Perform compaction operations as specified in the compaction commit file
* Perform compaction operations as specified in the compaction commit file.
*
* @param compactionInstant Compacton Instant time
* @param activeTimeline Active Timeline
@@ -1254,7 +1254,7 @@ public class HoodieWriteClient<T extends HoodieRecordPayload> extends AbstractHo
}
/**
* Commit Compaction and track metrics
* Commit Compaction and track metrics.
*
* @param compactedStatuses Compaction Write status
* @param table Hoodie Table
@@ -1404,7 +1404,7 @@ public class HoodieWriteClient<T extends HoodieRecordPayload> extends AbstractHo
}
/**
* Refers to different operation types
* Refers to different operation types.
*/
enum OperationType {
INSERT,

View File

@@ -33,7 +33,7 @@ import org.apache.spark.SparkConf;
import java.io.IOException;
/**
* Timeline Service that runs as part of write client
* Timeline Service that runs as part of write client.
*/
public class EmbeddedTimelineService {
@@ -86,7 +86,7 @@ public class EmbeddedTimelineService {
}
/**
* Retrieves proper view storage configs for remote clients to access this service
* Retrieves proper view storage configs for remote clients to access this service.
*/
public FileSystemViewStorageConfig getRemoteFileSystemViewConfig() {
return FileSystemViewStorageConfig.newBuilder().withStorageType(FileSystemViewStorageType.REMOTE_FIRST)

View File

@@ -26,7 +26,7 @@ import org.apache.spark.api.java.JavaSparkContext;
public class ClientUtils {
/**
* Create Consistency Aware MetaClient
* Create Consistency Aware MetaClient.
*
* @param jsc JavaSparkContext
* @param config HoodieWriteConfig

View File

@@ -33,7 +33,7 @@ import java.io.IOException;
import java.util.Properties;
/**
* Compaction related config
* Compaction related config.
*/
@Immutable
public class HoodieCompactionConfig extends DefaultHoodieConfig {
@@ -55,8 +55,8 @@ public class HoodieCompactionConfig extends DefaultHoodieConfig {
// By default, treat any file <= 100MB as a small file.
public static final String DEFAULT_PARQUET_SMALL_FILE_LIMIT_BYTES = String.valueOf(104857600);
/**
* Configs related to specific table types
**/
* Configs related to specific table types.
*/
// Number of inserts, that will be put each partition/bucket for writing
public static final String COPY_ON_WRITE_TABLE_INSERT_SPLIT_SIZE = "hoodie.copyonwrite.insert" + ".split.size";
// The rationale to pick the insert parallelism is the following. Writing out 100MB files,

View File

@@ -34,17 +34,17 @@ public class HoodieHBaseIndexConfig extends DefaultHoodieConfig {
public static final String HBASE_ZK_ZNODEPARENT = "hoodie.index.hbase.zknode.path";
/**
* Note that if HBASE_PUT_BATCH_SIZE_AUTO_COMPUTE_PROP is set to true, this batch size will not be honored for HBase
* Puts
* Puts.
*/
public static final String HBASE_PUT_BATCH_SIZE_PROP = "hoodie.index.hbase.put.batch.size";
/**
* Property to set which implementation of HBase QPS resource allocator to be used
* Property to set which implementation of HBase QPS resource allocator to be used.
*/
public static final String HBASE_INDEX_QPS_ALLOCATOR_CLASS = "hoodie.index.hbase.qps.allocator.class";
public static final String DEFAULT_HBASE_INDEX_QPS_ALLOCATOR_CLASS = DefaultHBaseQPSResourceAllocator.class.getName();
/**
* Property to set to enable auto computation of put batch size
* Property to set to enable auto computation of put batch size.
*/
public static final String HBASE_PUT_BATCH_SIZE_AUTO_COMPUTE_PROP = "hoodie.index.hbase.put.batch.size.autocompute";
public static final String DEFAULT_HBASE_PUT_BATCH_SIZE_AUTO_COMPUTE = "false";
@@ -62,7 +62,7 @@ public class HoodieHBaseIndexConfig extends DefaultHoodieConfig {
*/
public static String HBASE_MAX_QPS_PER_REGION_SERVER_PROP = "hoodie.index.hbase.max.qps.per.region.server";
/**
* Default batch size, used only for Get, but computed for Put
* Default batch size, used only for Get, but computed for Put.
*/
public static final int DEFAULT_HBASE_BATCH_SIZE = 100;
/**
@@ -70,17 +70,17 @@ public class HoodieHBaseIndexConfig extends DefaultHoodieConfig {
*/
public static final int DEFAULT_HBASE_MAX_QPS_PER_REGION_SERVER = 1000;
/**
* Default is 50%, which means a total of 2 jobs can run using HbaseIndex without overwhelming Region Servers
* Default is 50%, which means a total of 2 jobs can run using HbaseIndex without overwhelming Region Servers.
*/
public static final float DEFAULT_HBASE_QPS_FRACTION = 0.5f;
/**
* Property to decide if HBASE_QPS_FRACTION_PROP is dynamically calculated based on volume
* Property to decide if HBASE_QPS_FRACTION_PROP is dynamically calculated based on volume.
*/
public static final String HOODIE_INDEX_COMPUTE_QPS_DYNAMICALLY = "hoodie.index.hbase.dynamic_qps";
public static final boolean DEFAULT_HOODIE_INDEX_COMPUTE_QPS_DYNAMICALLY = false;
/**
* Min and Max for HBASE_QPS_FRACTION_PROP to stabilize skewed volume workloads
* Min and Max for HBASE_QPS_FRACTION_PROP to stabilize skewed volume workloads.
*/
public static final String HBASE_MIN_QPS_FRACTION_PROP = "hoodie.index.hbase.min.qps.fraction";
public static final String DEFAULT_HBASE_MIN_QPS_FRACTION_PROP = "0.002";
@@ -88,7 +88,7 @@ public class HoodieHBaseIndexConfig extends DefaultHoodieConfig {
public static final String HBASE_MAX_QPS_FRACTION_PROP = "hoodie.index.hbase.max.qps.fraction";
public static final String DEFAULT_HBASE_MAX_QPS_FRACTION_PROP = "0.06";
/**
* Hoodie index desired puts operation time in seconds
* Hoodie index desired puts operation time in seconds.
*/
public static final String HOODIE_INDEX_DESIRED_PUTS_TIME_IN_SECS = "hoodie.index.hbase.desired_puts_time_in_secs";
public static final int DEFAULT_HOODIE_INDEX_DESIRED_PUTS_TIME_IN_SECS = 600;

View File

@@ -29,7 +29,7 @@ import java.util.Properties;
/**
* Indexing related config
* Indexing related config.
*/
@Immutable
public class HoodieIndexConfig extends DefaultHoodieConfig {

View File

@@ -29,7 +29,7 @@ import java.io.IOException;
import java.util.Properties;
/**
* Memory related config
* Memory related config.
*/
@Immutable
public class HoodieMemoryConfig extends DefaultHoodieConfig {

View File

@@ -26,7 +26,7 @@ import java.io.IOException;
import java.util.Properties;
/**
* Storage related config
* Storage related config.
*/
@Immutable
public class HoodieStorageConfig extends DefaultHoodieConfig {

View File

@@ -42,7 +42,7 @@ import java.util.Map;
import java.util.Properties;
/**
* Class storing configs for the {@link HoodieWriteClient}
* Class storing configs for the {@link HoodieWriteClient}.
*/
@Immutable
public class HoodieWriteConfig extends DefaultHoodieConfig {
@@ -115,8 +115,8 @@ public class HoodieWriteConfig extends DefaultHoodieConfig {
}
/**
* base properties
**/
* base properties.
*/
public String getBasePath() {
return props.getProperty(BASE_PATH_PROP);
}
@@ -210,8 +210,8 @@ public class HoodieWriteConfig extends DefaultHoodieConfig {
}
/**
* compaction properties
**/
* compaction properties.
*/
public HoodieCleaningPolicy getCleanerPolicy() {
return HoodieCleaningPolicy.valueOf(props.getProperty(HoodieCompactionConfig.CLEANER_POLICY_PROP));
}
@@ -297,8 +297,8 @@ public class HoodieWriteConfig extends DefaultHoodieConfig {
}
/**
* index properties
**/
* index properties.
*/
public HoodieIndex.IndexType getIndexType() {
return HoodieIndex.IndexType.valueOf(props.getProperty(HoodieIndexConfig.INDEX_TYPE_PROP));
}
@@ -417,8 +417,8 @@ public class HoodieWriteConfig extends DefaultHoodieConfig {
}
/**
* storage properties
**/
* storage properties.
*/
public long getParquetMaxFileSize() {
return Long.parseLong(props.getProperty(HoodieStorageConfig.PARQUET_FILE_MAX_BYTES));
}
@@ -452,8 +452,8 @@ public class HoodieWriteConfig extends DefaultHoodieConfig {
}
/**
* metrics properties
**/
* metrics properties.
*/
public boolean isMetricsOn() {
return Boolean.parseBoolean(props.getProperty(HoodieMetricsConfig.METRICS_ON));
}
@@ -483,7 +483,7 @@ public class HoodieWriteConfig extends DefaultHoodieConfig {
}
/**
* memory configs
* memory configs.
*/
public Double getMaxMemoryFractionPerPartitionMerge() {
return Double.valueOf(props.getProperty(HoodieMemoryConfig.MAX_MEMORY_FRACTION_FOR_MERGE_PROP));

View File

@@ -20,7 +20,7 @@ package org.apache.hudi.exception;
/**
* <p>
* Exception thrown for any higher level errors when <code>HoodieClient</code> is doing a delta commit
* Exception thrown for any higher level errors when <code>HoodieClient</code> is doing a delta commit.
* </p>
*/
public class HoodieAppendException extends HoodieException {

View File

@@ -20,7 +20,7 @@ package org.apache.hudi.exception;
/**
* <p>
* Exception thrown for any higher level errors when <code>HoodieClient</code> is doing a Commit
* Exception thrown for any higher level errors when <code>HoodieClient</code> is doing a Commit.
* </p>
*/
public class HoodieCommitException extends HoodieException {

View File

@@ -20,7 +20,7 @@ package org.apache.hudi.exception;
/**
* <p>
* Exception thrown when dependent system is not available
* Exception thrown when dependent system is not available.
* </p>
*/
public class HoodieDependentSystemUnavailableException extends HoodieException {

View File

@@ -20,7 +20,7 @@ package org.apache.hudi.exception;
/**
* <p>
* Exception thrown for any higher level errors when <code>HoodieClient</code> is doing a bulk insert
* Exception thrown for any higher level errors when <code>HoodieClient</code> is doing a bulk insert.
* </p>
*/
public class HoodieInsertException extends HoodieException {

View File

@@ -20,7 +20,7 @@ package org.apache.hudi.exception;
/**
* <p>
* Exception thrown for any higher level errors when <code>HoodieClient</code> is doing a incremental upsert
* Exception thrown for any higher level errors when <code>HoodieClient</code> is doing a incremental upsert.
* </p>
*/
public class HoodieUpsertException extends HoodieException {

View File

@@ -30,7 +30,7 @@ import java.util.Iterator;
import java.util.List;
/**
* Map function that handles a sorted stream of HoodieRecords
* Map function that handles a sorted stream of HoodieRecords.
*/
public class BulkInsertMapFunction<T extends HoodieRecordPayload>
implements Function2<Integer, Iterator<HoodieRecord<T>>, Iterator<List<WriteStatus>>> {

View File

@@ -122,7 +122,7 @@ public class CopyOnWriteLazyInsertIterable<T extends HoodieRecordPayload>
}
/**
* Consumes stream of hoodie records from in-memory queue and writes to one or more create-handles
* Consumes stream of hoodie records from in-memory queue and writes to one or more create-handles.
*/
protected class CopyOnWriteInsertHandler
extends BoundedInMemoryQueueConsumer<HoodieInsertValueGenResult<HoodieRecord>, List<WriteStatus>> {

View File

@@ -43,7 +43,7 @@ public abstract class LazyIterableIterator<I, O> implements Iterable<O>, Iterato
}
/**
* Called once, before any elements are processed
* Called once, before any elements are processed.
*/
protected abstract void start();

View File

@@ -39,7 +39,7 @@ import org.apache.spark.api.java.JavaSparkContext;
import java.io.Serializable;
/**
* Base class for different types of indexes to determine the mapping from uuid
* Base class for different types of indexes to determine the mapping from uuid.
*/
public abstract class HoodieIndex<T extends HoodieRecordPayload> implements Serializable {

View File

@@ -99,7 +99,7 @@ public class InMemoryHashIndex<T extends HoodieRecordPayload> extends HoodieInde
}
/**
* Only looks up by recordKey
* Only looks up by recordKey.
*/
@Override
public boolean isGlobal() {

View File

@@ -23,7 +23,7 @@ import com.google.common.base.Objects;
import java.io.Serializable;
/**
* Metadata about a given file group, useful for index lookup
* Metadata about a given file group, useful for index lookup.
*/
public class BloomIndexFileInfo implements Serializable {
@@ -62,7 +62,7 @@ public class BloomIndexFileInfo implements Serializable {
}
/**
* Does the given key fall within the range (inclusive)
* Does the given key fall within the range (inclusive).
*/
public boolean isKeyInRange(String recordKey) {
return minRecordKey.compareTo(recordKey) <= 0 && maxRecordKey.compareTo(recordKey) >= 0;

View File

@@ -141,7 +141,7 @@ public class HoodieBloomIndex<T extends HoodieRecordPayload> extends HoodieIndex
/**
* Lookup the location for each record key and return the pair<record_key,location> for all record keys already
* present and drop the record keys if not present
* present and drop the record keys if not present.
*/
private JavaPairRDD<HoodieKey, HoodieRecordLocation> lookupIndex(
JavaPairRDD<String, String> partitionRecordKeyPairRDD, final JavaSparkContext jsc,
@@ -167,7 +167,7 @@ public class HoodieBloomIndex<T extends HoodieRecordPayload> extends HoodieIndex
}
/**
* Compute the estimated number of bloom filter comparisons to be performed on each file group
* Compute the estimated number of bloom filter comparisons to be performed on each file group.
*/
private Map<String, Long> computeComparisonsPerFileGroup(final Map<String, Long> recordsPerPartition,
final Map<String, List<BloomIndexFileInfo>> partitionToFileInfo,
@@ -278,7 +278,7 @@ public class HoodieBloomIndex<T extends HoodieRecordPayload> extends HoodieIndex
}
/**
* This is not global, since we depend on the partitionPath to do the lookup
* This is not global, since we depend on the partitionPath to do the lookup.
*/
@Override
public boolean isGlobal() {

View File

@@ -37,7 +37,7 @@ import java.util.List;
import scala.Tuple2;
/**
* Function performing actual checking of RDD partition containing (fileId, hoodieKeys) against the actual files
* Function performing actual checking of RDD partition containing (fileId, hoodieKeys) against the actual files.
*/
public class HoodieBloomIndexCheckFunction
implements Function2<Integer, Iterator<Tuple2<String, HoodieKey>>, Iterator<List<KeyLookupResult>>> {

View File

@@ -106,7 +106,7 @@ public class HoodieGlobalBloomIndex<T extends HoodieRecordPayload> extends Hoodi
/**
* Tagging for global index should only consider the record key
* Tagging for global index should only consider the record key.
*/
@Override
protected JavaRDD<HoodieRecord<T>> tagLocationBacktoRecords(

View File

@@ -36,7 +36,7 @@ class IntervalTreeBasedGlobalIndexFileFilter implements IndexFileFilter {
private final Set<String> filesWithNoRanges = new HashSet<>();
/**
* Instantiates {@link IntervalTreeBasedGlobalIndexFileFilter}
* Instantiates {@link IntervalTreeBasedGlobalIndexFileFilter}.
*
* @param partitionToFileIndexInfo Map of partition to List of {@link BloomIndexFileInfo}s
*/

View File

@@ -35,7 +35,7 @@ class IntervalTreeBasedIndexFileFilter implements IndexFileFilter {
private final Map<String, Set<String>> partitionToFilesWithNoRanges = new HashMap<>();
/**
* Instantiates {@link IntervalTreeBasedIndexFileFilter}
* Instantiates {@link IntervalTreeBasedIndexFileFilter}.
*
* @param partitionToFileIndexInfo Map of partition to List of {@link BloomIndexFileInfo}s
*/

View File

@@ -21,7 +21,7 @@ package org.apache.hudi.index.bloom;
import java.util.List;
/**
* Encapsulates the result from a key lookup
* Encapsulates the result from a key lookup.
*/
public class KeyLookupResult {

View File

@@ -39,7 +39,7 @@ class KeyRangeNode implements Comparable<KeyRangeNode>, Serializable {
private KeyRangeNode right = null;
/**
* Instantiates a new {@link KeyRangeNode}
* Instantiates a new {@link KeyRangeNode}.
*
* @param minRecordKey min record key of the index file
* @param maxRecordKey max record key of the index file

View File

@@ -26,7 +26,7 @@ import java.util.Set;
class ListBasedGlobalIndexFileFilter extends ListBasedIndexFileFilter {
/**
* Instantiates {@link ListBasedGlobalIndexFileFilter}
* Instantiates {@link ListBasedGlobalIndexFileFilter}.
*
* @param partitionToFileIndexInfo Map of partition to List of {@link BloomIndexFileInfo}
*/

View File

@@ -32,7 +32,7 @@ class ListBasedIndexFileFilter implements IndexFileFilter {
final Map<String, List<BloomIndexFileInfo>> partitionToFileIndexInfo;
/**
* Instantiates {@link ListBasedIndexFileFilter}
* Instantiates {@link ListBasedIndexFileFilter}.
*
* @param partitionToFileIndexInfo Map of partition to List of {@link BloomIndexFileInfo}
*/

View File

@@ -67,7 +67,7 @@ import java.util.List;
import scala.Tuple2;
/**
* Hoodie Index implementation backed by HBase
* Hoodie Index implementation backed by HBase.
*/
public class HBaseIndex<T extends HoodieRecordPayload> extends HoodieIndex<T> {
@@ -89,7 +89,7 @@ public class HBaseIndex<T extends HoodieRecordPayload> extends HoodieIndex<T> {
private int maxQpsPerRegionServer;
/**
* multiPutBatchSize will be computed and re-set in updateLocation if
* {@link HoodieIndexConfig.HBASE_PUT_BATCH_SIZE_AUTO_COMPUTE_PROP} is set to true
* {@link HoodieIndexConfig.HBASE_PUT_BATCH_SIZE_AUTO_COMPUTE_PROP} is set to true.
*/
private Integer multiPutBatchSize;
private Integer numRegionServersForTable;
@@ -150,7 +150,7 @@ public class HBaseIndex<T extends HoodieRecordPayload> extends HoodieIndex<T> {
/**
* Since we are sharing the HbaseConnection across tasks in a JVM, make sure the HbaseConnectio is closed when JVM
* exits
* exits.
*/
private void addShutDownHook() {
Runtime.getRuntime().addShutdownHook(new Thread() {
@@ -342,7 +342,7 @@ public class HBaseIndex<T extends HoodieRecordPayload> extends HoodieIndex<T> {
}
/**
* Helper method to facilitate performing puts and deletes in Hbase
* Helper method to facilitate performing puts and deletes in Hbase.
*/
private void doPutsAndDeletes(HTable hTable, List<Put> puts, List<Delete> deletes) throws IOException {
if (puts.size() > 0) {
@@ -500,7 +500,7 @@ public class HBaseIndex<T extends HoodieRecordPayload> extends HoodieIndex<T> {
}
/**
* Only looks up by recordKey
* Only looks up by recordKey.
*/
@Override
public boolean isGlobal() {

View File

@@ -21,7 +21,7 @@ package org.apache.hudi.index.hbase;
import java.io.Serializable;
/**
* <code>HBaseIndexQPSResourceAllocator</code> defines methods to manage resource allocation for HBase index operations
* <code>HBaseIndexQPSResourceAllocator</code> defines methods to manage resource allocation for HBase index operations.
*/
public interface HBaseIndexQPSResourceAllocator extends Serializable {
@@ -45,7 +45,7 @@ public interface HBaseIndexQPSResourceAllocator extends Serializable {
float acquireQPSResources(final float desiredQPSFraction, final long numPuts);
/**
* This method releases the acquired QPS Fraction
* This method releases the acquired QPS Fraction.
*/
void releaseQPSResources();
}

View File

@@ -301,7 +301,7 @@ public class HoodieAppendHandle<T extends HoodieRecordPayload> extends HoodieWri
}
/**
* Checks if the number of records have reached the set threshold and then flushes the records to disk
* Checks if the number of records have reached the set threshold and then flushes the records to disk.
*/
private void flushToDiskIfRequired(HoodieRecord record) {
// Append if max number of records reached to achieve block size

View File

@@ -51,7 +51,7 @@ import java.util.Map;
import java.util.stream.Collectors;
/**
* Cleaner is responsible for garbage collecting older files in a given partition path, such that
* Cleaner is responsible for garbage collecting older files in a given partition path. Such that
* <p>
* 1) It provides sufficient time for existing queries running on older versions, to close
* <p>
@@ -83,7 +83,8 @@ public class HoodieCleanHelper<T extends HoodieRecordPayload<T>> implements Seri
}
/**
* Returns list of partitions where clean operations needs to be performed
* Returns list of partitions where clean operations needs to be performed.
*
* @param newInstantToRetain New instant to be retained after this cleanup operation
* @return list of partitions to scan for cleaning
* @throws IOException when underlying file-system throws this exception
@@ -294,7 +295,7 @@ public class HoodieCleanHelper<T extends HoodieRecordPayload<T>> implements Seri
}
/**
* Determine if file slice needed to be preserved for pending compaction
* Determine if file slice needed to be preserved for pending compaction.
*
* @param fileSlice File Slice
* @return true if file slice needs to be preserved, false otherwise.

View File

@@ -64,7 +64,7 @@ import java.util.stream.Collectors;
import java.util.stream.Stream;
/**
* Archiver to bound the growth of <action>.commit files
* Archiver to bound the growth of <action>.commit files.
*/
public class HoodieCommitArchiveLog {
@@ -201,7 +201,7 @@ public class HoodieCommitArchiveLog {
}
/**
* Remove older instants from auxiliary meta folder
* Remove older instants from auxiliary meta folder.
*
* @param thresholdInstant Hoodie Instant
* @return success if all eligible file deleted successfully

View File

@@ -77,7 +77,7 @@ public class HoodieCreateHandle<T extends HoodieRecordPayload> extends HoodieWri
}
/**
* Called by the compactor code path
* Called by the compactor code path.
*/
public HoodieCreateHandle(HoodieWriteConfig config, String commitTime, HoodieTable<T> hoodieTable,
String partitionPath, String fileId, Iterator<HoodieRecord<T>> recordIterator) {
@@ -124,7 +124,7 @@ public class HoodieCreateHandle<T extends HoodieRecordPayload> extends HoodieWri
}
/**
* Writes all records passed
* Writes all records passed.
*/
public void write() {
try {
@@ -147,7 +147,7 @@ public class HoodieCreateHandle<T extends HoodieRecordPayload> extends HoodieWri
}
/**
* Performs actions to durably, persist the current changes and returns a WriteStatus object
* Performs actions to durably, persist the current changes and returns a WriteStatus object.
*/
@Override
public WriteStatus close() {

View File

@@ -124,7 +124,7 @@ public class HoodieKeyLookupHandle<T extends HoodieRecordPayload> extends Hoodie
}
/**
* Encapsulates the result from a key lookup
* Encapsulates the result from a key lookup.
*/
public static class KeyLookupResult {

View File

@@ -77,7 +77,7 @@ public class HoodieMergeHandle<T extends HoodieRecordPayload> extends HoodieWrit
}
/**
* Called by compactor code path
* Called by compactor code path.
*/
public HoodieMergeHandle(HoodieWriteConfig config, String commitTime, HoodieTable<T> hoodieTable,
Map<String, HoodieRecord<T>> keyToNewRecords, String fileId, HoodieDataFile dataFileToBeMerged) {
@@ -108,7 +108,7 @@ public class HoodieMergeHandle<T extends HoodieRecordPayload> extends HoodieWrit
}
/**
* Determines whether we can accept the incoming records, into the current file, depending on
* Determines whether we can accept the incoming records, into the current file. Depending on
* <p>
* - Whether it belongs to the same partitionPath as existing records - Whether the current file written bytes lt max
* file size
@@ -139,14 +139,14 @@ public class HoodieMergeHandle<T extends HoodieRecordPayload> extends HoodieWrit
}
/**
* Rewrite the GenericRecord with the Schema containing the Hoodie Metadata fields
* Rewrite the GenericRecord with the Schema containing the Hoodie Metadata fields.
*/
protected GenericRecord rewriteRecord(GenericRecord record) {
return HoodieAvroUtils.rewriteRecord(record, writerSchema);
}
/**
* Extract old file path, initialize StorageWriter and WriteStatus
* Extract old file path, initialize StorageWriter and WriteStatus.
*/
private void init(String fileId, String partitionPath, HoodieDataFile dataFileToBeMerged) {
logger.info("partitionPath:" + partitionPath + ", fileId to be merged:" + fileId);
@@ -189,7 +189,7 @@ public class HoodieMergeHandle<T extends HoodieRecordPayload> extends HoodieWrit
}
/**
* Load the new incoming records in a map and return partitionPath
* Load the new incoming records in a map and return partitionPath.
*/
private String init(String fileId, Iterator<HoodieRecord<T>> newRecordsItr) {
try {

View File

@@ -28,7 +28,7 @@ import org.apache.hudi.table.HoodieTable;
import org.apache.hadoop.fs.Path;
/**
* Extract range information for a given file slice
* Extract range information for a given file slice.
*/
public class HoodieRangeInfoHandle<T extends HoodieRecordPayload> extends HoodieReadHandle<T> {

View File

@@ -90,7 +90,7 @@ public abstract class HoodieWriteHandle<T extends HoodieRecordPayload> extends H
}
/**
* Creates an empty marker file corresponding to storage writer path
* Creates an empty marker file corresponding to storage writer path.
*
* @param partitionPath Partition path
*/
@@ -105,7 +105,7 @@ public abstract class HoodieWriteHandle<T extends HoodieRecordPayload> extends H
}
/**
* THe marker path will be <base-path>/.hoodie/.temp/<instant_ts>/2019/04/25/filename
* THe marker path will be <base-path>/.hoodie/.temp/<instant_ts>/2019/04/25/filename.
*/
private Path makeNewMarkerPath(String partitionPath) {
Path markerRootPath = new Path(hoodieTable.getMetaClient().getMarkerFolderPath(instantTime));
@@ -123,7 +123,7 @@ public abstract class HoodieWriteHandle<T extends HoodieRecordPayload> extends H
}
/**
* Determines whether we can accept the incoming records, into the current file, depending on
* Determines whether we can accept the incoming records, into the current file. Depending on
* <p>
* - Whether it belongs to the same partitionPath as existing records - Whether the current file written bytes lt max
* file size
@@ -154,7 +154,7 @@ public abstract class HoodieWriteHandle<T extends HoodieRecordPayload> extends H
}
/**
* Rewrite the GenericRecord with the Schema containing the Hoodie Metadata fields
* Rewrite the GenericRecord with the Schema containing the Hoodie Metadata fields.
*/
protected GenericRecord rewriteRecord(GenericRecord record) {
return HoodieAvroUtils.rewriteRecord(record, writerSchema);

View File

@@ -32,12 +32,12 @@ import java.io.Serializable;
import java.util.Set;
/**
* A HoodieCompactor runs compaction on a hoodie table
* A HoodieCompactor runs compaction on a hoodie table.
*/
public interface HoodieCompactor extends Serializable {
/**
* Generate a new compaction plan for scheduling
* Generate a new compaction plan for scheduling.
*
* @param jsc Spark Context
* @param hoodieTable Hoodie Table
@@ -51,7 +51,7 @@ public interface HoodieCompactor extends Serializable {
String compactionCommitTime, Set<HoodieFileGroupId> fgIdsInPendingCompactions) throws IOException;
/**
* Execute compaction operations and report back status
* Execute compaction operations and report back status.
*/
JavaRDD<WriteStatus> compact(JavaSparkContext jsc, HoodieCompactionPlan compactionPlan, HoodieTable hoodieTable,
HoodieWriteConfig config, String compactionInstantTime) throws IOException;

View File

@@ -28,7 +28,7 @@ import java.util.List;
/**
* CompactionStrategy which looks at total IO to be done for the compaction (read + write) and limits the list of
* compactions to be under a configured limit on the IO
* compactions to be under a configured limit on the IO.
*
* @see CompactionStrategy
*/

View File

@@ -116,7 +116,7 @@ public abstract class CompactionStrategy implements Serializable {
}
/**
* Filter the partition paths based on compaction strategy
* Filter the partition paths based on compaction strategy.
*
* @param writeConfig
* @param allPartitionPaths

View File

@@ -32,7 +32,7 @@ import java.util.stream.Collectors;
/**
* LogFileSizeBasedCompactionStrategy orders the compactions based on the total log files size and limits the
* compactions within a configured IO bound
* compactions within a configured IO bound.
*
* @see BoundedIOCompactionStrategy
* @see CompactionStrategy

View File

@@ -26,12 +26,12 @@ import java.io.Closeable;
public abstract class MetricsReporter {
/**
* Push out metrics at scheduled intervals
* Push out metrics at scheduled intervals.
*/
public abstract void start();
/**
* Deterministically push out metrics
* Deterministically push out metrics.
*/
public abstract void report();

View File

@@ -84,7 +84,7 @@ import java.util.stream.Collectors;
import scala.Tuple2;
/**
* Implementation of a very heavily read-optimized Hoodie Table where
 * Implementation of a very heavily read-optimized Hoodie Table where:
* <p>
* INSERTS - Produce new files, block aligned to desired size (or) Merge with the smallest existing file, to expand it
* <p>
@@ -273,7 +273,7 @@ public class HoodieCopyOnWriteTable<T extends HoodieRecordPayload> extends Hoodi
}
/**
* Generates List of files to be cleaned
* Generates List of files to be cleaned.
*
* @param jsc JavaSparkContext
* @return Cleaner Plan
@@ -389,7 +389,7 @@ public class HoodieCopyOnWriteTable<T extends HoodieRecordPayload> extends Hoodi
}
/**
* Delete Inflight instant if enabled
* Delete Inflight instant if enabled.
*
* @param deleteInstant Enable Deletion of Inflight instant
* @param activeTimeline Hoodie active timeline
@@ -414,7 +414,7 @@ public class HoodieCopyOnWriteTable<T extends HoodieRecordPayload> extends Hoodi
}
/**
* Consumer that dequeues records from queue and sends to Merge Handle
* Consumer that dequeues records from queue and sends to Merge Handle.
*/
private static class UpdateHandler extends BoundedInMemoryQueueConsumer<GenericRecord, Void> {
@@ -474,7 +474,7 @@ public class HoodieCopyOnWriteTable<T extends HoodieRecordPayload> extends Hoodi
}
/**
* Helper class for a small file's location and its actual size on disk
* Helper class for a small file's location and its actual size on disk.
*/
static class SmallFile implements Serializable {
@@ -493,7 +493,7 @@ public class HoodieCopyOnWriteTable<T extends HoodieRecordPayload> extends Hoodi
/**
* Helper class for an insert bucket along with the weight [0.0, 0.1] that defines the amount of incoming inserts that
* should be allocated to the bucket
* should be allocated to the bucket.
*/
class InsertBucket implements Serializable {
@@ -512,7 +512,7 @@ public class HoodieCopyOnWriteTable<T extends HoodieRecordPayload> extends Hoodi
}
/**
* Helper class for a bucket's type (INSERT and UPDATE) and its file location
* Helper class for a bucket's type (INSERT and UPDATE) and its file location.
*/
class BucketInfo implements Serializable {
@@ -530,16 +530,16 @@ public class HoodieCopyOnWriteTable<T extends HoodieRecordPayload> extends Hoodi
}
/**
* Packs incoming records to be upserted, into buckets (1 bucket = 1 RDD partition)
* Packs incoming records to be upserted, into buckets (1 bucket = 1 RDD partition).
*/
class UpsertPartitioner extends Partitioner {
/**
* List of all small files to be corrected
* List of all small files to be corrected.
*/
List<SmallFile> smallFiles = new ArrayList<SmallFile>();
/**
* Total number of RDD partitions, is determined by total buckets we want to pack the incoming workload into
* Total number of RDD partitions, is determined by total buckets we want to pack the incoming workload into.
*/
private int totalBuckets = 0;
/**
@@ -560,7 +560,7 @@ public class HoodieCopyOnWriteTable<T extends HoodieRecordPayload> extends Hoodi
private HashMap<Integer, BucketInfo> bucketInfoMap;
/**
* Rolling stats for files
* Rolling stats for files.
*/
protected HoodieRollingStatMetadata rollingStatMetadata;
protected long averageRecordSize;
@@ -672,7 +672,7 @@ public class HoodieCopyOnWriteTable<T extends HoodieRecordPayload> extends Hoodi
}
/**
* Returns a list of small files in the given partition path
* Returns a list of small files in the given partition path.
*/
protected List<SmallFile> getSmallFiles(String partitionPath) {

View File

@@ -201,7 +201,7 @@ public class HoodieMergeOnReadTable<T extends HoodieRecordPayload> extends Hoodi
/**
* Generate all rollback requests that we need to perform for rolling back this action without actually performing
* rolling back
* rolling back.
*
* @param jsc JavaSparkContext
* @param instantToRollback Instant to Rollback

View File

@@ -69,7 +69,7 @@ import java.util.stream.Collectors;
import java.util.stream.Stream;
/**
* Abstract implementation of a HoodieTable
* Abstract implementation of a HoodieTable.
*/
public abstract class HoodieTable<T extends HoodieRecordPayload> implements Serializable {
@@ -111,17 +111,17 @@ public abstract class HoodieTable<T extends HoodieRecordPayload> implements Seri
}
/**
* Provides a partitioner to perform the upsert operation, based on the workload profile
* Provides a partitioner to perform the upsert operation, based on the workload profile.
*/
public abstract Partitioner getUpsertPartitioner(WorkloadProfile profile);
/**
* Provides a partitioner to perform the insert operation, based on the workload profile
* Provides a partitioner to perform the insert operation, based on the workload profile.
*/
public abstract Partitioner getInsertPartitioner(WorkloadProfile profile);
/**
* Return whether this HoodieTable implementation can benefit from workload profiling
* Return whether this HoodieTable implementation can benefit from workload profiling.
*/
public abstract boolean isWorkloadProfileNeeded();
@@ -138,84 +138,84 @@ public abstract class HoodieTable<T extends HoodieRecordPayload> implements Seri
}
/**
* Get the view of the file system for this table
* Get the view of the file system for this table.
*/
public TableFileSystemView getFileSystemView() {
return new HoodieTableFileSystemView(metaClient, getCompletedCommitsTimeline());
}
/**
* Get the read optimized view of the file system for this table
* Get the read optimized view of the file system for this table.
*/
public TableFileSystemView.ReadOptimizedView getROFileSystemView() {
return getViewManager().getFileSystemView(metaClient.getBasePath());
}
/**
* Get the real time view of the file system for this table
* Get the real time view of the file system for this table.
*/
public TableFileSystemView.RealtimeView getRTFileSystemView() {
return getViewManager().getFileSystemView(metaClient.getBasePath());
}
/**
* Get complete view of the file system for this table with ability to force sync
* Get complete view of the file system for this table with ability to force sync.
*/
public SyncableFileSystemView getHoodieView() {
return getViewManager().getFileSystemView(metaClient.getBasePath());
}
/**
* Get only the completed (no-inflights) commit + deltacommit timeline
* Get only the completed (no-inflights) commit + deltacommit timeline.
*/
public HoodieTimeline getCompletedCommitsTimeline() {
return metaClient.getCommitsTimeline().filterCompletedInstants();
}
/**
* Get only the completed (no-inflights) commit timeline
* Get only the completed (no-inflights) commit timeline.
*/
public HoodieTimeline getCompletedCommitTimeline() {
return metaClient.getCommitTimeline().filterCompletedInstants();
}
/**
* Get only the inflights (no-completed) commit timeline
* Get only the inflights (no-completed) commit timeline.
*/
public HoodieTimeline getInflightCommitTimeline() {
return metaClient.getCommitsTimeline().filterInflightsExcludingCompaction();
}
/**
* Get only the completed (no-inflights) clean timeline
* Get only the completed (no-inflights) clean timeline.
*/
public HoodieTimeline getCompletedCleanTimeline() {
return getActiveTimeline().getCleanerTimeline().filterCompletedInstants();
}
/**
* Get clean timeline
* Get clean timeline.
*/
public HoodieTimeline getCleanTimeline() {
return getActiveTimeline().getCleanerTimeline();
}
/**
* Get only the completed (no-inflights) savepoint timeline
* Get only the completed (no-inflights) savepoint timeline.
*/
public HoodieTimeline getCompletedSavepointTimeline() {
return getActiveTimeline().getSavePointTimeline().filterCompletedInstants();
}
/**
* Get the list of savepoints in this table
* Get the list of savepoints in this table.
*/
public List<String> getSavepoints() {
return getCompletedSavepointTimeline().getInstants().map(HoodieInstant::getTimestamp).collect(Collectors.toList());
}
/**
* Get the list of data file names savepointed
* Get the list of data file names savepointed.
*/
public Stream<String> getSavepointedDataFiles(String savepointTime) {
if (!getSavepoints().contains(savepointTime)) {
@@ -237,26 +237,26 @@ public abstract class HoodieTable<T extends HoodieRecordPayload> implements Seri
}
/**
* Return the index
* Return the index.
*/
public HoodieIndex<T> getIndex() {
return index;
}
/**
* Perform the ultimate IO for a given upserted (RDD) partition
* Perform the ultimate IO for a given upserted (RDD) partition.
*/
public abstract Iterator<List<WriteStatus>> handleUpsertPartition(String commitTime, Integer partition,
Iterator<HoodieRecord<T>> recordIterator, Partitioner partitioner);
/**
* Perform the ultimate IO for a given inserted (RDD) partition
* Perform the ultimate IO for a given inserted (RDD) partition.
*/
public abstract Iterator<List<WriteStatus>> handleInsertPartition(String commitTime, Integer partition,
Iterator<HoodieRecord<T>> recordIterator, Partitioner partitioner);
/**
* Schedule compaction for the instant time
* Schedule compaction for the instant time.
*
* @param jsc Spark Context
* @param instantTime Instant Time for scheduling compaction
@@ -265,7 +265,7 @@ public abstract class HoodieTable<T extends HoodieRecordPayload> implements Seri
public abstract HoodieCompactionPlan scheduleCompaction(JavaSparkContext jsc, String instantTime);
/**
* Run Compaction on the table. Compaction arranges the data so that it is optimized for data access
* Run Compaction on the table. Compaction arranges the data so that it is optimized for data access.
*
* @param jsc Spark Context
* @param compactionInstantTime Instant Time
@@ -275,7 +275,7 @@ public abstract class HoodieTable<T extends HoodieRecordPayload> implements Seri
HoodieCompactionPlan compactionPlan);
/**
* Generates list of files that are eligible for cleaning
* Generates list of files that are eligible for cleaning.
*
* @param jsc Java Spark Context
* @return Cleaner Plan containing list of files to be deleted.
@@ -283,7 +283,7 @@ public abstract class HoodieTable<T extends HoodieRecordPayload> implements Seri
public abstract HoodieCleanerPlan scheduleClean(JavaSparkContext jsc);
/**
* Cleans the files listed in the cleaner plan associated with clean instant
* Cleans the files listed in the cleaner plan associated with clean instant.
*
* @param jsc Java Spark Context
* @param cleanInstant Clean Instant
@@ -300,7 +300,7 @@ public abstract class HoodieTable<T extends HoodieRecordPayload> implements Seri
throws IOException;
/**
* Finalize the written data onto storage. Perform any final cleanups
* Finalize the written data onto storage. Perform any final cleanups.
*
* @param jsc Spark Context
* @param stats List of HoodieWriteStats
@@ -312,7 +312,7 @@ public abstract class HoodieTable<T extends HoodieRecordPayload> implements Seri
}
/**
* Delete Marker directory corresponding to an instant
* Delete Marker directory corresponding to an instant.
*
* @param instantTs Instant Time
*/
@@ -409,7 +409,7 @@ public abstract class HoodieTable<T extends HoodieRecordPayload> implements Seri
}
/**
* Ensures all files passed either appear or disappear
* Ensures all files passed either appear or disappear.
*
* @param jsc JavaSparkContext
* @param groupByPartition Files grouped by partition

View File

@@ -51,7 +51,7 @@ import java.util.Map;
import scala.Tuple2;
/**
* Performs Rollback of Hoodie Tables
* Performs Rollback of Hoodie Tables.
*/
public class RollbackExecutor implements Serializable {
@@ -143,7 +143,7 @@ public class RollbackExecutor implements Serializable {
}
/**
* Helper to merge 2 rollback-stats for a given partition
* Helper to merge 2 rollback-stats for a given partition.
*
* @param stat1 HoodieRollbackStat
* @param stat2 HoodieRollbackStat
@@ -177,7 +177,7 @@ public class RollbackExecutor implements Serializable {
}
/**
* Common method used for cleaning out parquet files under a partition path during rollback of a set of commits
* Common method used for cleaning out parquet files under a partition path during rollback of a set of commits.
*/
private Map<FileStatus, Boolean> deleteCleanedFiles(HoodieTableMetaClient metaClient, HoodieWriteConfig config,
Map<FileStatus, Boolean> results, String partitionPath, PathFilter filter) throws IOException {
@@ -193,7 +193,7 @@ public class RollbackExecutor implements Serializable {
}
/**
* Common method used for cleaning out parquet files under a partition path during rollback of a set of commits
* Common method used for cleaning out parquet files under a partition path during rollback of a set of commits.
*/
private Map<FileStatus, Boolean> deleteCleanedFiles(HoodieTableMetaClient metaClient, HoodieWriteConfig config,
Map<FileStatus, Boolean> results, String commit, String partitionPath) throws IOException {

View File

@@ -22,39 +22,39 @@ import org.apache.hudi.common.table.timeline.HoodieInstant;
import org.apache.hudi.common.util.Option;
/**
* Request for performing one rollback action
* Request for performing one rollback action.
*/
public class RollbackRequest {
/**
* Rollback Action Types
* Rollback Action Types.
*/
public enum RollbackAction {
DELETE_DATA_FILES_ONLY, DELETE_DATA_AND_LOG_FILES, APPEND_ROLLBACK_BLOCK
}
/**
* Partition path that needs to be rolled-back
* Partition path that needs to be rolled-back.
*/
private final String partitionPath;
/**
* Rollback Instant
* Rollback Instant.
*/
private final HoodieInstant rollbackInstant;
/**
* FileId in case of appending rollback block
* FileId in case of appending rollback block.
*/
private final Option<String> fileId;
/**
* Latest base instant needed for appending rollback block instant
* Latest base instant needed for appending rollback block instant.
*/
private final Option<String> latestBaseInstant;
/**
* Rollback Action
* Rollback Action.
*/
private final RollbackAction rollbackAction;

View File

@@ -33,19 +33,19 @@ import java.util.Set;
import scala.Tuple2;
/**
* Information about incoming records for upsert/insert obtained either via sampling or introspecting the data fully
* Information about incoming records for upsert/insert obtained either via sampling or introspecting the data fully.
* <p>
* TODO(vc): Think about obtaining this directly from index.tagLocation
*/
public class WorkloadProfile<T extends HoodieRecordPayload> implements Serializable {
/**
* Input workload
* Input workload.
*/
private final JavaRDD<HoodieRecord<T>> taggedRecords;
/**
* Computed workload profile
* Computed workload profile.
*/
private final HashMap<String, WorkloadStat> partitionPathStatMap;

View File

@@ -65,7 +65,7 @@ import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
/**
* Test Cases for Async Compaction and Ingestion interaction
* Test Cases for Async Compaction and Ingestion interaction.
*/
public class TestAsyncCompaction extends TestHoodieClientBase {
@@ -400,7 +400,7 @@ public class TestAsyncCompaction extends TestHoodieClientBase {
}
/**
* HELPER METHODS FOR TESTING
* HELPER METHODS FOR TESTING.
**/
private void validateDeltaCommit(String latestDeltaCommit,

View File

@@ -87,7 +87,7 @@ import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
/**
* Test Cleaning related logic
* Test Cleaning related logic.
*/
public class TestCleaner extends TestHoodieClientBase {
@@ -95,7 +95,7 @@ public class TestCleaner extends TestHoodieClientBase {
private static Logger logger = LogManager.getLogger(TestHoodieClientBase.class);
/**
* Helper method to do first batch of insert for clean by versions/commits tests
* Helper method to do first batch of insert for clean by versions/commits tests.
*
* @param cfg Hoodie Write Config
* @param client Hoodie Client
@@ -140,7 +140,7 @@ public class TestCleaner extends TestHoodieClientBase {
}
/**
* Test Clean-By-Versions using insert/upsert API
* Test Clean-By-Versions using insert/upsert API.
*/
@Test
public void testInsertAndCleanByVersions() throws Exception {
@@ -148,7 +148,7 @@ public class TestCleaner extends TestHoodieClientBase {
}
/**
* Test Clean-By-Versions using prepped versions of insert/upsert API
* Test Clean-By-Versions using prepped versions of insert/upsert API.
*/
@Test
public void testInsertPreppedAndCleanByVersions() throws Exception {
@@ -157,7 +157,7 @@ public class TestCleaner extends TestHoodieClientBase {
}
/**
* Test Clean-By-Versions using bulk-insert/upsert API
* Test Clean-By-Versions using bulk-insert/upsert API.
*/
@Test
public void testBulkInsertAndCleanByVersions() throws Exception {
@@ -165,7 +165,7 @@ public class TestCleaner extends TestHoodieClientBase {
}
/**
* Test Clean-By-Versions using prepped versions of bulk-insert/upsert API
* Test Clean-By-Versions using prepped versions of bulk-insert/upsert API.
*/
@Test
public void testBulkInsertPreppedAndCleanByVersions() throws Exception {
@@ -175,7 +175,7 @@ public class TestCleaner extends TestHoodieClientBase {
}
/**
* Test Helper for Cleaning by versions logic from HoodieWriteClient API perspective
* Test Helper for Cleaning by versions logic from HoodieWriteClient API perspective.
*
* @param insertFn Insert API to be tested
* @param upsertFn Upsert API to be tested
@@ -301,7 +301,7 @@ public class TestCleaner extends TestHoodieClientBase {
}
/**
* Test Clean-By-Versions using insert/upsert API
* Test Clean-By-Versions using insert/upsert API.
*/
@Test
public void testInsertAndCleanByCommits() throws Exception {
@@ -309,7 +309,7 @@ public class TestCleaner extends TestHoodieClientBase {
}
/**
* Test Clean-By-Versions using prepped version of insert/upsert API
* Test Clean-By-Versions using prepped version of insert/upsert API.
*/
@Test
public void testInsertPreppedAndCleanByCommits() throws Exception {
@@ -317,7 +317,7 @@ public class TestCleaner extends TestHoodieClientBase {
}
/**
* Test Clean-By-Versions using prepped versions of bulk-insert/upsert API
* Test Clean-By-Versions using prepped versions of bulk-insert/upsert API.
*/
@Test
public void testBulkInsertPreppedAndCleanByCommits() throws Exception {
@@ -327,7 +327,7 @@ public class TestCleaner extends TestHoodieClientBase {
}
/**
* Test Clean-By-Versions using bulk-insert/upsert API
* Test Clean-By-Versions using bulk-insert/upsert API.
*/
@Test
public void testBulkInsertAndCleanByCommits() throws Exception {
@@ -335,7 +335,7 @@ public class TestCleaner extends TestHoodieClientBase {
}
/**
* Test Helper for Cleaning by versions logic from HoodieWriteClient API perspective
* Test Helper for Cleaning by versions logic from HoodieWriteClient API perspective.
*
* @param insertFn Insert API to be tested
* @param upsertFn Upsert API to be tested
@@ -407,7 +407,7 @@ public class TestCleaner extends TestHoodieClientBase {
}
/**
* Helper to run cleaner and collect Clean Stats
* Helper to run cleaner and collect Clean Stats.
*
* @param config HoodieWriteConfig
*/
@@ -416,7 +416,7 @@ public class TestCleaner extends TestHoodieClientBase {
}
/**
* Helper to run cleaner and collect Clean Stats
* Helper to run cleaner and collect Clean Stats.
*
* @param config HoodieWriteConfig
*/
@@ -463,7 +463,7 @@ public class TestCleaner extends TestHoodieClientBase {
}
/**
* Test HoodieTable.clean() Cleaning by versions logic
* Test HoodieTable.clean() Cleaning by versions logic.
*/
@Test
public void testKeepLatestFileVersions() throws IOException {
@@ -555,7 +555,7 @@ public class TestCleaner extends TestHoodieClientBase {
}
/**
* Test HoodieTable.clean() Cleaning by versions logic for MOR table with Log files
* Test HoodieTable.clean() Cleaning by versions logic for MOR table with Log files.
*/
@Test
public void testKeepLatestFileVersionsMOR() throws IOException {
@@ -701,7 +701,7 @@ public class TestCleaner extends TestHoodieClientBase {
}
/**
* Test HoodieTable.clean() Cleaning by commit logic for MOR table with Log files
* Test HoodieTable.clean() Cleaning by commit logic for MOR table with Log files.
*/
@Test
public void testKeepLatestCommits() throws IOException {
@@ -718,7 +718,7 @@ public class TestCleaner extends TestHoodieClientBase {
}
/**
* Test HoodieTable.clean() Cleaning by commit logic for MOR table with Log files
* Test HoodieTable.clean() Cleaning by commit logic for MOR table with Log files.
*/
@Test
public void testKeepLatestCommitsIncrMode() throws IOException {
@@ -936,7 +936,7 @@ public class TestCleaner extends TestHoodieClientBase {
}
/**
* Test Keep Latest Commits when there are pending compactions
* Test Keep Latest Commits when there are pending compactions.
*/
@Test
public void testKeepLatestCommitsWithPendingCompactions() throws IOException {
@@ -967,7 +967,7 @@ public class TestCleaner extends TestHoodieClientBase {
/**
* Test Keep Latest Versions when there are pending compactions
* Test Keep Latest Versions when there are pending compactions.
*/
@Test
public void testKeepLatestVersionsWithPendingCompactionsAndFailureRetry() throws IOException {
@@ -993,7 +993,7 @@ public class TestCleaner extends TestHoodieClientBase {
}
/**
* Common test method for validating pending compactions
* Common test method for validating pending compactions.
*
* @param config Hoodie Write Config
* @param expNumFilesDeleted Number of files deleted
@@ -1111,7 +1111,7 @@ public class TestCleaner extends TestHoodieClientBase {
}
/**
* Utility method to create temporary data files
* Utility method to create temporary data files.
*
* @param commitTime Commit Timestamp
* @param numFiles Number for files to be generated
@@ -1127,7 +1127,7 @@ public class TestCleaner extends TestHoodieClientBase {
}
/***
* Helper method to return temporary files count
* Helper method to return temporary files count.
*
* @return Number of temporary files found
* @throws IOException in case of error

View File

@@ -47,12 +47,12 @@ import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
/**
* Test Cases for rollback of snapshots and commits
* Test Cases for rollback of snapshots and commits.
*/
public class TestClientRollback extends TestHoodieClientBase {
/**
* Test case for rollback-savepoint interaction
* Test case for rollback-savepoint interaction.
*/
@Test
public void testSavepointAndRollback() throws Exception {
@@ -165,7 +165,7 @@ public class TestClientRollback extends TestHoodieClientBase {
}
/**
* Test Cases for effects of rollbacking completed/inflight commits
* Test Cases for effects of rollbacking completed/inflight commits.
*/
@Test
public void testRollbackCommit() throws Exception {
@@ -255,7 +255,7 @@ public class TestClientRollback extends TestHoodieClientBase {
}
/**
* Test auto-rollback of commits which are in flight
* Test auto-rollback of commits which are in flight.
*/
@Test
public void testAutoRollbackInflightCommit() throws Exception {

View File

@@ -163,7 +163,7 @@ public class TestCompactionAdminClient extends TestHoodieClientBase {
}
/**
* Enssure compaction plan is valid
 * Ensure compaction plan is valid.
*
* @param compactionInstant Compaction Instant
*/
@@ -206,7 +206,7 @@ public class TestCompactionAdminClient extends TestHoodieClientBase {
}
/**
* Validate Unschedule operations
* Validate Unschedule operations.
*/
private List<Pair<HoodieLogFile, HoodieLogFile>> validateUnSchedulePlan(CompactionAdminClient client,
String ingestionInstant, String compactionInstant, int numEntriesPerInstant, int expNumRenames) throws Exception {
@@ -215,7 +215,7 @@ public class TestCompactionAdminClient extends TestHoodieClientBase {
}
/**
* Validate Unschedule operations
* Validate Unschedule operations.
*/
private List<Pair<HoodieLogFile, HoodieLogFile>> validateUnSchedulePlan(CompactionAdminClient client,
String ingestionInstant, String compactionInstant, int numEntriesPerInstant, int expNumRenames,
@@ -287,7 +287,7 @@ public class TestCompactionAdminClient extends TestHoodieClientBase {
}
/**
* Validate Unschedule operations
* Validate Unschedule operations.
*/
private void validateUnScheduleFileId(CompactionAdminClient client, String ingestionInstant, String compactionInstant,
CompactionOperation op, int expNumRenames) throws Exception {

View File

@@ -66,7 +66,7 @@ import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
/**
* Base Class providing setup/cleanup and utility methods for testing Hoodie Client facing tests
* Base Class providing setup/cleanup and utility methods for testing Hoodie Client facing tests.
*/
public class TestHoodieClientBase extends HoodieClientTestHarness {
@@ -104,7 +104,7 @@ public class TestHoodieClientBase extends HoodieClientTestHarness {
}
/**
* Get Default HoodieWriteConfig for tests
* Get Default HoodieWriteConfig for tests.
*
* @return Default Hoodie Write Config for tests
*/
@@ -114,7 +114,7 @@ public class TestHoodieClientBase extends HoodieClientTestHarness {
/**
* Get Config builder with default configs set
* Get Config builder with default configs set.
*
* @return Config Builder
*/
@@ -123,7 +123,7 @@ public class TestHoodieClientBase extends HoodieClientTestHarness {
}
/**
* Get Config builder with default configs set
* Get Config builder with default configs set.
*
* @return Config Builder
*/
@@ -147,7 +147,7 @@ public class TestHoodieClientBase extends HoodieClientTestHarness {
}
/**
* Assert no failures in writing hoodie files
* Assert no failures in writing hoodie files.
*
* @param statuses List of Write Status
*/
@@ -159,7 +159,7 @@ public class TestHoodieClientBase extends HoodieClientTestHarness {
}
/**
* Ensure presence of partition meta-data at known depth
* Ensure presence of partition meta-data at known depth.
*
* @param partitionPaths Partition paths to check
* @param fs File System
@@ -175,7 +175,7 @@ public class TestHoodieClientBase extends HoodieClientTestHarness {
}
/**
* Ensure records have location field set
* Ensure records have location field set.
*
* @param taggedRecords Tagged Records
* @param commitTime Commit Timestamp
@@ -189,7 +189,7 @@ public class TestHoodieClientBase extends HoodieClientTestHarness {
}
/**
* Assert that there is no duplicate key at the partition level
* Assert that there is no duplicate key at the partition level.
*
* @param records List of Hoodie records
*/
@@ -252,7 +252,7 @@ public class TestHoodieClientBase extends HoodieClientTestHarness {
}
/**
* Generate wrapper for record generation function for testing Prepped APIs
* Generate wrapper for record generation function for testing Prepped APIs.
*
* @param isPreppedAPI Flag to indicate if this is for testing prepped-version of APIs
* @param writeConfig Hoodie Write Config
@@ -269,7 +269,7 @@ public class TestHoodieClientBase extends HoodieClientTestHarness {
}
/**
* Generate wrapper for delete key generation function for testing Prepped APIs
* Generate wrapper for delete key generation function for testing Prepped APIs.
*
* @param isPreppedAPI Flag to indicate if this is for testing prepped-version of APIs
* @param writeConfig Hoodie Write Config
@@ -286,7 +286,7 @@ public class TestHoodieClientBase extends HoodieClientTestHarness {
}
/**
* Helper to insert first batch of records and do regular assertions on the state after successful completion
* Helper to insert first batch of records and do regular assertions on the state after successful completion.
*
* @param writeConfig Hoodie Write Config
* @param client Hoodie Write Client
@@ -312,7 +312,7 @@ public class TestHoodieClientBase extends HoodieClientTestHarness {
}
/**
* Helper to upsert batch of records and do regular assertions on the state after successful completion
* Helper to upsert batch of records and do regular assertions on the state after successful completion.
*
* @param writeConfig Hoodie Write Config
* @param client Hoodie Write Client
@@ -344,7 +344,7 @@ public class TestHoodieClientBase extends HoodieClientTestHarness {
}
/**
* Helper to delete batch of keys and do regular assertions on the state after successful completion
* Helper to delete batch of keys and do regular assertions on the state after successful completion.
*
* @param writeConfig Hoodie Write Config
* @param client Hoodie Write Client
@@ -374,7 +374,7 @@ public class TestHoodieClientBase extends HoodieClientTestHarness {
}
/**
* Helper to insert/upsert batch of records and do regular assertions on the state after successful completion
* Helper to insert/upsert batch of records and do regular assertions on the state after successful completion.
*
* @param client Hoodie Write Client
* @param newCommitTime New Commit Timestamp to be used
@@ -445,7 +445,7 @@ public class TestHoodieClientBase extends HoodieClientTestHarness {
}
/**
* Helper to delete batch of hoodie keys and do regular assertions on the state after successful completion
* Helper to delete batch of hoodie keys and do regular assertions on the state after successful completion.
*
* @param client Hoodie Write Client
* @param newCommitTime New Commit Timestamp to be used
@@ -507,7 +507,7 @@ public class TestHoodieClientBase extends HoodieClientTestHarness {
}
/**
* Get Cleaner state corresponding to a partition path
* Get Cleaner state corresponding to a partition path.
*
* @param hoodieCleanStatsTwo List of Clean Stats
* @param partitionPath Partition path for filtering
@@ -518,7 +518,7 @@ public class TestHoodieClientBase extends HoodieClientTestHarness {
}
/**
* Utility to simulate commit touching files in a partition
* Utility to simulate commit touching files in a partition.
*
* @param files List of file-Ids to be touched
* @param partitionPath Partition
@@ -532,7 +532,7 @@ public class TestHoodieClientBase extends HoodieClientTestHarness {
}
/**
* Helper methods to create new data files in a partition
* Helper methods to create new data files in a partition.
*
* @param partitionPath Partition
* @param commitTime Commit Timestamp

View File

@@ -77,7 +77,7 @@ import static org.mockito.Mockito.when;
public class TestHoodieClientOnCopyOnWriteStorage extends TestHoodieClientBase {
/**
* Test Auto Commit behavior for HoodieWriteClient insert API
* Test Auto Commit behavior for HoodieWriteClient insert API.
*/
@Test
public void testAutoCommitOnInsert() throws Exception {
@@ -85,7 +85,7 @@ public class TestHoodieClientOnCopyOnWriteStorage extends TestHoodieClientBase {
}
/**
* Test Auto Commit behavior for HoodieWriteClient insertPrepped API
* Test Auto Commit behavior for HoodieWriteClient insertPrepped API.
*/
@Test
public void testAutoCommitOnInsertPrepped() throws Exception {
@@ -93,7 +93,7 @@ public class TestHoodieClientOnCopyOnWriteStorage extends TestHoodieClientBase {
}
/**
* Test Auto Commit behavior for HoodieWriteClient upsert API
* Test Auto Commit behavior for HoodieWriteClient upsert API.
*/
@Test
public void testAutoCommitOnUpsert() throws Exception {
@@ -101,7 +101,7 @@ public class TestHoodieClientOnCopyOnWriteStorage extends TestHoodieClientBase {
}
/**
* Test Auto Commit behavior for HoodieWriteClient upsert Prepped API
* Test Auto Commit behavior for HoodieWriteClient upsert Prepped API.
*/
@Test
public void testAutoCommitOnUpsertPrepped() throws Exception {
@@ -109,7 +109,7 @@ public class TestHoodieClientOnCopyOnWriteStorage extends TestHoodieClientBase {
}
/**
* Test Auto Commit behavior for HoodieWriteClient bulk-insert API
* Test Auto Commit behavior for HoodieWriteClient bulk-insert API.
*/
@Test
public void testAutoCommitOnBulkInsert() throws Exception {
@@ -117,7 +117,7 @@ public class TestHoodieClientOnCopyOnWriteStorage extends TestHoodieClientBase {
}
/**
* Test Auto Commit behavior for HoodieWriteClient bulk-insert prepped API
* Test Auto Commit behavior for HoodieWriteClient bulk-insert prepped API.
*/
@Test
public void testAutoCommitOnBulkInsertPrepped() throws Exception {
@@ -126,7 +126,7 @@ public class TestHoodieClientOnCopyOnWriteStorage extends TestHoodieClientBase {
}
/**
* Test auto-commit by applying write function
* Test auto-commit by applying write function.
*
* @param writeFn One of HoodieWriteClient Write API
* @throws Exception in case of failure
@@ -152,7 +152,7 @@ public class TestHoodieClientOnCopyOnWriteStorage extends TestHoodieClientBase {
}
/**
* Test De-duplication behavior for HoodieWriteClient insert API
* Test De-duplication behavior for HoodieWriteClient insert API.
*/
@Test
public void testDeduplicationOnInsert() throws Exception {
@@ -160,7 +160,7 @@ public class TestHoodieClientOnCopyOnWriteStorage extends TestHoodieClientBase {
}
/**
* Test De-duplication behavior for HoodieWriteClient bulk-insert API
* Test De-duplication behavior for HoodieWriteClient bulk-insert API.
*/
@Test
public void testDeduplicationOnBulkInsert() throws Exception {
@@ -168,7 +168,7 @@ public class TestHoodieClientOnCopyOnWriteStorage extends TestHoodieClientBase {
}
/**
* Test De-duplication behavior for HoodieWriteClient upsert API
* Test De-duplication behavior for HoodieWriteClient upsert API.
*/
@Test
public void testDeduplicationOnUpsert() throws Exception {
@@ -176,7 +176,7 @@ public class TestHoodieClientOnCopyOnWriteStorage extends TestHoodieClientBase {
}
/**
* Test Deduplication Logic for write function
* Test Deduplication Logic for write function.
*
* @param writeFn One of HoddieWriteClient non-prepped write APIs
* @throws Exception in case of failure
@@ -224,7 +224,7 @@ public class TestHoodieClientOnCopyOnWriteStorage extends TestHoodieClientBase {
}
/**
* Build a test Hoodie WriteClient with dummy index to configure isGlobal flag
* Build a test Hoodie WriteClient with dummy index to configure isGlobal flag.
*
* @param isGlobal Flag to control HoodieIndex.isGlobal
* @return Hoodie Write Client
@@ -237,7 +237,7 @@ public class TestHoodieClientOnCopyOnWriteStorage extends TestHoodieClientBase {
}
/**
* Test Upsert API
* Test Upsert API.
*/
@Test
public void testUpserts() throws Exception {
@@ -245,7 +245,7 @@ public class TestHoodieClientOnCopyOnWriteStorage extends TestHoodieClientBase {
}
/**
* Test UpsertPrepped API
* Test UpsertPrepped API.
*/
@Test
public void testUpsertsPrepped() throws Exception {
@@ -253,7 +253,7 @@ public class TestHoodieClientOnCopyOnWriteStorage extends TestHoodieClientBase {
}
/**
* Test one of HoodieWriteClient upsert(Prepped) APIs
* Test one of HoodieWriteClient upsert(Prepped) APIs.
*
* @param hoodieWriteConfig Write Config
* @param writeFn One of Hoodie Write Function API
@@ -291,7 +291,7 @@ public class TestHoodieClientOnCopyOnWriteStorage extends TestHoodieClientBase {
}
/**
* Tesst deletion of records
* Tesst deletion of records.
*/
@Test
public void testDeletes() throws Exception {
@@ -318,7 +318,7 @@ public class TestHoodieClientOnCopyOnWriteStorage extends TestHoodieClientBase {
-1, recordGenFunction, HoodieWriteClient::upsert, true, 200, 200, 1);
/**
* Write 2 (deletes+writes)
* Write 2 (deletes+writes).
*/
String prevCommitTime = newCommitTime;
newCommitTime = "004";
@@ -336,7 +336,7 @@ public class TestHoodieClientOnCopyOnWriteStorage extends TestHoodieClientBase {
}
/**
* Test scenario of new file-group getting added during upsert()
* Test scenario of new file-group getting added during upsert().
*/
@Test
public void testSmallInsertHandlingForUpserts() throws Exception {
@@ -448,7 +448,7 @@ public class TestHoodieClientOnCopyOnWriteStorage extends TestHoodieClientBase {
}
/**
* Test scenario of new file-group getting added during insert()
* Test scenario of new file-group getting added during insert().
*/
@Test
public void testSmallInsertHandlingForInserts() throws Exception {
@@ -530,7 +530,7 @@ public class TestHoodieClientOnCopyOnWriteStorage extends TestHoodieClientBase {
}
/**
* Test delete with delete api
* Test delete with delete api.
*/
@Test
public void testDeletesWithDeleteApi() throws Exception {
@@ -659,7 +659,7 @@ public class TestHoodieClientOnCopyOnWriteStorage extends TestHoodieClientBase {
}
/**
* Test delete with delete api
* Test delete with delete api.
*/
@Test
public void testDeletesWithoutInserts() throws Exception {
@@ -688,7 +688,7 @@ public class TestHoodieClientOnCopyOnWriteStorage extends TestHoodieClientBase {
}
/**
* Test to ensure commit metadata points to valid files
* Test to ensure commit metadata points to valid files.
*/
@Test
public void testCommitWritesRelativePaths() throws Exception {
@@ -735,7 +735,7 @@ public class TestHoodieClientOnCopyOnWriteStorage extends TestHoodieClientBase {
}
/**
* Test to ensure commit metadata points to valid files
* Test to ensure commit metadata points to valid files.
*/
@Test
public void testRollingStatsInMetadata() throws Exception {
@@ -810,7 +810,7 @@ public class TestHoodieClientOnCopyOnWriteStorage extends TestHoodieClientBase {
}
/**
* Tests behavior of committing only when consistency is verified
* Tests behavior of committing only when consistency is verified.
*/
@Test
public void testConsistencyCheckDuringFinalize() throws Exception {
@@ -879,14 +879,14 @@ public class TestHoodieClientOnCopyOnWriteStorage extends TestHoodieClientBase {
}
/**
* Build Hoodie Write Config for small data file sizes
* Build Hoodie Write Config for small data file sizes.
*/
private HoodieWriteConfig getSmallInsertWriteConfig(int insertSplitSize) {
return getSmallInsertWriteConfig(insertSplitSize, false);
}
/**
* Build Hoodie Write Config for small data file sizes
* Build Hoodie Write Config for small data file sizes.
*/
private HoodieWriteConfig getSmallInsertWriteConfig(int insertSplitSize, boolean useNullSchema) {
HoodieWriteConfig.Builder builder = getConfigBuilder(useNullSchema ? NULL_SCHEMA : TRIP_EXAMPLE_SCHEMA);

View File

@@ -40,7 +40,7 @@ import static org.junit.Assert.assertTrue;
public class TestHoodieReadClient extends TestHoodieClientBase {
/**
* Test ReadFilter API after writing new records using HoodieWriteClient.insert
* Test ReadFilter API after writing new records using HoodieWriteClient.insert.
*/
@Test
public void testReadFilterExistAfterInsert() throws Exception {
@@ -48,7 +48,7 @@ public class TestHoodieReadClient extends TestHoodieClientBase {
}
/**
* Test ReadFilter API after writing new records using HoodieWriteClient.insertPrepped
* Test ReadFilter API after writing new records using HoodieWriteClient.insertPrepped.
*/
@Test
public void testReadFilterExistAfterInsertPrepped() throws Exception {
@@ -56,7 +56,7 @@ public class TestHoodieReadClient extends TestHoodieClientBase {
}
/**
* Test ReadFilter API after writing new records using HoodieWriteClient.bulkInsert
* Test ReadFilter API after writing new records using HoodieWriteClient.bulkInsert.
*/
@Test
public void testReadFilterExistAfterBulkInsert() throws Exception {
@@ -64,7 +64,7 @@ public class TestHoodieReadClient extends TestHoodieClientBase {
}
/**
* Test ReadFilter API after writing new records using HoodieWriteClient.bulkInsertPrepped
* Test ReadFilter API after writing new records using HoodieWriteClient.bulkInsertPrepped.
*/
@Test
public void testReadFilterExistAfterBulkInsertPrepped() throws Exception {
@@ -76,7 +76,7 @@ public class TestHoodieReadClient extends TestHoodieClientBase {
/**
* Helper to write new records using one of HoodieWriteClient's write API and use ReadClient to test filterExists()
* API works correctly
* API works correctly.
*
* @param config Hoodie Write Config
* @param writeFn Write Function for writing records
@@ -111,7 +111,7 @@ public class TestHoodieReadClient extends TestHoodieClientBase {
}
/**
* Test tagLocation API after insert()
* Test tagLocation API after insert().
*/
@Test
public void testTagLocationAfterInsert() throws Exception {
@@ -119,7 +119,7 @@ public class TestHoodieReadClient extends TestHoodieClientBase {
}
/**
* Test tagLocation API after insertPrepped()
* Test tagLocation API after insertPrepped().
*/
@Test
public void testTagLocationAfterInsertPrepped() throws Exception {
@@ -128,7 +128,7 @@ public class TestHoodieReadClient extends TestHoodieClientBase {
}
/**
* Test tagLocation API after bulk-insert()
* Test tagLocation API after bulk-insert().
*/
@Test
public void testTagLocationAfterBulkInsert() throws Exception {
@@ -137,7 +137,7 @@ public class TestHoodieReadClient extends TestHoodieClientBase {
}
/**
* Test tagLocation API after bulkInsertPrepped()
* Test tagLocation API after bulkInsertPrepped().
*/
@Test
public void testTagLocationAfterBulkInsertPrepped() throws Exception {
@@ -148,7 +148,7 @@ public class TestHoodieReadClient extends TestHoodieClientBase {
}
/**
* Helper method to test tagLocation after using different HoodieWriteClient write APIS
* Helper method to test tagLocation after using different HoodieWriteClient write APIS.
*
* @param hoodieWriteConfig Write Config
* @param insertFn Hoodie Write Client first Insert API

View File

@@ -192,7 +192,7 @@ public class HoodieClientTestUtils {
}
/**
* Reads the paths under the a hoodie dataset out as a DataFrame
* Reads the paths under the a hoodie dataset out as a DataFrame.
*/
public static Dataset<Row> read(JavaSparkContext jsc, String basePath, SQLContext sqlContext, FileSystem fs,
String... paths) {

View File

@@ -41,7 +41,7 @@ import java.util.List;
import java.util.stream.Collectors;
/**
* Utility methods to aid in testing MergeOnRead (workaround for HoodieReadClient for MOR)
* Utility methods to aid in testing MergeOnRead (workaround for HoodieReadClient for MOR).
*/
public class HoodieMergeOnReadTestUtils {

View File

@@ -110,7 +110,7 @@ public class TestBoundedInMemoryQueue extends HoodieClientTestHarness {
}
/**
* Test to ensure that we are reading all records from queue iterator when we have multiple producers
* Test to ensure that we are reading all records from queue iterator when we have multiple producers.
*/
@SuppressWarnings("unchecked")
@Test(timeout = 60000)

View File

@@ -31,7 +31,7 @@ import static junit.framework.TestCase.assertEquals;
import static junit.framework.TestCase.assertTrue;
/**
* Tests {@link KeyRangeLookupTree}
* Tests {@link KeyRangeLookupTree}.
*/
public class TestKeyRangeLookupTree {
@@ -59,7 +59,7 @@ public class TestKeyRangeLookupTree {
}
/**
* Tests for many entries in the tree with same start value and different end values
* Tests for many entries in the tree with same start value and different end values.
*/
@Test
public void testFileGroupLookUpManyEntriesWithSameStartValue() {
@@ -78,7 +78,7 @@ public class TestKeyRangeLookupTree {
}
/**
* Tests for many duplicte entries in the tree
* Tests for many duplicte entries in the tree.
*/
@Test
public void testFileGroupLookUpManyDulicateEntries() {
@@ -158,7 +158,7 @@ public class TestKeyRangeLookupTree {
}
/**
* Updates the expected matches for a given {@link KeyRangeNode}
* Updates the expected matches for a given {@link KeyRangeNode}.
*
* @param toInsert the {@link KeyRangeNode} to be inserted
*/

View File

@@ -318,7 +318,7 @@ public class TestHoodieMergeHandle extends HoodieClientTestHarness {
}
/**
* Assert no failures in writing hoodie files
* Assert no failures in writing hoodie files.
*
* @param statuses List of Write Status
*/
@@ -340,7 +340,7 @@ public class TestHoodieMergeHandle extends HoodieClientTestHarness {
}
/**
* Overridden so that we can capture and inspect all success records
* Overridden so that we can capture and inspect all success records.
*/
public static class TestWriteStatus extends WriteStatus {

View File

@@ -1019,7 +1019,7 @@ public class TestMergeOnReadTable extends HoodieClientTestHarness {
}
/**
* Test to ensure rolling stats are correctly written to metadata file
* Test to ensure rolling stats are correctly written to metadata file.
*/
@Test
public void testRollingStatsInMetadata() throws Exception {
@@ -1118,7 +1118,7 @@ public class TestMergeOnReadTable extends HoodieClientTestHarness {
}
/**
* Test to ensure rolling stats are correctly written to the metadata file, identifies small files and corrects them
* Test to ensure rolling stats are correctly written to the metadata file, identifies small files and corrects them.
*/
@Test
public void testRollingStatsWithSmallFileHandling() throws Exception {