Fixing a javadoc lint issue
This commit is contained in:
@@ -70,7 +70,7 @@ public class HoodieReadClient implements Serializable {
|
||||
|
||||
private transient final FileSystem fs;
|
||||
/**
|
||||
* TODO: We need to persist the index type into hoodie.properties & be able to access the index
|
||||
* TODO: We need to persist the index type into hoodie.properties and be able to access the index
|
||||
* just with a simple basepath pointing to the dataset. Until then, just always assume a
|
||||
* BloomIndex
|
||||
*/
|
||||
|
||||
@@ -323,7 +323,7 @@ public class HoodieBloomIndex<T extends HoodieRecordPayload> extends HoodieIndex
|
||||
/**
|
||||
* Find out <RowKey, filename> pair. All workload grouped by file-level.
|
||||
*
|
||||
* // Join PairRDD(PartitionPath, RecordKey) and PairRDD(PartitionPath, File) & then repartition such that
|
||||
* // Join PairRDD(PartitionPath, RecordKey) and PairRDD(PartitionPath, File) and then repartition such that
|
||||
// each RDD partition is a file, then for each file, we do (1) load bloom filter, (2) load rowKeys, (3) Tag rowKey
|
||||
// Make sure the parallelism is at least the group-by parallelism for tagging location
|
||||
*/
|
||||
|
||||
@@ -40,7 +40,7 @@ import java.util.Random;
|
||||
import java.util.UUID;
|
||||
|
||||
/**
|
||||
* Class to be used in tests to keep generating test inserts & updates against a corpus.
|
||||
* Class to be used in tests to keep generating test inserts and updates against a corpus.
|
||||
*
|
||||
* Test data uses a toy Uber trips data model.
|
||||
*/
|
||||
|
||||
@@ -33,11 +33,11 @@ import java.util.HashSet;
|
||||
|
||||
/**
|
||||
* Given a path is a part of
|
||||
* - Hoodie dataset => accepts ONLY the latest version of each path
|
||||
* - Non-Hoodie dataset => then always accept
|
||||
* - Hoodie dataset = accepts ONLY the latest version of each path
|
||||
* - Non-Hoodie dataset = then always accept
|
||||
*
|
||||
* We can set this filter, on a query engine's Hadoop Config & if it respects path filters, then
|
||||
* you should be able to query both hoodie & non-hoodie datasets as you would normally do.
|
||||
* We can set this filter, on a query engine's Hadoop Config and if it respects path filters, then
|
||||
* you should be able to query both hoodie and non-hoodie datasets as you would normally do.
|
||||
*
|
||||
* hadoopConf.setClass("mapreduce.input.pathFilter.class",
|
||||
* com.uber.hoodie.hadoop.HoodieROTablePathFilter.class,
|
||||
@@ -50,7 +50,7 @@ public class HoodieROTablePathFilter implements PathFilter, Serializable {
|
||||
|
||||
/**
|
||||
* It's quite common to have all files from a given partition path be passed into accept(),
|
||||
* cache the check for hoodie metadata for known partition paths & the latest versions of files
|
||||
* cache the check for hoodie metadata for known partition paths and the latest versions of files
|
||||
*/
|
||||
private HashMap<String, HashSet<Path>> hoodiePathCache;
|
||||
|
||||
|
||||
Reference in New Issue
Block a user