diff --git a/hudi-client/src/main/java/org/apache/hudi/HoodieCleanClient.java b/hudi-client/src/main/java/org/apache/hudi/HoodieCleanClient.java
index 9411782bc..fe0cc6004 100644
--- a/hudi-client/src/main/java/org/apache/hudi/HoodieCleanClient.java
+++ b/hudi-client/src/main/java/org/apache/hudi/HoodieCleanClient.java
@@ -37,7 +37,6 @@ import org.apache.hudi.metrics.HoodieMetrics;
 import org.apache.hudi.table.HoodieTable;
 
 import com.codahale.metrics.Timer;
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.log4j.LogManager;
 import org.apache.log4j.Logger;
@@ -108,7 +107,6 @@ public class HoodieCleanClient extends AbstractHo
    * @param startCleanTime Cleaner Instant Time
    * @return Cleaner Plan if generated
    */
-  @VisibleForTesting
   protected Option scheduleClean(String startCleanTime) {
     // Create a Hoodie table which encapsulated the commits and files visible
     HoodieTable table = HoodieTable.getHoodieTable(createMetaClient(true), config, jsc);
@@ -138,7 +136,6 @@ public class HoodieCleanClient extends AbstractHo
    * @param table Hoodie Table
    * @param cleanInstant Cleaner Instant
    */
-  @VisibleForTesting
   protected HoodieCleanMetadata runClean(HoodieTable table, HoodieInstant cleanInstant) {
     try {
       HoodieCleanerPlan cleanerPlan = CleanerUtils.getCleanerPlan(table.getMetaClient(), cleanInstant);
diff --git a/hudi-client/src/main/java/org/apache/hudi/HoodieWriteClient.java b/hudi-client/src/main/java/org/apache/hudi/HoodieWriteClient.java
index 23055da6b..931ca07e1 100644
--- a/hudi-client/src/main/java/org/apache/hudi/HoodieWriteClient.java
+++ b/hudi-client/src/main/java/org/apache/hudi/HoodieWriteClient.java
@@ -61,7 +60,6 @@ import org.apache.hudi.table.WorkloadProfile;
 import org.apache.hudi.table.WorkloadStat;
 
 import com.codahale.metrics.Timer;
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableMap;
 import org.apache.log4j.LogManager;
@@ -121,7 +120,6 @@ public class HoodieWriteClient extends AbstractHo
     this(jsc, clientConfig, rollbackPending, HoodieIndex.createIndex(clientConfig, jsc));
   }
 
-  @VisibleForTesting
   HoodieWriteClient(JavaSparkContext jsc, HoodieWriteConfig clientConfig, boolean rollbackPending, HoodieIndex index) {
     this(jsc, clientConfig, rollbackPending, index, Option.empty());
   }
@@ -1113,7 +1111,6 @@ public class HoodieWriteClient extends AbstractHo
    * @param inflightInstant Inflight Compaction Instant
    * @param table Hoodie Table
    */
-  @VisibleForTesting
   void rollbackInflightCompaction(HoodieInstant inflightInstant, HoodieTable table) throws IOException {
     table.rollback(jsc, inflightInstant, false);
     // Revert instant state file
diff --git a/hudi-client/src/main/java/org/apache/hudi/index/bloom/BucketizedBloomCheckPartitioner.java b/hudi-client/src/main/java/org/apache/hudi/index/bloom/BucketizedBloomCheckPartitioner.java
index 80458460d..17b7506fe 100644
--- a/hudi-client/src/main/java/org/apache/hudi/index/bloom/BucketizedBloomCheckPartitioner.java
+++ b/hudi-client/src/main/java/org/apache/hudi/index/bloom/BucketizedBloomCheckPartitioner.java
@@ -20,7 +20,6 @@ package org.apache.hudi.index.bloom;
 
 import org.apache.hudi.common.util.collection.Pair;
 
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.hash.Hashing;
 import org.apache.log4j.LogManager;
 import org.apache.log4j.Logger;
@@ -152,7 +151,6 @@ public class BucketizedBloomCheckPartitioner extends Partitioner {
     return candidatePartitions.get(idx);
   }
 
-  @VisibleForTesting
   Map> getFileGroupToPartitions() {
     return fileGroupToPartitions;
   }
diff --git a/hudi-client/src/main/java/org/apache/hudi/index/bloom/HoodieBloomIndex.java b/hudi-client/src/main/java/org/apache/hudi/index/bloom/HoodieBloomIndex.java
index a6d46d8e4..22b4c3f53 100644
--- a/hudi-client/src/main/java/org/apache/hudi/index/bloom/HoodieBloomIndex.java
+++ b/hudi-client/src/main/java/org/apache/hudi/index/bloom/HoodieBloomIndex.java
@@ -32,7 +32,6 @@ import org.apache.hudi.index.HoodieIndex;
 import org.apache.hudi.io.HoodieRangeInfoHandle;
 import org.apache.hudi.table.HoodieTable;
 
-import com.google.common.annotations.VisibleForTesting;
 import org.apache.log4j.LogManager;
 import org.apache.log4j.Logger;
 import org.apache.spark.Partitioner;
@@ -188,7 +187,6 @@ public class HoodieBloomIndex extends HoodieIndex
   /**
    * Load all involved files as pair RDD.
    */
-  @VisibleForTesting
   List> loadInvolvedFiles(List partitions, final JavaSparkContext jsc,
       final HoodieTable hoodieTable) {
@@ -262,7 +260,6 @@ public class HoodieBloomIndex extends HoodieIndex
    * Sub-partition to ensure the records can be looked up against files & also prune file<=>record comparisons based on
    * recordKey ranges in the index info.
    */
-  @VisibleForTesting
   JavaRDD> explodeRecordRDDWithFileComparisons(
       final Map> partitionToFileIndexInfo,
       JavaPairRDD partitionRecordKeyPairRDD) {
@@ -289,7 +286,6 @@ public class HoodieBloomIndex extends HoodieIndex
    *
    * Make sure the parallelism is atleast the groupby parallelism for tagging location
    */
-  @VisibleForTesting
   JavaPairRDD findMatchingFilesForRecordKeys(
       final Map> partitionToFileIndexInfo, JavaPairRDD partitionRecordKeyPairRDD, int shuffleParallelism, HoodieTable hoodieTable,
diff --git a/hudi-client/src/main/java/org/apache/hudi/index/bloom/HoodieGlobalBloomIndex.java b/hudi-client/src/main/java/org/apache/hudi/index/bloom/HoodieGlobalBloomIndex.java
index ba8976b9c..3c6cc7298 100644
--- a/hudi-client/src/main/java/org/apache/hudi/index/bloom/HoodieGlobalBloomIndex.java
+++ b/hudi-client/src/main/java/org/apache/hudi/index/bloom/HoodieGlobalBloomIndex.java
@@ -30,7 +30,6 @@ import org.apache.hudi.config.HoodieWriteConfig;
 import org.apache.hudi.exception.HoodieIOException;
 import org.apache.hudi.table.HoodieTable;
 
-import com.google.common.annotations.VisibleForTesting;
 import org.apache.spark.api.java.JavaPairRDD;
 import org.apache.spark.api.java.JavaRDD;
 import org.apache.spark.api.java.JavaSparkContext;
@@ -59,7 +58,6 @@ public class HoodieGlobalBloomIndex extends Hoodi
    * Load all involved files as pair RDD from all partitions in the table.
    */
   @Override
-  @VisibleForTesting
   List> loadInvolvedFiles(List partitions, final JavaSparkContext jsc,
       final HoodieTable hoodieTable) {
     HoodieTableMetaClient metaClient = hoodieTable.getMetaClient();
@@ -83,7 +81,6 @@ public class HoodieGlobalBloomIndex extends Hoodi
    */
   @Override
-  @VisibleForTesting
   JavaRDD> explodeRecordRDDWithFileComparisons(
       final Map> partitionToFileIndexInfo,
       JavaPairRDD partitionRecordKeyPairRDD) {
diff --git a/hudi-client/src/main/java/org/apache/hudi/index/hbase/HBaseIndex.java b/hudi-client/src/main/java/org/apache/hudi/index/hbase/HBaseIndex.java
index 12d352db1..6d750cfcc 100644
--- a/hudi-client/src/main/java/org/apache/hudi/index/hbase/HBaseIndex.java
+++ b/hudi-client/src/main/java/org/apache/hudi/index/hbase/HBaseIndex.java
@@ -36,7 +36,6 @@ import org.apache.hudi.exception.HoodieIndexException;
 import org.apache.hudi.index.HoodieIndex;
 import org.apache.hudi.table.HoodieTable;
 
-import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HRegionLocation;
@@ -114,7 +113,6 @@ public class HBaseIndex extends HoodieIndex {
     this.hBaseIndexQPSResourceAllocator = createQPSResourceAllocator(this.config);
   }
 
-  @VisibleForTesting
   public HBaseIndexQPSResourceAllocator createQPSResourceAllocator(HoodieWriteConfig config) {
     try {
       LOG.info("createQPSResourceAllocator :" + config.getHBaseQPSResourceAllocatorClass());
@@ -387,7 +385,6 @@ public class HBaseIndex extends HoodieIndex {
     }
   }
 
-  @VisibleForTesting
   public Tuple2 getHBasePutAccessParallelism(final JavaRDD writeStatusRDD) {
     final JavaPairRDD insertOnlyWriteStatusRDD = writeStatusRDD
         .filter(w -> w.getStat().getNumInserts() > 0).mapToPair(w -> new Tuple2<>(w.getStat().getNumInserts(), 1));
@@ -497,7 +494,6 @@ public class HBaseIndex extends HoodieIndex {
     return false;
   }
 
-  @VisibleForTesting
   public void setHbaseConnection(Connection hbaseConnection) {
     HBaseIndex.hbaseConnection = hbaseConnection;
   }
diff --git a/hudi-client/src/main/java/org/apache/hudi/io/compact/strategy/BoundedPartitionAwareCompactionStrategy.java b/hudi-client/src/main/java/org/apache/hudi/io/compact/strategy/BoundedPartitionAwareCompactionStrategy.java
index 5b64cedb1..6ba82132d 100644
--- a/hudi-client/src/main/java/org/apache/hudi/io/compact/strategy/BoundedPartitionAwareCompactionStrategy.java
+++ b/hudi-client/src/main/java/org/apache/hudi/io/compact/strategy/BoundedPartitionAwareCompactionStrategy.java
@@ -22,8 +22,6 @@ import org.apache.hudi.avro.model.HoodieCompactionOperation;
 import org.apache.hudi.avro.model.HoodieCompactionPlan;
 import org.apache.hudi.config.HoodieWriteConfig;
 
-import com.google.common.annotations.VisibleForTesting;
-
 import java.text.SimpleDateFormat;
 import java.util.Calendar;
 import java.util.Comparator;
@@ -68,7 +66,6 @@ public class BoundedPartitionAwareCompactionStrategy extends DayBasedCompactionS
         .filter(e -> comparator.compare(earliestPartitionPathToCompact, e) >= 0).collect(Collectors.toList());
   }
 
-  @VisibleForTesting
   public static Date getDateAtOffsetFromToday(int offset) {
     Calendar calendar = Calendar.getInstance();
     calendar.add(Calendar.DATE, offset);
diff --git a/hudi-client/src/main/java/org/apache/hudi/io/compact/strategy/DayBasedCompactionStrategy.java b/hudi-client/src/main/java/org/apache/hudi/io/compact/strategy/DayBasedCompactionStrategy.java
index a491818d1..9d537763b 100644
--- a/hudi-client/src/main/java/org/apache/hudi/io/compact/strategy/DayBasedCompactionStrategy.java
+++ b/hudi-client/src/main/java/org/apache/hudi/io/compact/strategy/DayBasedCompactionStrategy.java
@@ -23,8 +23,6 @@ import org.apache.hudi.avro.model.HoodieCompactionPlan;
 import org.apache.hudi.config.HoodieWriteConfig;
 import org.apache.hudi.exception.HoodieException;
 
-import com.google.common.annotations.VisibleForTesting;
-
 import java.text.ParseException;
 import java.text.SimpleDateFormat;
 import java.util.Comparator;
@@ -55,7 +53,6 @@ public class DayBasedCompactionStrategy extends CompactionStrategy {
     }
   };
 
-  @VisibleForTesting
   public Comparator getComparator() {
     return comparator;
   }
diff --git a/hudi-client/src/main/java/org/apache/hudi/metrics/HoodieMetrics.java b/hudi-client/src/main/java/org/apache/hudi/metrics/HoodieMetrics.java
index b6fcd09e8..4b2b48b01 100644
--- a/hudi-client/src/main/java/org/apache/hudi/metrics/HoodieMetrics.java
+++ b/hudi-client/src/main/java/org/apache/hudi/metrics/HoodieMetrics.java
@@ -23,7 +23,6 @@ import org.apache.hudi.common.table.HoodieTimeline;
 import org.apache.hudi.config.HoodieWriteConfig;
 
 import com.codahale.metrics.Timer;
-import com.google.common.annotations.VisibleForTesting;
 import org.apache.log4j.LogManager;
 import org.apache.log4j.Logger;
@@ -187,7 +186,6 @@ public class HoodieMetrics {
     }
   }
 
-  @VisibleForTesting
   String getMetricsName(String action, String metric) {
     return config == null ? null : String.format("%s.%s.%s", tableName, action, metric);
   }
diff --git a/hudi-client/src/main/java/org/apache/hudi/table/HoodieMergeOnReadTable.java b/hudi-client/src/main/java/org/apache/hudi/table/HoodieMergeOnReadTable.java
index 754b0ac8a..2e43013c7 100644
--- a/hudi-client/src/main/java/org/apache/hudi/table/HoodieMergeOnReadTable.java
+++ b/hudi-client/src/main/java/org/apache/hudi/table/HoodieMergeOnReadTable.java
@@ -42,7 +42,6 @@ import org.apache.hudi.func.MergeOnReadLazyInsertIterable;
 import org.apache.hudi.io.HoodieAppendHandle;
 import org.apache.hudi.io.compact.HoodieMergeOnReadTableCompactor;
 
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.log4j.LogManager;
 import org.apache.log4j.Logger;
@@ -409,7 +408,6 @@ public class HoodieMergeOnReadTable extends Hoodi
   }
 
   // TODO (NA) : Make this static part of utility
-  @VisibleForTesting
   public long convertLogFilesSizeToExpectedParquetSize(List hoodieLogFiles) {
     long totalSizeOfLogFiles = hoodieLogFiles.stream().map(HoodieLogFile::getFileSize)
         .filter(size -> size > 0).reduce(Long::sum).orElse(0L);
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/bloom/filter/InternalFilter.java b/hudi-common/src/main/java/org/apache/hudi/common/bloom/filter/InternalFilter.java
index 228143b64..c0a5028c9 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/bloom/filter/InternalFilter.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/bloom/filter/InternalFilter.java
@@ -145,8 +145,8 @@ abstract class InternalFilter implements Writable {
     if (keys == null) {
       throw new IllegalArgumentException("Key[] may not be null");
     }
-    for (int i = 0; i < keys.length; i++) {
-      add(keys[i]);
+    for (Key key : keys) {
+      add(key);
     }
   } //end add()
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/util/BufferedRandomAccessFile.java b/hudi-common/src/main/java/org/apache/hudi/common/util/BufferedRandomAccessFile.java
index 25fe7b080..15cf4c39f 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/util/BufferedRandomAccessFile.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/util/BufferedRandomAccessFile.java
@@ -169,7 +169,7 @@ public final class BufferedRandomAccessFile extends RandomAccessFile {
   private int fillBuffer() throws IOException {
     int cnt = 0;
     int bytesToRead = this.capacity;
-    /** blocking read, until buffer is filled or EOF reached */
+    // blocking read, until buffer is filled or EOF reached
     while (bytesToRead > 0) {
       int n = super.read(this.dataBuffer.array(), cnt, bytesToRead);
       if (n < 0) {
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/util/FSUtils.java b/hudi-common/src/main/java/org/apache/hudi/common/util/FSUtils.java
index bbfe7b126..3cc7bf23e 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/util/FSUtils.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/util/FSUtils.java
@@ -28,7 +28,6 @@ import org.apache.hudi.exception.HoodieException;
 import org.apache.hudi.exception.HoodieIOException;
 import org.apache.hudi.exception.InvalidHoodiePathException;
 
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -216,7 +215,6 @@ public class FSUtils {
    * @param excludeMetaFolder Exclude .hoodie folder
    * @throws IOException
    */
-  @VisibleForTesting
   static void processFiles(FileSystem fs, String basePathStr, Function consumer, boolean excludeMetaFolder)
       throws IOException {
     PathFilter pathFilter = excludeMetaFolder ? getExcludeMetaPathFilter() : ALLOW_ALL_FILTER;
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/util/ObjectSizeCalculator.java b/hudi-common/src/main/java/org/apache/hudi/common/util/ObjectSizeCalculator.java
index d39600044..1898a4e78 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/util/ObjectSizeCalculator.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/util/ObjectSizeCalculator.java
@@ -16,7 +16,6 @@ package org.apache.hudi.common.util;
 
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.cache.CacheBuilder;
 import com.google.common.cache.CacheLoader;
@@ -252,7 +251,6 @@ public class ObjectSizeCalculator {
     size += objectSize;
   }
 
-  @VisibleForTesting
   static long roundTo(long x, int multiple) {
     return ((x + multiple - 1) / multiple) * multiple;
   }
@@ -325,7 +323,6 @@ public class ObjectSizeCalculator {
     throw new AssertionError("Encountered unexpected primitive type " + type.getName());
   }
 
-  @VisibleForTesting
   static MemoryLayoutSpecification getEffectiveMemoryLayoutSpecification() {
     final String vmName = System.getProperty("java.vm.name");
     if (vmName == null || !(vmName.startsWith("Java HotSpot(TM) ") || vmName.startsWith("OpenJDK")
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/util/RocksDBDAO.java b/hudi-common/src/main/java/org/apache/hudi/common/util/RocksDBDAO.java
index ec46af47a..8884e9c43 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/util/RocksDBDAO.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/util/RocksDBDAO.java
@@ -22,7 +22,6 @@ import org.apache.hudi.common.util.collection.Pair;
 import org.apache.hudi.exception.HoodieException;
 import org.apache.hudi.exception.HoodieIOException;
 
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.log4j.LogManager;
 import org.apache.log4j.Logger;
@@ -450,7 +449,6 @@ public class RocksDBDAO {
     }
   }
 
-  @VisibleForTesting
   String getRocksDBBasePath() {
     return rocksDBBasePath;
   }
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/util/queue/BoundedInMemoryQueue.java b/hudi-common/src/main/java/org/apache/hudi/common/util/queue/BoundedInMemoryQueue.java
index 7ea6d5e1b..356f00057 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/util/queue/BoundedInMemoryQueue.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/util/queue/BoundedInMemoryQueue.java
@@ -23,7 +23,6 @@ import org.apache.hudi.common.util.Option;
 import org.apache.hudi.common.util.SizeEstimator;
 import org.apache.hudi.exception.HoodieException;
 
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.log4j.LogManager;
 import org.apache.log4j.Logger;
@@ -62,7 +61,6 @@ public class BoundedInMemoryQueue implements Iterable {
   // It indicates number of records to cache. We will be using sampled record's average size to
   // determine how many
   // records we should cache and will change (increase/decrease) permits accordingly.
-  @VisibleForTesting
   public final Semaphore rateLimiter = new Semaphore(1);
   // used for sampling records with "RECORD_SAMPLING_RATE" frequency.
   public final AtomicLong samplingRecordCounter = new AtomicLong(-1);
@@ -86,10 +84,8 @@ public class BoundedInMemoryQueue implements Iterable {
   private final QueueIterator iterator;
   // indicates rate limit (number of records to cache). it is updated whenever there is a change
   // in avg record size.
-  @VisibleForTesting
   public int currentRateLimit = 1;
   // indicates avg record size in bytes. It is updated whenever a new record is sampled.
-  @VisibleForTesting
   public long avgRecordSizeInBytes = 0;
   // indicates number of samples collected so far.
   private long numSamples = 0;
@@ -119,7 +115,6 @@ public class BoundedInMemoryQueue implements Iterable {
     this.iterator = new QueueIterator();
   }
 
-  @VisibleForTesting
   public int size() {
     return this.queue.size();
   }
diff --git a/hudi-spark/src/main/scala/org/apache/hudi/AvroConversionHelper.scala b/hudi-spark/src/main/scala/org/apache/hudi/AvroConversionHelper.scala
index e1a7ae18f..b61bef3d8 100644
--- a/hudi-spark/src/main/scala/org/apache/hudi/AvroConversionHelper.scala
+++ b/hudi-spark/src/main/scala/org/apache/hudi/AvroConversionHelper.scala
@@ -127,7 +127,7 @@ object AvroConversionHelper {
             new Timestamp(item.asInstanceOf[Long])
           case other => throw new IncompatibleSchemaException(
-            s"Cannot convert Avro logical type ${other} to Catalyst Timestamp type.")
+            s"Cannot convert Avro logical type $other to Catalyst Timestamp type.")
         }
       }
     case (struct: StructType, RECORD) =>
@@ -215,7 +215,7 @@ object AvroConversionHelper {
           createConverter(Schema.createUnion(remainingUnionTypes.asJava), sqlType, path)
         }
       } else avroSchema.getTypes.asScala.map(_.getType) match {
-        case Seq(t1) => createConverter(avroSchema.getTypes.get(0), sqlType, path)
+        case Seq(_) => createConverter(avroSchema.getTypes.get(0), sqlType, path)
         case Seq(a, b) if Set(a, b) == Set(INT, LONG) && sqlType == LongType =>
           (item: AnyRef) => {
             item match {
@@ -286,7 +286,7 @@ object AvroConversionHelper {
       case ShortType => (item: Any) =>
         if (item == null) null else item.asInstanceOf[Short].intValue
       case dec: DecimalType => (item: Any) =>
-        Option(item).map { i =>
+        Option(item).map { _ =>
           val bigDecimalValue = item.asInstanceOf[java.math.BigDecimal]
           val decimalConversions = new DecimalConversion()
           decimalConversions.toFixed(bigDecimalValue, avroSchema.getField(structName).schema().getTypes.get(0),
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HDFSParquetImporter.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HDFSParquetImporter.java
index 218df228e..c326814db 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HDFSParquetImporter.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HDFSParquetImporter.java
@@ -35,7 +35,6 @@ import com.beust.jcommander.IValueValidator;
 import com.beust.jcommander.JCommander;
 import com.beust.jcommander.Parameter;
 import com.beust.jcommander.ParameterException;
-import com.google.common.annotations.VisibleForTesting;
 import org.apache.avro.Schema;
 import org.apache.avro.generic.GenericRecord;
 import org.apache.hadoop.fs.FileSystem;
@@ -120,7 +119,6 @@ public class HDFSParquetImporter implements Serializable {
     return ret;
   }
 
-  @VisibleForTesting
   protected int dataImport(JavaSparkContext jsc) throws IOException {
     try {
       if (fs.exists(new Path(cfg.targetPath))) {
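
Note on the `@VisibleForTesting` removals above: the annotation comes from Guava and is documentation-only, so dropping it changes no visibility. Members such as `ObjectSizeCalculator.roundTo(..)` remain package-private, and tests placed in the same package can still call them directly. A minimal sketch of that pattern, assuming JUnit 4 on the test classpath; the test class name and values are illustrative and not part of this change:

```java
package org.apache.hudi.common.util;

import static org.junit.Assert.assertEquals;

import org.junit.Test;

// Illustrative only: roundTo(..) is still package-private after the annotation is
// removed, so a test living in the same package can exercise it directly.
public class TestObjectSizeCalculatorRoundTo {

  @Test
  public void roundToRoundsUpToTheNextMultiple() {
    assertEquals(16L, ObjectSizeCalculator.roundTo(10L, 8)); // rounds up to the next multiple of 8
    assertEquals(16L, ObjectSizeCalculator.roundTo(16L, 8)); // an exact multiple is unchanged
    assertEquals(0L, ObjectSizeCalculator.roundTo(0L, 8));   // zero stays zero
  }
}
```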
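The `BufferedRandomAccessFile.fillBuffer()` hunk only converts a misused Javadoc block (`/** ... */`) into a line comment; the loop it annotates is the usual blocking-read idiom of calling `read()` repeatedly until the buffer is full or a negative return signals EOF. A self-contained sketch of that idiom against a plain `InputStream`, with illustrative names rather than Hudi's code:

```java
import java.io.IOException;
import java.io.InputStream;

final class ReadFullyExample {

  // Keep reading until the buffer is full or the stream reports EOF with -1,
  // mirroring the loop that the reworded comment in fillBuffer() describes.
  static int readFully(InputStream in, byte[] buf) throws IOException {
    int count = 0;
    int bytesToRead = buf.length;
    while (bytesToRead > 0) {
      int n = in.read(buf, count, bytesToRead);
      if (n < 0) {
        break; // EOF before the buffer was filled
      }
      count += n;
      bytesToRead -= n;
    }
    return count; // number of bytes actually copied into buf
  }
}
```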