1
0

[HUDI-751] Fix some coding issues reported by FindBugs (#1470)

This commit is contained in:
Shaofeng Shi
2020-03-31 21:19:32 +08:00
committed by GitHub
parent 9ecf0ccfb2
commit 78b3194e82
31 changed files with 57 additions and 41 deletions

View File

@@ -47,6 +47,7 @@ import java.util.List;
public class HoodieCleanClient<T extends HoodieRecordPayload> extends AbstractHoodieClient {
private static final long serialVersionUID = 1L;
private static final Logger LOG = LogManager.getLogger(HoodieCleanClient.class);
private final transient HoodieMetrics metrics;

View File

@@ -57,6 +57,7 @@ import scala.Tuple2;
*/
public class HoodieReadClient<T extends HoodieRecordPayload> implements Serializable {
private static final long serialVersionUID = 1L;
private static final Logger LOG = LogManager.getLogger(HoodieReadClient.class);
/**

View File

@@ -93,6 +93,7 @@ import scala.Tuple2;
*/
public class HoodieWriteClient<T extends HoodieRecordPayload> extends AbstractHoodieWriteClient<T> {
private static final long serialVersionUID = 1L;
private static final Logger LOG = LogManager.getLogger(HoodieWriteClient.class);
private static final String LOOKUP_STR = "lookup";
private final boolean rollbackPending;

View File

@@ -35,6 +35,7 @@ import java.util.Random;
*/
public class WriteStatus implements Serializable {
private static final long serialVersionUID = 1L;
private static final long RANDOM_SEED = 9038412832L;
private final HashMap<HoodieKey, Throwable> errors = new HashMap<>();

View File

@@ -61,7 +61,7 @@ public class HoodieHBaseIndexConfig extends DefaultHoodieConfig {
* value based on global indexing throughput needs and most importantly, how much the HBase installation in use is
* able to tolerate without Region Servers going down.
*/
- public static String HBASE_MAX_QPS_PER_REGION_SERVER_PROP = "hoodie.index.hbase.max.qps.per.region.server";
+ public static final String HBASE_MAX_QPS_PER_REGION_SERVER_PROP = "hoodie.index.hbase.max.qps.per.region.server";
/**
* Default batch size, used only for Get, but computed for Put.
*/

View File

@@ -39,7 +39,7 @@ import java.util.stream.Collectors;
*/
public class BoundedPartitionAwareCompactionStrategy extends DayBasedCompactionStrategy {
- SimpleDateFormat dateFormat = new SimpleDateFormat(datePartitionFormat);
+ SimpleDateFormat dateFormat = new SimpleDateFormat(DATE_PARTITION_FORMAT);
@Override
public List<HoodieCompactionOperation> orderAndFilter(HoodieWriteConfig writeConfig,

View File

@@ -39,14 +39,14 @@ import java.util.stream.Collectors;
public class DayBasedCompactionStrategy extends CompactionStrategy {
// For now, use SimpleDateFormat as default partition format
- protected static String datePartitionFormat = "yyyy/MM/dd";
+ protected static final String DATE_PARTITION_FORMAT = "yyyy/MM/dd";
// Sorts compaction in LastInFirstCompacted order
protected static Comparator<String> comparator = (String leftPartition, String rightPartition) -> {
try {
leftPartition = getPartitionPathWithoutPartitionKeys(leftPartition);
rightPartition = getPartitionPathWithoutPartitionKeys(rightPartition);
- Date left = new SimpleDateFormat(datePartitionFormat, Locale.ENGLISH).parse(leftPartition);
- Date right = new SimpleDateFormat(datePartitionFormat, Locale.ENGLISH).parse(rightPartition);
+ Date left = new SimpleDateFormat(DATE_PARTITION_FORMAT, Locale.ENGLISH).parse(leftPartition);
+ Date right = new SimpleDateFormat(DATE_PARTITION_FORMAT, Locale.ENGLISH).parse(rightPartition);
return left.after(right) ? -1 : right.after(left) ? 1 : 0;
} catch (ParseException e) {
throw new HoodieException("Invalid Partition Date Format", e);