[checkstyle] Add ConstantName java checkstyle rule (#1066)
* add SimplifyBooleanExpression java checkstyle rule
* collapse empty tags in scalastyle file
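Checkstyle's ConstantName module (a TreeWalker check) requires static final fields to match the UPPER_SNAKE_CASE pattern ^[A-Z][A-Z0-9]*(_[A-Z0-9]+)*$, which is what drives the mechanical renames in the diff below (logger -> LOG, metrics -> METRICS, currentVersion -> CURRENT_VERSION). SimplifyBooleanExpression flags boolean logic that can be reduced. A minimal sketch of what the two rules enforce; the class and members here are hypothetical illustrations, not code from this commit:

// Hypothetical example illustrating the two checkstyle rules added here.
public class CheckstyleRuleExamples {

  // ConstantName: static final fields must be UPPER_SNAKE_CASE.
  // private static final int maxRetries = 3;  // would be flagged by ConstantName
  private static final int MAX_RETRIES = 3;    // compliant

  // SimplifyBooleanExpression: flags redundant boolean logic.
  static boolean isReady(boolean ready) {
    // return ready == true;  // would be flagged: comparison to a boolean literal
    return ready;             // compliant
  }

  public static void main(String[] args) {
    System.out.println("retries=" + MAX_RETRIES + ", ready=" + isReady(true));
  }
}

With both modules enabled in the project's checkstyle configuration, the build fails on such violations, so the renames below bring the existing test services and tests into compliance.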
@@ -38,7 +38,7 @@ import java.io.IOException;
  */
 public class HdfsTestService {
 
-  private static final Logger logger = LogManager.getLogger(HdfsTestService.class);
+  private static final Logger LOG = LogManager.getLogger(HdfsTestService.class);
 
   /**
    * Configuration settings.
@@ -72,7 +72,7 @@ public class HdfsTestService {
     // If clean, then remove the work dir so we can start fresh.
     String localDFSLocation = getDFSLocation(workDir);
     if (format) {
-      logger.info("Cleaning HDFS cluster data at: " + localDFSLocation + " and starting fresh.");
+      LOG.info("Cleaning HDFS cluster data at: " + localDFSLocation + " and starting fresh.");
       File file = new File(localDFSLocation);
       FileIOUtils.deleteDirectory(file);
     }
@@ -83,12 +83,12 @@ public class HdfsTestService {
         datanodePort, datanodeIpcPort, datanodeHttpPort);
     miniDfsCluster = new MiniDFSCluster.Builder(hadoopConf).numDataNodes(1).format(format).checkDataNodeAddrConfig(true)
         .checkDataNodeHostConfig(true).build();
-    logger.info("HDFS Minicluster service started.");
+    LOG.info("HDFS Minicluster service started.");
     return miniDfsCluster;
   }
 
   public void stop() throws IOException {
-    logger.info("HDFS Minicluster service being shut down.");
+    LOG.info("HDFS Minicluster service being shut down.");
     miniDfsCluster.shutdown();
     miniDfsCluster = null;
     hadoopConf = null;
@@ -132,7 +132,7 @@ public class HdfsTestService {
   private static Configuration configureDFSCluster(Configuration config, String localDFSLocation, String bindIP,
       int namenodeRpcPort, int namenodeHttpPort, int datanodePort, int datanodeIpcPort, int datanodeHttpPort) {
 
-    logger.info("HDFS force binding to ip: " + bindIP);
+    LOG.info("HDFS force binding to ip: " + bindIP);
     config.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "hdfs://" + bindIP + ":" + namenodeRpcPort);
     config.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, bindIP + ":" + datanodePort);
     config.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, bindIP + ":" + datanodeIpcPort);
@@ -53,7 +53,7 @@ import java.net.Socket;
  */
 public class ZookeeperTestService {
 
-  private static final Logger logger = LogManager.getLogger(ZookeeperTestService.class);
+  private static final Logger LOG = LogManager.getLogger(ZookeeperTestService.class);
 
   private static final int TICK_TIME = 2000;
   private static final int CONNECTION_TIMEOUT = 30000;
@@ -103,7 +103,7 @@ public class ZookeeperTestService {
 
     // NOTE: Changed from the original, where InetSocketAddress was
     // originally created to bind to the wildcard IP, we now configure it.
-    logger.info("Zookeeper force binding to: " + this.bindIP);
+    LOG.info("Zookeeper force binding to: " + this.bindIP);
     standaloneServerFactory.configure(new InetSocketAddress(bindIP, clientPort), 1000);
 
     // Start up this ZK server
@@ -120,7 +120,7 @@ public class ZookeeperTestService {
     }
 
     started = true;
-    logger.info("Zookeeper Minicluster service started on client port: " + clientPort);
+    LOG.info("Zookeeper Minicluster service started on client port: " + clientPort);
     return zooKeeperServer;
   }
 
@@ -139,7 +139,7 @@ public class ZookeeperTestService {
     standaloneServerFactory = null;
     zooKeeperServer = null;
 
-    logger.info("Zookeeper Minicluster service shut down.");
+    LOG.info("Zookeeper Minicluster service shut down.");
   }
 
   private void recreateDir(File dir, boolean clean) throws IOException {
@@ -221,7 +221,7 @@ public class ZookeeperTestService {
         }
       } catch (IOException e) {
         // ignore as this is expected
-        logger.info("server " + hostname + ":" + port + " not up " + e);
+        LOG.info("server " + hostname + ":" + port + " not up " + e);
       }
 
       if (System.currentTimeMillis() > start + timeout) {
@@ -496,7 +496,7 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness {
     // Write out a length that does not confirm with the content
     outputStream.writeLong(1000);
     outputStream.writeInt(HoodieLogBlockType.AVRO_DATA_BLOCK.ordinal());
-    outputStream.writeInt(HoodieLogFormat.currentVersion);
+    outputStream.writeInt(HoodieLogFormat.CURRENT_VERSION);
     // Write out a length that does not confirm with the content
     outputStream.writeLong(500);
     // Write out some bytes
@@ -524,7 +524,7 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness {
     // Write out a length that does not confirm with the content
     outputStream.writeLong(1000);
     outputStream.writeInt(HoodieLogBlockType.AVRO_DATA_BLOCK.ordinal());
-    outputStream.writeInt(HoodieLogFormat.currentVersion);
+    outputStream.writeInt(HoodieLogFormat.CURRENT_VERSION);
     // Write out a length that does not confirm with the content
     outputStream.writeLong(500);
     // Write out some bytes
@@ -694,7 +694,7 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness {
     // Write out a length that does not confirm with the content
     outputStream.writeLong(1000);
 
-    outputStream.writeInt(HoodieLogFormat.currentVersion);
+    outputStream.writeInt(HoodieLogFormat.CURRENT_VERSION);
     outputStream.writeInt(HoodieLogBlockType.AVRO_DATA_BLOCK.ordinal());
 
     // Write out some header
@@ -1066,7 +1066,7 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness {
     outputStream.write(HoodieLogFormat.MAGIC);
     outputStream.writeLong(1000);
     outputStream.writeInt(HoodieLogBlockType.AVRO_DATA_BLOCK.ordinal());
-    outputStream.writeInt(HoodieLogFormat.currentVersion);
+    outputStream.writeInt(HoodieLogFormat.CURRENT_VERSION);
     // Write out a length that does not confirm with the content
     outputStream.writeLong(100);
     outputStream.flush();
@@ -1079,7 +1079,7 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness {
     outputStream.write(HoodieLogFormat.MAGIC);
     outputStream.writeLong(1000);
     outputStream.writeInt(HoodieLogBlockType.AVRO_DATA_BLOCK.ordinal());
-    outputStream.writeInt(HoodieLogFormat.currentVersion);
+    outputStream.writeInt(HoodieLogFormat.CURRENT_VERSION);
     // Write out a length that does not confirm with the content
     outputStream.writeLong(100);
     outputStream.flush();
@@ -1099,7 +1099,7 @@ public class TestHoodieLogFormat extends HoodieCommonTestHarness {
     outputStream.write(HoodieLogFormat.MAGIC);
     outputStream.writeLong(1000);
     outputStream.writeInt(HoodieLogBlockType.AVRO_DATA_BLOCK.ordinal());
-    outputStream.writeInt(HoodieLogFormat.currentVersion);
+    outputStream.writeInt(HoodieLogFormat.CURRENT_VERSION);
     // Write out a length that does not confirm with the content
     outputStream.writeLong(100);
     outputStream.flush();
@@ -71,7 +71,7 @@ import static org.junit.Assert.assertTrue;
 @SuppressWarnings("ResultOfMethodCallIgnored")
 public class TestHoodieTableFileSystemView extends HoodieCommonTestHarness {
 
-  private static final transient Logger log = LogManager.getLogger(TestHoodieTableFileSystemView.class);
+  private static final transient Logger LOG = LogManager.getLogger(TestHoodieTableFileSystemView.class);
 
   private static String TEST_WRITE_TOKEN = "1-0-1";
 
@@ -498,7 +498,7 @@ public class TestHoodieTableFileSystemView extends HoodieCommonTestHarness {
     roView.getAllDataFiles(partitionPath);
 
     fileSliceList = rtView.getLatestFileSlices(partitionPath).collect(Collectors.toList());
-    log.info("FILESLICE LIST=" + fileSliceList);
+    LOG.info("FILESLICE LIST=" + fileSliceList);
     dataFiles = fileSliceList.stream().map(FileSlice::getDataFile).filter(Option::isPresent).map(Option::get)
         .collect(Collectors.toList());
     assertEquals("Expect only one data-files in latest view as there is only one file-group", 1, dataFiles.size());
@@ -77,7 +77,7 @@ import static org.apache.hudi.common.table.HoodieTimeline.COMPACTION_ACTION;
  */
 public class TestIncrementalFSViewSync extends HoodieCommonTestHarness {
 
-  private static final transient Logger log = LogManager.getLogger(TestIncrementalFSViewSync.class);
+  private static final transient Logger LOG = LogManager.getLogger(TestIncrementalFSViewSync.class);
 
   private static String TEST_WRITE_TOKEN = "1-0-1";
 
@@ -318,7 +318,7 @@ public class TestIncrementalFSViewSync extends HoodieCommonTestHarness {
     Assert.assertEquals(newCleanerInstants.size(), cleanedInstants.size());
     long initialFileSlices = partitions.stream().mapToLong(p -> view.getAllFileSlices(p).count()).findAny().getAsLong();
     long exp = initialFileSlices;
-    log.info("Initial File Slices :" + exp);
+    LOG.info("Initial File Slices :" + exp);
     for (int idx = 0; idx < newCleanerInstants.size(); idx++) {
       String instant = cleanedInstants.get(idx);
       try {
@@ -335,8 +335,8 @@ public class TestIncrementalFSViewSync extends HoodieCommonTestHarness {
       Assert.assertEquals(State.COMPLETED, view.getLastInstant().get().getState());
       Assert.assertEquals(HoodieTimeline.CLEAN_ACTION, view.getLastInstant().get().getAction());
       partitions.forEach(p -> {
-        log.info("PARTTITION : " + p);
-        log.info("\tFileSlices :" + view.getAllFileSlices(p).collect(Collectors.toList()));
+        LOG.info("PARTTITION : " + p);
+        LOG.info("\tFileSlices :" + view.getAllFileSlices(p).collect(Collectors.toList()));
       });
 
       partitions.forEach(p -> Assert.assertEquals(fileIdsPerPartition.size(), view.getLatestFileSlices(p).count()));
@@ -377,7 +377,7 @@ public class TestIncrementalFSViewSync extends HoodieCommonTestHarness {
           isDeltaCommit ? initialFileSlices : initialFileSlices - ((idx + 1) * fileIdsPerPartition.size());
       view.sync();
       Assert.assertTrue(view.getLastInstant().isPresent());
-      log.info("Last Instant is :" + view.getLastInstant().get());
+      LOG.info("Last Instant is :" + view.getLastInstant().get());
       if (isRestore) {
         Assert.assertEquals(newRestoreInstants.get(idx), view.getLastInstant().get().getTimestamp());
         Assert.assertEquals(isRestore ? HoodieTimeline.RESTORE_ACTION : HoodieTimeline.ROLLBACK_ACTION,
@@ -615,7 +615,7 @@ public class TestIncrementalFSViewSync extends HoodieCommonTestHarness {
     int multiple = begin;
     for (int idx = 0; idx < instants.size(); idx++) {
       String instant = instants.get(idx);
-      log.info("Adding instant=" + instant);
+      LOG.info("Adding instant=" + instant);
       HoodieInstant lastInstant = lastInstants.get(idx);
       // Add a non-empty ingestion to COW table
      List<String> filePaths =
@@ -59,9 +59,9 @@ public class TestCompactionUtils extends HoodieCommonTestHarness {
 
   private static String TEST_WRITE_TOKEN = "1-0-1";
 
-  private static final Map<String, Double> metrics =
+  private static final Map<String, Double> METRICS =
       new ImmutableMap.Builder<String, Double>().put("key1", 1.0).put("key2", 3.0).build();
-  private Function<Pair<String, FileSlice>, Map<String, Double>> metricsCaptureFn = (partitionFileSlice) -> metrics;
+  private Function<Pair<String, FileSlice>, Map<String, Double>> metricsCaptureFn = (partitionFileSlice) -> METRICS;
 
   @Before
   public void init() throws IOException {
@@ -252,7 +252,7 @@ public class TestCompactionUtils extends HoodieCommonTestHarness {
           version == COMPACTION_METADATA_VERSION_1 ? paths.get(idx) : new Path(paths.get(idx)).getName(),
           op.getDeltaFilePaths().get(idx));
     });
-    Assert.assertEquals("Metrics set", metrics, op.getMetrics());
+    Assert.assertEquals("Metrics set", METRICS, op.getMetrics());
   }
 
   @Override