diff --git a/hoodie-client/src/test/java/com/uber/hoodie/TestCompactionAdminClient.java b/hoodie-client/src/test/java/com/uber/hoodie/TestCompactionAdminClient.java index 0ed1f7c88..220b2fb2b 100644 --- a/hoodie-client/src/test/java/com/uber/hoodie/TestCompactionAdminClient.java +++ b/hoodie-client/src/test/java/com/uber/hoodie/TestCompactionAdminClient.java @@ -158,7 +158,7 @@ public class TestCompactionAdminClient extends TestHoodieClientBase { Assert.assertTrue("Rename Files must be empty", renameFiles.isEmpty()); } expRenameFiles.entrySet().stream().forEach(r -> { - System.out.println("Key :" + r.getKey() + " renamed to " + r.getValue() + " rolled back to " + logger.info("Key :" + r.getKey() + " renamed to " + r.getValue() + " rolled back to " + renameFilesFromUndo.get(r.getKey())); }); diff --git a/hoodie-client/src/test/java/com/uber/hoodie/common/HoodieClientTestUtils.java b/hoodie-client/src/test/java/com/uber/hoodie/common/HoodieClientTestUtils.java index e77653e53..ebd05d3b8 100644 --- a/hoodie-client/src/test/java/com/uber/hoodie/common/HoodieClientTestUtils.java +++ b/hoodie-client/src/test/java/com/uber/hoodie/common/HoodieClientTestUtils.java @@ -50,6 +50,8 @@ import org.apache.avro.Schema; import org.apache.avro.generic.GenericRecord; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.log4j.LogManager; +import org.apache.log4j.Logger; import org.apache.parquet.avro.AvroSchemaConverter; import org.apache.parquet.hadoop.ParquetWriter; import org.apache.parquet.hadoop.metadata.CompressionCodecName; @@ -64,6 +66,7 @@ import org.apache.spark.sql.SQLContext; */ public class HoodieClientTestUtils { + private static final transient Logger log = LogManager.getLogger(HoodieClientTestUtils.class); public static List collectStatuses(Iterator> statusListItr) { List statuses = new ArrayList<>(); @@ -137,7 +140,7 @@ public class HoodieClientTestUtils { try { HashMap paths = getLatestFileIDsToFullPath(basePath, 
commitTimeline, Arrays.asList(commitInstant)); - System.out.println("Path :" + paths.values()); + log.info("Path :" + paths.values()); return sqlContext.read().parquet(paths.values().toArray(new String[paths.size()])) .filter(String.format("%s ='%s'", HoodieRecord.COMMIT_TIME_METADATA_FIELD, commitTime)); } catch (Exception e) { diff --git a/hoodie-client/src/test/java/com/uber/hoodie/func/TestUpdateMapFunction.java b/hoodie-client/src/test/java/com/uber/hoodie/func/TestUpdateMapFunction.java index d3c78a8fd..064021e65 100644 --- a/hoodie-client/src/test/java/com/uber/hoodie/func/TestUpdateMapFunction.java +++ b/hoodie-client/src/test/java/com/uber/hoodie/func/TestUpdateMapFunction.java @@ -80,7 +80,6 @@ public class TestUpdateMapFunction implements Serializable { public void testSchemaEvolutionOnUpdate() throws Exception { // Create a bunch of records with a old version of schema final HoodieWriteConfig config = makeHoodieClientConfig("/exampleSchema.txt"); - System.out.println("JSC =" + jsc); final HoodieCopyOnWriteTable table = new HoodieCopyOnWriteTable(config, jsc); final List statuses = jsc.parallelize(Arrays.asList(1)).map(x -> { diff --git a/hoodie-client/src/test/java/com/uber/hoodie/table/TestCopyOnWriteTable.java b/hoodie-client/src/test/java/com/uber/hoodie/table/TestCopyOnWriteTable.java index 822ec9378..76c12d87c 100644 --- a/hoodie-client/src/test/java/com/uber/hoodie/table/TestCopyOnWriteTable.java +++ b/hoodie-client/src/test/java/com/uber/hoodie/table/TestCopyOnWriteTable.java @@ -50,6 +50,8 @@ import java.util.UUID; import org.apache.avro.generic.GenericRecord; import org.apache.commons.io.IOUtils; import org.apache.hadoop.fs.Path; +import org.apache.log4j.LogManager; +import org.apache.log4j.Logger; import org.apache.parquet.avro.AvroReadSupport; import org.apache.parquet.hadoop.ParquetReader; import org.apache.spark.TaskContext; @@ -64,6 +66,8 @@ import scala.Tuple2; public class TestCopyOnWriteTable { + protected static Logger log = 
LogManager.getLogger(TestCopyOnWriteTable.class); + private String basePath = null; private transient JavaSparkContext jsc = null; @@ -378,7 +382,7 @@ public class TestCopyOnWriteTable { int counts = 0; for (File file : new File(basePath + "/2016/01/31").listFiles()) { if (file.getName().endsWith(".parquet") && FSUtils.getCommitTime(file.getName()).equals(commitTime)) { - System.out.println(file.getName() + "-" + file.length()); + log.info(file.getName() + "-" + file.length()); counts++; } } diff --git a/hoodie-common/src/main/java/com/uber/hoodie/common/util/RocksDBDAO.java b/hoodie-common/src/main/java/com/uber/hoodie/common/util/RocksDBDAO.java index 29cd8d6c7..03227964a 100644 --- a/hoodie-common/src/main/java/com/uber/hoodie/common/util/RocksDBDAO.java +++ b/hoodie-common/src/main/java/com/uber/hoodie/common/util/RocksDBDAO.java @@ -86,7 +86,7 @@ public class RocksDBDAO { */ private void init() throws HoodieException { try { - log.warn("DELETING RocksDB persisted at " + rocksDBBasePath); + log.info("DELETING RocksDB persisted at " + rocksDBBasePath); FileUtils.deleteDirectory(new File(rocksDBBasePath)); managedHandlesMap = new ConcurrentHashMap<>(); diff --git a/hoodie-common/src/test/java/com/uber/hoodie/common/table/view/HoodieTableFileSystemViewTest.java b/hoodie-common/src/test/java/com/uber/hoodie/common/table/view/HoodieTableFileSystemViewTest.java index cd26a2ba2..fbfa214a7 100644 --- a/hoodie-common/src/test/java/com/uber/hoodie/common/table/view/HoodieTableFileSystemViewTest.java +++ b/hoodie-common/src/test/java/com/uber/hoodie/common/table/view/HoodieTableFileSystemViewTest.java @@ -56,6 +56,8 @@ import java.util.stream.Collectors; import java.util.stream.Stream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; +import org.apache.log4j.LogManager; +import org.apache.log4j.Logger; import org.junit.Assert; import org.junit.Before; import org.junit.Rule; @@ -65,6 +67,8 @@ import org.junit.rules.TemporaryFolder; 
@SuppressWarnings("ResultOfMethodCallIgnored") public class HoodieTableFileSystemViewTest { + private static final transient Logger log = LogManager.getLogger(HoodieTableFileSystemViewTest.class); + private static String TEST_WRITE_TOKEN = "1-0-1"; protected HoodieTableMetaClient metaClient; @@ -502,7 +506,7 @@ public class HoodieTableFileSystemViewTest { roView.getAllDataFiles(partitionPath); fileSliceList = rtView.getLatestFileSlices(partitionPath).collect(Collectors.toList()); - System.out.println("FILESLICE LIST=" + fileSliceList); + log.info("FILESLICE LIST=" + fileSliceList); dataFiles = fileSliceList.stream().map(FileSlice::getDataFile) .filter(Optional::isPresent).map(Optional::get).collect(Collectors.toList()); assertEquals("Expect only one data-files in latest view as there is only one file-group", 1, dataFiles.size()); diff --git a/hoodie-common/src/test/java/com/uber/hoodie/common/table/view/IncrementalFSViewSyncTest.java b/hoodie-common/src/test/java/com/uber/hoodie/common/table/view/IncrementalFSViewSyncTest.java index a57ebad14..7e73bce43 100644 --- a/hoodie-common/src/test/java/com/uber/hoodie/common/table/view/IncrementalFSViewSyncTest.java +++ b/hoodie-common/src/test/java/com/uber/hoodie/common/table/view/IncrementalFSViewSyncTest.java @@ -61,6 +61,8 @@ import java.util.UUID; import java.util.stream.Collectors; import java.util.stream.IntStream; import org.apache.hadoop.fs.Path; +import org.apache.log4j.LogManager; +import org.apache.log4j.Logger; import org.junit.Assert; import org.junit.Before; import org.junit.Rule; @@ -69,6 +71,8 @@ import org.junit.rules.TemporaryFolder; public class IncrementalFSViewSyncTest { + private static final transient Logger log = LogManager.getLogger(IncrementalFSViewSyncTest.class); + private static String TEST_WRITE_TOKEN = "1-0-1"; protected HoodieTableMetaClient metaClient; @@ -344,7 +348,7 @@ public class IncrementalFSViewSyncTest { Assert.assertEquals(newCleanerInstants.size(), cleanedInstants.size()); long 
initialFileSlices = partitions.stream().mapToLong(p -> view.getAllFileSlices(p).count()).findAny().getAsLong(); long exp = initialFileSlices; - System.out.println("Initial File Slices :" + exp); + log.info("Initial File Slices :" + exp); for (int idx = 0; idx < newCleanerInstants.size(); idx++) { String instant = cleanedInstants.get(idx); try { @@ -361,8 +365,8 @@ Assert.assertEquals(State.COMPLETED, view.getLastInstant().get().getState()); Assert.assertEquals(HoodieTimeline.CLEAN_ACTION, view.getLastInstant().get().getAction()); partitions.forEach(p -> { - System.out.println("PARTTITION : " + p); - System.out.println("\tFileSlices :" + view.getAllFileSlices(p).collect(Collectors.toList())); + log.info("PARTITION : " + p); + log.info("\tFileSlices :" + view.getAllFileSlices(p).collect(Collectors.toList())); }); partitions.forEach(p -> Assert.assertEquals(fileIdsPerPartition.size(), view.getLatestFileSlices(p).count())); @@ -404,7 +408,7 @@ initialFileSlices - ((idx + 1) * fileIdsPerPartition.size()); view.sync(); Assert.assertTrue(view.getLastInstant().isPresent()); - System.out.println("Last Instant is :" + view.getLastInstant().get()); + log.info("Last Instant is :" + view.getLastInstant().get()); if (isRestore) { Assert.assertEquals(newRestoreInstants.get(idx), view.getLastInstant().get().getTimestamp()); Assert.assertEquals(isRestore ? 
HoodieTimeline.RESTORE_ACTION : HoodieTimeline.ROLLBACK_ACTION, @@ -645,7 +649,7 @@ public class IncrementalFSViewSyncTest { int multiple = begin; for (int idx = 0; idx < instants.size(); idx++) { String instant = instants.get(idx); - System.out.println("Adding instant=" + instant); + log.info("Adding instant=" + instant); HoodieInstant lastInstant = lastInstants.get(idx); // Add a non-empty ingestion to COW table List filePaths = addInstant(metaClient, instant, deltaCommit, diff --git a/hoodie-common/src/test/java/com/uber/hoodie/common/util/TestRocksDBManager.java b/hoodie-common/src/test/java/com/uber/hoodie/common/util/TestRocksDBManager.java index 7bd3cb309..5b04fe3d8 100644 --- a/hoodie-common/src/test/java/com/uber/hoodie/common/util/TestRocksDBManager.java +++ b/hoodie-common/src/test/java/com/uber/hoodie/common/util/TestRocksDBManager.java @@ -29,17 +29,12 @@ import java.util.Objects; import java.util.UUID; import java.util.stream.Collectors; import java.util.stream.IntStream; -import org.apache.log4j.Level; import org.junit.AfterClass; import org.junit.Assert; import org.junit.Test; public class TestRocksDBManager { - static { - RocksDBDAO.log.setLevel(Level.INFO); - } - private static RocksDBDAO dbManager; @AfterClass