1
0

[HUDI-265] Failed to delete tmp dirs created in unit tests (#928)

This commit is contained in:
leesf
2019-10-04 00:48:13 +08:00
committed by vinoth chandar
parent cef06c1e48
commit 3dedc7e5fd
33 changed files with 249 additions and 273 deletions

View File

@@ -17,7 +17,6 @@
package org.apache.hudi;
import java.io.File;
import java.io.IOException;
import java.io.Serializable;
import java.util.concurrent.ExecutorService;
@@ -29,30 +28,27 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hudi.common.HoodieClientTestUtils;
import org.apache.hudi.common.HoodieCommonTestHarness;
import org.apache.hudi.common.HoodieTestDataGenerator;
import org.apache.hudi.common.minicluster.HdfsTestService;
import org.apache.hudi.common.model.HoodieTableType;
import org.apache.hudi.common.model.HoodieTestUtils;
import org.apache.hudi.common.table.HoodieTableMetaClient;
import org.apache.hudi.common.util.FSUtils;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.SQLContext;
import org.junit.rules.TemporaryFolder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* The test harness for resource initialization and cleanup.
*/
public abstract class HoodieClientTestHarness implements Serializable {
public abstract class HoodieClientTestHarness extends HoodieCommonTestHarness implements Serializable {
private static final Logger logger = LoggerFactory.getLogger(HoodieClientTestHarness.class);
protected transient JavaSparkContext jsc = null;
protected transient SQLContext sqlContext;
protected transient FileSystem fs;
protected String basePath = null;
protected TemporaryFolder folder = null;
protected transient HoodieTestDataGenerator dataGen = null;
protected transient ExecutorService executorService;
protected transient HoodieTableMetaClient metaClient;
@@ -69,7 +65,7 @@ public abstract class HoodieClientTestHarness implements Serializable {
* @throws IOException
*/
public void initResources() throws IOException {
initTempFolderAndPath();
initPath();
initSparkContexts();
initTestDataGenerator();
initFileSystem();
@@ -85,7 +81,6 @@ public abstract class HoodieClientTestHarness implements Serializable {
cleanupSparkContexts();
cleanupTestDataGenerator();
cleanupFileSystem();
cleanupTempFolderAndPath();
}
/**
@@ -129,33 +124,6 @@ public abstract class HoodieClientTestHarness implements Serializable {
}
}
/**
* Initializes a temporary folder and base path.
*
* @throws IOException
*/
protected void initTempFolderAndPath() throws IOException {
folder = new TemporaryFolder();
folder.create();
basePath = folder.getRoot().getAbsolutePath();
}
/**
* Cleanups the temporary folder and base path.
*
* @throws IOException
*/
protected void cleanupTempFolderAndPath() throws IOException {
if (basePath != null) {
new File(basePath).delete();
}
if (folder != null) {
logger.info("Explicitly removing workspace used in previously run test-case");
folder.delete();
}
}
/**
* Initializes a file system with the hadoop configuration of Spark context.
*/
@@ -229,16 +197,6 @@ public abstract class HoodieClientTestHarness implements Serializable {
dataGen = null;
}
/**
* Gets a default {@link HoodieTableType#COPY_ON_WRITE} table type.
* Sub-classes can override this method to specify a new table type.
*
* @return an instance of Hoodie table type.
*/
protected HoodieTableType getTableType() {
return HoodieTableType.COPY_ON_WRITE;
}
/**
* Initializes a distributed file system and base directory.
*

View File

@@ -51,18 +51,17 @@ public class TestCompactionAdminClient extends TestHoodieClientBase {
@Before
public void setUp() throws Exception {
initTempFolderAndPath();
initPath();
initSparkContexts();
metaClient = HoodieTestUtils.init(HoodieTestUtils.getDefaultHadoopConf(), basePath, MERGE_ON_READ);
client = new CompactionAdminClient(jsc, basePath);
}
@After
public void tearDown() throws Exception {
public void tearDown() {
client.close();
metaClient = null;
cleanupSparkContexts();
cleanupTempFolderAndPath();
}
@Test

View File

@@ -18,7 +18,6 @@
package org.apache.hudi;
import java.io.IOException;
import java.util.Arrays;
import java.util.concurrent.TimeoutException;
import org.apache.hadoop.fs.Path;
@@ -33,15 +32,14 @@ import org.junit.Test;
public class TestConsistencyGuard extends HoodieClientTestHarness {
@Before
public void setup() throws IOException {
initTempFolderAndPath();
public void setup() {
initPath();
initFileSystemWithDefaultConfiguration();
}
@After
public void tearDown() throws Exception {
cleanupFileSystem();
cleanupTempFolderAndPath();
}
@Test

View File

@@ -53,14 +53,13 @@ public class TestUpdateMapFunction extends HoodieClientTestHarness {
@Before
public void setUp() throws Exception {
initTempFolderAndPath();
initPath();
HoodieTestUtils.init(HoodieTestUtils.getDefaultHadoopConf(), basePath);
initSparkContexts("TestUpdateMapFunction");
}
@After
public void tearDown() throws Exception {
cleanupTempFolderAndPath();
public void tearDown() {
cleanupSparkContexts();
}

View File

@@ -50,7 +50,7 @@ public class TestHBaseQPSResourceAllocator extends HoodieClientTestHarness {
hbaseConfig = utility.getConnection().getConfiguration();
initSparkContexts("TestQPSResourceAllocator");
initTempFolderAndPath();
initPath();
basePath = folder.getRoot().getAbsolutePath() + QPS_TEST_SUFFIX_PATH;
// Initialize table
initMetaClient();
@@ -59,7 +59,6 @@ public class TestHBaseQPSResourceAllocator extends HoodieClientTestHarness {
@After
public void tearDown() throws Exception {
cleanupSparkContexts();
cleanupTempFolderAndPath();
cleanupMetaClient();
if (utility != null) {
utility.shutdownMiniCluster();

View File

@@ -102,7 +102,7 @@ public class TestHbaseIndex extends HoodieClientTestHarness {
jsc.hadoopConfiguration().addResource(utility.getConfiguration());
// Create a temp folder as the base path
initTempFolderAndPath();
initPath();
initTestDataGenerator();
initMetaClient();
}
@@ -110,7 +110,6 @@ public class TestHbaseIndex extends HoodieClientTestHarness {
@After
public void tearDown() throws Exception {
cleanupSparkContexts();
cleanupTempFolderAndPath();
cleanupTestDataGenerator();
cleanupMetaClient();
}

View File

@@ -35,14 +35,13 @@ public class TestHoodieIndex extends HoodieClientTestHarness {
@Before
public void setUp() throws Exception {
initSparkContexts("TestHoodieIndex");
initTempFolderAndPath();
initPath();
initMetaClient();
}
@After
public void tearDown() throws Exception {
public void tearDown() {
cleanupSparkContexts();
cleanupTempFolderAndPath();
cleanupMetaClient();
}

View File

@@ -89,7 +89,7 @@ public class TestHoodieBloomIndex extends HoodieClientTestHarness {
@Before
public void setUp() throws Exception {
initSparkContexts("TestHoodieBloomIndex");
initTempFolderAndPath();
initPath();
initFileSystem();
// We have some records to be tagged (two different partitions)
schemaStr = FileIOUtils.readAsUTFString(getClass().getResourceAsStream("/exampleSchema.txt"));
@@ -101,7 +101,6 @@ public class TestHoodieBloomIndex extends HoodieClientTestHarness {
public void tearDown() throws Exception {
cleanupSparkContexts();
cleanupFileSystem();
cleanupTempFolderAndPath();
cleanupMetaClient();
}

View File

@@ -64,7 +64,7 @@ public class TestHoodieGlobalBloomIndex extends HoodieClientTestHarness {
@Before
public void setUp() throws Exception {
initSparkContexts("TestHoodieGlobalBloomIndex");
initTempFolderAndPath();
initPath();
// We have some records to be tagged (two different partitions)
schemaStr = FileIOUtils.readAsUTFString(getClass().getResourceAsStream("/exampleSchema.txt"));
schema = HoodieAvroUtils.addMetadataFields(new Schema.Parser().parse(schemaStr));
@@ -72,9 +72,8 @@ public class TestHoodieGlobalBloomIndex extends HoodieClientTestHarness {
}
@After
public void tearDown() throws Exception {
public void tearDown() {
cleanupSparkContexts();
cleanupTempFolderAndPath();
cleanupMetaClient();
}

View File

@@ -59,7 +59,7 @@ public class TestHoodieCommitArchiveLog extends HoodieClientTestHarness {
@Before
public void init() throws Exception {
initDFS();
initTempFolderAndPath();
initPath();
initSparkContexts("TestHoodieCommitArchiveLog");
hadoopConf = dfs.getConf();
jsc.hadoopConfiguration().addResource(dfs.getConf());
@@ -70,7 +70,6 @@ public class TestHoodieCommitArchiveLog extends HoodieClientTestHarness {
@After
public void clean() throws IOException {
cleanupDFS();
cleanupTempFolderAndPath();
cleanupSparkContexts();
}

View File

@@ -60,7 +60,7 @@ public class TestHoodieCompactor extends HoodieClientTestHarness {
initSparkContexts("TestHoodieCompactor");
// Create a temp folder as the base path
initTempFolderAndPath();
initPath();
hadoopConf = HoodieTestUtils.getDefaultHadoopConf();
fs = FSUtils.getFs(basePath, hadoopConf);
metaClient = HoodieTestUtils.init(hadoopConf, basePath, HoodieTableType.MERGE_ON_READ);
@@ -71,7 +71,6 @@ public class TestHoodieCompactor extends HoodieClientTestHarness {
public void tearDown() throws Exception {
cleanupFileSystem();
cleanupTestDataGenerator();
cleanupTempFolderAndPath();
cleanupSparkContexts();
}

View File

@@ -56,7 +56,7 @@ public class TestHoodieMergeHandle extends HoodieClientTestHarness {
@Before
public void setUp() throws Exception {
initSparkContexts("TestHoodieMergeHandle");
initTempFolderAndPath();
initPath();
initFileSystem();
initTestDataGenerator();
initMetaClient();
@@ -66,7 +66,6 @@ public class TestHoodieMergeHandle extends HoodieClientTestHarness {
public void tearDown() throws Exception {
cleanupFileSystem();
cleanupTestDataGenerator();
cleanupTempFolderAndPath();
cleanupSparkContexts();
cleanupMetaClient();
}

View File

@@ -72,7 +72,7 @@ public class TestCopyOnWriteTable extends HoodieClientTestHarness {
@Before
public void setUp() throws Exception {
initSparkContexts("TestCopyOnWriteTable");
initTempFolderAndPath();
initPath();
initMetaClient();
initTestDataGenerator();
initFileSystem();
@@ -81,7 +81,6 @@ public class TestCopyOnWriteTable extends HoodieClientTestHarness {
@After
public void tearDown() throws Exception {
cleanupSparkContexts();
cleanupTempFolderAndPath();
cleanupMetaClient();
cleanupFileSystem();
cleanupTestDataGenerator();

View File

@@ -83,7 +83,7 @@ public class TestMergeOnReadTable extends HoodieClientTestHarness {
initDFS();
initSparkContexts("TestHoodieMergeOnReadTable");
jsc.hadoopConfiguration().addResource(dfs.getConf());
initTempFolderAndPath();
initPath();
dfs.mkdirs(new Path(basePath));
HoodieTestUtils.init(jsc.hadoopConfiguration(), basePath, HoodieTableType.MERGE_ON_READ);
initTestDataGenerator();
@@ -92,7 +92,6 @@ public class TestMergeOnReadTable extends HoodieClientTestHarness {
@After
public void clean() throws IOException {
cleanupDFS();
cleanupTempFolderAndPath();
cleanupSparkContexts();
cleanupTestDataGenerator();
}
@@ -968,6 +967,7 @@ public class TestMergeOnReadTable extends HoodieClientTestHarness {
Thread.sleep(1000);
// Rollback again to pretend the first rollback failed partially. This should not error our
    // Rollback again to pretend the first rollback failed partially. This should not error out
    writeClient.rollback(newCommitTime);
folder.delete();
}
}