Close Hoodie Clients which are opened to properly shutdown embedded timeline service
This commit is contained in:
Committed by: vinoth chandar
Parent: 065173211e
Commit: 51d122b5c3
@@ -183,14 +183,19 @@ public class DataSourceUtils {
   public static JavaRDD<HoodieRecord> dropDuplicates(JavaSparkContext jssc,
       JavaRDD<HoodieRecord> incomingHoodieRecords,
       HoodieWriteConfig writeConfig) throws Exception {
+    HoodieReadClient client = null;
     try {
-      HoodieReadClient client = new HoodieReadClient<>(jssc, writeConfig);
+      client = new HoodieReadClient<>(jssc, writeConfig);
       return client.tagLocation(incomingHoodieRecords)
           .filter(r -> !((HoodieRecord<HoodieRecordPayload>) r).isCurrentLocationKnown());
     } catch (DatasetNotFoundException e) {
       // this will be executed when there is no hoodie dataset yet
       // so no dups to drop
       return incomingHoodieRecords;
+    } finally {
+      if (null != client) {
+        client.close();
+      }
     }
   }
@@ -58,7 +58,8 @@ public class HoodieCompactionAdminTool {
    */
   public void run(JavaSparkContext jsc) throws Exception {
     HoodieTableMetaClient metaClient = new HoodieTableMetaClient(jsc.hadoopConfiguration(), cfg.basePath);
-    CompactionAdminClient admin = new CompactionAdminClient(jsc, cfg.basePath);
+    final CompactionAdminClient admin = new CompactionAdminClient(jsc, cfg.basePath);
+    try {
     final FileSystem fs = FSUtils.getFs(cfg.basePath, jsc.hadoopConfiguration());
     if (cfg.outputPath != null && fs.exists(new Path(cfg.outputPath))) {
       throw new IllegalStateException("Output File Path already exists");
@@ -83,7 +84,8 @@ public class HoodieCompactionAdminTool {
       break;
     case UNSCHEDULE_PLAN:
       List<RenameOpResult> r2 =
-          admin.unscheduleCompactionPlan(cfg.compactionInstantTime, cfg.skipValidation, cfg.parallelism, cfg.dryRun);
+          admin
+              .unscheduleCompactionPlan(cfg.compactionInstantTime, cfg.skipValidation, cfg.parallelism, cfg.dryRun);
       if (cfg.printOutput) {
         printOperationResult("Result of Unscheduling Compaction Plan :", r2);
       }
@@ -100,6 +102,9 @@ public class HoodieCompactionAdminTool {
     default:
       throw new IllegalStateException("Not yet implemented !!");
     }
+    } finally {
+      admin.close();
+    }
   }

   private <T> void serializeOperationResult(FileSystem fs, T result) throws Exception {
Reference in New Issue
Block a user