[HUDI-1308] Harden RFC-15 Implementation based on production testing (#2441)
Addresses leaks and perf degradation observed during testing. These were regressions from the original RFC-15 PoC implementation.

* Pass a single instance of HoodieTableMetadata everywhere
* Fix tests and add config for enabling metrics
- Removed special casing of assumeDatePartitioning inside FSUtils#getAllPartitionPaths()
- Consequently, IOException is never thrown and many files had to be adjusted
- More diligent handling of open file handles in the metadata table
- Added config for controlling reuse of connections
- Added config for turning off fallback to listing, so we can see tests fail
- Changed all ipf listing code to cache/amortize the open/close for better performance
- Timeline server also reuses connections, for better performance
- Without the timeline server, when the metadata table is opened from executors, reuse is not allowed
- HoodieMetadataConfig passed into HoodieTableMetadata#create as argument
- Fix TestHoodieBackedTableMetadata#testSync
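
For illustration (not part of the commit): a minimal caller-side sketch of the updated partition-listing call, based on the "+" lines in the diff below. FSUtils#getAllPartitionPaths now takes the engine context and base path directly, with no FileSystem argument and no checked IOException. The package paths and the wrapper class/method names here are assumptions made for the sketch.

    import java.util.List;

    import org.apache.hudi.common.engine.HoodieEngineContext;
    import org.apache.hudi.common.fs.FSUtils;

    public class PartitionListingSketch {
      // Lists all partition paths under basePath; the flags mirror the call sites in this diff.
      public static List<String> listPartitions(HoodieEngineContext context, String basePath) {
        boolean useFileListingFromMetadata = true;   // serve the listing from the metadata table
        boolean verifyMetadataFileListing = false;   // skip cross-checking against a direct FS listing
        boolean assumeDatePartitioning = false;      // special casing removed from getAllPartitionPaths()
        return FSUtils.getAllPartitionPaths(context, basePath,
            useFileListingFromMetadata, verifyMetadataFileListing, assumeDatePartitioning);
      }
    }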
@@ -98,7 +98,7 @@ public class HoodieSnapshotCopier implements Serializable {
     LOG.info(String.format("Starting to snapshot latest version files which are also no-late-than %s.",
         latestCommitTimestamp));
 
-    List<String> partitions = FSUtils.getAllPartitionPaths(context, fs, baseDir, useFileListingFromMetadata, verifyMetadataFileListing, shouldAssumeDatePartitioning);
+    List<String> partitions = FSUtils.getAllPartitionPaths(context, baseDir, useFileListingFromMetadata, verifyMetadataFileListing, shouldAssumeDatePartitioning);
     if (partitions.size() > 0) {
       LOG.info(String.format("The job needs to copy %d partitions.", partitions.size()));
@@ -129,7 +129,7 @@ public class HoodieSnapshotExporter {
     LOG.info(String.format("Starting to snapshot latest version files which are also no-late-than %s.",
         latestCommitTimestamp));
 
-    final List<String> partitions = getPartitions(engineContext, fs, cfg);
+    final List<String> partitions = getPartitions(engineContext, cfg);
     if (partitions.isEmpty()) {
       throw new HoodieSnapshotExporterException("The source dataset has 0 partition to snapshot.");
     }
@@ -154,8 +154,8 @@ public class HoodieSnapshotExporter {
     return latestCommit.isPresent() ? Option.of(latestCommit.get().getTimestamp()) : Option.empty();
   }
 
-  private List<String> getPartitions(HoodieEngineContext engineContext, FileSystem fs, Config cfg) throws IOException {
-    return FSUtils.getAllPartitionPaths(engineContext, fs, cfg.sourceBasePath, true, false, false);
+  private List<String> getPartitions(HoodieEngineContext engineContext, Config cfg) {
+    return FSUtils.getAllPartitionPaths(engineContext, cfg.sourceBasePath, true, false, false);
   }
 
   private void createSuccessTag(FileSystem fs, Config cfg) throws IOException {
@@ -87,7 +87,7 @@ public class TimelineServerPerf implements Serializable {
   public void run() throws IOException {
     JavaSparkContext jsc = UtilHelpers.buildSparkContext("hudi-view-perf-" + cfg.basePath, cfg.sparkMaster);
     HoodieSparkEngineContext engineContext = new HoodieSparkEngineContext(jsc);
-    List<String> allPartitionPaths = FSUtils.getAllPartitionPaths(engineContext, timelineServer.getFs(), cfg.basePath,
+    List<String> allPartitionPaths = FSUtils.getAllPartitionPaths(engineContext, cfg.basePath,
         cfg.useFileListingFromMetadata, cfg.verifyMetadataFileListing, true);
     Collections.shuffle(allPartitionPaths);
     List<String> selected = allPartitionPaths.stream().filter(p -> !p.contains("error")).limit(cfg.maxPartitions)
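
Also for illustration (not part of the commit): the message above notes that HoodieMetadataConfig is now passed into HoodieTableMetadata#create, and that a single HoodieTableMetadata instance is passed everywhere. A hypothetical sketch of opening the metadata table once and reusing that instance follows; the builder methods, the create(...) argument order, and the package paths are assumptions, not taken from this diff.

    import org.apache.hudi.common.config.HoodieMetadataConfig;
    import org.apache.hudi.common.engine.HoodieEngineContext;
    import org.apache.hudi.metadata.HoodieTableMetadata;

    public class MetadataReuseSketch {
      // Open the metadata table once and hand the same instance to all listing paths,
      // instead of re-creating it per call (the leak/perf issue this commit addresses).
      public static HoodieTableMetadata openOnce(HoodieEngineContext context, String basePath) {
        // Assumed builder API: enable metadata-based listing.
        HoodieMetadataConfig metadataConfig = HoodieMetadataConfig.newBuilder()
            .enable(true)
            .build();
        // Assumed argument order; the commit only states that HoodieMetadataConfig
        // is now an argument to HoodieTableMetadata#create.
        return HoodieTableMetadata.create(context, metadataConfig, basePath, "/tmp/hudi-metadata-spill");
      }
    }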