Avoid WriteStatus collect() call when committing batch
This commit is contained in:
committed by: vinoth chandar
parent: fa65db9c4c
commit: f999e4960c
@@ -29,6 +29,7 @@ import com.uber.hoodie.common.model.HoodieRecord;
|
||||
import com.uber.hoodie.common.model.HoodieRecordLocation;
|
||||
import com.uber.hoodie.common.model.HoodieRecordPayload;
|
||||
import com.uber.hoodie.common.model.HoodieRollingStatMetadata;
|
||||
import com.uber.hoodie.common.model.HoodieWriteStat;
|
||||
import com.uber.hoodie.common.table.HoodieTableMetaClient;
|
||||
import com.uber.hoodie.common.table.HoodieTimeline;
|
||||
import com.uber.hoodie.common.table.timeline.HoodieActiveTimeline;
|
||||
@@ -376,20 +377,19 @@ public class HoodieCopyOnWriteTable<T extends HoodieRecordPayload> extends Hoodi
|
||||
/**
|
||||
* Finalize the written data files
|
||||
*
|
||||
* @param writeStatuses List of WriteStatus
|
||||
* @param stats List of HoodieWriteStats
|
||||
* @return number of files finalized
|
||||
*/
|
||||
@Override
|
||||
@SuppressWarnings("unchecked")
|
||||
public void finalizeWrite(JavaSparkContext jsc, List<WriteStatus> writeStatuses)
|
||||
public void finalizeWrite(JavaSparkContext jsc, List<HoodieWriteStat> stats)
|
||||
throws HoodieIOException {
|
||||
|
||||
super.finalizeWrite(jsc, writeStatuses);
|
||||
super.finalizeWrite(jsc, stats);
|
||||
|
||||
if (config.shouldUseTempFolderForCopyOnWrite()) {
|
||||
// This is to rename each data file from temporary path to its final location
|
||||
jsc.parallelize(writeStatuses, config.getFinalizeWriteParallelism())
|
||||
.map(status -> status.getStat())
|
||||
jsc.parallelize(stats, config.getFinalizeWriteParallelism())
|
||||
.foreach(writeStat -> {
|
||||
final FileSystem fs = getMetaClient().getFs();
|
||||
final Path finalPath = new Path(config.getBasePath(), writeStat.getPath());
|
||||
|
||||
@@ -295,10 +295,10 @@ public class HoodieMergeOnReadTable<T extends HoodieRecordPayload> extends
|
||||
}
|
||||
|
||||
@Override
|
||||
public void finalizeWrite(JavaSparkContext jsc, List<WriteStatus> writeStatuses)
|
||||
public void finalizeWrite(JavaSparkContext jsc, List<HoodieWriteStat> stats)
|
||||
throws HoodieIOException {
|
||||
// delegate to base class for MOR tables
|
||||
super.finalizeWrite(jsc, writeStatuses);
|
||||
super.finalizeWrite(jsc, stats);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
||||
@@ -23,6 +23,7 @@ import com.uber.hoodie.common.HoodieCleanStat;
|
||||
import com.uber.hoodie.common.HoodieRollbackStat;
|
||||
import com.uber.hoodie.common.model.HoodieRecord;
|
||||
import com.uber.hoodie.common.model.HoodieRecordPayload;
|
||||
import com.uber.hoodie.common.model.HoodieWriteStat;
|
||||
import com.uber.hoodie.common.table.HoodieTableMetaClient;
|
||||
import com.uber.hoodie.common.table.HoodieTimeline;
|
||||
import com.uber.hoodie.common.table.TableFileSystemView;
|
||||
@@ -252,15 +253,15 @@ public abstract class HoodieTable<T extends HoodieRecordPayload> implements Seri
|
||||
* Finalize the written data onto storage. Perform any final cleanups
|
||||
*
|
||||
* @param jsc Spark Context
|
||||
* @param writeStatuses List of WriteStatus
|
||||
* @param stats List of HoodieWriteStats
|
||||
* @throws HoodieIOException if some paths can't be finalized on storage
|
||||
*/
|
||||
public void finalizeWrite(JavaSparkContext jsc, List<WriteStatus> writeStatuses)
|
||||
public void finalizeWrite(JavaSparkContext jsc, List<HoodieWriteStat> stats)
|
||||
throws HoodieIOException {
|
||||
if (config.isConsistencyCheckEnabled()) {
|
||||
List<String> pathsToCheck = writeStatuses.stream()
|
||||
.map(ws -> ws.getStat().getTempPath() != null
|
||||
? ws.getStat().getTempPath() : ws.getStat().getPath())
|
||||
List<String> pathsToCheck = stats.stream()
|
||||
.map(stat -> stat.getTempPath() != null
|
||||
? stat.getTempPath() : stat.getPath())
|
||||
.collect(Collectors.toList());
|
||||
|
||||
List<String> failingPaths = new ConsistencyCheck(config.getBasePath(), pathsToCheck, jsc,
|
||||
|
||||
Reference in New Issue
Block a user