Fixes HUDI-38: Reduce memory overhead of WriteStatus

- For implicit indexes (e.g. BloomIndex), don't buffer up written records
- By default, only collect 10% of failing records, to avoid OOMs
- Improves debuggability via the above, since data errors can now show up in collect()
- Unit tests, fixes for subclasses, and adjusted tests
This commit is contained in:
committed by
vinoth chandar
parent
e56c1612e4
commit
f1410bfdcd
@@ -25,12 +25,15 @@ import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Optional;
|
||||
import java.util.Random;
|
||||
|
||||
/**
|
||||
* Status of a write operation.
|
||||
*/
|
||||
public class WriteStatus implements Serializable {
|
||||
|
||||
  // Fixed seed so that failure sampling is deterministic for a given sequence of
  // markFailure() calls (e.g. across Spark task retries).
  private static final long RANDOM_SEED = 9038412832L;

  // Sampled subset of per-record write failures, keyed by record key.
  // Only a failureFraction portion of failures is retained here (see markFailure).
  private final HashMap<HoodieKey, Throwable> errors = new HashMap<>();

  // Successfully written records; only populated when trackSuccessRecords is true,
  // to avoid buffering every record in memory for implicit indexes.
  private final List<HoodieRecord> writtenRecords = new ArrayList<>();

  // Running count of all records seen (success + failure).
  private long totalRecords = 0;
  // Running count of all failed records, regardless of sampling.
  private long totalErrorRecords = 0;

  // Fraction (0.0 - 1.0) of failed records whose record/throwable are retained for reporting.
  private final double failureFraction;
  // Whether to buffer successful records; false for implicit indexes (e.g. BloomIndex).
  private final boolean trackSuccessRecords;
  // transient: excluded from serialization; re-seeding on the driver is unnecessary since
  // sampling only happens on the executors. NOTE(review): not restored after deserialization —
  // presumably markFailure is never called post-collect; verify against callers.
  private final transient Random random;

  /**
   * @param trackSuccessRecords whether successfully written records should be buffered in memory
   * @param failureFraction fraction of failed records to retain, to bound driver-side memory
   */
  public WriteStatus(Boolean trackSuccessRecords, Double failureFraction) {
    this.trackSuccessRecords = trackSuccessRecords;
    this.failureFraction = failureFraction;
    this.random = new Random(RANDOM_SEED);
  }
||||
/**
|
||||
* Mark write as success, optionally using given parameters for the purpose of calculating some
|
||||
* aggregate metrics. This method is not meant to cache passed arguments, since WriteStatus
|
||||
@@ -58,9 +71,10 @@ public class WriteStatus implements Serializable {
|
||||
* @param optionalRecordMetadata optional metadata related to data contained in {@link
|
||||
* HoodieRecord} before deflation.
|
||||
*/
|
||||
public void markSuccess(HoodieRecord record,
|
||||
Optional<Map<String, String>> optionalRecordMetadata) {
|
||||
writtenRecords.add(record);
|
||||
public void markSuccess(HoodieRecord record, Optional<Map<String, String>> optionalRecordMetadata) {
|
||||
if (trackSuccessRecords) {
|
||||
writtenRecords.add(record);
|
||||
}
|
||||
totalRecords++;
|
||||
}
|
||||
|
||||
@@ -74,10 +88,11 @@ public class WriteStatus implements Serializable {
|
||||
* @param optionalRecordMetadata optional metadata related to data contained in {@link
|
||||
* HoodieRecord} before deflation.
|
||||
*/
|
||||
public void markFailure(HoodieRecord record, Throwable t,
|
||||
Optional<Map<String, String>> optionalRecordMetadata) {
|
||||
failedRecords.add(record);
|
||||
errors.put(record.getKey(), t);
|
||||
public void markFailure(HoodieRecord record, Throwable t, Optional<Map<String, String>> optionalRecordMetadata) {
|
||||
if (random.nextDouble() <= failureFraction) {
|
||||
failedRecords.add(record);
|
||||
errors.put(record.getKey(), t);
|
||||
}
|
||||
totalRecords++;
|
||||
totalErrorRecords++;
|
||||
}
|
||||
|
||||
@@ -52,6 +52,12 @@ public class HoodieMemoryConfig extends DefaultHoodieConfig {
|
||||
  // Default file path prefix for spillable file
  public static final String DEFAULT_SPILLABLE_MAP_BASE_PATH = "/tmp/";

  // Property controlling what fraction of failed records' exceptions we report back to the driver.
  public static final String WRITESTATUS_FAILURE_FRACTION_PROP = "hoodie.memory.writestatus.failure.fraction";
  // Default is 10%. If set to 100%, with lot of failures, this can cause memory pressure, cause OOMs and
  // mask actual data errors.
  public static final double DEFAULT_WRITESTATUS_FAILURE_FRACTION = 0.1;

  // Instances are built via the Builder; keep the constructor private.
  private HoodieMemoryConfig(Properties props) {
    super(props);
  }
|
||||
@@ -97,6 +103,11 @@ public class HoodieMemoryConfig extends DefaultHoodieConfig {
|
||||
return this;
|
||||
}
|
||||
|
||||
public Builder withWriteStatusFailureFraction(double failureFraction) {
|
||||
props.setProperty(WRITESTATUS_FAILURE_FRACTION_PROP, String.valueOf(failureFraction));
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Dynamic calculation of max memory to use for for spillable map. user.available.memory = spark.executor.memory *
|
||||
* (1 - spark.memory.fraction) spillable.available.memory = user.available.memory * hoodie.memory.fraction. Anytime
|
||||
@@ -118,8 +129,8 @@ public class HoodieMemoryConfig extends DefaultHoodieConfig {
|
||||
if (SparkEnv.get() != null) {
|
||||
// 1 GB is the default conf used by Spark, look at SparkContext.scala
|
||||
long executorMemoryInBytes = Utils.memoryStringToMb(SparkEnv.get().conf().get(SPARK_EXECUTOR_MEMORY_PROP,
|
||||
DEFAULT_SPARK_EXECUTOR_MEMORY_MB)) * 1024
|
||||
* 1024L;
|
||||
DEFAULT_SPARK_EXECUTOR_MEMORY_MB)) * 1024
|
||||
* 1024L;
|
||||
// 0.6 is the default value used by Spark,
|
||||
// look at {@link
|
||||
// https://github.com/apache/spark/blob/master/core/src/main/scala/org/apache/spark/SparkConf.scala#L507}
|
||||
@@ -159,6 +170,9 @@ public class HoodieMemoryConfig extends DefaultHoodieConfig {
|
||||
setDefaultOnCondition(props,
|
||||
!props.containsKey(SPILLABLE_MAP_BASE_PATH_PROP),
|
||||
SPILLABLE_MAP_BASE_PATH_PROP, DEFAULT_SPILLABLE_MAP_BASE_PATH);
|
||||
setDefaultOnCondition(props,
|
||||
!props.containsKey(WRITESTATUS_FAILURE_FRACTION_PROP),
|
||||
WRITESTATUS_FAILURE_FRACTION_PROP, String.valueOf(DEFAULT_WRITESTATUS_FAILURE_FRACTION));
|
||||
return config;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -406,21 +406,21 @@ public class HoodieWriteConfig extends DefaultHoodieConfig {
|
||||
}
|
||||
|
||||
public Long getMaxMemoryPerCompaction() {
|
||||
return Long
|
||||
.valueOf(
|
||||
props.getProperty(HoodieMemoryConfig.MAX_MEMORY_FOR_COMPACTION_PROP));
|
||||
return Long.valueOf(props.getProperty(HoodieMemoryConfig.MAX_MEMORY_FOR_COMPACTION_PROP));
|
||||
}
|
||||
|
||||
public int getMaxDFSStreamBufferSize() {
|
||||
return Integer
|
||||
.valueOf(
|
||||
props.getProperty(HoodieMemoryConfig.MAX_DFS_STREAM_BUFFER_SIZE_PROP));
|
||||
return Integer.valueOf(props.getProperty(HoodieMemoryConfig.MAX_DFS_STREAM_BUFFER_SIZE_PROP));
|
||||
}
|
||||
|
||||
public String getSpillableMapBasePath() {
|
||||
return props.getProperty(HoodieMemoryConfig.SPILLABLE_MAP_BASE_PATH_PROP);
|
||||
}
|
||||
|
||||
public double getWriteStatusFailureFraction() {
|
||||
return Double.valueOf(props.getProperty(HoodieMemoryConfig.WRITESTATUS_FAILURE_FRACTION_PROP));
|
||||
}
|
||||
|
||||
public static class Builder {
|
||||
|
||||
private final Properties props = new Properties();
|
||||
@@ -428,7 +428,6 @@ public class HoodieWriteConfig extends DefaultHoodieConfig {
|
||||
private boolean isStorageConfigSet = false;
|
||||
private boolean isCompactionConfigSet = false;
|
||||
private boolean isMetricsConfigSet = false;
|
||||
private boolean isAutoCommit = true;
|
||||
private boolean isMemoryConfigSet = false;
|
||||
|
||||
public Builder fromFile(File propertiesFile) throws IOException {
|
||||
|
||||
@@ -61,7 +61,9 @@ public abstract class HoodieIOHandle<T extends HoodieRecordPayload> {
|
||||
this.originalSchema = new Schema.Parser().parse(config.getSchema());
|
||||
this.writerSchema = createHoodieWriteSchema(originalSchema);
|
||||
this.timer = new HoodieTimer().startTimer();
|
||||
this.writeStatus = ReflectionUtils.loadClass(config.getWriteStatusClassName());
|
||||
this.writeStatus = (WriteStatus) ReflectionUtils.loadClass(config.getWriteStatusClassName(),
|
||||
!hoodieTable.getIndex().isImplicitWithStorage(),
|
||||
config.getWriteStatusFailureFraction());
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
Reference in New Issue
Block a user