1
0

[HUDI-3943] Some description fixes for 0.10.1 docs (#5447)

This commit is contained in:
LiChuang
2022-04-29 06:18:56 +08:00
committed by GitHub
parent 52953c8f5e
commit 4e928a6fe1
6 changed files with 16 additions and 16 deletions

View File

@@ -87,7 +87,7 @@ public class HoodieClusteringConfig extends HoodieConfig {
.key(CLUSTERING_STRATEGY_PARAM_PREFIX + "small.file.limit") .key(CLUSTERING_STRATEGY_PARAM_PREFIX + "small.file.limit")
.defaultValue(String.valueOf(300 * 1024 * 1024L)) .defaultValue(String.valueOf(300 * 1024 * 1024L))
.sinceVersion("0.7.0") .sinceVersion("0.7.0")
.withDocumentation("Files smaller than the size specified here are candidates for clustering"); .withDocumentation("Files smaller than the size in bytes specified here are candidates for clustering");
public static final ConfigProperty<String> PARTITION_REGEX_PATTERN = ConfigProperty public static final ConfigProperty<String> PARTITION_REGEX_PATTERN = ConfigProperty
.key(CLUSTERING_STRATEGY_PARAM_PREFIX + "partition.regex.pattern") .key(CLUSTERING_STRATEGY_PARAM_PREFIX + "partition.regex.pattern")

View File

@@ -65,17 +65,17 @@ public class HoodieMemoryConfig extends HoodieConfig {
public static final ConfigProperty<Long> MAX_MEMORY_FOR_MERGE = ConfigProperty public static final ConfigProperty<Long> MAX_MEMORY_FOR_MERGE = ConfigProperty
.key("hoodie.memory.merge.max.size") .key("hoodie.memory.merge.max.size")
.defaultValue(DEFAULT_MAX_MEMORY_FOR_SPILLABLE_MAP_IN_BYTES) .defaultValue(DEFAULT_MAX_MEMORY_FOR_SPILLABLE_MAP_IN_BYTES)
.withDocumentation("Maximum amount of memory used for merge operations, before spilling to local storage."); .withDocumentation("Maximum amount of memory in bytes used for merge operations, before spilling to local storage.");
public static final ConfigProperty<String> MAX_MEMORY_FOR_COMPACTION = ConfigProperty public static final ConfigProperty<String> MAX_MEMORY_FOR_COMPACTION = ConfigProperty
.key("hoodie.memory.compaction.max.size") .key("hoodie.memory.compaction.max.size")
.noDefaultValue() .noDefaultValue()
.withDocumentation("Maximum amount of memory used for compaction operations, before spilling to local storage."); .withDocumentation("Maximum amount of memory in bytes used for compaction operations, before spilling to local storage.");
public static final ConfigProperty<Integer> MAX_DFS_STREAM_BUFFER_SIZE = ConfigProperty public static final ConfigProperty<Integer> MAX_DFS_STREAM_BUFFER_SIZE = ConfigProperty
.key("hoodie.memory.dfs.buffer.max.size") .key("hoodie.memory.dfs.buffer.max.size")
.defaultValue(16 * 1024 * 1024) .defaultValue(16 * 1024 * 1024)
.withDocumentation("Property to control the max memory for dfs input stream buffer size"); .withDocumentation("Property to control the max memory in bytes for dfs input stream buffer size");
public static final ConfigProperty<String> SPILLABLE_MAP_BASE_PATH = ConfigProperty public static final ConfigProperty<String> SPILLABLE_MAP_BASE_PATH = ConfigProperty
.key("hoodie.memory.spillable.map.path") .key("hoodie.memory.spillable.map.path")

View File

@@ -42,25 +42,25 @@ public class HoodieStorageConfig extends HoodieConfig {
public static final ConfigProperty<String> PARQUET_MAX_FILE_SIZE = ConfigProperty public static final ConfigProperty<String> PARQUET_MAX_FILE_SIZE = ConfigProperty
.key("hoodie.parquet.max.file.size") .key("hoodie.parquet.max.file.size")
.defaultValue(String.valueOf(120 * 1024 * 1024)) .defaultValue(String.valueOf(120 * 1024 * 1024))
.withDocumentation("Target size for parquet files produced by Hudi write phases. " .withDocumentation("Target size in bytes for parquet files produced by Hudi write phases. "
+ "For DFS, this needs to be aligned with the underlying filesystem block size for optimal performance."); + "For DFS, this needs to be aligned with the underlying filesystem block size for optimal performance.");
public static final ConfigProperty<String> PARQUET_BLOCK_SIZE = ConfigProperty public static final ConfigProperty<String> PARQUET_BLOCK_SIZE = ConfigProperty
.key("hoodie.parquet.block.size") .key("hoodie.parquet.block.size")
.defaultValue(String.valueOf(120 * 1024 * 1024)) .defaultValue(String.valueOf(120 * 1024 * 1024))
.withDocumentation("Parquet RowGroup size. It's recommended to make this large enough that scan costs can be" .withDocumentation("Parquet RowGroup size in bytes. It's recommended to make this large enough that scan costs can be"
+ " amortized by packing enough column values into a single row group."); + " amortized by packing enough column values into a single row group.");
public static final ConfigProperty<String> PARQUET_PAGE_SIZE = ConfigProperty public static final ConfigProperty<String> PARQUET_PAGE_SIZE = ConfigProperty
.key("hoodie.parquet.page.size") .key("hoodie.parquet.page.size")
.defaultValue(String.valueOf(1 * 1024 * 1024)) .defaultValue(String.valueOf(1 * 1024 * 1024))
.withDocumentation("Parquet page size. Page is the unit of read within a parquet file. " .withDocumentation("Parquet page size in bytes. Page is the unit of read within a parquet file. "
+ "Within a block, pages are compressed separately."); + "Within a block, pages are compressed separately.");
public static final ConfigProperty<String> ORC_FILE_MAX_SIZE = ConfigProperty public static final ConfigProperty<String> ORC_FILE_MAX_SIZE = ConfigProperty
.key("hoodie.orc.max.file.size") .key("hoodie.orc.max.file.size")
.defaultValue(String.valueOf(120 * 1024 * 1024)) .defaultValue(String.valueOf(120 * 1024 * 1024))
.withDocumentation("Target file size for ORC base files."); .withDocumentation("Target file size in bytes for ORC base files.");
public static final ConfigProperty<String> ORC_STRIPE_SIZE = ConfigProperty public static final ConfigProperty<String> ORC_STRIPE_SIZE = ConfigProperty
.key("hoodie.orc.stripe.size") .key("hoodie.orc.stripe.size")
@@ -75,12 +75,12 @@ public class HoodieStorageConfig extends HoodieConfig {
public static final ConfigProperty<String> HFILE_MAX_FILE_SIZE = ConfigProperty public static final ConfigProperty<String> HFILE_MAX_FILE_SIZE = ConfigProperty
.key("hoodie.hfile.max.file.size") .key("hoodie.hfile.max.file.size")
.defaultValue(String.valueOf(120 * 1024 * 1024)) .defaultValue(String.valueOf(120 * 1024 * 1024))
.withDocumentation("Target file size for HFile base files."); .withDocumentation("Target file size in bytes for HFile base files.");
public static final ConfigProperty<String> HFILE_BLOCK_SIZE = ConfigProperty public static final ConfigProperty<String> HFILE_BLOCK_SIZE = ConfigProperty
.key("hoodie.hfile.block.size") .key("hoodie.hfile.block.size")
.defaultValue(String.valueOf(1024 * 1024)) .defaultValue(String.valueOf(1024 * 1024))
.withDocumentation("Lower values increase the size of metadata tracked within HFile, but can offer potentially " .withDocumentation("Lower values increase the size in bytes of metadata tracked within HFile, but can offer potentially "
+ "faster lookup times."); + "faster lookup times.");
public static final ConfigProperty<String> LOGFILE_DATA_BLOCK_FORMAT = ConfigProperty public static final ConfigProperty<String> LOGFILE_DATA_BLOCK_FORMAT = ConfigProperty
@@ -91,13 +91,13 @@ public class HoodieStorageConfig extends HoodieConfig {
public static final ConfigProperty<String> LOGFILE_MAX_SIZE = ConfigProperty public static final ConfigProperty<String> LOGFILE_MAX_SIZE = ConfigProperty
.key("hoodie.logfile.max.size") .key("hoodie.logfile.max.size")
.defaultValue(String.valueOf(1024 * 1024 * 1024)) // 1 GB .defaultValue(String.valueOf(1024 * 1024 * 1024)) // 1 GB
.withDocumentation("LogFile max size. This is the maximum size allowed for a log file " .withDocumentation("LogFile max size in bytes. This is the maximum size allowed for a log file "
+ "before it is rolled over to the next version."); + "before it is rolled over to the next version.");
public static final ConfigProperty<String> LOGFILE_DATA_BLOCK_MAX_SIZE = ConfigProperty public static final ConfigProperty<String> LOGFILE_DATA_BLOCK_MAX_SIZE = ConfigProperty
.key("hoodie.logfile.data.block.max.size") .key("hoodie.logfile.data.block.max.size")
.defaultValue(String.valueOf(256 * 1024 * 1024)) .defaultValue(String.valueOf(256 * 1024 * 1024))
.withDocumentation("LogFile Data block max size. This is the maximum size allowed for a single data block " .withDocumentation("LogFile Data block max size in bytes. This is the maximum size allowed for a single data block "
+ "to be appended to a log file. This helps to make sure the data appended to the log file is broken up " + "to be appended to a log file. This helps to make sure the data appended to the log file is broken up "
+ "into sizable blocks to prevent from OOM errors. This size should be greater than the JVM memory."); + "into sizable blocks to prevent from OOM errors. This size should be greater than the JVM memory.");

View File

@@ -150,7 +150,7 @@ public class HoodieWriteConfig extends HoodieConfig {
.key("hoodie.table.base.file.format") .key("hoodie.table.base.file.format")
.defaultValue(HoodieFileFormat.PARQUET) .defaultValue(HoodieFileFormat.PARQUET)
.withAlternatives("hoodie.table.ro.file.format") .withAlternatives("hoodie.table.ro.file.format")
.withDocumentation(""); .withDocumentation("Base file format to store all the base file data.");
public static final ConfigProperty<String> BASE_PATH = ConfigProperty public static final ConfigProperty<String> BASE_PATH = ConfigProperty
.key("hoodie.base.path") .key("hoodie.base.path")

View File

@@ -78,7 +78,7 @@ public class FileSystemViewStorageConfig extends HoodieConfig {
public static final ConfigProperty<Long> SPILLABLE_MEMORY = ConfigProperty public static final ConfigProperty<Long> SPILLABLE_MEMORY = ConfigProperty
.key("hoodie.filesystem.view.spillable.mem") .key("hoodie.filesystem.view.spillable.mem")
.defaultValue(100 * 1024 * 1024L) // 100 MB .defaultValue(100 * 1024 * 1024L) // 100 MB
.withDocumentation("Amount of memory to be used for holding file system view, before spilling to disk."); .withDocumentation("Amount of memory to be used in bytes for holding file system view, before spilling to disk.");
public static final ConfigProperty<Double> SPILLABLE_COMPACTION_MEM_FRACTION = ConfigProperty public static final ConfigProperty<Double> SPILLABLE_COMPACTION_MEM_FRACTION = ConfigProperty
.key("hoodie.filesystem.view.spillable.compaction.mem.fraction") .key("hoodie.filesystem.view.spillable.compaction.mem.fraction")

View File

@@ -137,7 +137,7 @@ public class FlinkOptions extends HoodieConfig {
.key("index.partition.regex") .key("index.partition.regex")
.stringType() .stringType()
.defaultValue(".*") .defaultValue(".*")
.withDescription("Whether to load partitions in state if partition path matching default *"); .withDescription("Whether to load partitions in state if partition path matching default `*`");
// ------------------------------------------------------------------------ // ------------------------------------------------------------------------
// Read Options // Read Options
@@ -542,7 +542,7 @@ public class FlinkOptions extends HoodieConfig {
.key("compaction.target_io") .key("compaction.target_io")
.longType() .longType()
.defaultValue(500 * 1024L) // default 500 GB .defaultValue(500 * 1024L) // default 500 GB
.withDescription("Target IO per compaction (both read and write), default 500 GB"); .withDescription("Target IO in MB per compaction (both read and write), default 500 GB");
public static final ConfigOption<Boolean> CLEAN_ASYNC_ENABLED = ConfigOptions public static final ConfigOption<Boolean> CLEAN_ASYNC_ENABLED = ConfigOptions
.key("clean.async.enabled") .key("clean.async.enabled")