[HUDI-4038] Avoid calling getDataSize after every record written (#5497)
- getDataSize has non-trivial overhead in the current ParquetWriter implementation, since it has to traverse the column groups already composed in memory. Instead, we can sample the calls to getDataSize so that its cost is amortized across many record writes.

Co-authored-by: sivabalan <n.siva.b@gmail.com>
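The sketch below illustrates the amortization idea in isolation: rather than asking the Parquet writer for its data size on every canWrite() check, the size is re-queried only once every N records and cached in between. The class name, field names, and the sampling interval are illustrative assumptions, not Hudi's actual HoodieBaseParquetWriter internals.

import java.util.function.LongSupplier;

/**
 * Illustrative sketch of sampled size checks: the expensive size query runs only
 * once every RECORD_SAMPLING_RATE records; canWrite() otherwise uses a cached value.
 */
class SampledSizeTracker {

  // Assumed sampling interval; the real writer picks its own threshold.
  private static final long RECORD_SAMPLING_RATE = 1000;

  private final long maxFileSize;        // size budget for the file, in bytes
  private final LongSupplier dataSizeFn; // e.g. the ParquetWriter's getDataSize()

  private long writtenRecords = 0;
  private long lastCachedDataSize = 0;

  SampledSizeTracker(long maxFileSize, LongSupplier dataSizeFn) {
    this.maxFileSize = maxFileSize;
    this.dataSizeFn = dataSizeFn;
  }

  // Checked before each write; the costly dataSizeFn call happens only periodically.
  boolean canWrite() {
    if (writtenRecords % RECORD_SAMPLING_RATE == 0) {
      lastCachedDataSize = dataSizeFn.getAsLong();
    }
    return lastCachedDataSize < maxFileSize;
  }

  // Called after each successful write.
  void recordWritten() {
    writtenRecords++;
  }
}

With a sampling interval of 1000, the traversal inside getDataSize() runs once per thousand records instead of once per record, at the cost of overshooting the size budget by at most the bytes written between two samples.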
@@ -19,11 +19,7 @@
 package org.apache.hudi.io.storage.row;
 
 import org.apache.hadoop.fs.Path;
-import org.apache.hudi.common.fs.FSUtils;
-import org.apache.hudi.common.fs.HoodieWrapperFileSystem;
-
-import org.apache.parquet.hadoop.ParquetFileWriter;
-import org.apache.parquet.hadoop.ParquetWriter;
+import org.apache.hudi.io.storage.HoodieBaseParquetWriter;
 import org.apache.spark.sql.catalyst.InternalRow;
 
 import java.io.IOException;
@@ -31,32 +27,16 @@ import java.io.IOException;
 /**
  * Parquet's impl of {@link HoodieInternalRowFileWriter} to write {@link InternalRow}s.
  */
-public class HoodieInternalRowParquetWriter extends ParquetWriter<InternalRow>
+public class HoodieInternalRowParquetWriter extends HoodieBaseParquetWriter<InternalRow>
     implements HoodieInternalRowFileWriter {
 
-  private final Path file;
-  private final HoodieWrapperFileSystem fs;
-  private final long maxFileSize;
   private final HoodieRowParquetWriteSupport writeSupport;
 
   public HoodieInternalRowParquetWriter(Path file, HoodieRowParquetConfig parquetConfig)
      throws IOException {
-    super(HoodieWrapperFileSystem.convertToHoodiePath(file, parquetConfig.getHadoopConf()),
-        ParquetFileWriter.Mode.CREATE, parquetConfig.getWriteSupport(), parquetConfig.getCompressionCodecName(),
-        parquetConfig.getBlockSize(), parquetConfig.getPageSize(), parquetConfig.getPageSize(),
-        DEFAULT_IS_DICTIONARY_ENABLED, DEFAULT_IS_VALIDATING_ENABLED,
-        DEFAULT_WRITER_VERSION, FSUtils.registerFileSystem(file, parquetConfig.getHadoopConf()));
-    this.file = HoodieWrapperFileSystem.convertToHoodiePath(file, parquetConfig.getHadoopConf());
-    this.fs = (HoodieWrapperFileSystem) this.file.getFileSystem(FSUtils.registerFileSystem(file,
-        parquetConfig.getHadoopConf()));
-    this.maxFileSize = parquetConfig.getMaxFileSize()
-        + Math.round(parquetConfig.getMaxFileSize() * parquetConfig.getCompressionRatio());
-    this.writeSupport = parquetConfig.getWriteSupport();
-  }
+    super(file, parquetConfig);
 
-  @Override
-  public boolean canWrite() {
-    return getDataSize() < maxFileSize;
+    this.writeSupport = parquetConfig.getWriteSupport();
   }
 
   @Override
@@ -69,9 +49,4 @@ public class HoodieInternalRowParquetWriter extends ParquetWriter<InternalRow>
   public void writeRow(InternalRow row) throws IOException {
     super.write(row);
   }
-
-  @Override
-  public void close() throws IOException {
-    super.close();
-  }
 }
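For context, here is a hedged caller-side sketch, not Hudi's actual write path (the helper class and setup objects are placeholders), showing why this change matters: canWrite() is consulted for every record, so whatever work it does, such as a getDataSize() traversal, is paid once per record written.

import org.apache.hadoop.fs.Path;
import org.apache.hudi.io.storage.row.HoodieInternalRowParquetWriter;
import org.apache.hudi.io.storage.row.HoodieRowParquetConfig;
import org.apache.spark.sql.catalyst.InternalRow;

import java.io.IOException;

// Hypothetical driver loop; the class and method exist only for illustration.
class RowWriteLoopSketch {
  static void writeAll(Iterable<InternalRow> rows, Path path, HoodieRowParquetConfig config)
      throws IOException {
    try (HoodieInternalRowParquetWriter writer = new HoodieInternalRowParquetWriter(path, config)) {
      for (InternalRow row : rows) {
        // canWrite() sits on the per-record hot path, hence the sampled size check.
        if (!writer.canWrite()) {
          break; // size budget reached; a real pipeline would roll over to a new file
        }
        writer.writeRow(row);
      }
    }
  }
}

After this patch, the per-record size check and the writer's construction details both live in HoodieBaseParquetWriter, which is why the canWrite() and close() overrides and the file-size bookkeeping fields disappear from this class.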