[HUDI-1451] Support bulk insert v2 with Spark 3.0.0 (#2328)
Added support for bulk insert v2 with the DataSource V2 API in Spark 3.0.0.

Co-authored-by: Wenning Ding <wenningd@amazon.com>
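For context on what this change enables, below is a minimal, hedged sketch of how a Spark 3 job might drive the bulk insert row-writer path through the Hudi datasource. The option keys, table name, field names, and paths are illustrative assumptions based on the Hudi datasource configuration, not taken from this commit; verify them against your Hudi version.

import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SaveMode;
import org.apache.spark.sql.SparkSession;

public class BulkInsertRowWriterExample {
  public static void main(String[] args) {
    SparkSession spark = SparkSession.builder()
        .appName("hudi-bulk-insert-v2-sketch")
        .master("local[2]")
        .getOrCreate();

    // Hypothetical input; any Dataset<Row> containing the configured key,
    // partition and precombine fields would work here.
    Dataset<Row> input = spark.read().parquet("/tmp/source_parquet");

    input.write().format("hudi")
        // Assumed option keys from the Hudi datasource configuration.
        .option("hoodie.table.name", "bulk_insert_v2_demo")
        .option("hoodie.datasource.write.operation", "bulk_insert")
        // Routes the write through the row-writer (internal writer) path
        // that the tests in this commit exercise, instead of the RDD/Avro path.
        .option("hoodie.datasource.write.row.writer.enable", "true")
        .option("hoodie.datasource.write.recordkey.field", "_row_key")
        .option("hoodie.datasource.write.partitionpath.field", "partition")
        .option("hoodie.datasource.write.precombine.field", "timestamp")
        .mode(SaveMode.Append)
        .save("/tmp/hudi_bulk_insert_v2_demo");

    spark.stop();
  }
}

The row-writer path writes InternalRow data directly to parquet without converting records to Avro, which is the path the HoodieBulkInsertDataInternalWriter and HoodieDataSourceInternalWriter tests below cover.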
@@ -18,26 +18,19 @@
package org.apache.hudi.internal;

import org.apache.hudi.client.HoodieInternalWriteStatus;
import org.apache.hudi.common.model.HoodieRecord;
import org.apache.hudi.common.model.HoodieWriteStat;
import org.apache.hudi.common.testutils.HoodieTestDataGenerator;
import org.apache.hudi.common.util.Option;
import org.apache.hudi.config.HoodieWriteConfig;
import org.apache.hudi.table.HoodieSparkTable;
import org.apache.hudi.table.HoodieTable;
import org.apache.hudi.testutils.HoodieClientTestHarness;

import org.apache.spark.package$;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.catalyst.InternalRow;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

import java.util.ArrayList;
import java.util.List;
import java.util.Random;

import static org.apache.hudi.testutils.SparkDatasetTestUtils.ENCODER;
import static org.apache.hudi.testutils.SparkDatasetTestUtils.STRUCT_TYPE;
@@ -45,36 +38,13 @@ import static org.apache.hudi.testutils.SparkDatasetTestUtils.getConfigBuilder;
import static org.apache.hudi.testutils.SparkDatasetTestUtils.getInternalRowWithError;
import static org.apache.hudi.testutils.SparkDatasetTestUtils.getRandomRows;
import static org.apache.hudi.testutils.SparkDatasetTestUtils.toInternalRows;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
import static org.junit.jupiter.api.Assumptions.assumeTrue;

/**
 * Unit tests {@link HoodieBulkInsertDataInternalWriter}.
 */
public class TestHoodieBulkInsertDataInternalWriter extends HoodieClientTestHarness {

private static final Random RANDOM = new Random();

@BeforeEach
public void setUp() throws Exception {
// this test is only compatible with spark 2
assumeTrue(package$.MODULE$.SPARK_VERSION().startsWith("2."));
initSparkContexts("TestHoodieBulkInsertDataInternalWriter");
initPath();
initFileSystem();
initTestDataGenerator();
initMetaClient();
}

@AfterEach
public void tearDown() throws Exception {
cleanupResources();
}
public class TestHoodieBulkInsertDataInternalWriter extends HoodieBulkInsertInternalWriterTestBase {

@Test
public void testDataInternalWriter() throws Exception {
@@ -103,15 +73,15 @@ public class TestHoodieBulkInsertDataInternalWriter extends HoodieClientTestHarn
}
}

HoodieWriterCommitMessage commitMetadata = (HoodieWriterCommitMessage) writer.commit();
List<String> fileAbsPaths = new ArrayList<>();
List<String> fileNames = new ArrayList<>();
BaseWriterCommitMessage commitMetadata = (BaseWriterCommitMessage) writer.commit();
Option<List<String>> fileAbsPaths = Option.of(new ArrayList<>());
Option<List<String>> fileNames = Option.of(new ArrayList<>());

// verify write statuses
assertWriteStatuses(commitMetadata.getWriteStatuses(), batches, size, fileAbsPaths, fileNames);

// verify rows
Dataset<Row> result = sqlContext.read().parquet(fileAbsPaths.toArray(new String[0]));
Dataset<Row> result = sqlContext.read().parquet(fileAbsPaths.get().toArray(new String[0]));
assertOutput(totalInputRows, result, instantTime, fileNames);
}
}
@@ -156,15 +126,15 @@ public class TestHoodieBulkInsertDataInternalWriter extends HoodieClientTestHarn
// expected
}

HoodieWriterCommitMessage commitMetadata = (HoodieWriterCommitMessage) writer.commit();
BaseWriterCommitMessage commitMetadata = (BaseWriterCommitMessage) writer.commit();

List<String> fileAbsPaths = new ArrayList<>();
List<String> fileNames = new ArrayList<>();
Option<List<String>> fileAbsPaths = Option.of(new ArrayList<>());
Option<List<String>> fileNames = Option.of(new ArrayList<>());
// verify write statuses
assertWriteStatuses(commitMetadata.getWriteStatuses(), 1, size / 2, fileAbsPaths, fileNames);

// verify rows
Dataset<Row> result = sqlContext.read().parquet(fileAbsPaths.toArray(new String[0]));
Dataset<Row> result = sqlContext.read().parquet(fileAbsPaths.get().toArray(new String[0]));
assertOutput(inputRows, result, instantTime, fileNames);
}

@@ -176,43 +146,4 @@ public class TestHoodieBulkInsertDataInternalWriter extends HoodieClientTestHarn
writer.write(internalRow);
}
}

private void assertWriteStatuses(List<HoodieInternalWriteStatus> writeStatuses, int batches, int size, List<String> fileAbsPaths, List<String> fileNames) {
assertEquals(batches, writeStatuses.size());
int counter = 0;
for (HoodieInternalWriteStatus writeStatus : writeStatuses) {
// verify write status
assertEquals(writeStatus.getTotalRecords(), size);
assertNull(writeStatus.getGlobalError());
assertEquals(writeStatus.getFailedRowsSize(), 0);
assertNotNull(writeStatus.getFileId());
String fileId = writeStatus.getFileId();
assertEquals(HoodieTestDataGenerator.DEFAULT_PARTITION_PATHS[counter % 3], writeStatus.getPartitionPath());
fileAbsPaths.add(basePath + "/" + writeStatus.getStat().getPath());
fileNames.add(writeStatus.getStat().getPath().substring(writeStatus.getStat().getPath().lastIndexOf('/') + 1));
HoodieWriteStat writeStat = writeStatus.getStat();
assertEquals(size, writeStat.getNumInserts());
assertEquals(size, writeStat.getNumWrites());
assertEquals(fileId, writeStat.getFileId());
assertEquals(HoodieTestDataGenerator.DEFAULT_PARTITION_PATHS[counter++ % 3], writeStat.getPartitionPath());
assertEquals(0, writeStat.getNumDeletes());
assertEquals(0, writeStat.getNumUpdateWrites());
assertEquals(0, writeStat.getTotalWriteErrors());
}
}

private void assertOutput(Dataset<Row> expectedRows, Dataset<Row> actualRows, String instantTime, List<String> fileNames) {
// verify 3 meta fields that are filled in within create handle
actualRows.collectAsList().forEach(entry -> {
assertEquals(entry.get(HoodieRecord.HOODIE_META_COLUMNS_NAME_TO_POS.get(HoodieRecord.COMMIT_TIME_METADATA_FIELD)).toString(), instantTime);
assertFalse(entry.isNullAt(HoodieRecord.HOODIE_META_COLUMNS_NAME_TO_POS.get(HoodieRecord.FILENAME_METADATA_FIELD)));
assertTrue(fileNames.contains(entry.get(HoodieRecord.HOODIE_META_COLUMNS_NAME_TO_POS.get(HoodieRecord.FILENAME_METADATA_FIELD))));
assertFalse(entry.isNullAt(HoodieRecord.HOODIE_META_COLUMNS_NAME_TO_POS.get(HoodieRecord.COMMIT_SEQNO_METADATA_FIELD)));
});

// after trimming 2 of the meta fields, rest of the fields should match
Dataset<Row> trimmedExpected = expectedRows.drop(HoodieRecord.COMMIT_SEQNO_METADATA_FIELD, HoodieRecord.COMMIT_TIME_METADATA_FIELD, HoodieRecord.FILENAME_METADATA_FIELD);
Dataset<Row> trimmedActual = actualRows.drop(HoodieRecord.COMMIT_SEQNO_METADATA_FIELD, HoodieRecord.COMMIT_TIME_METADATA_FIELD, HoodieRecord.FILENAME_METADATA_FIELD);
assertEquals(0, trimmedActual.except(trimmedExpected).count());
}
}
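For readability, here is a hedged sketch of how the Option-wrapped assertion helper might look once hoisted into the shared HoodieBulkInsertInternalWriterTestBase. It is inferred only from the removed private helper above and the new call sites in this diff; the actual base class added by the commit may differ.

protected void assertWriteStatuses(List<HoodieInternalWriteStatus> writeStatuses, int batches, int size,
    Option<List<String>> fileAbsPaths, Option<List<String>> fileNames) {
  assertEquals(batches, writeStatuses.size());
  int counter = 0;
  for (HoodieInternalWriteStatus writeStatus : writeStatuses) {
    // Same per-status checks as the removed private helper.
    assertEquals(size, writeStatus.getTotalRecords());
    assertNull(writeStatus.getGlobalError());
    assertNotNull(writeStatus.getFileId());
    assertEquals(HoodieTestDataGenerator.DEFAULT_PARTITION_PATHS[counter++ % 3], writeStatus.getPartitionPath());
    // File paths and names are now collected only when the caller passes Option.of(list),
    // so tests that do not read files back can pass Option.empty().
    String relativePath = writeStatus.getStat().getPath();
    if (fileAbsPaths.isPresent()) {
      fileAbsPaths.get().add(basePath + "/" + relativePath);
    }
    if (fileNames.isPresent()) {
      fileNames.get().add(relativePath.substring(relativePath.lastIndexOf('/') + 1));
    }
  }
}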
@@ -18,61 +18,32 @@
package org.apache.hudi.internal;

import org.apache.hudi.client.HoodieInternalWriteStatus;
import org.apache.hudi.common.model.HoodieRecord;
import org.apache.hudi.common.model.HoodieWriteStat;
import org.apache.hudi.common.testutils.HoodieTestDataGenerator;
import org.apache.hudi.common.util.Option;
import org.apache.hudi.config.HoodieWriteConfig;
import org.apache.hudi.testutils.HoodieClientTestHarness;
import org.apache.hudi.testutils.HoodieClientTestUtils;

import org.apache.spark.package$;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.catalyst.InternalRow;
import org.apache.spark.sql.sources.v2.writer.DataWriter;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Random;

import static org.apache.hudi.testutils.SparkDatasetTestUtils.ENCODER;
import static org.apache.hudi.testutils.SparkDatasetTestUtils.STRUCT_TYPE;
import static org.apache.hudi.testutils.SparkDatasetTestUtils.getConfigBuilder;
import static org.apache.hudi.testutils.SparkDatasetTestUtils.getRandomRows;
import static org.apache.hudi.testutils.SparkDatasetTestUtils.toInternalRows;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assumptions.assumeTrue;

/**
 * Unit tests {@link HoodieDataSourceInternalWriter}.
 */
public class TestHoodieDataSourceInternalWriter extends HoodieClientTestHarness {

private static final Random RANDOM = new Random();

@BeforeEach
public void setUp() throws Exception {
// this test is only compatible with spark 2
assumeTrue(package$.MODULE$.SPARK_VERSION().startsWith("2."));
initSparkContexts("TestHoodieDataSourceInternalWriter");
initPath();
initFileSystem();
initTestDataGenerator();
initMetaClient();
}

@AfterEach
public void tearDown() throws Exception {
cleanupResources();
}
public class TestHoodieDataSourceInternalWriter extends HoodieBulkInsertInternalWriterTestBase {

@Test
public void testDataSourceWriter() throws Exception {
@@ -84,7 +55,7 @@ public class TestHoodieDataSourceInternalWriter extends HoodieClientTestHarness
new HoodieDataSourceInternalWriter(instantTime, cfg, STRUCT_TYPE, sqlContext.sparkSession(), hadoopConf);
DataWriter<InternalRow> writer = dataSourceInternalWriter.createWriterFactory().createDataWriter(0, RANDOM.nextLong(), RANDOM.nextLong());

List<String> partitionPaths = Arrays.asList(HoodieTestDataGenerator.DEFAULT_PARTITION_PATHS);
String[] partitionPaths = HoodieTestDataGenerator.DEFAULT_PARTITION_PATHS;
List<String> partitionPathsAbs = new ArrayList<>();
for (String partitionPath : partitionPaths) {
partitionPathsAbs.add(basePath + "/" + partitionPath + "/*");
@@ -112,8 +83,8 @@ public class TestHoodieDataSourceInternalWriter extends HoodieClientTestHarness
metaClient.reloadActiveTimeline();
Dataset<Row> result = HoodieClientTestUtils.read(jsc, basePath, sqlContext, metaClient.getFs(), partitionPathsAbs.toArray(new String[0]));
// verify output
assertOutput(totalInputRows, result, instantTime);
assertWriteStatuses(commitMessages.get(0).getWriteStatuses(), batches, size);
assertOutput(totalInputRows, result, instantTime, Option.empty());
assertWriteStatuses(commitMessages.get(0).getWriteStatuses(), batches, size, Option.empty(), Option.empty());
}

@Test
@@ -155,8 +126,8 @@ public class TestHoodieDataSourceInternalWriter extends HoodieClientTestHarness
Dataset<Row> result = HoodieClientTestUtils.readCommit(basePath, sqlContext, metaClient.getCommitTimeline(), instantTime);

// verify output
assertOutput(totalInputRows, result, instantTime);
assertWriteStatuses(commitMessages.get(0).getWriteStatuses(), batches, size);
assertOutput(totalInputRows, result, instantTime, Option.empty());
assertWriteStatuses(commitMessages.get(0).getWriteStatuses(), batches, size, Option.empty(), Option.empty());
}
}

@@ -199,8 +170,8 @@ public class TestHoodieDataSourceInternalWriter extends HoodieClientTestHarness
Dataset<Row> result = HoodieClientTestUtils.readCommit(basePath, sqlContext, metaClient.getCommitTimeline(), instantTime);

// verify output
assertOutput(totalInputRows, result, instantTime);
assertWriteStatuses(commitMessages.get(0).getWriteStatuses(), batches, size);
assertOutput(totalInputRows, result, instantTime, Option.empty());
assertWriteStatuses(commitMessages.get(0).getWriteStatuses(), batches, size, Option.empty(), Option.empty());
}
}

@@ -250,8 +221,8 @@ public class TestHoodieDataSourceInternalWriter extends HoodieClientTestHarness
metaClient.reloadActiveTimeline();
Dataset<Row> result = HoodieClientTestUtils.read(jsc, basePath, sqlContext, metaClient.getFs(), partitionPathsAbs.toArray(new String[0]));
// verify rows
assertOutput(totalInputRows, result, instantTime0);
assertWriteStatuses(commitMessages.get(0).getWriteStatuses(), batches, size);
assertOutput(totalInputRows, result, instantTime0, Option.empty());
assertWriteStatuses(commitMessages.get(0).getWriteStatuses(), batches, size, Option.empty(), Option.empty());

// 2nd batch. abort in the end
String instantTime1 = "00" + 1;
@@ -274,7 +245,7 @@ public class TestHoodieDataSourceInternalWriter extends HoodieClientTestHarness
result = HoodieClientTestUtils.read(jsc, basePath, sqlContext, metaClient.getFs(), partitionPathsAbs.toArray(new String[0]));
// verify rows
// only rows from first batch should be present
assertOutput(totalInputRows, result, instantTime0);
assertOutput(totalInputRows, result, instantTime0, Option.empty());
}

private void writeRows(Dataset<Row> inputRows, DataWriter<InternalRow> writer) throws Exception {
@@ -284,41 +255,4 @@ public class TestHoodieDataSourceInternalWriter extends HoodieClientTestHarness
writer.write(internalRow);
}
}

private void assertWriteStatuses(List<HoodieInternalWriteStatus> writeStatuses, int batches, int size) {
assertEquals(batches, writeStatuses.size());
int counter = 0;
for (HoodieInternalWriteStatus writeStatus : writeStatuses) {
assertEquals(writeStatus.getPartitionPath(), HoodieTestDataGenerator.DEFAULT_PARTITION_PATHS[counter % 3]);
assertEquals(writeStatus.getTotalRecords(), size);
assertEquals(writeStatus.getFailedRowsSize(), 0);
assertEquals(writeStatus.getTotalErrorRecords(), 0);
assertFalse(writeStatus.hasErrors());
assertNull(writeStatus.getGlobalError());
assertNotNull(writeStatus.getFileId());
String fileId = writeStatus.getFileId();
HoodieWriteStat writeStat = writeStatus.getStat();
assertEquals(size, writeStat.getNumInserts());
assertEquals(size, writeStat.getNumWrites());
assertEquals(fileId, writeStat.getFileId());
assertEquals(HoodieTestDataGenerator.DEFAULT_PARTITION_PATHS[counter++ % 3], writeStat.getPartitionPath());
assertEquals(0, writeStat.getNumDeletes());
assertEquals(0, writeStat.getNumUpdateWrites());
assertEquals(0, writeStat.getTotalWriteErrors());
}
}

private void assertOutput(Dataset<Row> expectedRows, Dataset<Row> actualRows, String instantTime) {
// verify 3 meta fields that are filled in within create handle
actualRows.collectAsList().forEach(entry -> {
assertEquals(entry.get(HoodieRecord.HOODIE_META_COLUMNS_NAME_TO_POS.get(HoodieRecord.COMMIT_TIME_METADATA_FIELD)).toString(), instantTime);
assertFalse(entry.isNullAt(HoodieRecord.HOODIE_META_COLUMNS_NAME_TO_POS.get(HoodieRecord.FILENAME_METADATA_FIELD)));
assertFalse(entry.isNullAt(HoodieRecord.HOODIE_META_COLUMNS_NAME_TO_POS.get(HoodieRecord.COMMIT_SEQNO_METADATA_FIELD)));
});

// after trimming 2 of the meta fields, rest of the fields should match
Dataset<Row> trimmedExpected = expectedRows.drop(HoodieRecord.COMMIT_SEQNO_METADATA_FIELD, HoodieRecord.COMMIT_TIME_METADATA_FIELD, HoodieRecord.FILENAME_METADATA_FIELD);
Dataset<Row> trimmedActual = actualRows.drop(HoodieRecord.COMMIT_SEQNO_METADATA_FIELD, HoodieRecord.COMMIT_TIME_METADATA_FIELD, HoodieRecord.FILENAME_METADATA_FIELD);
assertEquals(0, trimmedActual.except(trimmedExpected).count());
}
}