1
0

[HUDI-2804] Add option to skip compaction instants for streaming read (#4051)

This commit is contained in:
Danny Chan
2021-11-21 12:38:56 +08:00
committed by GitHub
parent 74b59a44ec
commit 0411f73c7d
4 changed files with 57 additions and 5 deletions

View File

@@ -242,6 +242,28 @@ public class HoodieDataSourceITCase extends AbstractTestBase {
assertRowsEquals(rows, TestData.DATA_SET_SOURCE_INSERT);
}
@Test
void testStreamWriteReadSkippingCompaction() throws Exception {
  // Register the filesystem-backed source table named "source".
  String sourceDDL = TestConfigurations.getFileSourceDDL("source");
  streamTableEnv.executeSql(sourceDDL);

  // MOR table configured for streaming read with compaction instants skipped,
  // and aggressive compaction (every delta commit, single task) so a
  // compaction instant is actually produced during the test.
  String sinkDDL = sql("t1")
      .option(FlinkOptions.PATH, tempFile.getAbsolutePath())
      .option(FlinkOptions.TABLE_TYPE, FlinkOptions.TABLE_TYPE_MERGE_ON_READ)
      .option(FlinkOptions.READ_AS_STREAMING, true)
      .option(FlinkOptions.READ_STREAMING_SKIP_COMPACT, true)
      .option(FlinkOptions.COMPACTION_DELTA_COMMITS, 1)
      .option(FlinkOptions.COMPACTION_TASKS, 1)
      .end();
  streamTableEnv.executeSql(sinkDDL);

  // Pipe the source data into the Hudi table.
  execInsertSql(streamTableEnv, "insert into t1 select * from source");

  // Streaming read should surface only the latest-commit view of the data.
  List<Row> result = execSelectSql(streamTableEnv, "select * from t1", 10);
  assertRowsEquals(result, TestData.DATA_SET_SOURCE_INSERT_LATEST_COMMIT);
}
@Test
void testStreamWriteWithCleaning() {
// create filesystem table named source