[HUDI-1704] Use PRIMARY KEY syntax to define record keys for Flink Hudi table (#2694)
The SQL PRIMARY KEY semantics are very similar to the Hoodie record key, so using PRIMARY KEY is a more straightforward way to define record keys than the table option hoodie.datasource.write.recordkey.field. After this change, both PRIMARY KEY and the table option can define the hoodie record key, with PRIMARY KEY taking higher priority if both are defined. Note: a column with a PRIMARY KEY constraint is forced to be non-nullable.
This commit is contained in:
@@ -377,4 +377,12 @@ public class FlinkOptions {
|
||||
map.forEach(configuration::setString);
|
||||
return configuration;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns whether the given conf defines default value for the option {@code option}.
|
||||
*/
|
||||
public static <T> boolean isDefaultValueDefined(Configuration conf, ConfigOption<T> option) {
|
||||
return !conf.getOptional(option).isPresent()
|
||||
|| conf.get(option).equals(option.defaultValue());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -19,11 +19,14 @@
|
||||
package org.apache.hudi.table;
|
||||
|
||||
import org.apache.hudi.configuration.FlinkOptions;
|
||||
import org.apache.hudi.keygen.ComplexAvroKeyGenerator;
|
||||
import org.apache.hudi.util.AvroSchemaConverter;
|
||||
|
||||
import org.apache.flink.configuration.Configuration;
|
||||
import org.apache.flink.table.api.TableSchema;
|
||||
import org.apache.flink.table.api.ValidationException;
|
||||
import org.apache.flink.table.api.constraints.UniqueConstraint;
|
||||
import org.apache.flink.table.catalog.CatalogTable;
|
||||
import org.apache.flink.table.data.RowData;
|
||||
import org.apache.flink.table.factories.FactoryUtil;
|
||||
import org.apache.flink.table.factories.TableSinkFactory;
|
||||
@@ -33,6 +36,8 @@ import org.apache.flink.table.sources.TableSource;
|
||||
import org.apache.flink.table.types.logical.LogicalType;
|
||||
import org.apache.flink.table.utils.TableSchemaUtils;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
@@ -43,19 +48,19 @@ import java.util.Map;
|
||||
* Hoodie data source/sink factory.
|
||||
*/
|
||||
public class HoodieTableFactory implements TableSourceFactory<RowData>, TableSinkFactory<RowData> {
|
||||
private static final Logger LOG = LoggerFactory.getLogger(HoodieTableFactory.class);
|
||||
|
||||
public static final String FACTORY_ID = "hudi";
|
||||
|
||||
@Override
|
||||
public TableSource<RowData> createTableSource(TableSourceFactory.Context context) {
|
||||
Configuration conf = FlinkOptions.fromMap(context.getTable().getOptions());
|
||||
conf.setString(FlinkOptions.TABLE_NAME.key(), context.getObjectIdentifier().getObjectName());
|
||||
conf.setString(FlinkOptions.PARTITION_PATH_FIELD, String.join(",", context.getTable().getPartitionKeys()));
|
||||
TableSchema schema = TableSchemaUtils.getPhysicalSchema(context.getTable().getSchema());
|
||||
setupConfOptions(conf, context.getObjectIdentifier().getObjectName(), context.getTable(), schema);
|
||||
Path path = new Path(conf.getOptional(FlinkOptions.PATH).orElseThrow(() ->
|
||||
new ValidationException("Option [path] should be not empty.")));
|
||||
TableSchema tableSchema = TableSchemaUtils.getPhysicalSchema(context.getTable().getSchema());
|
||||
inferAvroSchema(conf, tableSchema.toRowDataType().notNull().getLogicalType());
|
||||
return new HoodieTableSource(
|
||||
tableSchema,
|
||||
schema,
|
||||
path,
|
||||
context.getTable().getPartitionKeys(),
|
||||
conf.getString(FlinkOptions.PARTITION_DEFAULT_NAME),
|
||||
@@ -65,11 +70,9 @@ public class HoodieTableFactory implements TableSourceFactory<RowData>, TableSin
|
||||
@Override
|
||||
public TableSink<RowData> createTableSink(TableSinkFactory.Context context) {
|
||||
Configuration conf = FlinkOptions.fromMap(context.getTable().getOptions());
|
||||
conf.setString(FlinkOptions.TABLE_NAME.key(), context.getObjectIdentifier().getObjectName());
|
||||
conf.setString(FlinkOptions.PARTITION_PATH_FIELD, String.join(",", context.getTable().getPartitionKeys()));
|
||||
TableSchema tableSchema = TableSchemaUtils.getPhysicalSchema(context.getTable().getSchema());
|
||||
inferAvroSchema(conf, tableSchema.toRowDataType().notNull().getLogicalType());
|
||||
return new HoodieTableSink(conf, tableSchema);
|
||||
TableSchema schema = TableSchemaUtils.getPhysicalSchema(context.getTable().getSchema());
|
||||
setupConfOptions(conf, context.getObjectIdentifier().getObjectName(), context.getTable(), schema);
|
||||
return new HoodieTableSink(conf, schema);
|
||||
}
|
||||
|
||||
@Override
|
||||
@@ -89,6 +92,52 @@ public class HoodieTableFactory implements TableSourceFactory<RowData>, TableSin
|
||||
// Utilities
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
/**
|
||||
* Setup the config options based on the table definition, for e.g the table name, primary key.
|
||||
*
|
||||
* @param conf The configuration to setup
|
||||
* @param tableName The table name
|
||||
* @param table The catalog table
|
||||
* @param schema The physical schema
|
||||
*/
|
||||
private static void setupConfOptions(
|
||||
Configuration conf,
|
||||
String tableName,
|
||||
CatalogTable table,
|
||||
TableSchema schema) {
|
||||
// table name
|
||||
conf.setString(FlinkOptions.TABLE_NAME.key(), tableName);
|
||||
// hoodie key about options
|
||||
setupHoodieKeyOptions(conf, table);
|
||||
// infer avro schema from physical DDL schema
|
||||
inferAvroSchema(conf, schema.toRowDataType().notNull().getLogicalType());
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets up the hoodie key options (e.g. record key and partition key) from the table definition.
|
||||
*/
|
||||
private static void setupHoodieKeyOptions(Configuration conf, CatalogTable table) {
|
||||
List<String> pkColumns = table.getSchema().getPrimaryKey()
|
||||
.map(UniqueConstraint::getColumns).orElse(Collections.emptyList());
|
||||
if (pkColumns.size() > 0) {
|
||||
// the PRIMARY KEY syntax always has higher priority than option FlinkOptions#RECORD_KEY_FIELD
|
||||
String recordKey = String.join(",", pkColumns);
|
||||
conf.setString(FlinkOptions.RECORD_KEY_FIELD, recordKey);
|
||||
}
|
||||
List<String> partitions = table.getPartitionKeys();
|
||||
if (partitions.size() > 0) {
|
||||
// the PARTITIONED BY syntax always has higher priority than option FlinkOptions#PARTITION_PATH_FIELD
|
||||
conf.setString(FlinkOptions.PARTITION_PATH_FIELD, String.join(",", partitions));
|
||||
}
|
||||
// tweak the key gen class if possible
|
||||
boolean complexHoodieKey = pkColumns.size() > 1 || partitions.size() > 1;
|
||||
if (complexHoodieKey && FlinkOptions.isDefaultValueDefined(conf, FlinkOptions.KEYGEN_CLASS)) {
|
||||
conf.setString(FlinkOptions.KEYGEN_CLASS, ComplexAvroKeyGenerator.class.getName());
|
||||
LOG.info("Table option [{}] is reset to {} because record key or partition path has two or more fields",
|
||||
FlinkOptions.KEYGEN_CLASS.key(), ComplexAvroKeyGenerator.class.getName());
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Inferences the deserialization Avro schema from the table schema (e.g. the DDL)
|
||||
* if both options {@link FlinkOptions#READ_AVRO_SCHEMA_PATH} and
|
||||
@@ -97,7 +146,7 @@ public class HoodieTableFactory implements TableSourceFactory<RowData>, TableSin
|
||||
* @param conf The configuration
|
||||
* @param rowType The specified table row type
|
||||
*/
|
||||
private void inferAvroSchema(Configuration conf, LogicalType rowType) {
|
||||
private static void inferAvroSchema(Configuration conf, LogicalType rowType) {
|
||||
if (!conf.getOptional(FlinkOptions.READ_AVRO_SCHEMA_PATH).isPresent()
|
||||
&& !conf.getOptional(FlinkOptions.READ_AVRO_SCHEMA).isPresent()) {
|
||||
String inferredSchema = AvroSchemaConverter.convertToSchema(rowType).toString();
|
||||
|
||||
Reference in New Issue
Block a user