[HUDI-1851] Adding test suite long running automate scripts for docker (#2880)
committed by GitHub
parent 7a5af806cf
commit ac72470e10
@@ -0,0 +1,76 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
dag_name: NAME-clustering.yaml
dag_rounds: clustering_num_iterations
dag_intermittent_delay_mins: clustering_delay_in_mins
dag_content:
  first_insert:
    config:
      record_size: 1000
      num_partitions_insert: 1
      repeat_count: 1
      num_records_insert: 1000
    type: InsertNode
    deps: none
  second_insert:
    config:
      record_size: 1000
      num_partitions_insert: 1
      repeat_count: 1
      num_records_insert: 10000
    deps: first_insert
    type: InsertNode
  third_insert:
    config:
      record_size: 1000
      num_partitions_insert: 1
      repeat_count: 1
      num_records_insert: 300
    deps: second_insert
    type: InsertNode
  first_delete:
    config:
      num_partitions_delete: 1
      num_records_delete: 9000
    type: DeleteNode
    deps: third_insert
  first_hive_sync:
    config:
      queue_name: "adhoc"
      engine: "mr"
    type: HiveSyncNode
    deps: first_delete
  first_validate:
    config:
      validate_hive: true
    type: ValidateDatasetNode
    deps: first_hive_sync
  first_cluster:
    config:
      execute_itr_count: clustering_itr_count
    type: ClusteringNode
    deps: first_validate
  second_hive_sync:
    config:
      queue_name: "adhoc"
      engine: "mr"
    type: HiveSyncNode
    deps: first_cluster
  second_validate:
    config:
      validate_hive: true
    type: ValidateDatasetNode
    deps: second_hive_sync
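The lower-case tokens in this DAG (NAME, clustering_num_iterations, clustering_delay_in_mins, clustering_itr_count) are template placeholders rather than literal YAML values; they have to be replaced before the file can be handed to the test suite job. A minimal sketch of how a wrapper script could materialize a concrete workload, where the file names and substituted numbers are illustrative assumptions and not part of this patch:

# Hypothetical: produce a runnable clustering DAG from the template above.
# Input/output file names and the chosen values are illustrative only.
sed -e "s/NAME/table1/" \
    -e "s/clustering_num_iterations/30/" \
    -e "s/clustering_delay_in_mins/1/" \
    -e "s/clustering_itr_count/15/" \
    clustering-template.yaml > cow-clustering.yaml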
@@ -0,0 +1,89 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
dag_name: NAME-long-running-multi-partitions.yaml
dag_rounds: num_iterations
dag_intermittent_delay_mins: delay_in_mins
dag_content:
  first_insert:
    config:
      record_size: 1000
      num_partitions_insert: 5
      repeat_count: 1
      num_records_insert: 1000
    type: InsertNode
    deps: none
  second_insert:
    config:
      record_size: 1000
      num_partitions_insert: 50
      repeat_count: 1
      num_records_insert: 10000
    deps: first_insert
    type: InsertNode
  third_insert:
    config:
      record_size: 1000
      num_partitions_insert: 2
      repeat_count: 1
      num_records_insert: 300
    deps: second_insert
    type: InsertNode
  first_hive_sync:
    config:
      queue_name: "adhoc"
      engine: "mr"
    type: HiveSyncNode
    deps: third_insert
  first_validate:
    config:
      validate_hive: true
    type: ValidateDatasetNode
    deps: first_hive_sync
  first_upsert:
    config:
      record_size: 1000
      num_partitions_insert: 2
      num_records_insert: 300
      repeat_count: 1
      num_records_upsert: 100
      num_partitions_upsert: 1
    type: UpsertNode
    deps: first_validate
  first_delete:
    config:
      num_partitions_delete: 50
      num_records_delete: 8000
    type: DeleteNode
    deps: first_upsert
  second_hive_sync:
    config:
      queue_name: "adhoc"
      engine: "mr"
    type: HiveSyncNode
    deps: first_delete
  second_validate:
    config:
      validate_hive: true
      delete_input_data: true
    type: ValidateDatasetNode
    deps: second_hive_sync
  last_validate:
    config:
      execute_itr_count: 50
      validate_clean: true
      validate_archival: true
    type: ValidateAsyncOperations
    deps: second_validate
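Both workload YAMLs and the properties file referenced by the spark-submit command in the next file are read from file:/opt/staging inside the container that runs the job, so they need to be staged there first. A minimal staging sketch, assuming the Hudi docker demo's adhoc-2 container and local copies of the files (the container name and local file names are assumptions):

# Assumed container (adhoc-2) and local file names; adjust to your environment.
docker cp cow-clustering.yaml adhoc-2:/opt/staging/
docker cp long-running-multi-partitions.yaml adhoc-2:/opt/staging/
docker cp test.properties adhoc-2:/opt/staging/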
@@ -0,0 +1,34 @@
spark-submit \
--packages org.apache.spark:spark-avro_2.11:2.4.0 \
--conf spark.task.cpus=1 \
--conf spark.executor.cores=1 \
--conf spark.task.maxFailures=100 \
--conf spark.memory.fraction=0.4 \
--conf spark.rdd.compress=true \
--conf spark.kryoserializer.buffer.max=2000m \
--conf spark.serializer=org.apache.spark.serializer.KryoSerializer \
--conf spark.memory.storageFraction=0.1 \
--conf spark.shuffle.service.enabled=true \
--conf spark.sql.hive.convertMetastoreParquet=false \
--conf spark.driver.maxResultSize=12g \
--conf spark.executor.heartbeatInterval=120s \
--conf spark.network.timeout=600s \
--conf spark.yarn.max.executor.failures=10 \
--conf spark.sql.catalogImplementation=hive \
--class org.apache.hudi.integ.testsuite.HoodieTestSuiteJob \
/opt/JAR_NAME \
--source-ordering-field test_suite_source_ordering_field \
--use-deltastreamer \
--target-base-path OUTPUT_PATH \
--input-base-path INPUT_PATH \
--target-table table1 \
--props file:/opt/staging/test.properties \
--schemaprovider-class org.apache.hudi.integ.testsuite.schema.TestSuiteFileBasedSchemaProvider \
--source-class org.apache.hudi.utilities.sources.AvroDFSSource \
--input-file-size 125829120 \
--workload-yaml-path file:/opt/staging/input_yaml \
--workload-generator-classname org.apache.hudi.integ.testsuite.dag.WorkflowDagGenerator \
--table-type TABLE_TYPE \
--compact-scheduling-minshare 1 \
--clean-input \
--clean-output
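The upper-case tokens (JAR_NAME, OUTPUT_PATH, INPUT_PATH, TABLE_TYPE), together with input_yaml and test_suite_source_ordering_field, are placeholders that must be filled in before the command will run: TABLE_TYPE takes a Hudi table type such as COPY_ON_WRITE or MERGE_ON_READ, and JAR_NAME is typically the hudi-integ-test-bundle jar placed under /opt. A minimal sketch of launching the filled-in script from the host, assuming it was saved as /opt/run_test_suite.sh inside the demo's adhoc-2 container (the script path and container name are assumptions):

# Hypothetical invocation; assumes the placeholders above were already substituted
# and the script was copied to /opt/run_test_suite.sh inside the adhoc-2 container.
docker exec -it adhoc-2 /bin/bash /opt/run_test_suite.sh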
@@ -0,0 +1,50 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

hoodie.insert.shuffle.parallelism=100
hoodie.upsert.shuffle.parallelism=100
hoodie.bulkinsert.shuffle.parallelism=100

hoodie.deltastreamer.source.test.num_partitions=100
hoodie.deltastreamer.source.test.datagen.use_rocksdb_for_storing_existing_keys=false
hoodie.deltastreamer.source.test.max_unique_records=100000000
hoodie.embed.timeline.server=false
hoodie.deltastreamer.source.input.selector=org.apache.hudi.integ.testsuite.helpers.DFSTestSuitePathSelector

hoodie.datasource.hive_sync.skip_ro_suffix=true

hoodie.datasource.write.recordkey.field=_row_key
hoodie.datasource.write.keygenerator.class=org.apache.hudi.keygen.TimestampBasedKeyGenerator
hoodie.datasource.write.partitionpath.field=timestamp

hoodie.clustering.plan.strategy.sort.columns=_row_key
hoodie.clustering.plan.strategy.daybased.lookback.partitions=0
hoodie.clustering.inline.max.commits=1

hoodie.deltastreamer.source.dfs.root=INPUT_PATH
hoodie.deltastreamer.schemaprovider.target.schema.file=file:/var/hoodie/ws/docker/demo/config/test-suite/source.avsc
hoodie.deltastreamer.schemaprovider.source.schema.file=file:/var/hoodie/ws/docker/demo/config/test-suite/source.avsc
hoodie.deltastreamer.keygen.timebased.timestamp.type=UNIX_TIMESTAMP
hoodie.deltastreamer.keygen.timebased.output.dateformat=yyyy/MM/dd

hoodie.datasource.hive_sync.jdbcurl=jdbc:hive2://hiveserver:10000/
hoodie.datasource.hive_sync.database=testdb
hoodie.datasource.hive_sync.table=table1
hoodie.datasource.hive_sync.assume_date_partitioning=false
hoodie.datasource.hive_sync.partition_fields=_hoodie_partition_path
hoodie.datasource.hive_sync.partition_extractor_class=org.apache.hudi.hive.SlashEncodedDayPartitionValueExtractor
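Note that hoodie.deltastreamer.source.dfs.root carries the same INPUT_PATH placeholder as the --input-base-path argument of the spark-submit command above, so both must be replaced with the same DFS location. A small pre-flight check, assuming the file was staged to the /opt/staging/test.properties path referenced by --props (illustrative only):

# Fails loudly if the INPUT_PATH placeholder was left unresolved in the staged properties file.
if grep -q "INPUT_PATH" /opt/staging/test.properties; then
  echo "ERROR: unresolved INPUT_PATH placeholder in test.properties" >&2
  exit 1
fi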