33 Commits

Author SHA1 Message Date
v-zhangjc9
eea6307c87 feat(scheduler): restore the schedule time points 2025-01-21 15:47:15 +08:00
v-zhangjc9
c5366e006b feat(all): add project documentation 2025-01-17 09:33:19 +08:00
v-zhangjc9
f0f295bfc9 feat(all): add project documentation 2025-01-10 14:56:16 +08:00
v-zhangjc9
e34c5d2e3e feat(all): add project documentation 2025-01-03 14:47:51 +08:00
v-zhangjc9
86d6fcaec7 feat(all): add project documentation 2024-12-26 17:37:37 +08:00
v-zhangjc9
2c09d97cec feat(all): add project documentation 2024-12-19 19:04:22 +08:00
v-zhangjc9
37ac0cd311 feat(all): add project documentation 2024-12-12 15:29:30 +08:00
v-zhangjc9
9fa38a3065 feat(all): add project documentation 2024-12-06 17:44:53 +08:00
v-zhangjc9
d908f99fbd feat(all): add project documentation 2024-11-28 18:14:12 +08:00
v-zhangjc9
5b0b23336c feat(all): add project documentation 2024-11-21 19:22:13 +08:00
v-zhangjc9
263b91c42a feat(all): add project documentation 2024-11-15 14:16:58 +08:00
v-zhangjc9
a53c90a348 feat(all): add project documentation 2024-11-15 10:06:48 +08:00
v-zhangjc9
e3583dad0c feat(all): add project documentation 2024-11-07 18:20:26 +08:00
v-zhangjc9
514a65a5e6 feat(all): add project documentation 2024-10-31 17:52:01 +08:00
v-zhangjc9
57a57ace77 feat(sync): add log output 2024-10-22 17:08:49 +08:00
v-zhangjc9
1338e6458c feat(cli): add the ability to enable or disable hosts 2024-10-22 08:59:13 +08:00
v-zhangjc9
9fd46b3a20 feat(scheduler): fine-tune resource limits for the b12 cluster 2024-10-14 15:23:15 +08:00
v-zhangjc9
8e8b1a7684 feat(bin): remove ytp transfer 2024-10-14 14:41:06 +08:00
v-zhangjc9
4d8238dd7f feat(all): remove the b5 cluster 2024-10-14 14:32:43 +08:00
v-zhangjc9
73f7d3085a feat(scheduler): adjust resource limits for b12
There is no need for a standby cluster anyway, so just run one cluster to its limit
2024-10-14 14:26:27 +08:00
v-zhangjc9
3f8652395c fix(scheduler): fix off-peak compaction of key CRM tables being scheduled onto the A4 cluster 2024-10-14 14:21:53 +08:00
v-zhangjc9
8c6e0aa353 feat(scheduler): disallow b5 and a4 during off-peak scheduling 2024-10-14 10:55:36 +08:00
v-zhangjc9
e15dd6289d feat(web): add a metrics collection progress display 2024-10-14 10:51:10 +08:00
v-zhangjc9
7d33227d70 feat(monitor): add monitoring metrics for Hudi table file counts 2024-10-12 17:17:15 +08:00
v-zhangjc9
8fda8f7669 fix(hudi-query): fix an API call error 2024-10-12 15:52:05 +08:00
v-zhangjc9
57a828c5b4 feat(hudi-query): add endpoints for HDFS file counts 2024-10-12 14:43:05 +08:00
v-zhangjc9
b9f6aa0cc2 fix(forest): fix an interface type error 2024-10-12 14:42:28 +08:00
v-zhangjc9
dcb9028d86 feat(all): add project documentation 2024-09-27 15:23:36 +08:00
v-zhangjc9
57a2787bf8 feat(scheduler): fix scheduling 2024-09-24 19:11:57 +08:00
v-zhangjc9
813ddfaeac feat(scheduler): adjust the daily schedule time points
Stop the full-table compaction runs scheduled at 11:00 and 14:00
2024-09-24 18:02:49 +08:00
v-zhangjc9
0cbf6b28ef feat(all): add project documentation 2024-09-20 14:58:51 +08:00
v-zhangjc9
7efb2527d0 feat(all): add project documentation 2024-09-13 10:06:06 +08:00
v-zhangjc9
f085c9d506 feat(all): add project documentation 2024-09-06 10:58:01 +08:00
46 changed files with 772 additions and 2406 deletions

View File

@@ -1,3 +1,168 @@
GET http://AxhEbscwsJDbYMH2:cYxg3b4PtWoVD5SjFayWxtnSVsjzRsg4@b12s20.hdp.dc:19521/hdfs/count?root=hdfs://b2/apps/datalake/hive/dws_wsyyt/external_table_hudi/dws_tb_jt_servuser_data
Authorization: Basic AxhEbscwsJDbYMH2 cYxg3b4PtWoVD5SjFayWxtnSVsjzRsg4
User-Agent: IntelliJ HTTP Client/IntelliJ IDEA 2024.1.4
Accept-Encoding: br, deflate, gzip, x-gzip
Accept: */*
content-length: 0
<> 2024-10-12T154854.500.txt
###
GET http://AxhEbscwsJDbYMH2:cYxg3b4PtWoVD5SjFayWxtnSVsjzRsg4@b12s12.hdp.dc:25961/hdfs/count?root=hdfs://b2/apps/datalake/hive/dws_wsyyt/external_table_hudi/dws_tb_jt_servuser_data
Authorization: Basic AxhEbscwsJDbYMH2 cYxg3b4PtWoVD5SjFayWxtnSVsjzRsg4
User-Agent: IntelliJ HTTP Client/IntelliJ IDEA 2024.1.4
Accept-Encoding: br, deflate, gzip, x-gzip
Accept: */*
Cookie: JSESSIONID=EE0952421D19909D0A80BB5A1216DE93
content-length: 0
<> 2024-10-12T154825.500.txt
###
GET http://AxhEbscwsJDbYMH2:cYxg3b4PtWoVD5SjFayWxtnSVsjzRsg4@b12s12.hdp.dc:25961/hdfs/count?root=hdfs://b2/apps/datalake/hive/dws_wsyyt/external_table_hudi/dws_tb_jt_servuser_data
Authorization: Basic AxhEbscwsJDbYMH2 cYxg3b4PtWoVD5SjFayWxtnSVsjzRsg4
User-Agent: IntelliJ HTTP Client/IntelliJ IDEA 2024.1.4
Accept-Encoding: br, deflate, gzip, x-gzip
Accept: */*
Cookie: JSESSIONID=EE0952421D19909D0A80BB5A1216DE93
content-length: 0
<> 2024-10-12T154754.500.txt
###
GET http://AxhEbscwsJDbYMH2:cYxg3b4PtWoVD5SjFayWxtnSVsjzRsg4@b12s12.hdp.dc:25961/hdfs/count?root=hdfs://b2/apps/datalake/hive/dws_wsyyt/external_table_hudi/dws_tb_jt_servuser_data
Authorization: Basic AxhEbscwsJDbYMH2 cYxg3b4PtWoVD5SjFayWxtnSVsjzRsg4
User-Agent: IntelliJ HTTP Client/IntelliJ IDEA 2024.1.4
Accept-Encoding: br, deflate, gzip, x-gzip
Accept: */*
Cookie: JSESSIONID=EE0952421D19909D0A80BB5A1216DE93
content-length: 0
<> 2024-10-12T154616.500.txt
###
GET http://AxhEbscwsJDbYMH2:cYxg3b4PtWoVD5SjFayWxtnSVsjzRsg4@b12s12.hdp.dc:25961/hdfs/list?root=hdfs://b2/apps/datalake/hive/dws_wsyyt/external_table_hudi/dws_tb_jt_servuser_data
Authorization: Basic AxhEbscwsJDbYMH2 cYxg3b4PtWoVD5SjFayWxtnSVsjzRsg4
User-Agent: IntelliJ HTTP Client/IntelliJ IDEA 2024.1.4
Accept-Encoding: br, deflate, gzip, x-gzip
Accept: */*
Cookie: JSESSIONID=EE0952421D19909D0A80BB5A1216DE93
content-length: 0
<> 2024-10-12T154529.500.txt
###
GET http://AxhEbscwsJDbYMH2:cYxg3b4PtWoVD5SjFayWxtnSVsjzRsg4@b12s12.hdp.dc:25961/hdfs/list?root=hdfs://b2/apps/datalake/hive/dws_wsyyt/external_table_hudi/dws_tb_jt_servuser_data
Authorization: Basic AxhEbscwsJDbYMH2 cYxg3b4PtWoVD5SjFayWxtnSVsjzRsg4
User-Agent: IntelliJ HTTP Client/IntelliJ IDEA 2024.1.4
Accept-Encoding: br, deflate, gzip, x-gzip
Accept: */*
Cookie: JSESSIONID=EE0952421D19909D0A80BB5A1216DE93
content-length: 0
<> 2024-10-12T151839.500.txt
###
GET http://AxhEbscwsJDbYMH2:cYxg3b4PtWoVD5SjFayWxtnSVsjzRsg4@b12s12.hdp.dc:25961/hdfs/file_count?root=hdfs://b2/apps/datalake/hive/dws_wsyyt/external_table_hudi/dws_tb_jt_servuser_data
Authorization: Basic AxhEbscwsJDbYMH2 cYxg3b4PtWoVD5SjFayWxtnSVsjzRsg4
User-Agent: IntelliJ HTTP Client/IntelliJ IDEA 2024.1.4
Accept-Encoding: br, deflate, gzip, x-gzip
Accept: */*
Cookie: JSESSIONID=EE0952421D19909D0A80BB5A1216DE93
content-length: 0
<> 2024-10-12T151753.500.txt
###
GET http://AxhEbscwsJDbYMH2:cYxg3b4PtWoVD5SjFayWxtnSVsjzRsg4@b12s12.hdp.dc:25961/hdfs/file_count?root=hdfs://b2/apps/datalake/hive/dws_wsyyt/external_table_hudi/dws_tb_jt_servuser_data
Authorization: Basic AxhEbscwsJDbYMH2 cYxg3b4PtWoVD5SjFayWxtnSVsjzRsg4
User-Agent: IntelliJ HTTP Client/IntelliJ IDEA 2024.1.4
Accept-Encoding: br, deflate, gzip, x-gzip
Accept: */*
Cookie: JSESSIONID=EE0952421D19909D0A80BB5A1216DE93
content-length: 0
<> 2024-10-12T151727.500.txt
###
GET http://AxhEbscwsJDbYMH2:cYxg3b4PtWoVD5SjFayWxtnSVsjzRsg4@b12s12.hdp.dc:25961/hdfs/count?root=hdfs://b2/apps/datalake/hive/dws_wsyyt/external_table_hudi/dws_tb_jt_servuser_data
Authorization: Basic AxhEbscwsJDbYMH2 cYxg3b4PtWoVD5SjFayWxtnSVsjzRsg4
User-Agent: IntelliJ HTTP Client/IntelliJ IDEA 2024.1.4
Accept-Encoding: br, deflate, gzip, x-gzip
Accept: */*
content-length: 0
<> 2024-10-12T151704.500.txt
###
GET http://AxhEbscwsJDbYMH2:cYxg3b4PtWoVD5SjFayWxtnSVsjzRsg4@b12s22.hdp.dc:13241/hdfs/count?root=hdfs://b2/apps/datalake/hive/dws_wsyyt/external_table_hudi/dws_tb_jt_servuser_data
Authorization: Basic AxhEbscwsJDbYMH2 cYxg3b4PtWoVD5SjFayWxtnSVsjzRsg4
User-Agent: IntelliJ HTTP Client/IntelliJ IDEA 2024.1.4
Accept-Encoding: br, deflate, gzip, x-gzip
Accept: */*
Cookie: JSESSIONID=D4B48AD7708DF28D7AFA0A74B26CF45A
content-length: 0
<> 2024-10-12T151540.500.txt
###
GET http://AxhEbscwsJDbYMH2:cYxg3b4PtWoVD5SjFayWxtnSVsjzRsg4@b12s22.hdp.dc:13241/hdfs/count?root=hdfs://b2/apps/datalake/hive/dws_wsyyt/external_table_hudi/dws_tb_jt_servuser_data
Authorization: Basic AxhEbscwsJDbYMH2 cYxg3b4PtWoVD5SjFayWxtnSVsjzRsg4
User-Agent: IntelliJ HTTP Client/IntelliJ IDEA 2024.1.4
Accept-Encoding: br, deflate, gzip, x-gzip
Accept: */*
Cookie: JSESSIONID=D4B48AD7708DF28D7AFA0A74B26CF45A
content-length: 0
<> 2024-10-12T151442.500.txt
###
GET http://AxhEbscwsJDbYMH2:cYxg3b4PtWoVD5SjFayWxtnSVsjzRsg4@b12s22.hdp.dc:13241/hdfs/count?root=hdfs://b2/apps/datalake/hive/dws_wsyyt/external_table_hudi/dws_tb_jt_servuser_data
Authorization: Basic AxhEbscwsJDbYMH2 cYxg3b4PtWoVD5SjFayWxtnSVsjzRsg4
User-Agent: IntelliJ HTTP Client/IntelliJ IDEA 2024.1.4
Accept-Encoding: br, deflate, gzip, x-gzip
Accept: */*
Cookie: JSESSIONID=D4B48AD7708DF28D7AFA0A74B26CF45A
content-length: 0
<> 2024-10-12T151417.500.txt
###
GET http://AxhEbscwsJDbYMH2:cYxg3b4PtWoVD5SjFayWxtnSVsjzRsg4@b12s22.hdp.dc:13241/hdfs/count?hdfs=hdfs://b2/apps/datalake/hive/dws_wsyyt/external_table_hudi/dws_tb_jt_servuser_data
Authorization: Basic AxhEbscwsJDbYMH2 cYxg3b4PtWoVD5SjFayWxtnSVsjzRsg4
User-Agent: IntelliJ HTTP Client/IntelliJ IDEA 2024.1.4
Accept-Encoding: br, deflate, gzip, x-gzip
Accept: */*
Cookie: JSESSIONID=D4B48AD7708DF28D7AFA0A74B26CF45A
content-length: 0
<> 2024-10-12T151409.500.txt
###
GET http://AxhEbscwsJDbYMH2:cYxg3b4PtWoVD5SjFayWxtnSVsjzRsg4@b12s22.hdp.dc:13241/hdfs?hdfs=hdfs://b2/apps/datalake/hive/dws_wsyyt/external_table_hudi/dws_tb_jt_servuser_data
Authorization: Basic AxhEbscwsJDbYMH2 cYxg3b4PtWoVD5SjFayWxtnSVsjzRsg4
User-Agent: IntelliJ HTTP Client/IntelliJ IDEA 2024.1.4
Accept-Encoding: br, deflate, gzip, x-gzip
Accept: */*
content-length: 0
<> 2024-10-12T151340.404.json
###
GET http://AxhEbscwsJDbYMH2:cYxg3b4PtWoVD5SjFayWxtnSVsjzRsg4@b12s10.hdp.dc:31719/task/law_enforcement?pulsar_url=pulsar://132.122.115.158:16650,132.122.115.159:16650,132.122.115.160:16650,132.122.115.161:16650,132.122.115.167:16650,132.122.115.168:16650&pulsar_topic=persistent://odcp/acct_sz/acct_item_755&start_time=1716858000000&end_time=1716861600000&primary_keys=ACCT_ITEM_ID&partition_keys=ACCT_ID
Connection: Keep-Alive
User-Agent: Apache-HttpClient/4.5.14 (Java/17.0.10)
@@ -358,163 +523,3 @@ Accept-Encoding: br,deflate,gzip,x-gzip
###
GET http://AxhEbscwsJDbYMH2:cYxg3b4PtWoVD5SjFayWxtnSVsjzRsg4@b12s10.hdp.dc:33535/task/law_enforcement?pulsar_url=pulsar://132.122.115.158:16650,132.122.115.159:16650,132.122.115.160:16650,132.122.115.161:16650,132.122.115.167:16650,132.122.115.168:16650&pulsar_topic=persistent://odcp/acct_dg/acct_item_760&start_time=1716858000000&end_time=1716861600000&primary_keys=ACCT_ITEM_ID&partition_keys=ACCT_ID
Connection: Keep-Alive
User-Agent: Apache-HttpClient/4.5.14 (Java/17.0.10)
Cookie: JSESSIONID=08E78CE8926806AAB5D110D0FE9B05F7
Accept-Encoding: br,deflate,gzip,x-gzip
<> 2024-05-28T164901.200.txt
###
GET http://AxhEbscwsJDbYMH2:cYxg3b4PtWoVD5SjFayWxtnSVsjzRsg4@b12s10.hdp.dc:33535/task/law_enforcement?pulsar_url=pulsar://132.122.115.158:16650,132.122.115.159:16650,132.122.115.160:16650,132.122.115.161:16650,132.122.115.167:16650,132.122.115.168:16650&pulsar_topic=persistent://odcp/acct_dg/acct_item_760&start_time=1716858000000&end_time=1716861600000&primary_keys=ACCT_ITEM_ID&partition_keys=ACCT_ID
Connection: Keep-Alive
User-Agent: Apache-HttpClient/4.5.14 (Java/17.0.10)
Cookie: JSESSIONID=08E78CE8926806AAB5D110D0FE9B05F7
Accept-Encoding: br,deflate,gzip,x-gzip
<> 2024-05-28T164758.200.txt
###
GET http://AxhEbscwsJDbYMH2:cYxg3b4PtWoVD5SjFayWxtnSVsjzRsg4@b12s10.hdp.dc:33535/task/law_enforcement?pulsar_url=pulsar://132.122.115.158:16650,132.122.115.159:16650,132.122.115.160:16650,132.122.115.161:16650,132.122.115.167:16650,132.122.115.168:16650&pulsar_topic=persistent://odcp/acct_dg/acct_item_760&start_time=1716858000000&end_time=1716861600000&primary_keys=ACCT_ITEM_ID&partition_keys=ACCT_ID
Connection: Keep-Alive
User-Agent: Apache-HttpClient/4.5.14 (Java/17.0.10)
Cookie: JSESSIONID=08E78CE8926806AAB5D110D0FE9B05F7
Accept-Encoding: br,deflate,gzip,x-gzip
<> 2024-05-28T164303.200.txt
###
GET http://AxhEbscwsJDbYMH2:cYxg3b4PtWoVD5SjFayWxtnSVsjzRsg4@b12s10.hdp.dc:33535/task/law_enforcement?pulsar_url=pulsar://132.122.115.158:16650,132.122.115.159:16650,132.122.115.160:16650,132.122.115.161:16650,132.122.115.167:16650,132.122.115.168:16650&pulsar_topic=persistent://odcp/acct_dg/acct_item_760&start_time=1716858000000&end_time=1716861600000&primary_keys=ACCT_ITEM_ID&partition_keys=ACCT_ID
Connection: Keep-Alive
User-Agent: Apache-HttpClient/4.5.14 (Java/17.0.10)
Cookie: JSESSIONID=08E78CE8926806AAB5D110D0FE9B05F7
Accept-Encoding: br,deflate,gzip,x-gzip
<> 2024-05-28T164220.200.txt
###
GET http://AxhEbscwsJDbYMH2:cYxg3b4PtWoVD5SjFayWxtnSVsjzRsg4@b12s10.hdp.dc:33535/task/law_enforcement?pulsar_url=pulsar://132.122.115.158:16650,132.122.115.159:16650,132.122.115.160:16650,132.122.115.161:16650,132.122.115.167:16650,132.122.115.168:16650&pulsar_topic=persistent://odcp/acct_dg/acct_item_760&start_time=1716858000000&end_time=1716861600000&primary_keys=ACCT_ITEM_ID&partition_keys=ACCT_ID
Connection: Keep-Alive
User-Agent: Apache-HttpClient/4.5.14 (Java/17.0.10)
Cookie: JSESSIONID=08E78CE8926806AAB5D110D0FE9B05F7
Accept-Encoding: br,deflate,gzip,x-gzip
<> 2024-05-28T164107.200.txt
###
GET http://AxhEbscwsJDbYMH2:cYxg3b4PtWoVD5SjFayWxtnSVsjzRsg4@b12s10.hdp.dc:33535/task/law_enforcement?pulsar_url=pulsar://132.122.115.158:16650,132.122.115.159:16650,132.122.115.160:16650,132.122.115.161:16650,132.122.115.167:16650,132.122.115.168:16650&pulsar_topic=persistent://odcp/acct_dg/acct_item_760&start_time=1716858000000&end_time=1716861600000&primary_keys=ACCT_ITEM_ID&partition_keys=ACCT_ID
Connection: Keep-Alive
User-Agent: Apache-HttpClient/4.5.14 (Java/17.0.10)
Cookie: JSESSIONID=C5D2666661F27F68E53223FE5B74AF35
Accept-Encoding: br,deflate,gzip,x-gzip
<> 2024-05-28T163410.200.txt
###
GET http://AxhEbscwsJDbYMH2:cYxg3b4PtWoVD5SjFayWxtnSVsjzRsg4@132.126.207.130:35690/hudi_services/queue/queue/clear?name=compaction-queue-pre
Connection: Keep-Alive
User-Agent: Apache-HttpClient/4.5.14 (Java/17.0.10)
Cookie: JSESSIONID=8516C92140B5118AF9AA61025D0F8C93
Accept-Encoding: br,deflate,gzip,x-gzip
###
GET http://AxhEbscwsJDbYMH2:cYxg3b4PtWoVD5SjFayWxtnSVsjzRsg4@132.126.207.130:35690/hudi_services/service_scheduler/schedule/all
Connection: Keep-Alive
User-Agent: Apache-HttpClient/4.5.14 (Java/17.0.10)
Cookie: JSESSIONID=7A4C34E0240A98C1186F3A2551BC5E80
Accept-Encoding: br,deflate,gzip,x-gzip
###
GET http://AxhEbscwsJDbYMH2:cYxg3b4PtWoVD5SjFayWxtnSVsjzRsg4@132.126.207.130:35690/hudi_services/service_web/cloud/list
Connection: Keep-Alive
User-Agent: Apache-HttpClient/4.5.14 (Java/17.0.10)
Cookie: JSESSIONID=F5F155198FAF72435339CC2E21B873CC
Accept-Encoding: br,deflate,gzip,x-gzip
<> 2024-05-09T170723.200.json
###
GET http://AxhEbscwsJDbYMH2:cYxg3b4PtWoVD5SjFayWxtnSVsjzRsg4@132.126.207.130:35690/hudi_services/hudi_api/api/message_id?flink_job_id=1542097984132706304&alias=crm_cfguse_mkt_cam_strategy_rel
Connection: Keep-Alive
User-Agent: Apache-HttpClient/4.5.14 (Java/17.0.10)
Cookie: JSESSIONID=F5F155198FAF72435339CC2E21B873CC
Accept-Encoding: br,deflate,gzip,x-gzip
###
POST http://AxhEbscwsJDbYMH2:cYxg3b4PtWoVD5SjFayWxtnSVsjzRsg4@b12s8.hdp.dc:15391/hdfs/write?root=hdfs://b2/apps/datalake/test/test.txt&overwrite=true
Content-Type: text/plain
Content-Length: 738
Connection: Keep-Alive
User-Agent: Apache-HttpClient/4.5.14 (Java/17.0.10)
Cookie: JSESSIONID=E6FF5447C8553BA4268979B8C5779363
Accept-Encoding: br,deflate,gzip,x-gzip
\#Properties saved on 2023-12-26T09:18:39.583Z
\#Tue Dec 26 17:18:39 CST 2023
hoodie.table.precombine.field=update_ts
hoodie.datasource.write.drop.partition.columns=false
hoodie.table.partition.fields=CITY_ID
hoodie.table.type=MERGE_ON_READ
hoodie.archivelog.folder=archived
hoodie.compaction.payload.class=org.apache.hudi.common.model.OverwriteWithLatestAvroPayload
hoodie.timeline.layout.version=1
hoodie.table.version=4
hoodie.table.recordkey.fields=_key
hoodie.datasource.write.partitionpath.urlencode=false
hoodie.table.name=dws_account
hoodie.table.keygenerator.class=org.apache.hudi.keygen.SimpleKeyGenerator
hoodie.table.timeline.timezone=LOCAL
hoodie.datasource.write.hive_style_partitioning=false
hoodie.table.checksum=989688289
###
POST http://AxhEbscwsJDbYMH2:cYxg3b4PtWoVD5SjFayWxtnSVsjzRsg4@b12s8.hdp.dc:15391/hdfs/write?root=hdfs://b2/apps/datalake/test/test.txt&overwrite=true
Content-Length: 11
Content-Type: */*; charset=UTF-8
Connection: Keep-Alive
User-Agent: Apache-HttpClient/4.5.14 (Java/17.0.10)
Cookie: JSESSIONID=D12E206603C453F1429C0B7DF1519A4B
Accept-Encoding: br,deflate,gzip,x-gzip
Hello world
###
POST http://AxhEbscwsJDbYMH2:cYxg3b4PtWoVD5SjFayWxtnSVsjzRsg4@b12s8.hdp.dc:34469/hdfs/write?root=hdfs://b2/apps/datalake/test/test.txt&overwrite=true
Content-Length: 11
Content-Type: */*; charset=UTF-8
Connection: Keep-Alive
User-Agent: Apache-HttpClient/4.5.14 (Java/17.0.10)
Cookie: JSESSIONID=D12E206603C453F1429C0B7DF1519A4B
Accept-Encoding: br,deflate,gzip,x-gzip
Hello world
###
POST http://AxhEbscwsJDbYMH2:cYxg3b4PtWoVD5SjFayWxtnSVsjzRsg4@b12s8.hdp.dc:34469/hdfs/write?root=hdfs://b2/apps/datalake/test/test.txt
Content-Length: 11
Content-Type: */*; charset=UTF-8
Connection: Keep-Alive
User-Agent: Apache-HttpClient/4.5.14 (Java/17.0.10)
Cookie: JSESSIONID=D12E206603C453F1429C0B7DF1519A4B
Accept-Encoding: br,deflate,gzip,x-gzip
Hello world
<> 2024-05-08T095641.500.txt
###

View File

@@ -5,7 +5,7 @@ mvn install -N -D skipTests
deploy service-common service-dependencies service-configuration service-forest service-cli service-cli/service-cli-core service-executor service-executor/service-executor-core utils/executor
package service-api service-check service-cli/service-cli-runner service-cloud-query service-executor/service-executor-manager service-executor/service-executor-task service-command service-command-pro service-exporter service-flink-query service-gateway service-hudi-query service-info-query service-monitor service-loki-query service-pulsar-query service-queue service-scheduler service-uploader service-web service-yarn-query service-zookeeper-query utils/patch utils/sync
-configs=(b2a4 b2b1 b2b5 b2b12)
+configs=(b2a4 b2b1 b2b12)
for config in ${configs[*]};
do
mvn -pl service-launcher clean package -D skipTests -P $config
@@ -34,5 +34,5 @@ upload $root_path/service-yarn-query/target/service-yarn-query-1.0.0-SNAPSHOT.ja
upload $root_path/service-zookeeper-query/target/service-zookeeper-query-1.0.0-SNAPSHOT.jar
upload $root_path/utils/sync/target/sync-1.0.0-SNAPSHOT.jar
-upload_ytp $root_path/service-cli/service-cli-runner/target/service-cli-runner-1.0.0-SNAPSHOT.jar
+upload $root_path/service-cli/service-cli-runner/target/service-cli-runner-1.0.0-SNAPSHOT.jar
-upload_ytp $root_path/service-uploader/target/service-uploader-1.0.0-SNAPSHOT.jar
+upload $root_path/service-uploader/target/service-uploader-1.0.0-SNAPSHOT.jar

View File

@@ -3,4 +3,4 @@ root_path=$(dirname $(cd $(dirname $0);pwd))
source $root_path/bin/library.sh
deploy service-cli service-cli/service-cli-core
package service-cli/service-cli-runner
-ytp-transfer2 $root_path/service-cli/service-cli-runner/target/service-cli-runner-1.0.0-SNAPSHOT.jar
+upload $root_path/service-cli/service-cli-runner/target/service-cli-runner-1.0.0-SNAPSHOT.jar

View File

@@ -3,7 +3,7 @@ root_path=$(dirname $(cd $(dirname $0);pwd))
source $root_path/bin/library.sh
deploy service-common service-dependencies service-configuration service-forest
-configs=(b2a4 b2b1 b2b5 b2b12)
+configs=(b2a4 b2b1 b2b12)
for config in ${configs[*]};
do
mvn -pl service-launcher clean package -D skipTests -P $config

View File

@@ -3,5 +3,5 @@
root_path=/apps/zone_scfp/hudi/cloud
jdk_path=/opt/jdk8u252-b09/bin/java
-curl ftp://yyy:QeY\!68\)4nH1@132.121.122.15:2222/service-check-1.0.0-SNAPSHOT.jar -o ${root_path}/service-check.jar
+curl http://AxhEbscwsJDbYMH2:cYxg3b4PtWoVD5SjFayWxtnSVsjzRsg4@132.126.207.124:36800/file/download/service-check-1.0.0-SNAPSHOT.jar -o ${root_path}/service-check.jar
${jdk_path} -jar ${root_path}/service-check.jar

View File

@@ -5,7 +5,7 @@ jdk_path=/opt/jdk1.8.0_162/bin/java
arguments=$@
# If you upload the jar manually, just comment out this line
-curl ftp://yyy:QeY\!68\)4nH1@132.121.122.15:2222/service-cli-runner-1.0.0-SNAPSHOT.jar -o ${jars_path}/service-cli-runner.jar
+curl http://AxhEbscwsJDbYMH2:cYxg3b4PtWoVD5SjFayWxtnSVsjzRsg4@132.126.207.124:36800/file/download/service-cli-runner-1.0.0-SNAPSHOT.jar -o ${jars_path}/service-cli-runner.jar
${jdk_path} -jar ${jars_path}/service-cli-runner.jar \
--spring.profiles.active=b12 \
--deploy.generate.command=true \

View File

@@ -2,14 +2,6 @@
build_profile=b2b12
iap_username=iap
iap_password=IAPAb123456!
iap_url=$iap_username@132.122.1.162
ytp_username=yyy
ytp_password='QeY\!68\)4nH1'
ytp_url=ftp://$ytp_username:$ytp_password@132.121.122.15:2222
upload_username=AxhEbscwsJDbYMH2
upload_password=cYxg3b4PtWoVD5SjFayWxtnSVsjzRsg4
upload_url=http://$upload_username:$upload_password@132.126.207.124:36800
@@ -28,23 +20,6 @@ function upload() {
rm $source_file_path
}
function upload_ytp() {
source_file_path=$(realpath $1)
file_name=$(basename $source_file_path)
echo "↪ Source md5: $(md5sum $source_file_path | awk '{print $1}')"
echo "↪ Uploading $source_file_path ↪ /tmp/$file_name"
sshpass -p $iap_password scp $source_file_path $iap_url:/tmp
echo "↪ Upload 162 success"
target_md5=$(sshpass -p $iap_password ssh -o 'StrictHostKeyChecking no' $iap_url "md5sum /tmp/$file_name | awk '{print \$1}'")
echo "↪ Target md5: $target_md5"
echo "↪ Command: sshpass -p $iap_password ssh -o 'StrictHostKeyChecking no' $iap_url \"curl --retry 5 $ytp_url -T /tmp/$file_name\""
sshpass -p $iap_password ssh -o 'StrictHostKeyChecking no' $iap_url "curl --retry 5 $ytp_url -T /tmp/$file_name"
echo "↪ Upload ytp success"
echo "↪ Download: curl $ytp_url/$file_name -o $file_name"
echo "↪ Delete source"
rm $source_file_path
}
function joining {
local d=${1-} f=${2-}
if shift 2; then

View File

@@ -1,265 +0,0 @@
<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
<property>
<name>fs.azure.user.agent.prefix</name>
<value>User-Agent: APN/1.0 Hortonworks/1.0 HDP/</value>
</property>
<property>
<name>fs.defaultFS</name>
<value>hdfs://b2</value>
<final>true</final>
</property>
<property>
<name>fs.s3a.fast.upload</name>
<value>true</value>
</property>
<property>
<name>fs.s3a.fast.upload.buffer</name>
<value>disk</value>
</property>
<property>
<name>fs.s3a.multipart.size</name>
<value>67108864</value>
</property>
<property>
<name>fs.trash.interval</name>
<value>4320</value>
</property>
<property>
<name>fs.trash.checkpoint.interval</name>
<value>360</value>
</property>
<property>
<name>ha.failover-controller.active-standby-elector.zk.op.retries</name>
<value>120</value>
</property>
<property>
<name>ha.zookeeper.acl</name>
<value>sasl:nn:rwcda</value>
</property>
<property>
<name>ha.zookeeper.quorum</name>
<value>b5m1.hdp.dc:2181,b5m2.hdp.dc:2181,b5m3.hdp.dc:2181</value>
</property>
<property>
<name>hadoop.http.authentication.kerberos.keytab</name>
<value>/etc/security/keytabs/spnego.service.keytab</value>
</property>
<property>
<name>hadoop.http.authentication.kerberos.principal</name>
<value>HTTP/_HOST@ECLD.COM</value>
</property>
<property>
<name>hadoop.http.authentication.signature.secret.file</name>
<value>/etc/security/http_secret</value>
</property>
<property>
<name>hadoop.http.authentication.simple.anonymous.allowed</name>
<value>true</value>
</property>
<property>
<name>hadoop.http.authentication.type</name>
<value>simple</value>
</property>
<property>
<name>hadoop.http.cross-origin.allowed-headers</name>
<value>X-Requested-With,Content-Type,Accept,Origin,WWW-Authenticate,Accept-Encoding,Transfer-Encoding</value>
</property>
<property>
<name>hadoop.http.cross-origin.allowed-methods</name>
<value>GET,PUT,POST,OPTIONS,HEAD,DELETE</value>
</property>
<property>
<name>hadoop.http.cross-origin.allowed-origins</name>
<value>*</value>
</property>
<property>
<name>hadoop.http.cross-origin.max-age</name>
<value>1800</value>
</property>
<property>
<name>hadoop.http.filter.initializers</name>
<value>org.apache.hadoop.security.AuthenticationFilterInitializer,org.apache.hadoop.security.HttpCrossOriginFilterInitializer</value>
</property>
<property>
<name>hadoop.proxyuser.hdfs.groups</name>
<value>*</value>
</property>
<property>
<name>hadoop.proxyuser.hdfs.hosts</name>
<value>*</value>
</property>
<property>
<name>hadoop.proxyuser.hive.groups</name>
<value>*</value>
</property>
<property>
<name>hadoop.proxyuser.hive.hosts</name>
<value>*</value>
</property>
<property>
<name>hadoop.proxyuser.HTTP.groups</name>
<value>*</value>
</property>
<property>
<name>hadoop.proxyuser.HTTP.hosts</name>
<value>*</value>
</property>
<property>
<name>hadoop.proxyuser.iap.groups</name>
<value>*</value>
</property>
<property>
<name>hadoop.proxyuser.iap.hosts</name>
<value>*</value>
</property>
<property>
<name>hadoop.proxyuser.livy.groups</name>
<value>*</value>
</property>
<property>
<name>hadoop.proxyuser.livy.hosts</name>
<value>*</value>
</property>
<property>
<name>hadoop.proxyuser.yarn.groups</name>
<value>*</value>
</property>
<property>
<name>hadoop.proxyuser.yarn.hosts</name>
<value>*</value>
</property>
<property>
<name>hadoop.rpc.protection</name>
<value>authentication,privacy</value>
</property>
<property>
<name>hadoop.security.auth_to_local</name>
<value>RULE:[1:$1@$0](hbase-b5@ECLD.COM)s/.*/hbase/
RULE:[1:$1@$0](hdfs-b5@ECLD.COM)s/.*/hdfs/
RULE:[1:$1@$0](spark-b5@ECLD.COM)s/.*/spark/
RULE:[1:$1@$0](yarn-ats-b5@ECLD.COM)s/.*/yarn-ats/
RULE:[1:$1@$0](.*@ECLD.COM)s/@.*//
RULE:[2:$1@$0](dn@ECLD.COM)s/.*/hdfs/
RULE:[2:$1@$0](hbase@ECLD.COM)s/.*/hbase/
RULE:[2:$1@$0](hive@ECLD.COM)s/.*/hive/
RULE:[2:$1@$0](jhs@ECLD.COM)s/.*/mapred/
RULE:[2:$1@$0](jn@ECLD.COM)s/.*/hdfs/
RULE:[2:$1@$0](livy@ECLD.COM)s/.*/livy/
RULE:[2:$1@$0](nm@ECLD.COM)s/.*/yarn/
RULE:[2:$1@$0](nn@ECLD.COM)s/.*/hdfs/
RULE:[2:$1@$0](rangeradmin@ECLD.COM)s/.*/ranger/
RULE:[2:$1@$0](rangerlookup@ECLD.COM)s/.*/ranger/
RULE:[2:$1@$0](rangertagsync@ECLD.COM)s/.*/rangertagsync/
RULE:[2:$1@$0](rangerusersync@ECLD.COM)s/.*/rangerusersync/
RULE:[2:$1@$0](rm@ECLD.COM)s/.*/yarn/
RULE:[2:$1@$0](spark@ECLD.COM)s/.*/spark/
RULE:[2:$1@$0](yarn@ECLD.COM)s/.*/yarn/
RULE:[2:$1@$0](yarn-ats-hbase@ECLD.COM)s/.*/yarn-ats/
DEFAULT</value>
</property>
<property>
<name>hadoop.security.authentication</name>
<value>kerberos</value>
</property>
<property>
<name>hadoop.security.authorization</name>
<value>true</value>
</property>
<property>
<name>hadoop.security.instrumentation.requires.admin</name>
<value>false</value>
</property>
<property>
<name>io.compression.codec.lzo.class</name>
<value>com.hadoop.compression.lzo.LzoCodec</value>
</property>
<property>
<name>io.compression.codecs</name>
<value>org.apache.hadoop.io.compress.GzipCodec,com.hadoop.compression.lzo.LzoCodec,com.hadoop.compression.lzo.LzopCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec</value>
</property>
<property>
<name>io.file.buffer.size</name>
<value>131072</value>
</property>
<property>
<name>io.serializations</name>
<value>org.apache.hadoop.io.serializer.WritableSerialization</value>
</property>
<property>
<name>ipc.client.connect.max.retries</name>
<value>50</value>
</property>
<property>
<name>ipc.client.connection.maxidletime</name>
<value>30000</value>
</property>
<property>
<name>ipc.client.idlethreshold</name>
<value>8000</value>
</property>
<property>
<name>ipc.server.tcpnodelay</name>
<value>true</value>
</property>
<property>
<name>mapreduce.jobtracker.webinterface.trusted</name>
<value>false</value>
</property>
<property>
<name>ipc.client.fallback-to-simple-auth-allowed</name>
<value>true</value>
</property>
<property>
<name>fs.hdfs.impl.disable.cache</name>
<value>true</value>
</property>
</configuration>

View File

@@ -1,713 +0,0 @@
<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
<property>
<name>dfs.block.access.token.enable</name>
<value>true</value>
</property>
<property>
<name>dfs.blockreport.initialDelay</name>
<value>120</value>
</property>
<property>
<name>dfs.blocksize</name>
<value>134217728</value>
</property>
<property>
<name>dfs.client.failover.proxy.provider.b5</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<property>
<name>dfs.client.read.shortcircuit</name>
<value>true</value>
</property>
<property>
<name>dfs.client.read.shortcircuit.streams.cache.size</name>
<value>4096</value>
</property>
<property>
<name>dfs.client.retry.policy.enabled</name>
<value>false</value>
</property>
<property>
<name>dfs.cluster.administrators</name>
<value> hdfs</value>
</property>
<property>
<name>dfs.content-summary.limit</name>
<value>5000</value>
</property>
<property>
<name>dfs.data.transfer.protection</name>
<value>authentication,privacy</value>
</property>
<property>
<name>dfs.datanode.address</name>
<value>0.0.0.0:1019</value>
</property>
<property>
<name>dfs.datanode.balance.bandwidthPerSec</name>
<value>6250000</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>[DISK]file:///data1/hadoop/hdfs/data,[DISK]file:///data2/hadoop/hdfs/data,[DISK]file:///data3/hadoop/hdfs/data,[DISK]file:///data4/hadoop/hdfs/data,[DISK]file:///data5/hadoop/hdfs/data,[DISK]file:///data6/hadoop/hdfs/data,[DISK]file:///data7/hadoop/hdfs/data,[DISK]file:///data8/hadoop/hdfs/data,[DISK]file:///data9/hadoop/hdfs/data,[DISK]file:///data10/hadoop/hdfs/data,[DISK]file:///data11/hadoop/hdfs/data,[DISK]file:///data12/hadoop/hdfs/data,[DISK]file:///data13/hadoop/hdfs/data,[DISK]file:///data14/hadoop/hdfs/data,[DISK]file:///data15/hadoop/hdfs/data,[DISK]file:///data16/hadoop/hdfs/data,[DISK]file:///data17/hadoop/hdfs/data,[DISK]file:///data18/hadoop/hdfs/data,[DISK]file:///data19/hadoop/hdfs/data,[DISK]file:///data20/hadoop/hdfs/data,[DISK]file:///data21/hadoop/hdfs/data,[DISK]file:///data22/hadoop/hdfs/data,[DISK]file:///data23/hadoop/hdfs/data,[DISK]file:///data24/hadoop/hdfs/data</value>
<final>true</final>
</property>
<property>
<name>dfs.datanode.data.dir.perm</name>
<value>750</value>
</property>
<property>
<name>dfs.datanode.du.reserved</name>
<value>26405499904</value>
</property>
<property>
<name>dfs.datanode.failed.volumes.tolerated</name>
<value>2</value>
<final>true</final>
</property>
<property>
<name>dfs.datanode.http.address</name>
<value>0.0.0.0:1022</value>
</property>
<property>
<name>dfs.datanode.https.address</name>
<value>0.0.0.0:50475</value>
</property>
<property>
<name>dfs.datanode.ipc.address</name>
<value>0.0.0.0:8010</value>
</property>
<property>
<name>dfs.datanode.kerberos.principal</name>
<value>dn/_HOST@ECLD.COM</value>
</property>
<property>
<name>dfs.datanode.keytab.file</name>
<value>/etc/security/keytabs/dn.service.keytab</value>
</property>
<property>
<name>dfs.datanode.max.transfer.threads</name>
<value>16384</value>
</property>
<property>
<name>dfs.domain.socket.path</name>
<value>/var/lib/hadoop-hdfs/dn_socket</value>
</property>
<property>
<name>dfs.encrypt.data.transfer.cipher.suites</name>
<value>AES/CTR/NoPadding</value>
</property>
<property>
<name>dfs.ha.automatic-failover.enabled</name>
<value>true</value>
</property>
<property>
<name>dfs.ha.fencing.methods</name>
<value>shell(/bin/true)</value>
</property>
<property>
<name>dfs.ha.namenodes.b5</name>
<value>nn1,nn2</value>
</property>
<property>
<name>dfs.heartbeat.interval</name>
<value>3</value>
</property>
<property>
<name>dfs.hosts.exclude</name>
<value>/etc/hadoop/conf/dfs.exclude</value>
</property>
<property>
<name>dfs.http.policy</name>
<value>HTTP_ONLY</value>
</property>
<property>
<name>dfs.https.port</name>
<value>50470</value>
</property>
<property>
<name>dfs.internal.nameservices</name>
<value>b5</value>
</property>
<property>
<name>dfs.journalnode.edits.dir.b5</name>
<value>/data2/hadoop/hdfs/journal</value>
</property>
<property>
<name>dfs.journalnode.http-address</name>
<value>0.0.0.0:8480</value>
</property>
<property>
<name>dfs.journalnode.https-address</name>
<value>0.0.0.0:8481</value>
</property>
<property>
<name>dfs.journalnode.kerberos.internal.spnego.principal</name>
<value>HTTP/_HOST@ECLD.COM</value>
</property>
<property>
<name>dfs.journalnode.kerberos.principal</name>
<value>jn/_HOST@ECLD.COM</value>
</property>
<property>
<name>dfs.journalnode.keytab.file</name>
<value>/etc/security/keytabs/jn.service.keytab</value>
</property>
<property>
<name>dfs.namenode.accesstime.precision</name>
<value>0</value>
</property>
<property>
<name>dfs.namenode.acls.enabled</name>
<value>true</value>
</property>
<property>
<name>dfs.namenode.audit.log.async</name>
<value>true</value>
</property>
<property>
<name>dfs.namenode.avoid.read.stale.datanode</name>
<value>true</value>
</property>
<property>
<name>dfs.namenode.avoid.write.stale.datanode</name>
<value>true</value>
</property>
<property>
<name>dfs.namenode.checkpoint.dir</name>
<value>/data/hadoop/hdfs/namesecondary</value>
</property>
<property>
<name>dfs.namenode.checkpoint.edits.dir</name>
<value>${dfs.namenode.checkpoint.dir}</value>
</property>
<property>
<name>dfs.namenode.checkpoint.period</name>
<value>21600</value>
</property>
<property>
<name>dfs.namenode.checkpoint.txns</name>
<value>1000000</value>
</property>
<property>
<name>dfs.namenode.fslock.fair</name>
<value>false</value>
</property>
<property>
<name>dfs.namenode.handler.count</name>
<value>100</value>
</property>
<property>
<name>dfs.namenode.http-address.b5.nn1</name>
<value>b5m2.hdp.dc:50070</value>
</property>
<property>
<name>dfs.namenode.http-address.b5.nn2</name>
<value>b5m3.hdp.dc:50070</value>
</property>
<property>
<name>dfs.namenode.https-address.b5.nn1</name>
<value>b5m2.hdp.dc:50470</value>
</property>
<property>
<name>dfs.namenode.https-address.b5.nn2</name>
<value>b5m3.hdp.dc:50470</value>
</property>
<property>
<name>dfs.namenode.kerberos.internal.spnego.principal</name>
<value>HTTP/_HOST@ECLD.COM</value>
</property>
<property>
<name>dfs.namenode.kerberos.principal</name>
<value>nn/_HOST@ECLD.COM</value>
</property>
<property>
<name>dfs.namenode.keytab.file</name>
<value>/etc/security/keytabs/nn.service.keytab</value>
</property>
<property>
<name>dfs.namenode.max.extra.edits.segments.retained</name>
<value>180</value>
</property>
<property>
<name>dfs.namenode.name.dir</name>
<value>/data1/hadoop/hdfs/namenode,/data2/hadoop/hdfs/namenode</value>
<final>true</final>
</property>
<property>
<name>dfs.namenode.name.dir.restore</name>
<value>true</value>
</property>
<property>
<name>dfs.namenode.num.extra.edits.retained</name>
<value>18000</value>
</property>
<property>
<name>dfs.namenode.rpc-address.b5.nn1</name>
<value>b5m2.hdp.dc:8020</value>
</property>
<property>
<name>dfs.namenode.rpc-address.b5.nn2</name>
<value>b5m3.hdp.dc:8020</value>
</property>
<property>
<name>dfs.namenode.safemode.threshold-pct</name>
<value>0.99</value>
</property>
<property>
<name>dfs.namenode.shared.edits.dir.b5</name>
<value>qjournal://b5m1.hdp.dc:8485;b5m2.hdp.dc:8485;b5m3.hdp.dc:8485/b5</value>
</property>
<property>
<name>dfs.namenode.stale.datanode.interval</name>
<value>30000</value>
</property>
<property>
<name>dfs.namenode.startup.delay.block.deletion.sec</name>
<value>3600</value>
</property>
<property>
<name>dfs.namenode.write.stale.datanode.ratio</name>
<value>1.0f</value>
</property>
<property>
<name>dfs.nameservices</name>
<value>b5,b1,b2,b3,b4,a3,a4,f1,e1,d2</value>
</property>
<property>
<name>dfs.permissions.ContentSummary.subAccess</name>
<value>true</value>
</property>
<property>
<name>dfs.permissions.enabled</name>
<value>true</value>
</property>
<property>
<name>dfs.permissions.superusergroup</name>
<value>hdfs</value>
</property>
<property>
<name>dfs.replication</name>
<value>3</value>
</property>
<property>
<name>dfs.replication.max</name>
<value>50</value>
</property>
<property>
<name>dfs.web.authentication.kerberos.keytab</name>
<value>/etc/security/keytabs/spnego.service.keytab</value>
</property>
<property>
<name>dfs.web.authentication.kerberos.principal</name>
<value>HTTP/_HOST@ECLD.COM</value>
</property>
<property>
<name>dfs.webhdfs.enabled</name>
<value>true</value>
<final>true</final>
</property>
<property>
<name>fs.permissions.umask-mode</name>
<value>022</value>
</property>
<property>
<name>hadoop.caller.context.enabled</name>
<value>true</value>
</property>
<property>
<name>manage.include.files</name>
<value>false</value>
</property>
<property>
<name>nfs.exports.allowed.hosts</name>
<value>* rw</value>
</property>
<property>
<name>nfs.file.dump.dir</name>
<value>/tmp/.hdfs-nfs</value>
</property>
<property>
<name>dfs.client.datanode-restart.timeout</name>
<value>30</value>
</property>
<property>
<name>dfs.client.failover.proxy.provider.a4</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<property>
<name>dfs.ha.namenodes.a4</name>
<value>nn1,nn2</value>
</property>
<property>
<name>dfs.namenode.http-address.a4.nn1</name>
<value>a4m1.hdp.dc:50070</value>
</property>
<property>
<name>dfs.namenode.http-address.a4.nn2</name>
<value>a4m2.hdp.dc:50070</value>
</property>
<property>
<name>dfs.namenode.https-address.a4.nn1</name>
<value>a4m1.hdp.dc:50470</value>
</property>
<property>
<name>dfs.namenode.https-address.a4.nn2</name>
<value>a4m2.hdp.dc:50470</value>
</property>
<property>
<name>dfs.namenode.rpc-address.a4.nn1</name>
<value>a4m1.hdp.dc:8020</value>
</property>
<property>
<name>dfs.namenode.rpc-address.a4.nn2</name>
<value>a4m2.hdp.dc:8020</value>
</property>
<property>
<name>dfs.client.failover.proxy.provider.a3</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<property>
<name>dfs.ha.namenodes.a3</name>
<value>nn1,nn2</value>
</property>
<property>
<name>dfs.namenode.http-address.a3.nn1</name>
<value>a3m2.hdp.dc:50070</value>
</property>
<property>
<name>dfs.namenode.http-address.a3.nn2</name>
<value>a3m3.hdp.dc:50070</value>
</property>
<property>
<name>dfs.namenode.https-address.a3.nn1</name>
<value>a3m2.hdp.dc:50470</value>
</property>
<property>
<name>dfs.namenode.https-address.a3.nn2</name>
<value>a3m3.hdp.dc:50470</value>
</property>
<property>
<name>dfs.namenode.rpc-address.a3.nn1</name>
<value>a3m2.hdp.dc:8020</value>
</property>
<property>
<name>dfs.namenode.rpc-address.a3.nn2</name>
<value>a3m3.hdp.dc:8020</value>
</property>
<property>
<name>dfs.client.failover.proxy.provider.b3</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<property>
<name>dfs.ha.namenodes.b3</name>
<value>nn1,nn2</value>
</property>
<property>
<name>dfs.namenode.http-address.b3.nn1</name>
<value>b3m2.hdp.dc:50070</value>
</property>
<property>
<name>dfs.namenode.http-address.b3.nn2</name>
<value>b3m3.hdp.dc:50070</value>
</property>
<property>
<name>dfs.namenode.https-address.b3.nn1</name>
<value>b3m2.hdp.dc:50470</value>
</property>
<property>
<name>dfs.namenode.https-address.b3.nn2</name>
<value>b3m3.hdp.dc:50470</value>
</property>
<property>
<name>dfs.namenode.rpc-address.b3.nn1</name>
<value>b3m2.hdp.dc:8020</value>
</property>
<property>
<name>dfs.namenode.rpc-address.b3.nn2</name>
<value>b3m3.hdp.dc:8020</value>
</property>
<property>
<name>dfs.client.failover.proxy.provider.b1</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<property>
<name>dfs.client.failover.proxy.provider.b2</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<property>
<name>dfs.ha.namenodes.b1</name>
<value>nn1,nn2</value>
</property>
<property>
<name>dfs.ha.namenodes.b2</name>
<value>nn3,nn4</value>
</property>
<property>
<name>dfs.namenode.http-address.b1.nn1</name>
<value>b1m2.hdp.dc:50070</value>
</property>
<property>
<name>dfs.namenode.http-address.b1.nn2</name>
<value>b1m3.hdp.dc:50070</value>
</property>
<property>
<name>dfs.namenode.https-address.b1.nn1</name>
<value>b1m2.hdp.dc:50470</value>
</property>
<property>
<name>dfs.namenode.https-address.b1.nn2</name>
<value>b1m3.hdp.dc:50470</value>
</property>
<property>
<name>dfs.namenode.rpc-address.b1.nn1</name>
<value>b1m2.hdp.dc:8020</value>
</property>
<property>
<name>dfs.namenode.rpc-address.b1.nn2</name>
<value>b1m3.hdp.dc:8020</value>
</property>
<property>
<name>dfs.namenode.http-address.b2.nn3</name>
<value>b1m5.hdp.dc:50070</value>
</property>
<property>
<name>dfs.namenode.http-address.b2.nn4</name>
<value>b1m6.hdp.dc:50070</value>
</property>
<property>
<name>dfs.namenode.https-address.b2.nn3</name>
<value>b1m5.hdp.dc:50470</value>
</property>
<property>
<name>dfs.namenode.https-address.b2.nn4</name>
<value>b1m6.hdp.dc:50470</value>
</property>
<property>
<name>dfs.namenode.rpc-address.b2.nn3</name>
<value>b1m5.hdp.dc:8020</value>
</property>
<property>
<name>dfs.namenode.rpc-address.b2.nn4</name>
<value>b1m6.hdp.dc:8020</value>
</property>
<property>
<name>dfs.client.failover.proxy.provider.f1</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<property>
<name>dfs.ha.namenodes.f1</name>
<value>nn1,nn2</value>
</property>
<property>
<name>dfs.namenode.http-address.f1.nn1</name>
<value>f1m2.hdp.dc:50070</value>
</property>
<property>
<name>dfs.namenode.http-address.f1.nn2</name>
<value>f1m3.hdp.dc:50070</value>
</property>
<property>
<name>dfs.namenode.https-address.f1.nn1</name>
<value>f1m2.hdp.dc:50470</value>
</property>
<property>
<name>dfs.namenode.https-address.f1.nn2</name>
<value>f1m3.hdp.dc:50470</value>
</property>
<property>
<name>dfs.namenode.rpc-address.f1.nn1</name>
<value>f1m2.hdp.dc:8020</value>
</property>
<property>
<name>dfs.namenode.rpc-address.f1.nn2</name>
<value>f1m3.hdp.dc:8020</value>
</property>
<property>
<name>dfs.client.failover.proxy.provider.d2</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<property>
<name>dfs.ha.namenodes.d2</name>
<value>nn1,nn2</value>
</property>
<property>
<name>dfs.namenode.http-address.d2.nn1</name>
<value>d2m2.hdp.dc:50070</value>
</property>
<property>
<name>dfs.namenode.http-address.d2.nn2</name>
<value>d2m3.hdp.dc:50070</value>
</property>
<property>
<name>dfs.namenode.https-address.d2.nn1</name>
<value>d2m2.hdp.dc:50470</value>
</property>
<property>
<name>dfs.namenode.https-address.d2.nn2</name>
<value>d2m3.hdp.dc:50470</value>
</property>
<property>
<name>dfs.namenode.rpc-address.d2.nn1</name>
<value>d2m2.hdp.dc:8020</value>
</property>
<property>
<name>dfs.namenode.rpc-address.d2.nn2</name>
<value>d2m3.hdp.dc:8020</value>
</property>
<property>
<name>dfs.client.failover.proxy.provider.e1</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<property>
<name>dfs.ha.namenodes.e1</name>
<value>nn1,nn2</value>
</property>
<property>
<name>dfs.namenode.http-address.e1.nn1</name>
<value>e1m2.hdp.dc:50070</value>
</property>
<property>
<name>dfs.namenode.http-address.e1.nn2</name>
<value>e1m3.hdp.dc:50070</value>
</property>
<property>
<name>dfs.namenode.https-address.e1.nn1</name>
<value>e1m2.hdp.dc:50470</value>
</property>
<property>
<name>dfs.namenode.https-address.e1.nn2</name>
<value>e1m3.hdp.dc:50470</value>
</property>
<property>
<name>dfs.namenode.rpc-address.e1.nn1</name>
<value>e1m2.hdp.dc:8020</value>
</property>
<property>
<name>dfs.namenode.rpc-address.e1.nn2</name>
<value>e1m3.hdp.dc:8020</value>
</property>
<property>
<name>dfs.client.failover.proxy.provider.b4</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<property>
<name>dfs.ha.namenodes.b4</name>
<value>nn1,nn2</value>
</property>
<property>
<name>dfs.namenode.http-address.b4.nn1</name>
<value>b4m2.hdp.dc:50070</value>
</property>
<property>
<name>dfs.namenode.http-address.b4.nn2</name>
<value>b4m3.hdp.dc:50070</value>
</property>
<property>
<name>dfs.namenode.https-address.b4.nn1</name>
<value>b4m2.hdp.dc:50470</value>
</property>
<property>
<name>dfs.namenode.https-address.b4.nn2</name>
<value>b4m3.hdp.dc:50470</value>
</property>
<property>
<name>dfs.namenode.rpc-address.b4.nn1</name>
<value>b4m2.hdp.dc:8020</value>
</property>
<property>
<name>dfs.namenode.rpc-address.b4.nn2</name>
<value>b4m3.hdp.dc:8020</value>
</property>
</configuration>

File diff suppressed because it is too large

View File

@@ -8,7 +8,7 @@
<artifactId>hudi-service</artifactId>
<version>1.0.0-SNAPSHOT</version>
<packaging>pom</packaging>
-<description>Hudi服务应用集合</description>
+<description>Hudi服务模块系列应用</description>
<modules>
<module>service-common</module>
<module>service-dependencies</module>
@@ -74,12 +74,6 @@
<build-tag>b2b1</build-tag>
</properties>
</profile>
<profile>
<id>b2b5</id>
<properties>
<build-tag>b2b5</build-tag>
</properties>
</profile>
<profile>
<id>b2b12</id>
<properties>

View File

@@ -8,6 +8,7 @@ package com.lanyuanxiaoyao.service.cli.core;
*/
public class HostInfo {
private String ip;
private Boolean enabled = true;
private Boolean useAuthority = false;
private String username;
private String password;
@@ -20,6 +21,14 @@ public class HostInfo {
this.ip = ip;
}
public Boolean getEnabled() {
return enabled;
}
public void setEnabled(Boolean enabled) {
this.enabled = enabled;
}
public Boolean getUseAuthority() {
return useAuthority;
}
@@ -47,7 +56,8 @@ public class HostInfo {
@Override
public String toString() {
return "HostInfo{" +
-"ip='" + ip + '\'' +
+"enabled=" + enabled +
", ip='" + ip + '\'' +
", useAuthority=" + useAuthority +
", username='" + username + '\'' +
", password='" + password + '\'' +

View File

@@ -26,6 +26,10 @@ public class HostInfoWrapper {
return hostInfo.getIp();
}
public Boolean getEnabled() {
return hostInfo.getEnabled();
}
public Boolean getUseAuthority() {
return hostInfo.getUseAuthority();
}

View File

@@ -82,6 +82,7 @@ public class RunnerApplication implements ApplicationRunner {
return serviceInfo.getReplicas() == 0
? hostInfoList
.stream()
.filter(HostInfoWrapper::getEnabled)
.map(HostInfoWrapper::getIp)
.sorted(Comparator.naturalOrder())
.collect(Collectors.toList())
@@ -89,6 +90,7 @@ public class RunnerApplication implements ApplicationRunner {
RandomUtil.randomEleList(
hostInfoList
.stream()
.filter(HostInfoWrapper::getEnabled)
.map(HostInfoWrapper::getIp)
.collect(Collectors.toList()
), serviceInfo.getReplicas()
@@ -148,6 +150,15 @@ public class RunnerApplication implements ApplicationRunner {
selectedHosts = selectHosts(serviceInfo);
deployPlans.put(serviceInfo.getName(), selectedHosts);
}
// Exclude unavailable hosts
List<String> validIps = hostInfoList.stream()
.filter(HostInfoWrapper::getEnabled)
.map(HostInfoWrapper::getIp)
.collect(Collectors.toList());
selectedHosts = selectedHosts
.stream()
.filter(validIps::contains)
.collect(Collectors.toList());
} else {
selectedHosts = selectHosts(serviceInfo);
deployPlans.put(serviceInfo.getName(), selectedHosts);

View File

@@ -59,19 +59,17 @@ deploy:
# clusters that run hudi sync
sync-clusters: b12
# clusters that run hudi compaction
-compaction-clusters: b12,b1,b5,a4
+compaction-clusters: b12,b1,a4
# Overrides the common service configuration; the main thing to change is the number of deployment replicas
services:
service-api:
replicas: 10
service-launcher-b1:
replicas: 8
service-launcher-b5:
replicas: 6
service-launcher-a4:
replicas: 6
service-launcher-b12:
-replicas: 10
+replicas: 15
service-info-query:
replicas: 10
service-yarn-query:

View File

@@ -46,27 +46,6 @@ deploy:
"[connector.cluster.sync-queue-name]": sync-queue-b1 "[connector.cluster.sync-queue-name]": sync-queue-b1
"[connector.cluster.compaction-queue-name]": compaction-queue-b1 "[connector.cluster.compaction-queue-name]": compaction-queue-b1
"[connector.zookeeper.connect-url]": ${deploy.runtime.connector-zk-url} "[connector.zookeeper.connect-url]": ${deploy.runtime.connector-zk-url}
service-launcher-b5:
order: 4
groups:
- "service"
- "service-hudi"
- "service-hudi-launcher"
source-jar: service-launcher-b2b5-1.0.0-SNAPSHOT.jar
replicas: 6
environments:
"[connector.hadoop.kerberos-principal]": ${deploy.runtime.user}/$\{hostname}.hdp.dc@ECLD.COM
"[connector.hadoop.kerberos-keytab-path]": ${deploy.runtime.kerberos-keytab-path}
"[connector.hudi.app-hdfs-path]": ${deploy.runtime.hudi.app-hdfs-path}
"[connector.hudi.app-test-hdfs-path]": ${deploy.runtime.hudi.app-test-hdfs-path}
"[connector.hudi.victoria-push-url]": ${deploy.runtime.hudi.victoria-push-url}
"[connector.hudi.loki-push-url]": ${deploy.runtime.hudi.loki-push-url}
arguments:
"[spring.application.name]": service-launcher-b5
"[connector.cluster.cluster]": b5
"[connector.cluster.sync-queue-name]": sync-queue-b5
"[connector.cluster.compaction-queue-name]": compaction-queue-b5
"[connector.zookeeper.connect-url]": ${deploy.runtime.connector-zk-url}
service-launcher-a4:
order: 4
groups:

View File

@@ -205,6 +205,41 @@ public class HudiCommand {
fileSystem.close();
}
@ShellMethod("Max meta files")
public void maxMetaFiles() throws IOException {
MutableList<P> list = Lists.mutable.<P>empty().asSynchronized();
FileSystem fileSystem = FileSystem.get(new Configuration());
infoService
.tableMetaList()
.collect(TableMeta::getHudi)
.collect(TableMeta.HudiMeta::getTargetHdfsPath)
.asParallel(ExecutorProvider.EXECUTORS_20, 1)
.forEach(hdfs -> {
Path root = new Path(hdfs, ".hoodie");
try {
FileStatus[] statuses = fileSystem.listStatus(root);
long num = 0;
for (FileStatus status : statuses) {
if (status.isFile()) {
num++;
}
if (StrUtil.containsIgnoreCase(status.getPath().toString(), "INVALID")) {
logger.info("{}", status.getPath().toString());
}
}
list.add(new P(num, hdfs));
logger.info("Count: {} Hdfs: {}", num, hdfs);
} catch (IOException e) {
logger.warn("List file error", e);
}
});
MutableList<P> listP = list.select(p -> p.count > 1000);
for (P maxP : listP) {
logger.info("Max: {} Hdfs: {}", maxP.count, maxP.hdfs);
}
fileSystem.close();
}
@ShellMethod("Get timeline instants") @ShellMethod("Get timeline instants")
public void timelineInstant(@ShellOption(help = "root hdfs path") String hdfs) { public void timelineInstant(@ShellOption(help = "root hdfs path") String hdfs) {
hudiService.timelineHdfsAllActive(hdfs).forEach(instant -> logger.info(instant.toString())); hudiService.timelineHdfsAllActive(hdfs).forEach(instant -> logger.info(instant.toString()));
@@ -229,4 +264,14 @@ public class HudiCommand {
public interface Runnable { public interface Runnable {
void run(LongAdder counter); void run(LongAdder counter);
} }
private static final class P {
String hdfs;
long count;
public P(long count, String hdfs) {
this.count = count;
this.hdfs = hdfs;
}
}
}

View File

@@ -128,6 +128,12 @@ public interface Constants {
String METRICS_PULSAR_PREFIX = METRICS_PREFIX + "_pulsar";
String METRICS_PULSAR_BACKLOG = METRICS_PULSAR_PREFIX + "_backlog";
String METRICS_HUDI_TABLE = METRICS_PREFIX + "_hudi_table";
String METRICS_HUDI_TABLE_FILE_COUNT = METRICS_HUDI_TABLE + "_file_count";
String METRICS_HUDI_TABLE_FILE_COUNT_AVERAGE_PER_TABLE = METRICS_HUDI_TABLE_FILE_COUNT + "_average_per_table";
String METRICS_HUDI_TABLE_TIMELINE_FILE_COUNT = METRICS_HUDI_TABLE + "_timeline_file_count";
String METRICS_HUDI_TABLE_TIMELINE_FILE_COUNT_AVERAGE_PER_TABLE = METRICS_HUDI_TABLE_TIMELINE_FILE_COUNT + "_average_per_table";
String METRICS_LABEL_FLINK_JOB_ID = "flink_job_id";
String METRICS_LABEL_FLINK_JOB_NAME = "flink_job_name";
String METRICS_LABEL_FLINK_NATIVE_JOB_ID = "flink_native_job_id";
@@ -221,12 +227,10 @@
String COMPACTION_QUEUE_PRE = "compaction-queue-pre";
String COMPACTION_QUEUE_B1 = "compaction-queue-b1";
String COMPACTION_QUEUE_B5 = "compaction-queue-b5";
String COMPACTION_QUEUE_A4 = "compaction-queue-a4";
String COMPACTION_QUEUE_B12 = "compaction-queue-b12";
String CLUSTER_B1 = "b1";
String CLUSTER_B5 = "b5";
String CLUSTER_A4 = "a4";
String CLUSTER_B12 = "b12";

View File

@@ -0,0 +1,55 @@
package com.lanyuanxiaoyao.service.configuration.entity.monitor;
/**
* Metrics collection progress
*
* @author lanyuanxiaoyao
* @date 2024-10-14
*/
public class MetricsProgress {
private String name;
private Boolean running;
private Double progress;
public MetricsProgress() {
}
public MetricsProgress(String name, Boolean running, Double progress) {
this.name = name;
this.running = running;
this.progress = progress;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public Double getProgress() {
return progress;
}
public void setProgress(Double progress) {
this.progress = progress;
}
public Boolean getRunning() {
return running;
}
public void setRunning(Boolean running) {
this.running = running;
}
@Override
public String toString() {
return "MetricsProgress{" +
"name='" + name + '\'' +
", running=" + running +
", progress=" + progress +
'}';
}
}

View File

@@ -22,7 +22,6 @@ public class YarnClusters {
MapUtil.<String, Cluster>builder()
.put("a4", new Cluster("http://132.121.107.91:8088"))
.put("b1", new Cluster("http://132.122.98.13:8088"))
.put("b5", new Cluster("http://132.122.116.12:8088"))
.put("b12", new Cluster("http://132.126.207.125:8088"))
.build()
);

View File

@@ -106,5 +106,14 @@ public interface HudiService {
InputStream download(@Query("root") String root);
@Get("/hdfs/size")
-String size(@Query("root") String root);
+Long size(@Query("root") String root);
+@Get("/hdfs/count")
+Long count(@Query("root") String root);
+@Get("/hdfs/file_count")
+Long fileCount(@Query("root") String root);
+@Get("/hdfs/directory_count")
+Long directoryCount(@Query("root") String root);
}
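The three new HDFS counting endpoints pair with the HdfsController and HdfsService additions further down. As a rough usage sketch only (the wrapper component and its reporting method are invented here; only HudiService and its methods come from this change), a caller could combine them like this:

import com.lanyuanxiaoyao.service.forest.service.HudiService;
import org.springframework.stereotype.Component;

// Illustrative wrapper, not part of the change: shows how the three endpoints
// relate for a given root path.
@Component
public class HdfsCountReport {
    private final HudiService hudiService;

    public HdfsCountReport(HudiService hudiService) {
        this.hudiService = hudiService;
    }

    public void report(String root) {
        Long files = hudiService.fileCount(root);             // files only
        Long directories = hudiService.directoryCount(root);  // directories only
        Long total = hudiService.count(root);                 // files + directories on the server side
        System.out.printf("%s: %d files, %d directories, %d total%n", root, files, directories, total);
    }
}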

View File

@@ -0,0 +1,18 @@
package com.lanyuanxiaoyao.service.forest.service;
import com.dtflys.forest.annotation.BaseRequest;
import com.dtflys.forest.annotation.Get;
import com.lanyuanxiaoyao.service.configuration.entity.monitor.MetricsProgress;
import org.eclipse.collections.api.list.ImmutableList;
/**
* Monitoring metrics query
*
* @author lanyuanxiaoyao
* @date 2024-10-14
*/
@BaseRequest(baseURL = "http://service-monitor")
public interface MonitorService {
@Get("/metrics_control/progress")
ImmutableList<MetricsProgress> progress();
}

View File

@@ -1,12 +0,0 @@
package com.lanyuanxiaoyao.service.forest.service.launcher.impl;
import com.dtflys.forest.annotation.BaseRequest;
import com.lanyuanxiaoyao.service.forest.service.launcher.LauncherService;
/**
* @author lanyuanxiaoyao
* @date 2023-06-06
*/
@BaseRequest(baseURL = "http://service-launcher-b5")
public interface B5LauncherService extends LauncherService {
}

View File

@@ -82,4 +82,19 @@ public class HdfsController {
public Long size(@RequestParam("root") String root) throws IOException {
return hdfsService.size(root);
}
@GetMapping("count")
public Long count(@RequestParam("root") String root) throws IOException {
return hdfsService.count(root);
}
@GetMapping("file_count")
public Long fileCount(@RequestParam("root") String root) throws IOException {
return hdfsService.countFiles(root);
}
@GetMapping("directory_count")
public Long directoryCount(@RequestParam("root") String root) throws IOException {
return hdfsService.countDirectories(root);
}
}

View File

@@ -8,6 +8,7 @@ import com.lanyuanxiaoyao.service.forest.service.InfoService;
import java.io.IOException;
import java.io.OutputStream;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
@@ -160,4 +161,26 @@ public class HdfsService {
return fileSystem.getContentSummary(new Path(root)).getLength();
}
}
@Cacheable(value = "count-hpath", sync = true)
public Long count(String root) throws IOException {
try (FileSystem fileSystem = FileSystem.get(new Configuration())) {
ContentSummary summary = fileSystem.getContentSummary(new Path(root));
return summary.getFileCount() + summary.getDirectoryCount();
}
}
@Cacheable(value = "file-count-hpath", sync = true)
public Long countFiles(String root) throws IOException {
try (FileSystem fileSystem = FileSystem.get(new Configuration())) {
return fileSystem.getContentSummary(new Path(root)).getFileCount();
}
}
@Cacheable(value = "directory-count-hpath", sync = true)
public Long countDirectories(String root) throws IOException {
try (FileSystem fileSystem = FileSystem.get(new Configuration())) {
return fileSystem.getContentSummary(new Path(root)).getDirectoryCount();
}
}
}
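All three new service methods delegate to Hadoop's ContentSummary, so count is simply the sum of the other two. A standalone sketch of that relationship (assuming core-site.xml/hdfs-site.xml are on the classpath and a real path is substituted for /tmp):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Standalone sketch, not part of the service code.
public class ContentSummaryDemo {
    public static void main(String[] args) throws Exception {
        try (FileSystem fs = FileSystem.get(new Configuration())) {
            ContentSummary summary = fs.getContentSummary(new Path("/tmp"));
            long files = summary.getFileCount();
            long directories = summary.getDirectoryCount();
            // "count" as exposed by HdfsService is the sum of the two
            System.out.println(files + " + " + directories + " = " + (files + directories));
        }
    }
}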

View File

@@ -0,0 +1,40 @@
package com.lanyuanxiaoyao.service.monitor.controller;
import com.lanyuanxiaoyao.service.configuration.entity.monitor.MetricsProgress;
import com.lanyuanxiaoyao.service.monitor.metric.Metrics;
import java.util.Map;
import org.eclipse.collections.api.factory.Lists;
import org.eclipse.collections.api.list.ImmutableList;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.context.ApplicationContext;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;
/**
* Metrics execution progress endpoint
*
* @author lanyuanxiaoyao
* @date 2024-10-14
*/
@RestController
@RequestMapping("metrics_control")
public class MetricsController {
private static final Logger logger = LoggerFactory.getLogger(MetricsController.class);
private final ApplicationContext context;
public MetricsController(ApplicationContext context) {
this.context = context;
}
@GetMapping("progress")
public ImmutableList<MetricsProgress> progress() {
Map<String, Metrics> metricsMap = context.getBeansOfType(Metrics.class);
return Lists.immutable.ofAll(metricsMap.entrySet())
.toImmutableSortedList(Map.Entry.comparingByKey())
.collect(Map.Entry::getValue)
.collect(metrics -> new MetricsProgress(metrics.name(), metrics.running(), metrics.progress()));
}
}

View File

@@ -0,0 +1,111 @@
package com.lanyuanxiaoyao.service.monitor.metric;
import cn.hutool.core.util.StrUtil;
import com.lanyuanxiaoyao.service.common.Constants;
import com.lanyuanxiaoyao.service.common.entity.TableMeta;
import com.lanyuanxiaoyao.service.configuration.ExecutorProvider;
import com.lanyuanxiaoyao.service.forest.service.HudiService;
import com.lanyuanxiaoyao.service.forest.service.InfoService;
import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.Tag;
import java.util.concurrent.atomic.AtomicLong;
import org.eclipse.collections.api.factory.Lists;
import org.eclipse.collections.api.factory.Maps;
import org.eclipse.collections.api.list.ImmutableList;
import org.eclipse.collections.api.map.MutableMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Service;
import static com.lanyuanxiaoyao.service.common.Constants.MINUTE;
/**
* Hudi table file-count metrics
*
* @author lanyuanxiaoyao
* @date 2024-03-05
*/
@SuppressWarnings("SpringJavaInjectionPointsAutowiringInspection")
@Service
public class HudiTableFilesCountMetrics extends Metrics {
private static final Logger logger = LoggerFactory.getLogger(HudiTableFilesCountMetrics.class);
private final MeterRegistry registry;
private final InfoService infoService;
private final HudiService hudiService;
private final MutableMap<String, AtomicLong> fileCountCacheMap;
private final MutableMap<String, AtomicLong> timelineFileCountCacheMap;
public HudiTableFilesCountMetrics(MeterRegistry registry, InfoService infoService, HudiService hudiService) {
this.registry = registry;
this.infoService = infoService;
this.hudiService = hudiService;
fileCountCacheMap = Maps.mutable.empty();
timelineFileCountCacheMap = Maps.mutable.empty();
}
@Override
public String name() {
return "Hudi表文件数量监控";
}
@Scheduled(fixedDelay = 30 * MINUTE, initialDelay = MINUTE)
@Override
public void update() {
try {
start();
ImmutableList<TableMeta> metas = infoService.tableMetaList();
setTotal(metas.size());
metas
.asParallel(ExecutorProvider.EXECUTORS_2, 1)
.reject(meta -> StrUtil.isBlank(meta.getPulsarAddress()))
.forEach(meta -> {
try {
AtomicLong fileCountCache = fileCountCacheMap.getIfAbsentPut(
meta.getAlias(),
registry.gauge(
Constants.METRICS_HUDI_TABLE_FILE_COUNT,
Lists.immutable.of(
Tag.of(Constants.METRICS_LABEL_FLINK_JOB_ID, meta.getJob().getId().toString()),
Tag.of(Constants.METRICS_LABEL_ALIAS, meta.getAlias()),
Tag.of(Constants.METRICS_LABEL_SCHEMA, meta.getSchema()),
Tag.of(Constants.METRICS_LABEL_TABLE, meta.getTable())
),
new AtomicLong(0)
)
);
AtomicLong timelineFileCountCache = timelineFileCountCacheMap.getIfAbsentPut(
meta.getAlias(),
registry.gauge(
Constants.METRICS_HUDI_TABLE_TIMELINE_FILE_COUNT,
Lists.immutable.of(
Tag.of(Constants.METRICS_LABEL_FLINK_JOB_ID, meta.getJob().getId().toString()),
Tag.of(Constants.METRICS_LABEL_ALIAS, meta.getAlias()),
Tag.of(Constants.METRICS_LABEL_SCHEMA, meta.getSchema()),
Tag.of(Constants.METRICS_LABEL_TABLE, meta.getTable())
),
new AtomicLong(0)
)
);
String hdfs = meta.getHudi().getTargetHdfsPath();
if (hudiService.existsHudiTable(hdfs)) {
Long count = hudiService.fileCount(hdfs);
fileCountCache.set(count);
String timelineHdfs = hdfs + "/.hoodie";
timelineFileCountCache.set(hudiService.fileCount(timelineHdfs));
}
} catch (Exception exception) {
logger.warn("Get file count fail for {}", meta.getAlias(), exception);
}
finished();
});
} finally {
reset();
}
}
}
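The gauges above rely on Micrometer handing back the same AtomicLong that was registered, so a later set() is what the next scrape reports; keeping the reference in the cache maps is enough. A minimal, self-contained illustration of that pattern (the metric and tag names below are made up):

import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.Tag;
import io.micrometer.core.instrument.simple.SimpleMeterRegistry;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;

// Gauge-backed-by-AtomicLong sketch, illustration only.
public class GaugeSketch {
    public static void main(String[] args) {
        MeterRegistry registry = new SimpleMeterRegistry();
        AtomicLong fileCount = registry.gauge(
                "hudi_table_file_count_demo",
                List.of(Tag.of("alias", "demo_table")),
                new AtomicLong(0));
        fileCount.set(42); // a later scrape of the registry now reports 42
        System.out.println(registry.get("hudi_table_file_count_demo").gauge().value());
    }
}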

View File

@@ -1,9 +1,56 @@
package com.lanyuanxiaoyao.service.monitor.metric;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicLong;
/**
* @author lanyuanxiaoyao
* @date 2024-03-05
*/
public abstract class Metrics {
-abstract void update();
+private final AtomicBoolean running = new AtomicBoolean(false);
private final AtomicLong finished = new AtomicLong(0);
private final AtomicLong total = new AtomicLong(0);
public abstract String name();
public abstract void update();
public boolean running() {
return running.get();
}
public double progress() {
if (total.get() == 0) {
return 0;
} else {
return finished.get() * 1.0 / total.get();
}
}
protected void start() {
running.set(true);
}
protected void stop() {
running.set(false);
}
protected void setTotal(Long total) {
this.total.set(total);
}
protected void setTotal(Integer total) {
this.total.set(total);
}
protected void finished() {
finished.incrementAndGet();
}
protected void reset() {
stop();
setTotal(0);
finished.set(0);
}
}
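The intended lifecycle is start → setTotal → finished per item → reset, with progress() reporting finished/total while running() is true; that fraction is what the new /metrics_control/progress endpoint and the web progress bar consume. A toy subclass (illustration only, not one of the real @Service metrics) showing the call order:

import com.lanyuanxiaoyao.service.monitor.metric.Metrics;

// Toy subclass for illustration of the lifecycle only.
public class DemoMetrics extends Metrics {
    @Override
    public String name() {
        return "demo";
    }

    @Override
    public void update() {
        try {
            start();            // running() becomes true
            setTotal(4);        // denominator for progress()
            for (int i = 0; i < 4; i++) {
                // ... collect one item ...
                finished();     // progress() is now (i + 1) / 4.0
            }
        } finally {
            reset();            // running() false, counters back to 0
        }
    }
}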

View File

@@ -0,0 +1,97 @@
package com.lanyuanxiaoyao.service.monitor.metric;
import cn.hutool.core.util.StrUtil;
import com.lanyuanxiaoyao.service.common.Constants;
import com.lanyuanxiaoyao.service.common.entity.TableMeta;
import com.lanyuanxiaoyao.service.common.utils.NameHelper;
import com.lanyuanxiaoyao.service.configuration.ExecutorProvider;
import com.lanyuanxiaoyao.service.configuration.HudiServiceProperties;
import com.lanyuanxiaoyao.service.forest.service.InfoService;
import com.lanyuanxiaoyao.service.forest.service.PulsarService;
import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.Tag;
import java.util.concurrent.atomic.AtomicLong;
import org.eclipse.collections.api.factory.Lists;
import org.eclipse.collections.api.factory.Maps;
import org.eclipse.collections.api.list.ImmutableList;
import org.eclipse.collections.api.map.MutableMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Service;
import static com.lanyuanxiaoyao.service.common.Constants.MINUTE;
/**
* Pulsar backlog metrics
*
* @author lanyuanxiaoyao
* @date 2024-03-05
*/
@SuppressWarnings("SpringJavaInjectionPointsAutowiringInspection")
@Service
public class PulsarBacklogMetrics extends Metrics {
private static final Logger logger = LoggerFactory.getLogger(PulsarBacklogMetrics.class);
private final MeterRegistry registry;
private final InfoService infoService;
private final PulsarService pulsarService;
private final HudiServiceProperties hudiServiceProperties;
private final MutableMap<String, AtomicLong> backlogMap;
public PulsarBacklogMetrics(MeterRegistry registry, InfoService infoService, PulsarService pulsarService, HudiServiceProperties hudiServiceProperties) {
this.registry = registry;
this.infoService = infoService;
this.pulsarService = pulsarService;
this.hudiServiceProperties = hudiServiceProperties;
backlogMap = Maps.mutable.empty();
}
@Override
public String name() {
return "Pulsar backlog监控";
}
@Scheduled(fixedDelay = 30 * MINUTE, initialDelay = MINUTE)
@Override
public void update() {
try {
start();
ImmutableList<TableMeta> metas = infoService.tableMetaList();
setTotal(metas.size());
metas
.asParallel(ExecutorProvider.EXECUTORS_2, 1)
.reject(meta -> StrUtil.isBlank(meta.getPulsarAddress()))
.forEach(meta -> {
try {
AtomicLong backlogCache = backlogMap.getIfAbsentPut(
meta.getAlias(),
registry.gauge(
Constants.METRICS_PULSAR_BACKLOG,
Lists.immutable.of(
Tag.of(Constants.METRICS_LABEL_FLINK_JOB_ID, meta.getJob().getId().toString()),
Tag.of(Constants.METRICS_LABEL_ALIAS, meta.getAlias()),
Tag.of(Constants.METRICS_LABEL_SCHEMA, meta.getSchema()),
Tag.of(Constants.METRICS_LABEL_TABLE, meta.getTable())
),
new AtomicLong(0)
)
);
String name = pulsarService.name(meta.getPulsarAddress());
if (StrUtil.isNotBlank(name)) {
Long backlog = pulsarService.backlog(name, meta.getTopic(), NameHelper.pulsarSubscriptionName(meta.getJob().getId(), meta.getAlias(), hudiServiceProperties.getSignature()));
backlogCache.set(backlog);
infoService.savePulsarBacklog(meta.getJob().getId(), meta.getAlias(), backlog);
}
} catch (Exception exception) {
logger.warn("Update pulsar backlog fail for {}", meta.getAlias(), exception);
}
finished();
});
} finally {
reset();
}
}
}
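Both metrics classes fan the per-table work out with Eclipse Collections' asParallel(executor, batchSize) before reject/forEach. A self-contained sketch of that chain (the fixed pool below merely stands in for ExecutorProvider.EXECUTORS_2, and the strings stand in for table metadata):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import org.eclipse.collections.api.factory.Lists;
import org.eclipse.collections.api.list.ImmutableList;

// Parallel iteration sketch, illustration only.
public class ParallelIterationSketch {
    public static void main(String[] args) {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        ImmutableList<String> aliases = Lists.immutable.of("t1", "", "t2", "t3");
        aliases
                .asParallel(pool, 1)         // batch size 1, as in the metrics classes
                .reject(String::isEmpty)     // skip entries without a pulsar address
                .forEach(alias -> System.out.println("collect " + alias));
        pool.shutdown();
    }
}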

View File

@@ -1,82 +0,0 @@
package com.lanyuanxiaoyao.service.monitor.metric;
import cn.hutool.core.util.StrUtil;
import com.lanyuanxiaoyao.service.common.Constants;
import com.lanyuanxiaoyao.service.common.utils.NameHelper;
import com.lanyuanxiaoyao.service.configuration.ExecutorProvider;
import com.lanyuanxiaoyao.service.configuration.HudiServiceProperties;
import com.lanyuanxiaoyao.service.forest.service.InfoService;
import com.lanyuanxiaoyao.service.forest.service.PulsarService;
import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.Tag;
import java.util.concurrent.atomic.AtomicLong;
import org.eclipse.collections.api.factory.Lists;
import org.eclipse.collections.api.factory.Maps;
import org.eclipse.collections.api.map.MutableMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Service;
import static com.lanyuanxiaoyao.service.common.Constants.MINUTE;
/**
* Pulsar
*
* @author lanyuanxiaoyao
* @date 2024-03-05
*/
@SuppressWarnings("SpringJavaInjectionPointsAutowiringInspection")
@Service
public class PulsarMetrics extends Metrics {
private static final Logger logger = LoggerFactory.getLogger(PulsarMetrics.class);
private final MeterRegistry registry;
private final InfoService infoService;
private final PulsarService pulsarService;
private final HudiServiceProperties hudiServiceProperties;
private final MutableMap<String, AtomicLong> backlogMap;
public PulsarMetrics(MeterRegistry registry, InfoService infoService, PulsarService pulsarService, HudiServiceProperties hudiServiceProperties) {
this.registry = registry;
this.infoService = infoService;
this.pulsarService = pulsarService;
this.hudiServiceProperties = hudiServiceProperties;
backlogMap = Maps.mutable.empty();
}
@Scheduled(fixedDelay = 30 * MINUTE, initialDelay = MINUTE)
@Override
void update() {
infoService.tableMetaList()
.asParallel(ExecutorProvider.EXECUTORS_2, 1)
.reject(meta -> StrUtil.isBlank(meta.getPulsarAddress()))
.forEach(meta -> {
try {
AtomicLong backlogCache = backlogMap.getIfAbsentPut(
meta.getAlias(),
registry.gauge(
Constants.METRICS_PULSAR_BACKLOG,
Lists.immutable.of(
Tag.of(Constants.METRICS_LABEL_FLINK_JOB_ID, meta.getJob().getId().toString()),
Tag.of(Constants.METRICS_LABEL_ALIAS, meta.getAlias()),
Tag.of(Constants.METRICS_LABEL_SCHEMA, meta.getSchema()),
Tag.of(Constants.METRICS_LABEL_TABLE, meta.getTable())
),
new AtomicLong(0)
)
);
String name = pulsarService.name(meta.getPulsarAddress());
if (StrUtil.isNotBlank(name)) {
Long backlog = pulsarService.backlog(name, meta.getTopic(), NameHelper.pulsarSubscriptionName(meta.getJob().getId(), meta.getAlias(), hudiServiceProperties.getSignature()));
backlogCache.set(backlog);
infoService.savePulsarBacklog(meta.getJob().getId(), meta.getAlias(), backlog);
}
} catch (Exception exception) {
logger.warn("Update pulsar backlog fail for " + meta.getAlias(), exception);
}
});
}
}

View File

@@ -27,6 +27,10 @@ public class ScheduleStrategyProvider {
ScheduleStrategyImpl.simple(false, "distribute_schedule", "定时分布式调度", DistributeScheduleJob.class, "0/2 * * * * ?"),
// 普通调度
ScheduleStrategyImpl.simple("daily_schedule", "普通全表调度", DailyScheduleJob.class, "0 50 1,4,7,10,13,16,19 * * ?"),
+// 普通调度20240925不调度11点、14点
+// ScheduleStrategyImpl.simple("daily_schedule", "普通全表调度", DailyScheduleJob.class, "0 50 1,4,7,16,19 * * ?"),
+// 普通调度20240925不调度8点、11点、14点
+// ScheduleStrategyImpl.simple("daily_schedule", "普通全表调度", DailyScheduleJob.class, "0 50 1,4,16,19 * * ?"),
// 重点表调度
ScheduleStrategyImpl.simple("focus_evening_schedule", "晚间重点表调度", FocusScheduleJob.class, "0 50 20,21,22 * * ?"),
// ODS重点表调度
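The restored daily_schedule expression "0 50 1,4,7,10,13,16,19 * * ?" fires at 01:50, 04:50, 07:50, 10:50, 13:50, 16:50 and 19:50. A quick way to sanity-check such expressions with Quartz (a standalone sketch, not project code):

import java.text.SimpleDateFormat;
import java.util.Date;
import org.quartz.CronExpression;

// Prints the next few fire times of the daily_schedule cron.
public class CronCheck {
    public static void main(String[] args) throws Exception {
        CronExpression cron = new CronExpression("0 50 1,4,7,10,13,16,19 * * ?");
        SimpleDateFormat fmt = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
        Date next = new Date();
        for (int i = 0; i < 7; i++) {
            next = cron.getNextValidTimeAfter(next);
            System.out.println(fmt.format(next)); // 01:50, 04:50, ..., 19:50
        }
    }
}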

View File

@@ -70,7 +70,7 @@ public class DistributeScheduleJob extends BaseScheduleJob {
if (cluster.isPresent() && cluster.get().available(metadata)) {
return cluster.get().queue();
} else {
-logger.warn(StrUtil.format("{} cluster not found or busy"));
+logger.warn(StrUtil.format("{} cluster not found or busy", recommendCluster));
}
}
for (Cluster cluster : clusters) {

View File

@@ -3,6 +3,7 @@ package com.lanyuanxiaoyao.service.scheduler.quartz.distribute.cluster;
import com.lanyuanxiaoyao.service.common.Constants;
import com.lanyuanxiaoyao.service.forest.service.YarnService;
import com.lanyuanxiaoyao.service.scheduler.quartz.distribute.strategy.AvailableStrategy;
+import com.lanyuanxiaoyao.service.scheduler.quartz.distribute.strategy.DatetimeLimit;
import com.lanyuanxiaoyao.service.scheduler.quartz.distribute.strategy.QueueSizeLimit;
import com.lanyuanxiaoyao.service.scheduler.quartz.distribute.strategy.YarnQueueUsedLimit;
import org.springframework.cloud.client.discovery.DiscoveryClient;
@@ -23,7 +24,8 @@ public class A4Cluster extends Cluster {
Constants.COMPACTION_QUEUE_A4,
AvailableStrategy.and(
new QueueSizeLimit(client, Constants.COMPACTION_QUEUE_A4, 10),
-new YarnQueueUsedLimit(yarnService, Constants.CLUSTER_A4, "ten_iap.datalake", 0.8)
+new YarnQueueUsedLimit(yarnService, Constants.CLUSTER_A4, "ten_iap.datalake", 0.8),
+new DatetimeLimit(false, "* * 7-22 * * ?")
)
);
}
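DatetimeLimit(false, "* * 7-22 * * ?") presumably keeps the a4 and b1 queues out of play during the 07:00-22:59 window; its implementation is not part of this diff, so the sketch below is only a guess at such a cron-window check (class name, fields and the meaning of the boolean are assumptions):

import java.util.Date;
import org.quartz.CronExpression;

// Hypothetical cron-window availability check in the spirit of DatetimeLimit.
public class CronWindowCheck {
    private final boolean availableWhenMatched;
    private final CronExpression window;

    public CronWindowCheck(boolean availableWhenMatched, String cron) throws java.text.ParseException {
        this.availableWhenMatched = availableWhenMatched;
        this.window = new CronExpression(cron);
    }

    public boolean available() {
        // "* * 7-22 * * ?" matches any second of 07:00-22:59, so this is an hour-window test
        boolean matched = window.isSatisfiedBy(new Date());
        return availableWhenMatched ? matched : !matched;
    }

    public static void main(String[] args) throws Exception {
        System.out.println(new CronWindowCheck(false, "* * 7-22 * * ?").available());
    }
}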

View File

@@ -22,8 +22,8 @@ public class B12Cluster extends Cluster {
Constants.CLUSTER_B12,
Constants.COMPACTION_QUEUE_B12,
AvailableStrategy.and(
-new QueueSizeLimit(client, Constants.COMPACTION_QUEUE_B12, 20),
-new YarnQueueUsedLimit(yarnService, Constants.CLUSTER_B12, "default", 0.9)
+new QueueSizeLimit(client, Constants.COMPACTION_QUEUE_B12, 50),
+new YarnQueueUsedLimit(yarnService, Constants.CLUSTER_B12, "default", 1.0)
)
);
}

View File

@@ -3,6 +3,7 @@ package com.lanyuanxiaoyao.service.scheduler.quartz.distribute.cluster;
import com.lanyuanxiaoyao.service.common.Constants;
import com.lanyuanxiaoyao.service.forest.service.YarnService;
import com.lanyuanxiaoyao.service.scheduler.quartz.distribute.strategy.AvailableStrategy;
+import com.lanyuanxiaoyao.service.scheduler.quartz.distribute.strategy.DatetimeLimit;
import com.lanyuanxiaoyao.service.scheduler.quartz.distribute.strategy.QueueSizeLimit;
import com.lanyuanxiaoyao.service.scheduler.quartz.distribute.strategy.YarnQueueUsedLimit;
import org.springframework.cloud.client.discovery.DiscoveryClient;
@@ -23,7 +24,8 @@ public class B1Cluster extends Cluster {
Constants.COMPACTION_QUEUE_B1,
AvailableStrategy.and(
new QueueSizeLimit(client, Constants.COMPACTION_QUEUE_B1, 20),
-new YarnQueueUsedLimit(yarnService, Constants.CLUSTER_B1, "datalake", 1.0)
+new YarnQueueUsedLimit(yarnService, Constants.CLUSTER_B1, "datalake", 1.0),
+new DatetimeLimit(false, "* * 7-22 * * ?")
)
);
}

View File

@@ -1,28 +0,0 @@
package com.lanyuanxiaoyao.service.scheduler.quartz.distribute.cluster;
import com.lanyuanxiaoyao.service.common.Constants;
import com.lanyuanxiaoyao.service.forest.service.YarnService;
import org.springframework.cloud.client.discovery.DiscoveryClient;
import org.springframework.stereotype.Component;
/**
* B5
*
* @author lanyuanxiaoyao
* @date 2023-06-08
*/
@SuppressWarnings("SpringJavaInjectionPointsAutowiringInspection")
@Component
public class B5Cluster extends Cluster {
public B5Cluster(DiscoveryClient client, YarnService yarnService) {
super(
Constants.CLUSTER_B5,
Constants.COMPACTION_QUEUE_B5,
/* AvailableStrategy.and(
new QueueSizeLimit(client, Constants.COMPACTION_QUEUE_B5, 10),
new YarnQueueUsedLimit(yarnService, Constants.CLUSTER_B5, "ten_iap.datalake", 0.9)
) */
metadata -> false
);
}
}

View File

@@ -117,7 +117,7 @@ public class ScheduleHelper {
// 统一在这里覆盖特定请求
// CRM重点表独占A4集群
if (TagsHelper.existsTag(meta.getTags(), Constants.TAGS_CRM_FOCUS)) {
-finalMetadata.put(Constants.SCHEDULE_FORCE, Constants.CLUSTER_A4);
+finalMetadata.put(Constants.SCHEDULE_RECOMMEND, Constants.CLUSTER_A4);
} else {
finalMetadata.put(Constants.SCHEDULE_ESCAPE, Constants.CLUSTER_A4);
}
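This change downgrades the CRM focus-table routing from a hard SCHEDULE_FORCE to a SCHEDULE_RECOMMEND hint; per the DistributeScheduleJob hunk above, the recommended cluster is used only when it is present and not busy, otherwise the normal loop (which still skips the escape cluster) takes over. A purely illustrative sketch of that selection order (the Cluster type and metadata keys below are stand-ins, not the project classes):

import java.util.List;
import java.util.Map;
import java.util.Optional;

// Illustrative only: mimics the recommend-then-fallback order.
public class ClusterPick {
    interface Cluster {
        String name();
        String queue();
        boolean available(Map<String, String> metadata);
    }

    static Optional<String> pickQueue(List<Cluster> clusters, Map<String, String> metadata) {
        String recommend = metadata.get("schedule_recommend"); // hint, not a hard requirement
        String escape = metadata.get("schedule_escape");       // cluster to avoid
        Optional<Cluster> recommended = clusters.stream()
                .filter(c -> c.name().equals(recommend))
                .findFirst();
        if (recommended.isPresent() && recommended.get().available(metadata)) {
            return Optional.of(recommended.get().queue());
        }
        return clusters.stream()
                .filter(c -> !c.name().equals(escape))
                .filter(c -> c.available(metadata))
                .map(Cluster::queue)
                .findFirst();
    }
}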

View File

@@ -6,10 +6,16 @@ import com.lanyuanxiaoyao.service.common.entity.FlinkJob;
import com.lanyuanxiaoyao.service.common.utils.NameHelper;
import com.lanyuanxiaoyao.service.configuration.ExecutorProvider;
import com.lanyuanxiaoyao.service.configuration.entity.info.JobIdAndAlias;
+import com.lanyuanxiaoyao.service.configuration.entity.monitor.MetricsProgress;
import com.lanyuanxiaoyao.service.configuration.entity.yarn.YarnApplication;
import com.lanyuanxiaoyao.service.configuration.entity.yarn.YarnRootQueue;
import com.lanyuanxiaoyao.service.configuration.entity.zookeeper.ZookeeperNode;
-import com.lanyuanxiaoyao.service.forest.service.*;
+import com.lanyuanxiaoyao.service.forest.service.InfoService;
+import com.lanyuanxiaoyao.service.forest.service.MonitorService;
+import com.lanyuanxiaoyao.service.forest.service.QueueService;
+import com.lanyuanxiaoyao.service.forest.service.ScheduleService;
+import com.lanyuanxiaoyao.service.forest.service.YarnService;
+import com.lanyuanxiaoyao.service.forest.service.ZookeeperService;
import com.lanyuanxiaoyao.service.web.controller.base.AmisCrudResponse;
import com.lanyuanxiaoyao.service.web.controller.base.AmisMapResponse;
import com.lanyuanxiaoyao.service.web.controller.base.AmisResponse;
@@ -48,14 +54,16 @@ public class OverviewController extends BaseController {
private final QueueService queueService;
private final ScheduleService scheduleService;
private final ZookeeperService zookeeperService;
+private final MonitorService monitorService;
@SuppressWarnings("SpringJavaInjectionPointsAutowiringInspection")
-public OverviewController(InfoService infoService, YarnService yarnService, QueueService queueService, ScheduleService scheduleService, ZookeeperService zookeeperService) {
+public OverviewController(InfoService infoService, YarnService yarnService, QueueService queueService, ScheduleService scheduleService, ZookeeperService zookeeperService, MonitorService monitorService) {
this.infoService = infoService;
this.yarnService = yarnService;
this.queueService = queueService;
this.scheduleService = scheduleService;
this.zookeeperService = zookeeperService;
+this.monitorService = monitorService;
}
@GetMapping("")
@@ -207,4 +215,12 @@ public class OverviewController extends BaseController {
.setData("unRunningTable", unRunningTable.size()) .setData("unRunningTable", unRunningTable.size())
.setData("unRunningTableList", unRunningTable); .setData("unRunningTableList", unRunningTable);
} }
@GetMapping("monitor_progress")
public AmisMapResponse monitorProgress() {
return AmisCrudResponse.responseCrudData(
monitorService.progress()
.collect(p -> new MetricsProgress(p.getName(), p.getRunning(), p.getProgress()))
);
}
}

View File

@@ -1,6 +1,6 @@
const commonInfo = {
-// baseUrl: 'http://132.126.207.130:35690/hudi_services/service_web',
-baseUrl: '/hudi_services/service_web',
+baseUrl: 'http://132.126.207.130:35690/hudi_services/service_web',
+// baseUrl: '/hudi_services/service_web',
clusters: {
// hudi同步运行集群和yarn队列名称
sync: {
@@ -13,7 +13,6 @@ const commonInfo = {
compaction: {
'b12': 'default',
'b1': 'datalake',
-'b5': 'ten_iap.datalake',
'a4': 'ten_iap.datalake',
},
compaction_names() {

View File

@@ -408,26 +408,38 @@ function overviewTab() {
}
]
},
-/*{type: 'divider'},
-{
-type: 'service',
-api: '${base}/overview/schedule_times',
-interval: 60000,
-silentPolling: true,
-body: [
-'调度时间点',
-{
-type: 'each',
-name: 'items',
-className: 'grid',
-items: {
-type: 'tag',
-color: '${color}',
-label: '${DATETOSTR(TIMESTAMP(time, \'x\'), \'HH:mm:ss\')}'
-}
-}
-]
-}*/
+{type: 'divider'},
+{
+type: 'crud',
+title: '监控指标运行进度',
+api: `\${base}/overview/monitor_progress`,
+...crudCommonOptions(),
+interval: 2000,
+loadDataOnce: true,
+columns: [
+{
+name: 'name',
+label: '名称',
+width: 120,
+},
+{
+name: 'running',
+label: '状态',
+type: 'mapping',
+width: 50,
+map: {
+'true': '运行中',
+'false': '未运行',
+}
+},
+{
+label: '进度',
+type: 'progress',
+value: '${ROUND(progress * 100)}',
+map: 'bg-primary',
+},
+]
+}
]
}
}

View File

@@ -7,7 +7,4 @@ yarn:
web-urls:
a4: http://132.121.107.91:8088
b1: http://132.122.98.13:8088
-b4: http://132.122.112.30:8088
-b5: http://132.122.116.12:8088
-t5: http://132.121.126.84:8088
b12: http://132.126.207.125:8088

View File

@@ -134,3 +134,7 @@ GET http://{{username}}:{{password}}@b12s10.hdp.dc:31719/task/law_enforcement?
pulsar_url=pulsar://132.122.115.158:16650,132.122.115.159:16650,132.122.115.160:16650,132.122.115.161:16650,132.122.115.167:16650,132.122.115.168:16650&
pulsar_topic=persistent://odcp/acct_sz/acct_item_755&start_time=1716858000000&end_time=1716861600000&
primary_keys=ACCT_ITEM_ID&partition_keys=ACCT_ID
### Count files
GET http://{{username}}:{{password}}@b12s20.hdp.dc:19521/hdfs/count?
root=hdfs://b2/apps/datalake/hive/dws_wsyyt/external_table_hudi/dws_tb_jt_servuser_data

View File

@@ -11,6 +11,7 @@ import com.lanyuanxiaoyao.service.common.utils.RecordHelper;
import com.lanyuanxiaoyao.service.common.utils.TableMetaHelper;
import com.lanyuanxiaoyao.service.sync.configuration.GlobalConfiguration;
import com.lanyuanxiaoyao.service.sync.functions.type.TypeConverter;
+import com.lanyuanxiaoyao.service.sync.utils.ExceptionUtils;
import com.lanyuanxiaoyao.service.sync.utils.JacksonUtils;
import com.lanyuanxiaoyao.service.sync.utils.MetricsUtils;
import com.lanyuanxiaoyao.service.sync.utils.StatusUtils;
@@ -94,7 +95,7 @@ public class Record2RowDataFunction extends RichMapFunction<Record, List<RowData
Map<String, Object> current = RecordHelper.getCurrentStatement(record);
if (Objects.isNull(current)) {
logger.error("Record: {}", mapper.writeValueAsString(record));
-throw new RuntimeException("Current cannot be null");
+return ExceptionUtils.throwAndPrint(logger, "Current cannot be null");
}
// 如果 update 改变了过滤字段的值也需要先删除

View File

@@ -5,6 +5,7 @@ import cn.hutool.core.util.StrUtil;
import com.lanyuanxiaoyao.service.common.Constants;
import com.lanyuanxiaoyao.service.common.entity.TableMeta;
import com.lanyuanxiaoyao.service.common.utils.LogHelper;
+import com.lanyuanxiaoyao.service.sync.utils.ExceptionUtils;
import java.math.BigDecimal;
import java.time.format.DateTimeFormatter;
import java.util.ArrayList;
@@ -115,7 +116,8 @@ public class TypeConverterV2 implements TypeConverter {
return NULLABLE_STRING_SCHEMA;
}
} catch (Throwable throwable) {
-throw new RuntimeException(
+return ExceptionUtils.throwAndPrint(
+logger,
StrUtil.format("Convert type failure {} {} {} length: {} scala: {}", table, field, type, length, scala),
throwable
);
@@ -162,7 +164,8 @@ public class TypeConverterV2 implements TypeConverter {
return value;
}
} catch (Throwable throwable) {
-throw new RuntimeException(
+return ExceptionUtils.throwAndPrint(
+logger,
StrUtil.format("Convert value failure {} {} {}", schema.toString(), name, value),
throwable
);

View File

@@ -0,0 +1,26 @@
package com.lanyuanxiaoyao.service.sync.utils;
import org.slf4j.Logger;
/**
* Log and rethrow exceptions in one call
*
* @author lanyuanxiaoyao
* @date 2024-10-22
*/
public class ExceptionUtils {
public static <T> T throwAndPrint(Logger logger, String content) {
logger.error(content);
throw new RuntimeException(content);
}
public static <T> T throwAndPrint(Logger logger, Throwable throwable) {
logger.error(throwable.getMessage(), throwable);
throw new RuntimeException(throwable);
}
public static <T> T throwAndPrint(Logger logger, String content, Throwable throwable) {
logger.error(content, throwable);
throw new RuntimeException(content, throwable);
}
}
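The generic <T> T return type is there so call sites inside non-void methods can write return ExceptionUtils.throwAndPrint(...) and keep the compiler satisfied, even though the helper always throws; the Record2RowDataFunction and TypeConverterV2 hunks above use exactly that form. A tiny usage sketch (the wrapper class and method below are invented):

import com.lanyuanxiaoyao.service.sync.utils.ExceptionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Usage sketch: the return statement is never reached at runtime; it only
// tells the compiler this branch yields a value of the expected type.
public class ExceptionUtilsUsage {
    private static final Logger logger = LoggerFactory.getLogger(ExceptionUtilsUsage.class);

    static String mustNotBeNull(String value) {
        if (value == null) {
            return ExceptionUtils.throwAndPrint(logger, "value cannot be null");
        }
        return value;
    }
}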

View File

@@ -75,11 +75,9 @@ public class ZkUtils {
.withMode(CreateMode.EPHEMERAL)
.forPath(lockPath, runMeta.getBytes());
} catch (KeeperException.NodeExistsException e) {
-logger.error("Lock exists for " + lockPath, e);
-throw new RuntimeException(e);
+ExceptionUtils.throwAndPrint(logger, "Lock exists for " + lockPath, e);
} catch (Exception e) {
-logger.error("Unknown error", e);
-throw new RuntimeException(e);
+ExceptionUtils.throwAndPrint(logger, "Unknown error", e);
}
}
@@ -91,8 +89,7 @@ }
}
}
} catch (Exception e) {
-logger.error("Unknown error", e);
-throw new RuntimeException(e);
+ExceptionUtils.throwAndPrint(logger, "Unknown error", e);
}
}
}