Compare commits
33 Commits
1e88c62987...master

| Author | SHA1 | Date |
|---|---|---|
| | eea6307c87 | |
| | c5366e006b | |
| | f0f295bfc9 | |
| | e34c5d2e3e | |
| | 86d6fcaec7 | |
| | 2c09d97cec | |
| | 37ac0cd311 | |
| | 9fa38a3065 | |
| | d908f99fbd | |
| | 5b0b23336c | |
| | 263b91c42a | |
| | a53c90a348 | |
| | e3583dad0c | |
| | 514a65a5e6 | |
| | 57a57ace77 | |
| | 1338e6458c | |
| | 9fd46b3a20 | |
| | 8e8b1a7684 | |
| | 4d8238dd7f | |
| | 73f7d3085a | |
| | 3f8652395c | |
| | 8c6e0aa353 | |
| | e15dd6289d | |
| | 7d33227d70 | |
| | 8fda8f7669 | |
| | 57a828c5b4 | |
| | b9f6aa0cc2 | |
| | dcb9028d86 | |
| | 57a2787bf8 | |
| | 813ddfaeac | |
| | 0cbf6b28ef | |
| | 7efb2527d0 | |
| | f085c9d506 | |
@@ -5,7 +5,7 @@ mvn install -N -D skipTests
 deploy service-common service-dependencies service-configuration service-forest service-cli service-cli/service-cli-core service-executor service-executor/service-executor-core utils/executor
 package service-api service-check service-cli/service-cli-runner service-cloud-query service-executor/service-executor-manager service-executor/service-executor-task service-command service-command-pro service-exporter service-flink-query service-gateway service-hudi-query service-info-query service-monitor service-loki-query service-pulsar-query service-queue service-scheduler service-uploader service-web service-yarn-query service-zookeeper-query utils/patch utils/sync
 
-configs=(b2a4 b2b1 b2b5 b2b12)
+configs=(b2a4 b2b1 b2b12)
 for config in ${configs[*]};
 do
 mvn -pl service-launcher clean package -D skipTests -P $config
@@ -34,5 +34,5 @@ upload $root_path/service-yarn-query/target/service-yarn-query-1.0.0-SNAPSHOT.jar
 upload $root_path/service-zookeeper-query/target/service-zookeeper-query-1.0.0-SNAPSHOT.jar
 upload $root_path/utils/sync/target/sync-1.0.0-SNAPSHOT.jar
 
-upload_ytp $root_path/service-cli/service-cli-runner/target/service-cli-runner-1.0.0-SNAPSHOT.jar
-upload_ytp $root_path/service-uploader/target/service-uploader-1.0.0-SNAPSHOT.jar
+upload $root_path/service-cli/service-cli-runner/target/service-cli-runner-1.0.0-SNAPSHOT.jar
+upload $root_path/service-uploader/target/service-uploader-1.0.0-SNAPSHOT.jar
@@ -3,4 +3,4 @@ root_path=$(dirname $(cd $(dirname $0);pwd))
 source $root_path/bin/library.sh
 deploy service-cli service-cli/service-cli-core
 package service-cli/service-cli-runner
-ytp-transfer2 $root_path/service-cli/service-cli-runner/target/service-cli-runner-1.0.0-SNAPSHOT.jar
+upload $root_path/service-cli/service-cli-runner/target/service-cli-runner-1.0.0-SNAPSHOT.jar
@@ -3,7 +3,7 @@ root_path=$(dirname $(cd $(dirname $0);pwd))
 source $root_path/bin/library.sh
 deploy service-common service-dependencies service-configuration service-forest
 
-configs=(b2a4 b2b1 b2b5 b2b12)
+configs=(b2a4 b2b1 b2b12)
 for config in ${configs[*]};
 do
 mvn -pl service-launcher clean package -D skipTests -P $config
@@ -3,5 +3,5 @@
 root_path=/apps/zone_scfp/hudi/cloud
 jdk_path=/opt/jdk8u252-b09/bin/java
 
-curl ftp://yyy:QeY\!68\)4nH1@132.121.122.15:2222/service-check-1.0.0-SNAPSHOT.jar -o ${root_path}/service-check.jar
+curl http://AxhEbscwsJDbYMH2:cYxg3b4PtWoVD5SjFayWxtnSVsjzRsg4@132.126.207.124:36800/file/download/service-check-1.0.0-SNAPSHOT.jar -o ${root_path}/service-check.jar
 ${jdk_path} -jar ${root_path}/service-check.jar
@@ -5,7 +5,7 @@ jdk_path=/opt/jdk1.8.0_162/bin/java
 
 arguments=$@
 # Comment out this line if you upload the jar manually; use whatever method works for you anyway
-curl ftp://yyy:QeY\!68\)4nH1@132.121.122.15:2222/service-cli-runner-1.0.0-SNAPSHOT.jar -o ${jars_path}/service-cli-runner.jar
+curl http://AxhEbscwsJDbYMH2:cYxg3b4PtWoVD5SjFayWxtnSVsjzRsg4@132.126.207.124:36800/file/download/service-cli-runner-1.0.0-SNAPSHOT.jar -o ${jars_path}/service-cli-runner.jar
 ${jdk_path} -jar ${jars_path}/service-cli-runner.jar \
 --spring.profiles.active=b12 \
 --deploy.generate.command=true \
@@ -2,14 +2,6 @@
 
 build_profile=b2b12
 
-iap_username=iap
-iap_password=IAPAb123456!
-iap_url=$iap_username@132.122.1.162
-
-ytp_username=yyy
-ytp_password='QeY\!68\)4nH1'
-ytp_url=ftp://$ytp_username:$ytp_password@132.121.122.15:2222
-
 upload_username=AxhEbscwsJDbYMH2
 upload_password=cYxg3b4PtWoVD5SjFayWxtnSVsjzRsg4
 upload_url=http://$upload_username:$upload_password@132.126.207.124:36800
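The `upload` helper kept below works against `upload_url`, which embeds HTTP Basic credentials in the URL userinfo section (`http://user:password@host:port`). As an illustration only (hypothetical class name; the endpoint path is taken from the curl calls elsewhere in this changeset), the same download in Java 11+ would send the credentials as an Authorization header instead:

    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;
    import java.nio.file.Path;
    import java.util.Base64;

    public class FileServiceClient {
        public static void main(String[] args) throws Exception {
            String user = "AxhEbscwsJDbYMH2";                      // upload_username from library.sh
            String password = "cYxg3b4PtWoVD5SjFayWxtnSVsjzRsg4";  // upload_password from library.sh
            String auth = Base64.getEncoder()
                    .encodeToString((user + ":" + password).getBytes());
            // Same file-service endpoint the curl calls hit
            HttpRequest request = HttpRequest.newBuilder()
                    .uri(URI.create("http://132.126.207.124:36800/file/download/service-check-1.0.0-SNAPSHOT.jar"))
                    .header("Authorization", "Basic " + auth)
                    .build();
            HttpClient.newHttpClient()
                    .send(request, HttpResponse.BodyHandlers.ofFile(Path.of("service-check.jar")));
        }
    }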
@@ -28,23 +20,6 @@ function upload() {
   rm $source_file_path
 }
 
-function upload_ytp() {
-  source_file_path=$(realpath $1)
-  file_name=$(basename $source_file_path)
-  echo "↪ Source md5: $(md5sum $source_file_path | awk '{print $1}')"
-  echo "↪ Uploading $source_file_path ↪ /tmp/$file_name"
-  sshpass -p $iap_password scp $source_file_path $iap_url:/tmp
-  echo "↪ Upload 162 success"
-  target_md5=$(sshpass -p $iap_password ssh -o 'StrictHostKeyChecking no' $iap_url "md5sum /tmp/$file_name | awk '{print \$1}'")
-  echo "↪ Target md5: $target_md5"
-  echo "↪ Command: sshpass -p $iap_password ssh -o 'StrictHostKeyChecking no' $iap_url \"curl --retry 5 $ytp_url -T /tmp/$file_name\""
-  sshpass -p $iap_password ssh -o 'StrictHostKeyChecking no' $iap_url "curl --retry 5 $ytp_url -T /tmp/$file_name"
-  echo "↪ Upload ytp success"
-  echo "↪ Download: curl $ytp_url/$file_name -o $file_name"
-  echo "↪ Delete source"
-  rm $source_file_path
-}
-
 function joining {
   local d=${1-} f=${2-}
   if shift 2; then
@@ -1,265 +0,0 @@
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
-  <property>
-    <name>fs.azure.user.agent.prefix</name>
-    <value>User-Agent: APN/1.0 Hortonworks/1.0 HDP/</value>
-  </property>
-  <property>
-    <name>fs.defaultFS</name>
-    <value>hdfs://b2</value>
-    <final>true</final>
-  </property>
-  <property>
-    <name>fs.s3a.fast.upload</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>fs.s3a.fast.upload.buffer</name>
-    <value>disk</value>
-  </property>
-  <property>
-    <name>fs.s3a.multipart.size</name>
-    <value>67108864</value>
-  </property>
-  <property>
-    <name>fs.trash.interval</name>
-    <value>4320</value>
-  </property>
-  <property>
-    <name>fs.trash.checkpoint.interval</name>
-    <value>360</value>
-  </property>
-  <property>
-    <name>ha.failover-controller.active-standby-elector.zk.op.retries</name>
-    <value>120</value>
-  </property>
-  <property>
-    <name>ha.zookeeper.acl</name>
-    <value>sasl:nn:rwcda</value>
-  </property>
-  <property>
-    <name>ha.zookeeper.quorum</name>
-    <value>b5m1.hdp.dc:2181,b5m2.hdp.dc:2181,b5m3.hdp.dc:2181</value>
-  </property>
-  <property>
-    <name>hadoop.http.authentication.kerberos.keytab</name>
-    <value>/etc/security/keytabs/spnego.service.keytab</value>
-  </property>
-  <property>
-    <name>hadoop.http.authentication.kerberos.principal</name>
-    <value>HTTP/_HOST@ECLD.COM</value>
-  </property>
-  <property>
-    <name>hadoop.http.authentication.signature.secret.file</name>
-    <value>/etc/security/http_secret</value>
-  </property>
-  <property>
-    <name>hadoop.http.authentication.simple.anonymous.allowed</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>hadoop.http.authentication.type</name>
-    <value>simple</value>
-  </property>
-  <property>
-    <name>hadoop.http.cross-origin.allowed-headers</name>
-    <value>X-Requested-With,Content-Type,Accept,Origin,WWW-Authenticate,Accept-Encoding,Transfer-Encoding</value>
-  </property>
-  <property>
-    <name>hadoop.http.cross-origin.allowed-methods</name>
-    <value>GET,PUT,POST,OPTIONS,HEAD,DELETE</value>
-  </property>
-  <property>
-    <name>hadoop.http.cross-origin.allowed-origins</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.http.cross-origin.max-age</name>
-    <value>1800</value>
-  </property>
-  <property>
-    <name>hadoop.http.filter.initializers</name>
-    <value>org.apache.hadoop.security.AuthenticationFilterInitializer,org.apache.hadoop.security.HttpCrossOriginFilterInitializer</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.hdfs.groups</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.hdfs.hosts</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.hive.groups</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.hive.hosts</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.HTTP.groups</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.HTTP.hosts</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.iap.groups</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.iap.hosts</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.livy.groups</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.livy.hosts</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.yarn.groups</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.yarn.hosts</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.rpc.protection</name>
-    <value>authentication,privacy</value>
-  </property>
-  <property>
-    <name>hadoop.security.auth_to_local</name>
-    <value>RULE:[1:$1@$0](hbase-b5@ECLD.COM)s/.*/hbase/
-RULE:[1:$1@$0](hdfs-b5@ECLD.COM)s/.*/hdfs/
-RULE:[1:$1@$0](spark-b5@ECLD.COM)s/.*/spark/
-RULE:[1:$1@$0](yarn-ats-b5@ECLD.COM)s/.*/yarn-ats/
-RULE:[1:$1@$0](.*@ECLD.COM)s/@.*//
-RULE:[2:$1@$0](dn@ECLD.COM)s/.*/hdfs/
-RULE:[2:$1@$0](hbase@ECLD.COM)s/.*/hbase/
-RULE:[2:$1@$0](hive@ECLD.COM)s/.*/hive/
-RULE:[2:$1@$0](jhs@ECLD.COM)s/.*/mapred/
-RULE:[2:$1@$0](jn@ECLD.COM)s/.*/hdfs/
-RULE:[2:$1@$0](livy@ECLD.COM)s/.*/livy/
-RULE:[2:$1@$0](nm@ECLD.COM)s/.*/yarn/
-RULE:[2:$1@$0](nn@ECLD.COM)s/.*/hdfs/
-RULE:[2:$1@$0](rangeradmin@ECLD.COM)s/.*/ranger/
-RULE:[2:$1@$0](rangerlookup@ECLD.COM)s/.*/ranger/
-RULE:[2:$1@$0](rangertagsync@ECLD.COM)s/.*/rangertagsync/
-RULE:[2:$1@$0](rangerusersync@ECLD.COM)s/.*/rangerusersync/
-RULE:[2:$1@$0](rm@ECLD.COM)s/.*/yarn/
-RULE:[2:$1@$0](spark@ECLD.COM)s/.*/spark/
-RULE:[2:$1@$0](yarn@ECLD.COM)s/.*/yarn/
-RULE:[2:$1@$0](yarn-ats-hbase@ECLD.COM)s/.*/yarn-ats/
-DEFAULT</value>
-  </property>
-  <property>
-    <name>hadoop.security.authentication</name>
-    <value>kerberos</value>
-  </property>
-  <property>
-    <name>hadoop.security.authorization</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>hadoop.security.instrumentation.requires.admin</name>
-    <value>false</value>
-  </property>
-  <property>
-    <name>io.compression.codec.lzo.class</name>
-    <value>com.hadoop.compression.lzo.LzoCodec</value>
-  </property>
-  <property>
-    <name>io.compression.codecs</name>
-    <value>org.apache.hadoop.io.compress.GzipCodec,com.hadoop.compression.lzo.LzoCodec,com.hadoop.compression.lzo.LzopCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec</value>
-  </property>
-  <property>
-    <name>io.file.buffer.size</name>
-    <value>131072</value>
-  </property>
-  <property>
-    <name>io.serializations</name>
-    <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
-  </property>
-  <property>
-    <name>ipc.client.connect.max.retries</name>
-    <value>50</value>
-  </property>
-  <property>
-    <name>ipc.client.connection.maxidletime</name>
-    <value>30000</value>
-  </property>
-  <property>
-    <name>ipc.client.idlethreshold</name>
-    <value>8000</value>
-  </property>
-  <property>
-    <name>ipc.server.tcpnodelay</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>mapreduce.jobtracker.webinterface.trusted</name>
-    <value>false</value>
-  </property>
-  <property>
-    <name>ipc.client.fallback-to-simple-auth-allowed</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>fs.hdfs.impl.disable.cache</name>
-    <value>true</value>
-  </property>
-</configuration>
@@ -1,713 +0,0 @@
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
-  <property>
-    <name>dfs.block.access.token.enable</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>dfs.blockreport.initialDelay</name>
-    <value>120</value>
-  </property>
-  <property>
-    <name>dfs.blocksize</name>
-    <value>134217728</value>
-  </property>
-  <property>
-    <name>dfs.client.failover.proxy.provider.b5</name>
-    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
-  </property>
-  <property>
-    <name>dfs.client.read.shortcircuit</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>dfs.client.read.shortcircuit.streams.cache.size</name>
-    <value>4096</value>
-  </property>
-  <property>
-    <name>dfs.client.retry.policy.enabled</name>
-    <value>false</value>
-  </property>
-  <property>
-    <name>dfs.cluster.administrators</name>
-    <value> hdfs</value>
-  </property>
-  <property>
-    <name>dfs.content-summary.limit</name>
-    <value>5000</value>
-  </property>
-  <property>
-    <name>dfs.data.transfer.protection</name>
-    <value>authentication,privacy</value>
-  </property>
-  <property>
-    <name>dfs.datanode.address</name>
-    <value>0.0.0.0:1019</value>
-  </property>
-  <property>
-    <name>dfs.datanode.balance.bandwidthPerSec</name>
-    <value>6250000</value>
-  </property>
-  <property>
-    <name>dfs.datanode.data.dir</name>
-    <value>[DISK]file:///data1/hadoop/hdfs/data,[DISK]file:///data2/hadoop/hdfs/data,[DISK]file:///data3/hadoop/hdfs/data,[DISK]file:///data4/hadoop/hdfs/data,[DISK]file:///data5/hadoop/hdfs/data,[DISK]file:///data6/hadoop/hdfs/data,[DISK]file:///data7/hadoop/hdfs/data,[DISK]file:///data8/hadoop/hdfs/data,[DISK]file:///data9/hadoop/hdfs/data,[DISK]file:///data10/hadoop/hdfs/data,[DISK]file:///data11/hadoop/hdfs/data,[DISK]file:///data12/hadoop/hdfs/data,[DISK]file:///data13/hadoop/hdfs/data,[DISK]file:///data14/hadoop/hdfs/data,[DISK]file:///data15/hadoop/hdfs/data,[DISK]file:///data16/hadoop/hdfs/data,[DISK]file:///data17/hadoop/hdfs/data,[DISK]file:///data18/hadoop/hdfs/data,[DISK]file:///data19/hadoop/hdfs/data,[DISK]file:///data20/hadoop/hdfs/data,[DISK]file:///data21/hadoop/hdfs/data,[DISK]file:///data22/hadoop/hdfs/data,[DISK]file:///data23/hadoop/hdfs/data,[DISK]file:///data24/hadoop/hdfs/data</value>
-    <final>true</final>
-  </property>
-  <property>
-    <name>dfs.datanode.data.dir.perm</name>
-    <value>750</value>
-  </property>
-  <property>
-    <name>dfs.datanode.du.reserved</name>
-    <value>26405499904</value>
-  </property>
-  <property>
-    <name>dfs.datanode.failed.volumes.tolerated</name>
-    <value>2</value>
-    <final>true</final>
-  </property>
-  <property>
-    <name>dfs.datanode.http.address</name>
-    <value>0.0.0.0:1022</value>
-  </property>
-  <property>
-    <name>dfs.datanode.https.address</name>
-    <value>0.0.0.0:50475</value>
-  </property>
-  <property>
-    <name>dfs.datanode.ipc.address</name>
-    <value>0.0.0.0:8010</value>
-  </property>
-  <property>
-    <name>dfs.datanode.kerberos.principal</name>
-    <value>dn/_HOST@ECLD.COM</value>
-  </property>
-  <property>
-    <name>dfs.datanode.keytab.file</name>
-    <value>/etc/security/keytabs/dn.service.keytab</value>
-  </property>
-  <property>
-    <name>dfs.datanode.max.transfer.threads</name>
-    <value>16384</value>
-  </property>
-  <property>
-    <name>dfs.domain.socket.path</name>
-    <value>/var/lib/hadoop-hdfs/dn_socket</value>
-  </property>
-  <property>
-    <name>dfs.encrypt.data.transfer.cipher.suites</name>
-    <value>AES/CTR/NoPadding</value>
-  </property>
-  <property>
-    <name>dfs.ha.automatic-failover.enabled</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>dfs.ha.fencing.methods</name>
-    <value>shell(/bin/true)</value>
-  </property>
-  <property>
-    <name>dfs.ha.namenodes.b5</name>
-    <value>nn1,nn2</value>
-  </property>
-  <property>
-    <name>dfs.heartbeat.interval</name>
-    <value>3</value>
-  </property>
-  <property>
-    <name>dfs.hosts.exclude</name>
-    <value>/etc/hadoop/conf/dfs.exclude</value>
-  </property>
-  <property>
-    <name>dfs.http.policy</name>
-    <value>HTTP_ONLY</value>
-  </property>
-  <property>
-    <name>dfs.https.port</name>
-    <value>50470</value>
-  </property>
-  <property>
-    <name>dfs.internal.nameservices</name>
-    <value>b5</value>
-  </property>
-  <property>
-    <name>dfs.journalnode.edits.dir.b5</name>
-    <value>/data2/hadoop/hdfs/journal</value>
-  </property>
-  <property>
-    <name>dfs.journalnode.http-address</name>
-    <value>0.0.0.0:8480</value>
-  </property>
-  <property>
-    <name>dfs.journalnode.https-address</name>
-    <value>0.0.0.0:8481</value>
-  </property>
-  <property>
-    <name>dfs.journalnode.kerberos.internal.spnego.principal</name>
-    <value>HTTP/_HOST@ECLD.COM</value>
-  </property>
-  <property>
-    <name>dfs.journalnode.kerberos.principal</name>
-    <value>jn/_HOST@ECLD.COM</value>
-  </property>
-  <property>
-    <name>dfs.journalnode.keytab.file</name>
-    <value>/etc/security/keytabs/jn.service.keytab</value>
-  </property>
-  <property>
-    <name>dfs.namenode.accesstime.precision</name>
-    <value>0</value>
-  </property>
-  <property>
-    <name>dfs.namenode.acls.enabled</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>dfs.namenode.audit.log.async</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>dfs.namenode.avoid.read.stale.datanode</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>dfs.namenode.avoid.write.stale.datanode</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>dfs.namenode.checkpoint.dir</name>
-    <value>/data/hadoop/hdfs/namesecondary</value>
-  </property>
-  <property>
-    <name>dfs.namenode.checkpoint.edits.dir</name>
-    <value>${dfs.namenode.checkpoint.dir}</value>
-  </property>
-  <property>
-    <name>dfs.namenode.checkpoint.period</name>
-    <value>21600</value>
-  </property>
-  <property>
-    <name>dfs.namenode.checkpoint.txns</name>
-    <value>1000000</value>
-  </property>
-  <property>
-    <name>dfs.namenode.fslock.fair</name>
-    <value>false</value>
-  </property>
-  <property>
-    <name>dfs.namenode.handler.count</name>
-    <value>100</value>
-  </property>
-  <property>
-    <name>dfs.namenode.http-address.b5.nn1</name>
-    <value>b5m2.hdp.dc:50070</value>
-  </property>
-  <property>
-    <name>dfs.namenode.http-address.b5.nn2</name>
-    <value>b5m3.hdp.dc:50070</value>
-  </property>
-  <property>
-    <name>dfs.namenode.https-address.b5.nn1</name>
-    <value>b5m2.hdp.dc:50470</value>
-  </property>
-  <property>
-    <name>dfs.namenode.https-address.b5.nn2</name>
-    <value>b5m3.hdp.dc:50470</value>
-  </property>
-  <property>
-    <name>dfs.namenode.kerberos.internal.spnego.principal</name>
-    <value>HTTP/_HOST@ECLD.COM</value>
-  </property>
-  <property>
-    <name>dfs.namenode.kerberos.principal</name>
-    <value>nn/_HOST@ECLD.COM</value>
-  </property>
-  <property>
-    <name>dfs.namenode.keytab.file</name>
-    <value>/etc/security/keytabs/nn.service.keytab</value>
-  </property>
-  <property>
-    <name>dfs.namenode.max.extra.edits.segments.retained</name>
-    <value>180</value>
-  </property>
-  <property>
-    <name>dfs.namenode.name.dir</name>
-    <value>/data1/hadoop/hdfs/namenode,/data2/hadoop/hdfs/namenode</value>
-    <final>true</final>
-  </property>
-  <property>
-    <name>dfs.namenode.name.dir.restore</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>dfs.namenode.num.extra.edits.retained</name>
-    <value>18000</value>
-  </property>
-  <property>
-    <name>dfs.namenode.rpc-address.b5.nn1</name>
-    <value>b5m2.hdp.dc:8020</value>
-  </property>
-  <property>
-    <name>dfs.namenode.rpc-address.b5.nn2</name>
-    <value>b5m3.hdp.dc:8020</value>
-  </property>
-  <property>
-    <name>dfs.namenode.safemode.threshold-pct</name>
-    <value>0.99</value>
-  </property>
-  <property>
-    <name>dfs.namenode.shared.edits.dir.b5</name>
-    <value>qjournal://b5m1.hdp.dc:8485;b5m2.hdp.dc:8485;b5m3.hdp.dc:8485/b5</value>
-  </property>
-  <property>
-    <name>dfs.namenode.stale.datanode.interval</name>
-    <value>30000</value>
-  </property>
-  <property>
-    <name>dfs.namenode.startup.delay.block.deletion.sec</name>
-    <value>3600</value>
-  </property>
-  <property>
-    <name>dfs.namenode.write.stale.datanode.ratio</name>
-    <value>1.0f</value>
-  </property>
-  <property>
-    <name>dfs.nameservices</name>
-    <value>b5,b1,b2,b3,b4,a3,a4,f1,e1,d2</value>
-  </property>
-  <property>
-    <name>dfs.permissions.ContentSummary.subAccess</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>dfs.permissions.enabled</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>dfs.permissions.superusergroup</name>
-    <value>hdfs</value>
-  </property>
-  <property>
-    <name>dfs.replication</name>
-    <value>3</value>
-  </property>
-  <property>
-    <name>dfs.replication.max</name>
-    <value>50</value>
-  </property>
-  <property>
-    <name>dfs.web.authentication.kerberos.keytab</name>
-    <value>/etc/security/keytabs/spnego.service.keytab</value>
-  </property>
-  <property>
-    <name>dfs.web.authentication.kerberos.principal</name>
-    <value>HTTP/_HOST@ECLD.COM</value>
-  </property>
-  <property>
-    <name>dfs.webhdfs.enabled</name>
-    <value>true</value>
-    <final>true</final>
-  </property>
-  <property>
-    <name>fs.permissions.umask-mode</name>
-    <value>022</value>
-  </property>
-  <property>
-    <name>hadoop.caller.context.enabled</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>manage.include.files</name>
-    <value>false</value>
-  </property>
-  <property>
-    <name>nfs.exports.allowed.hosts</name>
-    <value>* rw</value>
-  </property>
-  <property>
-    <name>nfs.file.dump.dir</name>
-    <value>/tmp/.hdfs-nfs</value>
-  </property>
-  <property>
-    <name>dfs.client.datanode-restart.timeout</name>
-    <value>30</value>
-  </property>
-  <property>
-    <name>dfs.client.failover.proxy.provider.a4</name>
-    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
-  </property>
-  <property>
-    <name>dfs.ha.namenodes.a4</name>
-    <value>nn1,nn2</value>
-  </property>
-  <property>
-    <name>dfs.namenode.http-address.a4.nn1</name>
-    <value>a4m1.hdp.dc:50070</value>
-  </property>
-  <property>
-    <name>dfs.namenode.http-address.a4.nn2</name>
-    <value>a4m2.hdp.dc:50070</value>
-  </property>
-  <property>
-    <name>dfs.namenode.https-address.a4.nn1</name>
-    <value>a4m1.hdp.dc:50470</value>
-  </property>
-  <property>
-    <name>dfs.namenode.https-address.a4.nn2</name>
-    <value>a4m2.hdp.dc:50470</value>
-  </property>
-  <property>
-    <name>dfs.namenode.rpc-address.a4.nn1</name>
-    <value>a4m1.hdp.dc:8020</value>
-  </property>
-  <property>
-    <name>dfs.namenode.rpc-address.a4.nn2</name>
-    <value>a4m2.hdp.dc:8020</value>
-  </property>
-  <property>
-    <name>dfs.client.failover.proxy.provider.a3</name>
-    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
-  </property>
-  <property>
-    <name>dfs.ha.namenodes.a3</name>
-    <value>nn1,nn2</value>
-  </property>
-  <property>
-    <name>dfs.namenode.http-address.a3.nn1</name>
-    <value>a3m2.hdp.dc:50070</value>
-  </property>
-  <property>
-    <name>dfs.namenode.http-address.a3.nn2</name>
-    <value>a3m3.hdp.dc:50070</value>
-  </property>
-  <property>
-    <name>dfs.namenode.https-address.a3.nn1</name>
-    <value>a3m2.hdp.dc:50470</value>
-  </property>
-  <property>
-    <name>dfs.namenode.https-address.a3.nn2</name>
-    <value>a3m3.hdp.dc:50470</value>
-  </property>
-  <property>
-    <name>dfs.namenode.rpc-address.a3.nn1</name>
-    <value>a3m2.hdp.dc:8020</value>
-  </property>
-  <property>
-    <name>dfs.namenode.rpc-address.a3.nn2</name>
-    <value>a3m3.hdp.dc:8020</value>
-  </property>
-  <property>
-    <name>dfs.client.failover.proxy.provider.b3</name>
-    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
-  </property>
-  <property>
-    <name>dfs.ha.namenodes.b3</name>
-    <value>nn1,nn2</value>
-  </property>
-  <property>
-    <name>dfs.namenode.http-address.b3.nn1</name>
-    <value>b3m2.hdp.dc:50070</value>
-  </property>
-  <property>
-    <name>dfs.namenode.http-address.b3.nn2</name>
-    <value>b3m3.hdp.dc:50070</value>
-  </property>
-  <property>
-    <name>dfs.namenode.https-address.b3.nn1</name>
-    <value>b3m2.hdp.dc:50470</value>
-  </property>
-  <property>
-    <name>dfs.namenode.https-address.b3.nn2</name>
-    <value>b3m3.hdp.dc:50470</value>
-  </property>
-  <property>
-    <name>dfs.namenode.rpc-address.b3.nn1</name>
-    <value>b3m2.hdp.dc:8020</value>
-  </property>
-  <property>
-    <name>dfs.namenode.rpc-address.b3.nn2</name>
-    <value>b3m3.hdp.dc:8020</value>
-  </property>
-  <property>
-    <name>dfs.client.failover.proxy.provider.b1</name>
-    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
-  </property>
-  <property>
-    <name>dfs.client.failover.proxy.provider.b2</name>
-    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
-  </property>
-  <property>
-    <name>dfs.ha.namenodes.b1</name>
-    <value>nn1,nn2</value>
-  </property>
-  <property>
-    <name>dfs.ha.namenodes.b2</name>
-    <value>nn3,nn4</value>
-  </property>
-  <property>
-    <name>dfs.namenode.http-address.b1.nn1</name>
-    <value>b1m2.hdp.dc:50070</value>
-  </property>
-  <property>
-    <name>dfs.namenode.http-address.b1.nn2</name>
-    <value>b1m3.hdp.dc:50070</value>
-  </property>
-  <property>
-    <name>dfs.namenode.https-address.b1.nn1</name>
-    <value>b1m2.hdp.dc:50470</value>
-  </property>
-  <property>
-    <name>dfs.namenode.https-address.b1.nn2</name>
-    <value>b1m3.hdp.dc:50470</value>
-  </property>
-  <property>
-    <name>dfs.namenode.rpc-address.b1.nn1</name>
-    <value>b1m2.hdp.dc:8020</value>
-  </property>
-  <property>
-    <name>dfs.namenode.rpc-address.b1.nn2</name>
-    <value>b1m3.hdp.dc:8020</value>
-  </property>
-  <property>
-    <name>dfs.namenode.http-address.b2.nn3</name>
-    <value>b1m5.hdp.dc:50070</value>
-  </property>
-  <property>
-    <name>dfs.namenode.http-address.b2.nn4</name>
-    <value>b1m6.hdp.dc:50070</value>
-  </property>
-  <property>
-    <name>dfs.namenode.https-address.b2.nn3</name>
-    <value>b1m5.hdp.dc:50470</value>
-  </property>
-  <property>
-    <name>dfs.namenode.https-address.b2.nn4</name>
-    <value>b1m6.hdp.dc:50470</value>
-  </property>
-  <property>
-    <name>dfs.namenode.rpc-address.b2.nn3</name>
-    <value>b1m5.hdp.dc:8020</value>
-  </property>
-  <property>
-    <name>dfs.namenode.rpc-address.b2.nn4</name>
-    <value>b1m6.hdp.dc:8020</value>
-  </property>
-  <property>
-    <name>dfs.client.failover.proxy.provider.f1</name>
-    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
-  </property>
-  <property>
-    <name>dfs.ha.namenodes.f1</name>
-    <value>nn1,nn2</value>
-  </property>
-  <property>
-    <name>dfs.namenode.http-address.f1.nn1</name>
-    <value>f1m2.hdp.dc:50070</value>
-  </property>
-  <property>
-    <name>dfs.namenode.http-address.f1.nn2</name>
-    <value>f1m3.hdp.dc:50070</value>
-  </property>
-  <property>
-    <name>dfs.namenode.https-address.f1.nn1</name>
-    <value>f1m2.hdp.dc:50470</value>
-  </property>
-  <property>
-    <name>dfs.namenode.https-address.f1.nn2</name>
-    <value>f1m3.hdp.dc:50470</value>
-  </property>
-  <property>
-    <name>dfs.namenode.rpc-address.f1.nn1</name>
-    <value>f1m2.hdp.dc:8020</value>
-  </property>
-  <property>
-    <name>dfs.namenode.rpc-address.f1.nn2</name>
-    <value>f1m3.hdp.dc:8020</value>
-  </property>
-  <property>
-    <name>dfs.client.failover.proxy.provider.d2</name>
-    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
-  </property>
-  <property>
-    <name>dfs.ha.namenodes.d2</name>
-    <value>nn1,nn2</value>
-  </property>
-  <property>
-    <name>dfs.namenode.http-address.d2.nn1</name>
-    <value>d2m2.hdp.dc:50070</value>
-  </property>
-  <property>
-    <name>dfs.namenode.http-address.d2.nn2</name>
-    <value>d2m3.hdp.dc:50070</value>
-  </property>
-  <property>
-    <name>dfs.namenode.https-address.d2.nn1</name>
-    <value>d2m2.hdp.dc:50470</value>
-  </property>
-  <property>
-    <name>dfs.namenode.https-address.d2.nn2</name>
-    <value>d2m3.hdp.dc:50470</value>
-  </property>
-  <property>
-    <name>dfs.namenode.rpc-address.d2.nn1</name>
-    <value>d2m2.hdp.dc:8020</value>
-  </property>
-  <property>
-    <name>dfs.namenode.rpc-address.d2.nn2</name>
-    <value>d2m3.hdp.dc:8020</value>
-  </property>
-  <property>
-    <name>dfs.client.failover.proxy.provider.e1</name>
-    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
-  </property>
-  <property>
-    <name>dfs.ha.namenodes.e1</name>
-    <value>nn1,nn2</value>
-  </property>
-  <property>
-    <name>dfs.namenode.http-address.e1.nn1</name>
-    <value>e1m2.hdp.dc:50070</value>
-  </property>
-  <property>
-    <name>dfs.namenode.http-address.e1.nn2</name>
-    <value>e1m3.hdp.dc:50070</value>
-  </property>
-  <property>
-    <name>dfs.namenode.https-address.e1.nn1</name>
-    <value>e1m2.hdp.dc:50470</value>
-  </property>
-  <property>
-    <name>dfs.namenode.https-address.e1.nn2</name>
-    <value>e1m3.hdp.dc:50470</value>
-  </property>
-  <property>
-    <name>dfs.namenode.rpc-address.e1.nn1</name>
-    <value>e1m2.hdp.dc:8020</value>
-  </property>
-  <property>
-    <name>dfs.namenode.rpc-address.e1.nn2</name>
-    <value>e1m3.hdp.dc:8020</value>
-  </property>
-  <property>
-    <name>dfs.client.failover.proxy.provider.b4</name>
-    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
-  </property>
-  <property>
-    <name>dfs.ha.namenodes.b4</name>
-    <value>nn1,nn2</value>
-  </property>
-  <property>
-    <name>dfs.namenode.http-address.b4.nn1</name>
-    <value>b4m2.hdp.dc:50070</value>
-  </property>
-  <property>
-    <name>dfs.namenode.http-address.b4.nn2</name>
-    <value>b4m3.hdp.dc:50070</value>
-  </property>
-  <property>
-    <name>dfs.namenode.https-address.b4.nn1</name>
-    <value>b4m2.hdp.dc:50470</value>
-  </property>
-  <property>
-    <name>dfs.namenode.https-address.b4.nn2</name>
-    <value>b4m3.hdp.dc:50470</value>
-  </property>
-  <property>
-    <name>dfs.namenode.rpc-address.b4.nn1</name>
-    <value>b4m2.hdp.dc:8020</value>
-  </property>
-  <property>
-    <name>dfs.namenode.rpc-address.b4.nn2</name>
-    <value>b4m3.hdp.dc:8020</value>
-  </property>
-</configuration>
File diff suppressed because it is too large.

pom.xml (8 changes)
@@ -8,7 +8,7 @@
     <artifactId>hudi-service</artifactId>
     <version>1.0.0-SNAPSHOT</version>
     <packaging>pom</packaging>
-    <description>Collection of Hudi service applications</description>
+    <description>Suite of Hudi service module applications</description>
     <modules>
         <module>service-common</module>
         <module>service-dependencies</module>
@@ -74,12 +74,6 @@
                 <build-tag>b2b1</build-tag>
             </properties>
         </profile>
-        <profile>
-            <id>b2b5</id>
-            <properties>
-                <build-tag>b2b5</build-tag>
-            </properties>
-        </profile>
         <profile>
             <id>b2b12</id>
             <properties>
@@ -8,6 +8,7 @@ package com.lanyuanxiaoyao.service.cli.core;
  */
 public class HostInfo {
     private String ip;
+    private Boolean enabled = true;
     private Boolean useAuthority = false;
     private String username;
     private String password;
@@ -20,6 +21,14 @@ public class HostInfo {
         this.ip = ip;
     }
 
+    public Boolean getEnabled() {
+        return enabled;
+    }
+
+    public void setEnabled(Boolean enabled) {
+        this.enabled = enabled;
+    }
+
     public Boolean getUseAuthority() {
         return useAuthority;
     }
@@ -47,7 +56,8 @@ public class HostInfo {
     @Override
     public String toString() {
         return "HostInfo{" +
-                "ip='" + ip + '\'' +
+                "enabled=" + enabled +
+                ", ip='" + ip + '\'' +
                 ", useAuthority=" + useAuthority +
                 ", username='" + username + '\'' +
                 ", password='" + password + '\'' +
@@ -26,6 +26,10 @@ public class HostInfoWrapper {
         return hostInfo.getIp();
     }
 
+    public Boolean getEnabled() {
+        return hostInfo.getEnabled();
+    }
+
     public Boolean getUseAuthority() {
         return hostInfo.getUseAuthority();
     }
@@ -82,6 +82,7 @@ public class RunnerApplication implements ApplicationRunner {
         return serviceInfo.getReplicas() == 0
                 ? hostInfoList
                         .stream()
+                        .filter(HostInfoWrapper::getEnabled)
                         .map(HostInfoWrapper::getIp)
                         .sorted(Comparator.naturalOrder())
                         .collect(Collectors.toList())
@@ -89,6 +90,7 @@ public class RunnerApplication implements ApplicationRunner {
                 RandomUtil.randomEleList(
                         hostInfoList
                                 .stream()
+                                .filter(HostInfoWrapper::getEnabled)
                                 .map(HostInfoWrapper::getIp)
                                 .collect(Collectors.toList()
                         ), serviceInfo.getReplicas()
@@ -148,6 +150,15 @@ public class RunnerApplication implements ApplicationRunner {
                 selectedHosts = selectHosts(serviceInfo);
                 deployPlans.put(serviceInfo.getName(), selectedHosts);
             }
+            // Exclude unavailable hosts
+            List<String> validIps = hostInfoList.stream()
+                    .filter(HostInfoWrapper::getEnabled)
+                    .map(HostInfoWrapper::getIp)
+                    .collect(Collectors.toList());
+            selectedHosts = selectedHosts
+                    .stream()
+                    .filter(validIps::contains)
+                    .collect(Collectors.toList());
         } else {
             selectedHosts = selectHosts(serviceInfo);
             deployPlans.put(serviceInfo.getName(), selectedHosts);
@@ -59,19 +59,17 @@ deploy:
   # Clusters that run hudi sync
   sync-clusters: b12
   # Clusters that run hudi compaction
-  compaction-clusters: b12,b1,b5,a4
+  compaction-clusters: b12,b1,a4
   # Overrides the common service configuration; the main thing to change is the deployed replica count
   services:
     service-api:
       replicas: 10
     service-launcher-b1:
       replicas: 8
-    service-launcher-b5:
-      replicas: 6
     service-launcher-a4:
       replicas: 6
     service-launcher-b12:
-      replicas: 10
+      replicas: 15
     service-info-query:
       replicas: 10
     service-yarn-query:
@@ -46,27 +46,6 @@ deploy:
       "[connector.cluster.sync-queue-name]": sync-queue-b1
       "[connector.cluster.compaction-queue-name]": compaction-queue-b1
       "[connector.zookeeper.connect-url]": ${deploy.runtime.connector-zk-url}
-    service-launcher-b5:
-      order: 4
-      groups:
-        - "service"
-        - "service-hudi"
-        - "service-hudi-launcher"
-      source-jar: service-launcher-b2b5-1.0.0-SNAPSHOT.jar
-      replicas: 6
-      environments:
-        "[connector.hadoop.kerberos-principal]": ${deploy.runtime.user}/$\{hostname}.hdp.dc@ECLD.COM
-        "[connector.hadoop.kerberos-keytab-path]": ${deploy.runtime.kerberos-keytab-path}
-        "[connector.hudi.app-hdfs-path]": ${deploy.runtime.hudi.app-hdfs-path}
-        "[connector.hudi.app-test-hdfs-path]": ${deploy.runtime.hudi.app-test-hdfs-path}
-        "[connector.hudi.victoria-push-url]": ${deploy.runtime.hudi.victoria-push-url}
-        "[connector.hudi.loki-push-url]": ${deploy.runtime.hudi.loki-push-url}
-      arguments:
-        "[spring.application.name]": service-launcher-b5
-        "[connector.cluster.cluster]": b5
-        "[connector.cluster.sync-queue-name]": sync-queue-b5
-        "[connector.cluster.compaction-queue-name]": compaction-queue-b5
-        "[connector.zookeeper.connect-url]": ${deploy.runtime.connector-zk-url}
     service-launcher-a4:
       order: 4
       groups:
@@ -227,12 +227,10 @@ public interface Constants {
 
     String COMPACTION_QUEUE_PRE = "compaction-queue-pre";
     String COMPACTION_QUEUE_B1 = "compaction-queue-b1";
-    String COMPACTION_QUEUE_B5 = "compaction-queue-b5";
     String COMPACTION_QUEUE_A4 = "compaction-queue-a4";
     String COMPACTION_QUEUE_B12 = "compaction-queue-b12";
 
     String CLUSTER_B1 = "b1";
-    String CLUSTER_B5 = "b5";
     String CLUSTER_A4 = "a4";
     String CLUSTER_B12 = "b12";
 
@@ -22,7 +22,6 @@ public class YarnClusters {
             MapUtil.<String, Cluster>builder()
                     .put("a4", new Cluster("http://132.121.107.91:8088"))
                     .put("b1", new Cluster("http://132.122.98.13:8088"))
-                    .put("b5", new Cluster("http://132.122.116.12:8088"))
                     .put("b12", new Cluster("http://132.126.207.125:8088"))
                     .build()
     );
@@ -1,12 +0,0 @@
-package com.lanyuanxiaoyao.service.forest.service.launcher.impl;
-
-import com.dtflys.forest.annotation.BaseRequest;
-import com.lanyuanxiaoyao.service.forest.service.launcher.LauncherService;
-
-/**
- * @author lanyuanxiaoyao
- * @date 2023-06-06
- */
-@BaseRequest(baseURL = "http://service-launcher-b5")
-public interface B5LauncherService extends LauncherService {
-}
@@ -26,9 +26,9 @@ public class ScheduleStrategyProvider {
         return Lists.immutable.of(
                 ScheduleStrategyImpl.simple(false, "distribute_schedule", "Timed distributed schedule", DistributeScheduleJob.class, "0/2 * * * * ?"),
                 // Regular schedule
-                // ScheduleStrategyImpl.simple("daily_schedule", "Regular full-table schedule", DailyScheduleJob.class, "0 50 1,4,7,10,13,16,19 * * ?"),
+                ScheduleStrategyImpl.simple("daily_schedule", "Regular full-table schedule", DailyScheduleJob.class, "0 50 1,4,7,10,13,16,19 * * ?"),
                 // Regular schedule (20240925: no runs at 11:00 or 14:00)
-                ScheduleStrategyImpl.simple("daily_schedule", "Regular full-table schedule", DailyScheduleJob.class, "0 50 1,4,7,16,19 * * ?"),
+                // ScheduleStrategyImpl.simple("daily_schedule", "Regular full-table schedule", DailyScheduleJob.class, "0 50 1,4,7,16,19 * * ?"),
                 // Regular schedule (20240925: no runs at 8:00, 11:00, or 14:00)
                 // ScheduleStrategyImpl.simple("daily_schedule", "Regular full-table schedule", DailyScheduleJob.class, "0 50 1,4,16,19 * * ?"),
                 // Key-table schedule
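For reference, these are Quartz cron expressions with fields seconds, minutes, hours, day-of-month, month, day-of-week: "0 50 1,4,7,10,13,16,19 * * ?" fires at minute 50 of each listed hour, every day, and "0/2 * * * * ?" fires every two seconds. A small self-contained check (assuming only the quartz library on the classpath; not code from this repository):

    import java.util.Date;
    import org.quartz.CronExpression;

    public class CronCheck {
        public static void main(String[] args) throws Exception {
            // Prints the next fire time of the full-table schedule after now
            CronExpression full = new CronExpression("0 50 1,4,7,10,13,16,19 * * ?");
            System.out.println(full.getNextValidTimeAfter(new Date()));
        }
    }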
@@ -70,7 +70,7 @@ public class DistributeScheduleJob extends BaseScheduleJob {
             if (cluster.isPresent() && cluster.get().available(metadata)) {
                 return cluster.get().queue();
             } else {
-                logger.warn(StrUtil.format("{} cluster not found or busy"));
+                logger.warn(StrUtil.format("{} cluster not found or busy", recommendCluster));
             }
         }
         for (Cluster cluster : clusters) {
@@ -22,8 +22,8 @@ public class B12Cluster extends Cluster {
                 Constants.CLUSTER_B12,
                 Constants.COMPACTION_QUEUE_B12,
                 AvailableStrategy.and(
-                        new QueueSizeLimit(client, Constants.COMPACTION_QUEUE_B12, 20),
-                        new YarnQueueUsedLimit(yarnService, Constants.CLUSTER_B12, "default", 0.9)
+                        new QueueSizeLimit(client, Constants.COMPACTION_QUEUE_B12, 50),
+                        new YarnQueueUsedLimit(yarnService, Constants.CLUSTER_B12, "default", 1.0)
                 )
         );
     }
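The repository's AvailableStrategy type is not shown in this diff; from its usage here (composed with and(), and replaced by the lambda metadata -> false in the deleted B5Cluster below) it behaves like a composable predicate over schedule metadata. A hypothetical sketch of that shape, for illustration only:

    import java.util.Map;

    // Assumed shape, inferred from usage; not the repository's actual definition
    @FunctionalInterface
    interface AvailableStrategy {
        boolean available(Map<String, String> metadata);

        // Composes strategies so the cluster counts as available only if all limits pass
        static AvailableStrategy and(AvailableStrategy... strategies) {
            return metadata -> {
                for (AvailableStrategy s : strategies) {
                    if (!s.available(metadata)) {
                        return false;
                    }
                }
                return true;
            };
        }
    }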
@@ -1,28 +0,0 @@
-package com.lanyuanxiaoyao.service.scheduler.quartz.distribute.cluster;
-
-import com.lanyuanxiaoyao.service.common.Constants;
-import com.lanyuanxiaoyao.service.forest.service.YarnService;
-import org.springframework.cloud.client.discovery.DiscoveryClient;
-import org.springframework.stereotype.Component;
-
-/**
- * B5
- *
- * @author lanyuanxiaoyao
- * @date 2023-06-08
- */
-@SuppressWarnings("SpringJavaInjectionPointsAutowiringInspection")
-@Component
-public class B5Cluster extends Cluster {
-    public B5Cluster(DiscoveryClient client, YarnService yarnService) {
-        super(
-                Constants.CLUSTER_B5,
-                Constants.COMPACTION_QUEUE_B5,
-                /* AvailableStrategy.and(
-                        new QueueSizeLimit(client, Constants.COMPACTION_QUEUE_B5, 10),
-                        new YarnQueueUsedLimit(yarnService, Constants.CLUSTER_B5, "ten_iap.datalake", 0.9)
-                ) */
-                metadata -> false
-        );
-    }
-}
@@ -117,7 +117,7 @@ public class ScheduleHelper {
         // Override specific requests here, in one place
         // CRM key tables get the A4 cluster exclusively
         if (TagsHelper.existsTag(meta.getTags(), Constants.TAGS_CRM_FOCUS)) {
-            finalMetadata.put(Constants.SCHEDULE_FORCE, Constants.CLUSTER_A4);
+            finalMetadata.put(Constants.SCHEDULE_RECOMMEND, Constants.CLUSTER_A4);
         } else {
             finalMetadata.put(Constants.SCHEDULE_ESCAPE, Constants.CLUSTER_A4);
         }
@@ -13,7 +13,6 @@ const commonInfo = {
     compaction: {
         'b12': 'default',
         'b1': 'datalake',
-        'b5': 'ten_iap.datalake',
        'a4': 'ten_iap.datalake',
     },
     compaction_names() {
@@ -7,7 +7,4 @@ yarn:
   web-urls:
     a4: http://132.121.107.91:8088
     b1: http://132.122.98.13:8088
-    b4: http://132.122.112.30:8088
-    b5: http://132.122.116.12:8088
-    t5: http://132.121.126.84:8088
     b12: http://132.126.207.125:8088
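These are YARN ResourceManager web UIs; a quick probe of the standard /ws/v1/cluster/info REST endpoint (a generic sketch, not code from this repository) can confirm each remaining URL is reachable:

    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;

    public class YarnProbe {
        public static void main(String[] args) throws Exception {
            String[] webUrls = {
                    "http://132.121.107.91:8088",   // a4
                    "http://132.122.98.13:8088",    // b1
                    "http://132.126.207.125:8088",  // b12
            };
            HttpClient client = HttpClient.newHttpClient();
            for (String url : webUrls) {
                HttpRequest request = HttpRequest.newBuilder()
                        .uri(URI.create(url + "/ws/v1/cluster/info"))
                        .build();
                HttpResponse<String> response =
                        client.send(request, HttpResponse.BodyHandlers.ofString());
                System.out.println(url + " -> HTTP " + response.statusCode());
            }
        }
    }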
@@ -11,6 +11,7 @@ import com.lanyuanxiaoyao.service.common.utils.RecordHelper;
 import com.lanyuanxiaoyao.service.common.utils.TableMetaHelper;
 import com.lanyuanxiaoyao.service.sync.configuration.GlobalConfiguration;
 import com.lanyuanxiaoyao.service.sync.functions.type.TypeConverter;
+import com.lanyuanxiaoyao.service.sync.utils.ExceptionUtils;
 import com.lanyuanxiaoyao.service.sync.utils.JacksonUtils;
 import com.lanyuanxiaoyao.service.sync.utils.MetricsUtils;
 import com.lanyuanxiaoyao.service.sync.utils.StatusUtils;
@@ -94,7 +95,7 @@ public class Record2RowDataFunction extends RichMapFunction<Record, List<RowData>>
         Map<String, Object> current = RecordHelper.getCurrentStatement(record);
         if (Objects.isNull(current)) {
             logger.error("Record: {}", mapper.writeValueAsString(record));
-            throw new RuntimeException("Current cannot be null");
+            return ExceptionUtils.throwAndPrint(logger, "Current cannot be null");
         }
 
         // If an update changes the value of a filter field, a delete must be issued first as well
@@ -5,6 +5,7 @@ import cn.hutool.core.util.StrUtil;
 import com.lanyuanxiaoyao.service.common.Constants;
 import com.lanyuanxiaoyao.service.common.entity.TableMeta;
 import com.lanyuanxiaoyao.service.common.utils.LogHelper;
+import com.lanyuanxiaoyao.service.sync.utils.ExceptionUtils;
 import java.math.BigDecimal;
 import java.time.format.DateTimeFormatter;
 import java.util.ArrayList;
@@ -115,7 +116,8 @@ public class TypeConverterV2 implements TypeConverter {
                 return NULLABLE_STRING_SCHEMA;
             }
         } catch (Throwable throwable) {
-            throw new RuntimeException(
+            return ExceptionUtils.throwAndPrint(
+                    logger,
                     StrUtil.format("Convert type failure {} {} {} length: {} scala: {}", table, field, type, length, scala),
                     throwable
             );
@@ -162,7 +164,8 @@ public class TypeConverterV2 implements TypeConverter {
                 return value;
             }
         } catch (Throwable throwable) {
-            throw new RuntimeException(
+            return ExceptionUtils.throwAndPrint(
+                    logger,
                     StrUtil.format("Convert value failure {} {} {}", schema.toString(), name, value),
                     throwable
             );
@@ -0,0 +1,26 @@
+package com.lanyuanxiaoyao.service.sync.utils;
+
+import org.slf4j.Logger;
+
+/**
+ * Handles throwing and logging exceptions
+ *
+ * @author lanyuanxiaoyao
+ * @date 2024-10-22
+ */
+public class ExceptionUtils {
+    public static <T> T throwAndPrint(Logger logger, String content) {
+        logger.error(content);
+        throw new RuntimeException(content);
+    }
+
+    public static <T> T throwAndPrint(Logger logger, Throwable throwable) {
+        logger.error(throwable.getMessage(), throwable);
+        throw new RuntimeException(throwable);
+    }
+
+    public static <T> T throwAndPrint(Logger logger, String content, Throwable throwable) {
+        logger.error(content, throwable);
+        throw new RuntimeException(content, throwable);
+    }
+}
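throwAndPrint always throws, but it declares a generic return type <T> so call sites inside non-void methods can use it in a return statement and keep the compiler satisfied, which is exactly how the TypeConverterV2 and Record2RowDataFunction hunks above use it. A hypothetical caller, for illustration only (assumes the same package or an import of ExceptionUtils):

    import org.slf4j.Logger;

    public class ConvertExample {
        public static String convertOrFail(Logger logger, String raw) {
            try {
                return raw.trim();
            } catch (Throwable t) {
                // Compiles as a String-returning branch because throwAndPrint
                // is declared to return T, even though it always throws
                return ExceptionUtils.throwAndPrint(logger, "Convert failure: " + raw, t);
            }
        }
    }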
@@ -75,11 +75,9 @@ public class ZkUtils {
                     .withMode(CreateMode.EPHEMERAL)
                     .forPath(lockPath, runMeta.getBytes());
         } catch (KeeperException.NodeExistsException e) {
-            logger.error("Lock exists for " + lockPath, e);
-            throw new RuntimeException(e);
+            ExceptionUtils.throwAndPrint(logger, "Lock exists for " + lockPath, e);
         } catch (Exception e) {
-            logger.error("Unknown error", e);
-            throw new RuntimeException(e);
+            ExceptionUtils.throwAndPrint(logger, "Unknown error", e);
         }
     }
 
@@ -91,8 +89,7 @@ public class ZkUtils {
                 }
             }
         } catch (Exception e) {
-            logger.error("Unknown error", e);
-            throw new RuntimeException(e);
+            ExceptionUtils.throwAndPrint(logger, "Unknown error", e);
         }
     }
 }
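The hunk above touches a standard ZooKeeper ephemeral-node locking pattern: creating the node fails with NodeExistsException while another session holds it, and the node disappears automatically when the owner's session ends. A minimal sketch with Apache Curator (assumed client setup; not the repository's actual ZkUtils):

    import org.apache.curator.framework.CuratorFramework;
    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.KeeperException;

    public class LockExample {
        public static void tryLock(CuratorFramework client, String lockPath, byte[] runMeta) throws Exception {
            try {
                // Ephemeral node doubles as the lock and its liveness marker
                client.create()
                        .creatingParentsIfNeeded()
                        .withMode(CreateMode.EPHEMERAL)
                        .forPath(lockPath, runMeta);
            } catch (KeeperException.NodeExistsException e) {
                // Lock already held; it is released when the holder's session closes
                throw new IllegalStateException("Lock exists for " + lockPath, e);
            }
        }
    }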