diff --git a/config/b1e1/core-site.xml b/config/b1e1/core-site.xml
new file mode 100644
index 0000000..9d72427
--- /dev/null
+++ b/config/b1e1/core-site.xml
@@ -0,0 +1,262 @@
+
+
+
+
+
+ fs.azure.user.agent.prefix
+ User-Agent: APN/1.0 Hortonworks/1.0 HDP/
+
+
+
+ fs.defaultFS
+ hdfs://b1
+ true
+
+
+
+ fs.s3a.fast.upload
+ true
+
+
+
+ fs.s3a.fast.upload.buffer
+ disk
+
+
+
+ fs.s3a.multipart.size
+ 67108864
+
+
+
+ fs.trash.interval
+ 360
+
+
+
+ ha.failover-controller.active-standby-elector.zk.op.retries
+ 120
+
+
+
+ ha.zookeeper.acl
+ sasl:nn:rwcda
+
+
+
+ ha.zookeeper.quorum
+ b1m2.hdp.dc:2181,b1m3.hdp.dc:2181,b1m4.hdp.dc:2181,b1m5.hdp.dc:2181,b1m6.hdp.dc:2181
+
+
+
+ hadoop.http.authentication.kerberos.keytab
+ /etc/security/keytabs/spnego.service.keytab
+
+
+
+ hadoop.http.authentication.kerberos.principal
+ HTTP/_HOST@ECLD.COM
+
+
+
+ hadoop.http.authentication.signature.secret.file
+ /etc/security/http_secret
+
+
+
+ hadoop.http.authentication.simple.anonymous.allowed
+ true
+
+
+
+ hadoop.http.authentication.type
+ simple
+
+
+
+ hadoop.http.cross-origin.allowed-headers
+ X-Requested-With,Content-Type,Accept,Origin,WWW-Authenticate,Accept-Encoding,Transfer-Encoding
+
+
+
+ hadoop.http.cross-origin.allowed-methods
+ GET,PUT,POST,OPTIONS,HEAD,DELETE
+
+
+
+ hadoop.http.cross-origin.allowed-origins
+ *
+
+
+
+ hadoop.http.cross-origin.max-age
+ 1800
+
+
+
+ hadoop.http.filter.initializers
+ org.apache.hadoop.security.AuthenticationFilterInitializer,org.apache.hadoop.security.HttpCrossOriginFilterInitializer
+
+
+
+ hadoop.proxyuser.hdfs.groups
+ *
+
+
+
+ hadoop.proxyuser.hdfs.hosts
+ *
+
+
+
+ hadoop.proxyuser.hive.groups
+ *
+
+
+
+ hadoop.proxyuser.hive.hosts
+ *
+
+
+
+ hadoop.proxyuser.HTTP.groups
+ *
+
+
+
+ hadoop.proxyuser.HTTP.hosts
+ *
+
+
+
+ hadoop.proxyuser.iap.groups
+ *
+
+
+
+ hadoop.proxyuser.iap.hosts
+ *
+
+
+
+ hadoop.proxyuser.livy.groups
+ *
+
+
+
+ hadoop.proxyuser.livy.hosts
+ *
+
+
+
+ hadoop.proxyuser.yarn.groups
+ *
+
+
+
+ hadoop.proxyuser.yarn.hosts
+ *
+
+
+
+ hadoop.rpc.protection
+ authentication,privacy
+
+
+
+ hadoop.security.auth_to_local
+ RULE:[1:$1@$0](hbase-b1@ECLD.COM)s/.*/hbase/
+RULE:[1:$1@$0](hdfs-b1@ECLD.COM)s/.*/hdfs/
+RULE:[1:$1@$0](spark-b1@ECLD.COM)s/.*/spark/
+RULE:[1:$1@$0](yarn-ats-b1@ECLD.COM)s/.*/yarn-ats/
+RULE:[1:$1@$0](.*@ECLD.COM)s/@.*//
+RULE:[2:$1@$0](dn@ECLD.COM)s/.*/hdfs/
+RULE:[2:$1@$0](hbase@ECLD.COM)s/.*/hbase/
+RULE:[2:$1@$0](hive@ECLD.COM)s/.*/hive/
+RULE:[2:$1@$0](jhs@ECLD.COM)s/.*/mapred/
+RULE:[2:$1@$0](jn@ECLD.COM)s/.*/hdfs/
+RULE:[2:$1@$0](livy@ECLD.COM)s/.*/livy/
+RULE:[2:$1@$0](nm@ECLD.COM)s/.*/yarn/
+RULE:[2:$1@$0](nn@ECLD.COM)s/.*/hdfs/
+RULE:[2:$1@$0](rangeradmin@ECLD.COM)s/.*/ranger/
+RULE:[2:$1@$0](rangerlookup@ECLD.COM)s/.*/ranger/
+RULE:[2:$1@$0](rangertagsync@ECLD.COM)s/.*/rangertagsync/
+RULE:[2:$1@$0](rangerusersync@ECLD.COM)s/.*/rangerusersync/
+RULE:[2:$1@$0](rm@ECLD.COM)s/.*/yarn/
+RULE:[2:$1@$0](spark@ECLD.COM)s/.*/spark/
+RULE:[2:$1@$0](yarn@ECLD.COM)s/.*/yarn/
+RULE:[2:$1@$0](yarn-ats-hbase@ECLD.COM)s/.*/yarn-ats/
+DEFAULT
+
+
+
+ hadoop.security.authentication
+ kerberos
+
+
+
+ hadoop.security.authorization
+ true
+
+
+
+ hadoop.security.instrumentation.requires.admin
+ false
+
+
+
+ io.compression.codec.lzo.class
+ com.hadoop.compression.lzo.LzoCodec
+
+
+
+ io.compression.codecs
+ org.apache.hadoop.io.compress.GzipCodec,com.hadoop.compression.lzo.LzoCodec,com.hadoop.compression.lzo.LzopCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec
+
+
+
+ io.file.buffer.size
+ 131072
+
+
+
+ io.serializations
+ org.apache.hadoop.io.serializer.WritableSerialization
+
+
+
+ ipc.client.connect.max.retries
+ 50
+
+
+
+ ipc.client.connection.maxidletime
+ 30000
+
+
+
+ ipc.client.idlethreshold
+ 8000
+
+
+
+ ipc.server.tcpnodelay
+ true
+
+
+
+ mapreduce.jobtracker.webinterface.trusted
+ false
+
+
+
+ ipc.client.fallback-to-simple-auth-allowed
+ true
+
+
+
+ fs.hdfs.impl.disable.cache
+ true
+
+
+
diff --git a/config/b1e1/hdfs-site.xml b/config/b1e1/hdfs-site.xml
new file mode 100644
index 0000000..9b4eda4
--- /dev/null
+++ b/config/b1e1/hdfs-site.xml
@@ -0,0 +1,698 @@
+
+
+
+ dfs.block.access.token.enable
+ true
+
+
+
+ dfs.blockreport.initialDelay
+ 120
+
+
+
+ dfs.blocksize
+ 134217728
+
+
+
+ dfs.client.failover.proxy.provider.b1
+ org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
+
+
+
+ dfs.client.failover.proxy.provider.b2
+ org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
+
+
+
+ dfs.client.read.shortcircuit
+ true
+
+
+
+ dfs.client.read.shortcircuit.streams.cache.size
+ 4096
+
+
+
+ dfs.client.retry.policy.enabled
+ false
+
+
+
+ dfs.cluster.administrators
+ hdfs
+
+
+
+ dfs.content-summary.limit
+ 5000
+
+
+
+ dfs.data.transfer.protection
+ authentication,privacy
+
+
+
+ dfs.datanode.address
+ 0.0.0.0:1019
+
+
+
+ dfs.datanode.balance.bandwidthPerSec
+ 6250000
+
+
+
+ dfs.datanode.data.dir
+ [DISK]file:///data1/hadoop/hdfs/data,[DISK]file:///data2/hadoop/hdfs/data,[DISK]file:///data3/hadoop/hdfs/data,[DISK]file:///data4/hadoop/hdfs/data,[DISK]file:///data5/hadoop/hdfs/data,[DISK]file:///data6/hadoop/hdfs/data,[DISK]file:///data7/hadoop/hdfs/data,[DISK]file:///data8/hadoop/hdfs/data,[DISK]file:///data9/hadoop/hdfs/data,[DISK]file:///data10/hadoop/hdfs/data,[DISK]file:///data11/hadoop/hdfs/data,[DISK]file:///data12/hadoop/hdfs/data
+ true
+
+
+
+ dfs.datanode.data.dir.perm
+ 750
+
+
+
+ dfs.datanode.du.reserved
+ 26405499904
+
+
+
+ dfs.datanode.failed.volumes.tolerated
+ 2
+ true
+
+
+
+ dfs.datanode.http.address
+ 0.0.0.0:1022
+
+
+
+ dfs.datanode.https.address
+ 0.0.0.0:50475
+
+
+
+ dfs.datanode.ipc.address
+ 0.0.0.0:8010
+
+
+
+ dfs.datanode.kerberos.principal
+ dn/_HOST@ECLD.COM
+
+
+
+ dfs.datanode.keytab.file
+ /etc/security/keytabs/dn.service.keytab
+
+
+
+ dfs.datanode.max.transfer.threads
+ 16384
+
+
+
+ dfs.domain.socket.path
+ /var/lib/hadoop-hdfs/dn_socket
+
+
+
+ dfs.encrypt.data.transfer.cipher.suites
+ AES/CTR/NoPadding
+
+
+
+ dfs.ha.automatic-failover.enabled
+ true
+
+
+
+ dfs.ha.fencing.methods
+ shell(/bin/true)
+
+
+
+ dfs.ha.namenodes.b1
+ nn1,nn2
+
+
+
+ dfs.ha.namenodes.b2
+ nn3,nn4
+
+
+
+ dfs.heartbeat.interval
+ 3
+
+
+
+ dfs.hosts.exclude
+ /etc/hadoop/conf/dfs.exclude
+
+
+
+ dfs.http.policy
+ HTTP_ONLY
+
+
+
+ dfs.https.port
+ 50470
+
+
+
+ dfs.internal.nameservices
+ b1,b2
+
+
+
+ dfs.journalnode.edits.dir.b1
+ /data2/hadoop/hdfs/journal
+
+
+
+ dfs.journalnode.edits.dir.b2
+ /data2/hadoop/hdfs/journal
+
+
+
+ dfs.journalnode.http-address
+ 0.0.0.0:8480
+
+
+
+ dfs.journalnode.https-address
+ 0.0.0.0:8481
+
+
+
+ dfs.journalnode.kerberos.internal.spnego.principal
+ HTTP/_HOST@ECLD.COM
+
+
+
+ dfs.journalnode.kerberos.principal
+ jn/_HOST@ECLD.COM
+
+
+
+ dfs.journalnode.keytab.file
+ /etc/security/keytabs/jn.service.keytab
+
+
+
+ dfs.namenode.accesstime.precision
+ 0
+
+
+
+ dfs.namenode.acls.enabled
+ true
+
+
+
+ dfs.namenode.audit.log.async
+ true
+
+
+
+ dfs.namenode.avoid.read.stale.datanode
+ true
+
+
+
+ dfs.namenode.avoid.write.stale.datanode
+ true
+
+
+
+ dfs.namenode.checkpoint.dir
+ /data/hadoop/hdfs/namesecondary
+
+
+
+ dfs.namenode.checkpoint.edits.dir
+ ${dfs.namenode.checkpoint.dir}
+
+
+
+ dfs.namenode.checkpoint.period
+ 21600
+
+
+
+ dfs.namenode.checkpoint.txns
+ 1000000
+
+
+
+ dfs.namenode.fslock.fair
+ false
+
+
+
+ dfs.namenode.handler.count
+ 200
+
+
+
+ dfs.namenode.http-address.b1.nn1
+ b1m2.hdp.dc:50070
+
+
+
+ dfs.namenode.http-address.b1.nn2
+ b1m3.hdp.dc:50070
+
+
+
+ dfs.namenode.http-address.b2.nn3
+ b1m5.hdp.dc:50070
+
+
+
+ dfs.namenode.http-address.b2.nn4
+ b1m6.hdp.dc:50070
+
+
+
+ dfs.namenode.https-address.b1.nn1
+ b1m2.hdp.dc:50470
+
+
+
+ dfs.namenode.https-address.b1.nn2
+ b1m3.hdp.dc:50470
+
+
+
+ dfs.namenode.https-address.b2.nn3
+ b1m5.hdp.dc:50470
+
+
+
+ dfs.namenode.https-address.b2.nn4
+ b1m6.hdp.dc:50470
+
+
+
+ dfs.namenode.kerberos.internal.spnego.principal
+ HTTP/_HOST@ECLD.COM
+
+
+
+ dfs.namenode.kerberos.principal
+ nn/_HOST@ECLD.COM
+
+
+
+ dfs.namenode.keytab.file
+ /etc/security/keytabs/nn.service.keytab
+
+
+
+ dfs.namenode.max.extra.edits.segments.retained
+ 180
+
+
+
+ dfs.namenode.name.dir
+ /data1/hadoop/hdfs/namenode,/data2/hadoop/hdfs/namenode
+ true
+
+
+
+ dfs.namenode.name.dir.restore
+ true
+
+
+
+ dfs.namenode.num.extra.edits.retained
+ 18000
+
+
+
+ dfs.namenode.rpc-address.b1.nn1
+ b1m2.hdp.dc:8020
+
+
+
+ dfs.namenode.rpc-address.b1.nn2
+ b1m3.hdp.dc:8020
+
+
+
+ dfs.namenode.rpc-address.b2.nn3
+ b1m5.hdp.dc:8020
+
+
+
+ dfs.namenode.rpc-address.b2.nn4
+ b1m6.hdp.dc:8020
+
+
+
+ dfs.namenode.safemode.threshold-pct
+ 0.99
+
+
+
+ dfs.namenode.shared.edits.dir.b1
+ qjournal://b1m2.hdp.dc:8485;b1m3.hdp.dc:8485;b1m5.hdp.dc:8485/b1
+
+
+
+ dfs.namenode.shared.edits.dir.b2
+ qjournal://b1m2.hdp.dc:8485;b1m3.hdp.dc:8485;b1m5.hdp.dc:8485/b2
+
+
+
+ dfs.namenode.stale.datanode.interval
+ 30000
+
+
+
+ dfs.namenode.startup.delay.block.deletion.sec
+ 3600
+
+
+
+ dfs.namenode.write.stale.datanode.ratio
+ 1.0f
+
+
+
+ dfs.nameservices
+ b1,b2,b3,b4,a3,a4,f1,d2,e1
+
+
+
+ dfs.permissions.ContentSummary.subAccess
+ true
+
+
+
+ dfs.permissions.enabled
+ true
+
+
+
+ dfs.permissions.superusergroup
+ hdfs
+
+
+
+ dfs.replication
+ 3
+
+
+
+ dfs.replication.max
+ 50
+
+
+
+ dfs.web.authentication.kerberos.keytab
+ /etc/security/keytabs/spnego.service.keytab
+
+
+
+ dfs.web.authentication.kerberos.principal
+ HTTP/_HOST@ECLD.COM
+
+
+
+ dfs.webhdfs.enabled
+ true
+ true
+
+
+
+ fs.permissions.umask-mode
+ 022
+
+
+
+ hadoop.caller.context.enabled
+ true
+
+
+
+ manage.include.files
+ false
+
+
+
+ nfs.exports.allowed.hosts
+ * rw
+
+
+
+ nfs.file.dump.dir
+ /tmp/.hdfs-nfs
+
+
+
+ dfs.client.datanode-restart.timeout
+ 30
+
+
+
+ dfs.client.failover.proxy.provider.a4
+ org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
+
+
+ dfs.ha.namenodes.a4
+ nn1,nn2
+
+
+ dfs.namenode.http-address.a4.nn1
+ a4m1.hdp.dc:50070
+
+
+ dfs.namenode.http-address.a4.nn2
+ a4m2.hdp.dc:50070
+
+
+ dfs.namenode.https-address.a4.nn1
+ a4m1.hdp.dc:50470
+
+
+ dfs.namenode.https-address.a4.nn2
+ a4m2.hdp.dc:50470
+
+
+ dfs.namenode.rpc-address.a4.nn1
+ a4m1.hdp.dc:8020
+
+
+ dfs.namenode.rpc-address.a4.nn2
+ a4m2.hdp.dc:8020
+
+
+
+ dfs.client.failover.proxy.provider.a3
+ org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
+
+
+ dfs.ha.namenodes.a3
+ nn1,nn2
+
+
+ dfs.namenode.http-address.a3.nn1
+ a3m2.hdp.dc:50070
+
+
+ dfs.namenode.http-address.a3.nn2
+ a3m3.hdp.dc:50070
+
+
+ dfs.namenode.https-address.a3.nn1
+ a3m2.hdp.dc:50470
+
+
+ dfs.namenode.https-address.a3.nn2
+ a3m3.hdp.dc:50470
+
+
+ dfs.namenode.rpc-address.a3.nn1
+ a3m2.hdp.dc:8020
+
+
+ dfs.namenode.rpc-address.a3.nn2
+ a3m3.hdp.dc:8020
+
+
+
+ dfs.client.failover.proxy.provider.b3
+ org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
+
+
+ dfs.ha.namenodes.b3
+ nn1,nn2
+
+
+ dfs.namenode.http-address.b3.nn1
+ b3m2.hdp.dc:50070
+
+
+ dfs.namenode.http-address.b3.nn2
+ b3m3.hdp.dc:50070
+
+
+ dfs.namenode.https-address.b3.nn1
+ b3m2.hdp.dc:50470
+
+
+ dfs.namenode.https-address.b3.nn2
+ b3m3.hdp.dc:50470
+
+
+ dfs.namenode.rpc-address.b3.nn1
+ b3m2.hdp.dc:8020
+
+
+ dfs.namenode.rpc-address.b3.nn2
+ b3m3.hdp.dc:8020
+
+
+
+ dfs.client.failover.proxy.provider.b4
+ org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
+
+
+ dfs.ha.namenodes.b4
+ nn1,nn2
+
+
+ dfs.namenode.http-address.b4.nn1
+ b4m2.hdp.dc:50070
+
+
+ dfs.namenode.http-address.b4.nn2
+ b4m3.hdp.dc:50070
+
+
+ dfs.namenode.https-address.b4.nn1
+ b4m2.hdp.dc:50470
+
+
+ dfs.namenode.https-address.b4.nn2
+ b4m3.hdp.dc:50470
+
+
+ dfs.namenode.rpc-address.b4.nn1
+ b4m2.hdp.dc:8020
+
+
+ dfs.namenode.rpc-address.b4.nn2
+ b4m3.hdp.dc:8020
+
+
+
+ dfs.client.failover.proxy.provider.f1
+ org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
+
+
+ dfs.ha.namenodes.f1
+ nn1,nn2
+
+
+ dfs.namenode.http-address.f1.nn1
+ f1m2.hdp.dc:50070
+
+
+ dfs.namenode.http-address.f1.nn2
+ f1m3.hdp.dc:50070
+
+
+ dfs.namenode.https-address.f1.nn1
+ f1m2.hdp.dc:50470
+
+
+ dfs.namenode.https-address.f1.nn2
+ f1m3.hdp.dc:50470
+
+
+ dfs.namenode.rpc-address.f1.nn1
+ f1m2.hdp.dc:8020
+
+
+ dfs.namenode.rpc-address.f1.nn2
+ f1m3.hdp.dc:8020
+
+
+
+ dfs.client.failover.proxy.provider.d2
+ org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
+
+
+ dfs.ha.namenodes.d2
+ nn1,nn2
+
+
+ dfs.namenode.http-address.d2.nn1
+ d2m2.hdp.dc:50070
+
+
+ dfs.namenode.http-address.d2.nn2
+ d2m3.hdp.dc:50070
+
+
+ dfs.namenode.https-address.d2.nn1
+ d2m2.hdp.dc:50470
+
+
+ dfs.namenode.https-address.d2.nn2
+ d2m3.hdp.dc:50470
+
+
+ dfs.namenode.rpc-address.d2.nn1
+ d2m2.hdp.dc:8020
+
+
+ dfs.namenode.rpc-address.d2.nn2
+ d2m3.hdp.dc:8020
+
+
+
+ dfs.client.failover.proxy.provider.e1
+ org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
+
+
+ dfs.ha.namenodes.e1
+ nn1,nn2
+
+
+ dfs.namenode.http-address.e1.nn1
+ e1m2.hdp.dc:50070
+
+
+ dfs.namenode.http-address.e1.nn2
+ e1m3.hdp.dc:50070
+
+
+ dfs.namenode.https-address.e1.nn1
+ e1m2.hdp.dc:50470
+
+
+ dfs.namenode.https-address.e1.nn2
+ e1m3.hdp.dc:50470
+
+
+ dfs.namenode.rpc-address.e1.nn1
+ e1m2.hdp.dc:8020
+
+
+ dfs.namenode.rpc-address.e1.nn2
+ e1m3.hdp.dc:8020
+
+
+
diff --git a/config/b1e1/yarn-site.xml b/config/b1e1/yarn-site.xml
new file mode 100644
index 0000000..407d1f5
--- /dev/null
+++ b/config/b1e1/yarn-site.xml
@@ -0,0 +1,1026 @@
+
+
+
+ hadoop.http.cross-origin.allowed-origins
+ regex:.*[.]hdp[.]dc(:\d*)?
+
+
+
+ hadoop.registry.client.auth
+ kerberos
+
+
+
+ hadoop.registry.dns.bind-address
+ 0.0.0.0
+
+
+
+ hadoop.registry.dns.bind-port
+ 5354
+ true
+
+
+
+ hadoop.registry.dns.domain-name
+ ECLD.COM
+
+
+
+ hadoop.registry.dns.enabled
+ true
+
+
+
+ hadoop.registry.dns.zone-mask
+ 255.255.255.0
+
+
+
+ hadoop.registry.dns.zone-subnet
+ 172.17.0.0
+
+
+
+ hadoop.registry.jaas.context
+ Client
+
+
+
+ hadoop.registry.secure
+ true
+
+
+
+ hadoop.registry.system.accounts
+ sasl:yarn,sasl:jhs,sasl:hdfs-b1,sasl:rm,sasl:hive,sasl:spark
+
+
+
+ hadoop.registry.zk.quorum
+ b1m2.hdp.dc:2181,b1m3.hdp.dc:2181,b1m4.hdp.dc:2181,b1m5.hdp.dc:2181,b1m6.hdp.dc:2181
+
+
+
+ manage.include.files
+ false
+
+
+
+ yarn.acl.enable
+ true
+
+
+
+ yarn.admin.acl
+ *
+
+
+
+ yarn.application.classpath
+ $HADOOP_CONF_DIR,/usr/lib/edp/hadoop-3.2.2/share/hadoop/common/*, /usr/lib/edp/hadoop-3.2.2/share/hadoop/common/lib/*, /usr/lib/edp/hadoop-3.2.2/share/hadoop/hdfs/*, /usr/lib/edp/hadoop-3.2.2/share/hadoop/hdfs/lib/*, /usr/lib/edp/hadoop-3.2.2/share/hadoop/yarn/*, /usr/lib/edp/hadoop-3.2.2/share/hadoop/yarn/lib/*
+
+
+
+ yarn.client.nodemanager-connect.max-wait-ms
+ 60000
+
+
+
+ yarn.client.nodemanager-connect.retry-interval-ms
+ 10000
+
+
+
+ yarn.http.policy
+ HTTP_ONLY
+
+
+
+ yarn.log-aggregation-enable
+ true
+
+
+
+ yarn.log-aggregation.retain-seconds
+ 2592000
+
+
+
+ yarn.log.server.url
+ http://b1m4.hdp.dc:19888/jobhistory/logs
+
+
+
+ yarn.log.server.web-service.url
+ http://b1m4.hdp.dc:8188/ws/v1/applicationhistory
+
+
+
+ yarn.node-labels.enabled
+ false
+
+
+
+ yarn.node-labels.fs-store.retry-policy-spec
+ 2000, 500
+
+
+
+ yarn.node-labels.fs-store.root-dir
+ /system/yarn/node-labels
+
+
+
+ yarn.nodemanager.address
+ 0.0.0.0:45454
+
+
+
+ yarn.nodemanager.admin-env
+ MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX
+
+
+
+ yarn.nodemanager.aux-services
+ mapreduce_shuffle,spark_shuffle,timeline_collector,sparkv2_shuffle
+
+
+
+ yarn.nodemanager.aux-services.mapreduce_shuffle.class
+ org.apache.hadoop.mapred.ShuffleHandler
+
+
+
+ yarn.nodemanager.aux-services.spark_shuffle.class
+ org.apache.spark.network.yarn.YarnShuffleService
+
+
+
+ yarn.nodemanager.aux-services.spark_shuffle.classpath
+ /usr/lib/edp/spark-3.1.1-bin-hadoop3.2/yarn/*
+
+
+
+ yarn.nodemanager.aux-services.timeline_collector.class
+ org.apache.hadoop.yarn.server.timelineservice.collector.PerNodeTimelineCollectorsAuxService
+
+
+
+ yarn.nodemanager.aux-services.sparkv2_shuffle.class
+ org.apache.spark.network.yarn.YarnShuffleService
+
+
+
+ yarn.nodemanager.aux-services.sparkv2_shuffle.classpath
+ /usr/lib/edp/spark-2.4.7-bin-hadoop-3.1.2/yarn/*
+
+
+
+ yarn.nodemanager.bind-host
+ 0.0.0.0
+
+
+
+ yarn.nodemanager.container-executor.class
+ org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor
+
+
+
+ yarn.nodemanager.container-metrics.unregister-delay-ms
+ 60000
+
+
+
+ yarn.nodemanager.container-monitor.interval-ms
+ 3000
+
+
+
+ yarn.nodemanager.delete.debug-delay-sec
+ 86400
+
+
+
+ yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage
+ 90
+
+
+
+ yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb
+ 1000
+
+
+
+ yarn.nodemanager.disk-health-checker.min-healthy-disks
+ 0.25
+
+
+
+ yarn.nodemanager.health-checker.interval-ms
+ 135000
+
+
+
+ yarn.nodemanager.health-checker.script.timeout-ms
+ 60000
+
+
+
+ yarn.nodemanager.keytab
+ /etc/security/keytabs/nm.service.keytab
+
+
+
+ yarn.nodemanager.linux-container-executor.cgroups.strict-resource-usage
+ false
+
+
+
+ yarn.nodemanager.linux-container-executor.group
+ hadoop
+
+
+
+ yarn.nodemanager.linux-container-executor.nonsecure-mode.limit-users
+ true
+
+
+
+ yarn.nodemanager.local-dirs
+ /data1/hadoop/yarn/local,/data2/hadoop/yarn/local,/data3/hadoop/yarn/local,/data4/hadoop/yarn/local,/data5/hadoop/yarn/local,/data6/hadoop/yarn/local,/data7/hadoop/yarn/local,/data8/hadoop/yarn/local,/data9/hadoop/yarn/local,/data10/hadoop/yarn/local,/data11/hadoop/yarn/local,/data12/hadoop/yarn/local
+
+
+
+ yarn.nodemanager.log-aggregation.compression-type
+ gz
+
+
+
+ yarn.nodemanager.log-aggregation.debug-enabled
+ false
+
+
+
+ yarn.nodemanager.log-aggregation.num-log-files-per-app
+ 30
+
+
+
+ yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds
+ 3600
+
+
+
+ yarn.nodemanager.log-dirs
+ /data1/hadoop/yarn/log,/data2/hadoop/yarn/log,/data3/hadoop/yarn/log,/data4/hadoop/yarn/log,/data5/hadoop/yarn/log,/data6/hadoop/yarn/log,/data7/hadoop/yarn/log,/data8/hadoop/yarn/log,/data9/hadoop/yarn/log,/data10/hadoop/yarn/log,/data11/hadoop/yarn/log,/data12/hadoop/yarn/log
+
+
+
+ yarn.nodemanager.log.retain-seconds
+ 604800
+
+
+
+ yarn.nodemanager.principal
+ nm/_HOST@ECLD.COM
+
+
+
+ yarn.nodemanager.recovery.dir
+ /var/log/hadoop-yarn/nodemanager/recovery-state
+
+
+
+ yarn.nodemanager.recovery.enabled
+ true
+
+
+
+ yarn.nodemanager.recovery.supervised
+ true
+
+
+
+ yarn.nodemanager.remote-app-log-dir
+ /app-logs
+
+
+
+ yarn.nodemanager.remote-app-log-dir-suffix
+ logs
+
+
+
+ yarn.nodemanager.resource-plugins
+
+
+
+
+ yarn.nodemanager.resource-plugins.gpu.allowed-gpu-devices
+
+
+
+
+ yarn.nodemanager.resource-plugins.gpu.docker-plugin
+
+
+
+
+ yarn.nodemanager.resource-plugins.gpu.docker-plugin.nvidiadocker-v1.endpoint
+
+
+
+
+ yarn.nodemanager.resource-plugins.gpu.path-to-discovery-executables
+
+
+
+
+ yarn.nodemanager.resource.cpu-vcores
+ 39
+
+
+
+ yarn.nodemanager.resource.memory-mb
+ 184320
+
+
+
+ yarn.nodemanager.resource.percentage-physical-cpu-limit
+ 80
+
+
+
+ yarn.nodemanager.resourcemanager.connect.wait.secs
+ 1800
+
+
+
+ yarn.nodemanager.runtime.linux.allowed-runtimes
+ default,docker
+
+
+
+ yarn.nodemanager.runtime.linux.docker.allowed-container-networks
+ host,none,bridge
+
+
+
+ yarn.nodemanager.runtime.linux.docker.capabilities
+
+ CHOWN,DAC_OVERRIDE,FSETID,FOWNER,MKNOD,NET_RAW,SETGID,SETUID,SETFCAP,
+ SETPCAP,NET_BIND_SERVICE,SYS_CHROOT,KILL,AUDIT_WRITE
+
+
+
+ yarn.nodemanager.runtime.linux.docker.default-container-network
+ host
+
+
+
+ yarn.nodemanager.runtime.linux.docker.privileged-containers.acl
+
+
+
+
+ yarn.nodemanager.runtime.linux.docker.privileged-containers.allowed
+ false
+
+
+
+ yarn.nodemanager.vmem-check-enabled
+ false
+
+
+
+ yarn.nodemanager.vmem-pmem-ratio
+ 2.1
+
+
+
+ yarn.nodemanager.webapp.cross-origin.enabled
+ true
+
+
+
+ yarn.nodemanager.webapp.spnego-keytab-file
+ /etc/security/keytabs/spnego.service.keytab
+
+
+
+ yarn.nodemanager.webapp.spnego-principal
+ HTTP/_HOST@ECLD.COM
+
+
+
+ yarn.resourcemanager.address
+ b1m2.hdp.dc:8050
+
+
+
+ yarn.resourcemanager.admin.address
+ b1m2.hdp.dc:8141
+
+
+
+ yarn.resourcemanager.am.max-attempts
+ 2
+
+
+
+ yarn.resourcemanager.bind-host
+ 0.0.0.0
+
+
+
+ yarn.resourcemanager.cluster-id
+ yarn-cluster
+
+
+
+ yarn.resourcemanager.connect.max-wait.ms
+ 900000
+
+
+
+ yarn.resourcemanager.connect.retry-interval.ms
+ 30000
+
+
+
+ yarn.resourcemanager.display.per-user-apps
+ true
+
+
+
+ yarn.resourcemanager.fs.state-store.retry-policy-spec
+ 2000, 500
+
+
+
+ yarn.resourcemanager.fs.state-store.uri
+
+
+
+
+ yarn.resourcemanager.ha.automatic-failover.zk-base-path
+ /yarn-leader-election
+
+
+
+ yarn.resourcemanager.ha.enabled
+ true
+
+
+
+ yarn.resourcemanager.ha.rm-ids
+ rm1,rm2
+
+
+
+ yarn.resourcemanager.hostname
+ b1m2.hdp.dc
+
+
+
+ yarn.resourcemanager.hostname.rm1
+ b1m2.hdp.dc
+
+
+
+ yarn.resourcemanager.hostname.rm2
+ b1m3.hdp.dc
+
+
+
+ yarn.resourcemanager.keytab
+ /etc/security/keytabs/rm.service.keytab
+
+
+
+ yarn.resourcemanager.monitor.capacity.preemption.intra-queue-preemption.enabled
+ true
+
+
+
+ yarn.resourcemanager.monitor.capacity.preemption.monitoring_interval
+ 15000
+
+
+
+ yarn.resourcemanager.monitor.capacity.preemption.natural_termination_factor
+ 1
+
+
+
+ yarn.resourcemanager.monitor.capacity.preemption.total_preemption_per_round
+ 0.1
+
+
+
+ yarn.resourcemanager.nodes.exclude-path
+ /etc/hadoop/conf/yarn.exclude
+
+
+
+ yarn.resourcemanager.placement-constraints.handler
+ scheduler
+
+
+
+ yarn.resourcemanager.principal
+ rm/_HOST@ECLD.COM
+
+
+
+ yarn.resourcemanager.proxy-user-privileges.enabled
+ true
+
+
+
+ yarn.resourcemanager.proxyuser.*.groups
+
+
+
+
+ yarn.resourcemanager.proxyuser.*.hosts
+
+
+
+
+ yarn.resourcemanager.proxyuser.*.users
+
+
+
+
+ yarn.resourcemanager.recovery.enabled
+ true
+
+
+
+ yarn.resourcemanager.resource-tracker.address
+ b1m2.hdp.dc:8025
+
+
+
+ yarn.resourcemanager.resource-tracker.address.rm1
+ b1m2.hdp.dc:8025
+
+
+
+ yarn.resourcemanager.resource-tracker.address.rm2
+ b1m3.hdp.dc:8025
+
+
+
+ yarn.resourcemanager.scheduler.address
+ b1m2.hdp.dc:8030
+
+
+
+ yarn.resourcemanager.scheduler.class
+ org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler
+
+
+
+ yarn.resourcemanager.scheduler.monitor.enable
+ true
+
+
+
+ yarn.resourcemanager.state-store.max-completed-applications
+ ${yarn.resourcemanager.max-completed-applications}
+
+
+
+ yarn.resourcemanager.store.class
+ org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore
+
+
+
+ yarn.resourcemanager.system-metrics-publisher.dispatcher.pool-size
+ 10
+
+
+
+ yarn.resourcemanager.system-metrics-publisher.enabled
+ true
+
+
+
+ yarn.resourcemanager.webapp.address
+ b1m2.hdp.dc:8088
+
+
+
+ yarn.resourcemanager.webapp.address.rm1
+ b1m2.hdp.dc:8088
+
+
+
+ yarn.resourcemanager.webapp.address.rm2
+ b1m3.hdp.dc:8088
+
+
+
+ yarn.resourcemanager.webapp.cross-origin.enabled
+ true
+
+
+
+ yarn.resourcemanager.webapp.delegation-token-auth-filter.enabled
+ false
+
+
+
+ yarn.resourcemanager.webapp.https.address
+ b1m2.hdp.dc:8090
+
+
+
+ yarn.resourcemanager.webapp.https.address.rm1
+ b1m2.hdp.dc:8090
+
+
+
+ yarn.resourcemanager.webapp.https.address.rm2
+ b1m3.hdp.dc:8090
+
+
+
+ yarn.resourcemanager.webapp.spnego-keytab-file
+ /etc/security/keytabs/spnego.service.keytab
+
+
+
+ yarn.resourcemanager.webapp.spnego-principal
+ HTTP/_HOST@ECLD.COM
+
+
+
+ yarn.resourcemanager.work-preserving-recovery.enabled
+ true
+
+
+
+ yarn.resourcemanager.work-preserving-recovery.scheduling-wait-ms
+ 10000
+
+
+
+ yarn.resourcemanager.zk-acl
+ sasl:rm:rwcda
+
+
+
+ yarn.resourcemanager.zk-address
+ b1m2.hdp.dc:2181,b1m3.hdp.dc:2181,b1m4.hdp.dc:2181,b1m5.hdp.dc:2181,b1m6.hdp.dc:2181
+
+
+
+ yarn.resourcemanager.zk-num-retries
+ 1000
+
+
+
+ yarn.resourcemanager.zk-retry-interval-ms
+ 1000
+
+
+
+ yarn.resourcemanager.zk-state-store.parent-path
+ /rmstore
+
+
+
+ yarn.resourcemanager.zk-timeout-ms
+ 10000
+
+
+
+ yarn.rm.system-metricspublisher.emit-container-events
+ true
+
+
+
+ yarn.scheduler.capacity.ordering-policy.priority-utilization.underutilized-preemption.enabled
+ true
+
+
+
+ yarn.scheduler.maximum-allocation-mb
+ 40960
+
+
+
+ yarn.scheduler.maximum-allocation-vcores
+ 51
+
+
+
+ yarn.scheduler.minimum-allocation-mb
+ 1024
+
+
+
+ yarn.scheduler.minimum-allocation-vcores
+ 1
+
+
+
+ yarn.service.system-service.dir
+ /services
+
+
+
+ yarn.system-metricspublisher.enabled
+ true
+
+
+
+ yarn.timeline-service.address
+ b1m4.hdp.dc:10200
+
+
+
+ yarn.timeline-service.bind-host
+ 0.0.0.0
+
+
+
+ yarn.timeline-service.client.max-retries
+ 30
+
+
+
+ yarn.timeline-service.client.retry-interval-ms
+ 1000
+
+
+
+ yarn.timeline-service.enabled
+ false
+
+
+
+ yarn.timeline-service.entity-group-fs-store.active-dir
+ /ats/active/
+
+
+
+ yarn.timeline-service.entity-group-fs-store.app-cache-size
+ 10
+
+
+
+ yarn.timeline-service.entity-group-fs-store.cleaner-interval-seconds
+ 3600
+
+
+
+ yarn.timeline-service.entity-group-fs-store.done-dir
+ /ats/done/
+
+
+
+ yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes
+ org.apache.hadoop.yarn.applications.distributedshell.DistributedShellTimelinePlugin
+
+
+
+ yarn.timeline-service.entity-group-fs-store.group-id-plugin-classpath
+
+
+
+
+ yarn.timeline-service.entity-group-fs-store.retain-seconds
+ 604800
+
+
+
+ yarn.timeline-service.entity-group-fs-store.scan-interval-seconds
+ 60
+
+
+
+ yarn.timeline-service.entity-group-fs-store.summary-store
+ org.apache.hadoop.yarn.server.timeline.RollingLevelDBTimelineStore
+
+
+
+ yarn.timeline-service.generic-application-history.save-non-am-container-meta-info
+ false
+
+
+
+ yarn.timeline-service.generic-application-history.store-class
+ org.apache.hadoop.yarn.server.applicationhistoryservice.NullApplicationHistoryStore
+
+
+
+ yarn.timeline-service.hbase-schema.prefix
+ prod.
+
+
+
+ yarn.timeline-service.hbase.configuration.file
+ file:///etc/hadoop/conf/embedded-yarn-ats-hbase/hbase-site.xml
+
+
+
+ yarn.timeline-service.hbase.coprocessor.jar.hdfs.location
+ file:///usr/lib/edp/hadoop-3.2.2/lib/hadoop-yarn-server-timelineservice-3.2.2.jar
+
+
+
+ yarn.timeline-service.http-authentication.cookie.domain
+
+
+
+
+ yarn.timeline-service.http-authentication.cookie.path
+
+
+
+
+ yarn.timeline-service.http-authentication.kerberos.keytab
+ /etc/security/keytabs/spnego.service.keytab
+
+
+
+ yarn.timeline-service.http-authentication.kerberos.name.rules
+
+
+
+
+ yarn.timeline-service.http-authentication.kerberos.principal
+ HTTP/_HOST@ECLD.COM
+
+
+
+ yarn.timeline-service.http-authentication.proxyuser.*.groups
+
+
+
+
+ yarn.timeline-service.http-authentication.proxyuser.*.hosts
+
+
+
+
+ yarn.timeline-service.http-authentication.proxyuser.*.users
+
+
+
+
+ yarn.timeline-service.http-authentication.signature.secret
+
+
+
+
+ yarn.timeline-service.http-authentication.signature.secret.file
+
+
+
+
+ yarn.timeline-service.http-authentication.signer.secret.provider
+
+
+
+
+ yarn.timeline-service.http-authentication.signer.secret.provider.object
+
+
+
+
+ yarn.timeline-service.http-authentication.simple.anonymous.allowed
+ true
+
+
+
+ yarn.timeline-service.http-authentication.token.validity
+
+
+
+
+ yarn.timeline-service.http-authentication.type
+ simple
+
+
+
+ yarn.timeline-service.http-cross-origin.enabled
+ true
+
+
+
+ yarn.timeline-service.keytab
+ /etc/security/keytabs/yarn.service.keytab
+
+
+
+ yarn.timeline-service.leveldb-state-store.path
+ /data/hadoop/yarn/timeline
+
+
+
+ yarn.timeline-service.leveldb-timeline-store.path
+ /data/hadoop/yarn/timeline
+
+
+
+ yarn.timeline-service.leveldb-timeline-store.read-cache-size
+ 104857600
+
+
+
+ yarn.timeline-service.leveldb-timeline-store.start-time-read-cache-size
+ 10000
+
+
+
+ yarn.timeline-service.leveldb-timeline-store.start-time-write-cache-size
+ 10000
+
+
+
+ yarn.timeline-service.leveldb-timeline-store.ttl-interval-ms
+ 300000
+
+
+
+ yarn.timeline-service.principal
+ yarn/_HOST@ECLD.COM
+
+
+
+ yarn.timeline-service.reader.webapp.address
+ b1m4.hdp.dc:8198
+
+
+
+ yarn.timeline-service.reader.webapp.https.address
+ b1m4.hdp.dc:8199
+
+
+
+ yarn.timeline-service.recovery.enabled
+ true
+
+
+
+ yarn.timeline-service.state-store-class
+ org.apache.hadoop.yarn.server.timeline.recovery.LeveldbTimelineStateStore
+
+
+
+ yarn.timeline-service.store-class
+ org.apache.hadoop.yarn.server.timeline.EntityGroupFSTimelineStore
+
+
+
+ yarn.timeline-service.ttl-enable
+ true
+
+
+
+ yarn.timeline-service.ttl-ms
+ 2678400000
+
+
+
+ yarn.timeline-service.version
+ 2.0f
+
+
+
+ yarn.timeline-service.versions
+ 1.5f,2.0f
+
+
+
+ yarn.timeline-service.webapp.address
+ b1m4.hdp.dc:8188
+
+
+
+ yarn.timeline-service.webapp.https.address
+ b1m4.hdp.dc:8190
+
+
+
+ yarn.webapp.api-service.enable
+ true
+
+
+
+ yarn.webapp.ui2.enable
+ true
+
+
+
+ yarn.resourcemanager.max-completed-applications
+ 10000
+
+
+
diff --git a/config/b1e11/core-site.xml b/config/b1e11/core-site.xml
new file mode 100644
index 0000000..9d72427
--- /dev/null
+++ b/config/b1e11/core-site.xml
@@ -0,0 +1,262 @@
+
+
+
+
+
+ fs.azure.user.agent.prefix
+ User-Agent: APN/1.0 Hortonworks/1.0 HDP/
+
+
+
+ fs.defaultFS
+ hdfs://b1
+ true
+
+
+
+ fs.s3a.fast.upload
+ true
+
+
+
+ fs.s3a.fast.upload.buffer
+ disk
+
+
+
+ fs.s3a.multipart.size
+ 67108864
+
+
+
+ fs.trash.interval
+ 360
+
+
+
+ ha.failover-controller.active-standby-elector.zk.op.retries
+ 120
+
+
+
+ ha.zookeeper.acl
+ sasl:nn:rwcda
+
+
+
+ ha.zookeeper.quorum
+ b1m2.hdp.dc:2181,b1m3.hdp.dc:2181,b1m4.hdp.dc:2181,b1m5.hdp.dc:2181,b1m6.hdp.dc:2181
+
+
+
+ hadoop.http.authentication.kerberos.keytab
+ /etc/security/keytabs/spnego.service.keytab
+
+
+
+ hadoop.http.authentication.kerberos.principal
+ HTTP/_HOST@ECLD.COM
+
+
+
+ hadoop.http.authentication.signature.secret.file
+ /etc/security/http_secret
+
+
+
+ hadoop.http.authentication.simple.anonymous.allowed
+ true
+
+
+
+ hadoop.http.authentication.type
+ simple
+
+
+
+ hadoop.http.cross-origin.allowed-headers
+ X-Requested-With,Content-Type,Accept,Origin,WWW-Authenticate,Accept-Encoding,Transfer-Encoding
+
+
+
+ hadoop.http.cross-origin.allowed-methods
+ GET,PUT,POST,OPTIONS,HEAD,DELETE
+
+
+
+ hadoop.http.cross-origin.allowed-origins
+ *
+
+
+
+ hadoop.http.cross-origin.max-age
+ 1800
+
+
+
+ hadoop.http.filter.initializers
+ org.apache.hadoop.security.AuthenticationFilterInitializer,org.apache.hadoop.security.HttpCrossOriginFilterInitializer
+
+
+
+ hadoop.proxyuser.hdfs.groups
+ *
+
+
+
+ hadoop.proxyuser.hdfs.hosts
+ *
+
+
+
+ hadoop.proxyuser.hive.groups
+ *
+
+
+
+ hadoop.proxyuser.hive.hosts
+ *
+
+
+
+ hadoop.proxyuser.HTTP.groups
+ *
+
+
+
+ hadoop.proxyuser.HTTP.hosts
+ *
+
+
+
+ hadoop.proxyuser.iap.groups
+ *
+
+
+
+ hadoop.proxyuser.iap.hosts
+ *
+
+
+
+ hadoop.proxyuser.livy.groups
+ *
+
+
+
+ hadoop.proxyuser.livy.hosts
+ *
+
+
+
+ hadoop.proxyuser.yarn.groups
+ *
+
+
+
+ hadoop.proxyuser.yarn.hosts
+ *
+
+
+
+ hadoop.rpc.protection
+ authentication,privacy
+
+
+
+ hadoop.security.auth_to_local
+ RULE:[1:$1@$0](hbase-b1@ECLD.COM)s/.*/hbase/
+RULE:[1:$1@$0](hdfs-b1@ECLD.COM)s/.*/hdfs/
+RULE:[1:$1@$0](spark-b1@ECLD.COM)s/.*/spark/
+RULE:[1:$1@$0](yarn-ats-b1@ECLD.COM)s/.*/yarn-ats/
+RULE:[1:$1@$0](.*@ECLD.COM)s/@.*//
+RULE:[2:$1@$0](dn@ECLD.COM)s/.*/hdfs/
+RULE:[2:$1@$0](hbase@ECLD.COM)s/.*/hbase/
+RULE:[2:$1@$0](hive@ECLD.COM)s/.*/hive/
+RULE:[2:$1@$0](jhs@ECLD.COM)s/.*/mapred/
+RULE:[2:$1@$0](jn@ECLD.COM)s/.*/hdfs/
+RULE:[2:$1@$0](livy@ECLD.COM)s/.*/livy/
+RULE:[2:$1@$0](nm@ECLD.COM)s/.*/yarn/
+RULE:[2:$1@$0](nn@ECLD.COM)s/.*/hdfs/
+RULE:[2:$1@$0](rangeradmin@ECLD.COM)s/.*/ranger/
+RULE:[2:$1@$0](rangerlookup@ECLD.COM)s/.*/ranger/
+RULE:[2:$1@$0](rangertagsync@ECLD.COM)s/.*/rangertagsync/
+RULE:[2:$1@$0](rangerusersync@ECLD.COM)s/.*/rangerusersync/
+RULE:[2:$1@$0](rm@ECLD.COM)s/.*/yarn/
+RULE:[2:$1@$0](spark@ECLD.COM)s/.*/spark/
+RULE:[2:$1@$0](yarn@ECLD.COM)s/.*/yarn/
+RULE:[2:$1@$0](yarn-ats-hbase@ECLD.COM)s/.*/yarn-ats/
+DEFAULT
+
+
+
+ hadoop.security.authentication
+ kerberos
+
+
+
+ hadoop.security.authorization
+ true
+
+
+
+ hadoop.security.instrumentation.requires.admin
+ false
+
+
+
+ io.compression.codec.lzo.class
+ com.hadoop.compression.lzo.LzoCodec
+
+
+
+ io.compression.codecs
+ org.apache.hadoop.io.compress.GzipCodec,com.hadoop.compression.lzo.LzoCodec,com.hadoop.compression.lzo.LzopCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec
+
+
+
+ io.file.buffer.size
+ 131072
+
+
+
+ io.serializations
+ org.apache.hadoop.io.serializer.WritableSerialization
+
+
+
+ ipc.client.connect.max.retries
+ 50
+
+
+
+ ipc.client.connection.maxidletime
+ 30000
+
+
+
+ ipc.client.idlethreshold
+ 8000
+
+
+
+ ipc.server.tcpnodelay
+ true
+
+
+
+ mapreduce.jobtracker.webinterface.trusted
+ false
+
+
+
+ ipc.client.fallback-to-simple-auth-allowed
+ true
+
+
+
+ fs.hdfs.impl.disable.cache
+ true
+
+
+
diff --git a/config/b1e11/hdfs-site.xml b/config/b1e11/hdfs-site.xml
new file mode 100644
index 0000000..9b4eda4
--- /dev/null
+++ b/config/b1e11/hdfs-site.xml
@@ -0,0 +1,698 @@
+
+
+
+ dfs.block.access.token.enable
+ true
+
+
+
+ dfs.blockreport.initialDelay
+ 120
+
+
+
+ dfs.blocksize
+ 134217728
+
+
+
+ dfs.client.failover.proxy.provider.b1
+ org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
+
+
+
+ dfs.client.failover.proxy.provider.b2
+ org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
+
+
+
+ dfs.client.read.shortcircuit
+ true
+
+
+
+ dfs.client.read.shortcircuit.streams.cache.size
+ 4096
+
+
+
+ dfs.client.retry.policy.enabled
+ false
+
+
+
+ dfs.cluster.administrators
+ hdfs
+
+
+
+ dfs.content-summary.limit
+ 5000
+
+
+
+ dfs.data.transfer.protection
+ authentication,privacy
+
+
+
+ dfs.datanode.address
+ 0.0.0.0:1019
+
+
+
+ dfs.datanode.balance.bandwidthPerSec
+ 6250000
+
+
+
+ dfs.datanode.data.dir
+ [DISK]file:///data1/hadoop/hdfs/data,[DISK]file:///data2/hadoop/hdfs/data,[DISK]file:///data3/hadoop/hdfs/data,[DISK]file:///data4/hadoop/hdfs/data,[DISK]file:///data5/hadoop/hdfs/data,[DISK]file:///data6/hadoop/hdfs/data,[DISK]file:///data7/hadoop/hdfs/data,[DISK]file:///data8/hadoop/hdfs/data,[DISK]file:///data9/hadoop/hdfs/data,[DISK]file:///data10/hadoop/hdfs/data,[DISK]file:///data11/hadoop/hdfs/data,[DISK]file:///data12/hadoop/hdfs/data
+ true
+
+
+
+ dfs.datanode.data.dir.perm
+ 750
+
+
+
+ dfs.datanode.du.reserved
+ 26405499904
+
+
+
+ dfs.datanode.failed.volumes.tolerated
+ 2
+ true
+
+
+
+ dfs.datanode.http.address
+ 0.0.0.0:1022
+
+
+
+ dfs.datanode.https.address
+ 0.0.0.0:50475
+
+
+
+ dfs.datanode.ipc.address
+ 0.0.0.0:8010
+
+
+
+ dfs.datanode.kerberos.principal
+ dn/_HOST@ECLD.COM
+
+
+
+ dfs.datanode.keytab.file
+ /etc/security/keytabs/dn.service.keytab
+
+
+
+ dfs.datanode.max.transfer.threads
+ 16384
+
+
+
+ dfs.domain.socket.path
+ /var/lib/hadoop-hdfs/dn_socket
+
+
+
+ dfs.encrypt.data.transfer.cipher.suites
+ AES/CTR/NoPadding
+
+
+
+ dfs.ha.automatic-failover.enabled
+ true
+
+
+
+ dfs.ha.fencing.methods
+ shell(/bin/true)
+
+
+
+ dfs.ha.namenodes.b1
+ nn1,nn2
+
+
+
+ dfs.ha.namenodes.b2
+ nn3,nn4
+
+
+
+ dfs.heartbeat.interval
+ 3
+
+
+
+ dfs.hosts.exclude
+ /etc/hadoop/conf/dfs.exclude
+
+
+
+ dfs.http.policy
+ HTTP_ONLY
+
+
+
+ dfs.https.port
+ 50470
+
+
+
+ dfs.internal.nameservices
+ b1,b2
+
+
+
+ dfs.journalnode.edits.dir.b1
+ /data2/hadoop/hdfs/journal
+
+
+
+ dfs.journalnode.edits.dir.b2
+ /data2/hadoop/hdfs/journal
+
+
+
+ dfs.journalnode.http-address
+ 0.0.0.0:8480
+
+
+
+ dfs.journalnode.https-address
+ 0.0.0.0:8481
+
+
+
+ dfs.journalnode.kerberos.internal.spnego.principal
+ HTTP/_HOST@ECLD.COM
+
+
+
+ dfs.journalnode.kerberos.principal
+ jn/_HOST@ECLD.COM
+
+
+
+ dfs.journalnode.keytab.file
+ /etc/security/keytabs/jn.service.keytab
+
+
+
+ dfs.namenode.accesstime.precision
+ 0
+
+
+
+ dfs.namenode.acls.enabled
+ true
+
+
+
+ dfs.namenode.audit.log.async
+ true
+
+
+
+ dfs.namenode.avoid.read.stale.datanode
+ true
+
+
+
+ dfs.namenode.avoid.write.stale.datanode
+ true
+
+
+
+ dfs.namenode.checkpoint.dir
+ /data/hadoop/hdfs/namesecondary
+
+
+
+ dfs.namenode.checkpoint.edits.dir
+ ${dfs.namenode.checkpoint.dir}
+
+
+
+ dfs.namenode.checkpoint.period
+ 21600
+
+
+
+ dfs.namenode.checkpoint.txns
+ 1000000
+
+
+
+ dfs.namenode.fslock.fair
+ false
+
+
+
+ dfs.namenode.handler.count
+ 200
+
+
+
+ dfs.namenode.http-address.b1.nn1
+ b1m2.hdp.dc:50070
+
+
+
+ dfs.namenode.http-address.b1.nn2
+ b1m3.hdp.dc:50070
+
+
+
+ dfs.namenode.http-address.b2.nn3
+ b1m5.hdp.dc:50070
+
+
+
+ dfs.namenode.http-address.b2.nn4
+ b1m6.hdp.dc:50070
+
+
+
+ dfs.namenode.https-address.b1.nn1
+ b1m2.hdp.dc:50470
+
+
+
+ dfs.namenode.https-address.b1.nn2
+ b1m3.hdp.dc:50470
+
+
+
+ dfs.namenode.https-address.b2.nn3
+ b1m5.hdp.dc:50470
+
+
+
+ dfs.namenode.https-address.b2.nn4
+ b1m6.hdp.dc:50470
+
+
+
+ dfs.namenode.kerberos.internal.spnego.principal
+ HTTP/_HOST@ECLD.COM
+
+
+
+ dfs.namenode.kerberos.principal
+ nn/_HOST@ECLD.COM
+
+
+
+ dfs.namenode.keytab.file
+ /etc/security/keytabs/nn.service.keytab
+
+
+
+ dfs.namenode.max.extra.edits.segments.retained
+ 180
+
+
+
+ dfs.namenode.name.dir
+ /data1/hadoop/hdfs/namenode,/data2/hadoop/hdfs/namenode
+ true
+
+
+
+ dfs.namenode.name.dir.restore
+ true
+
+
+
+ dfs.namenode.num.extra.edits.retained
+ 18000
+
+
+
+ dfs.namenode.rpc-address.b1.nn1
+ b1m2.hdp.dc:8020
+
+
+
+ dfs.namenode.rpc-address.b1.nn2
+ b1m3.hdp.dc:8020
+
+
+
+ dfs.namenode.rpc-address.b2.nn3
+ b1m5.hdp.dc:8020
+
+
+
+ dfs.namenode.rpc-address.b2.nn4
+ b1m6.hdp.dc:8020
+
+
+
+ dfs.namenode.safemode.threshold-pct
+ 0.99
+
+
+
+ dfs.namenode.shared.edits.dir.b1
+ qjournal://b1m2.hdp.dc:8485;b1m3.hdp.dc:8485;b1m5.hdp.dc:8485/b1
+
+
+
+ dfs.namenode.shared.edits.dir.b2
+ qjournal://b1m2.hdp.dc:8485;b1m3.hdp.dc:8485;b1m5.hdp.dc:8485/b2
+
+
+
+ dfs.namenode.stale.datanode.interval
+ 30000
+
+
+
+ dfs.namenode.startup.delay.block.deletion.sec
+ 3600
+
+
+
+ dfs.namenode.write.stale.datanode.ratio
+ 1.0f
+
+
+
+ dfs.nameservices
+ b1,b2,b3,b4,a3,a4,f1,d2,e1
+
+
+
+ dfs.permissions.ContentSummary.subAccess
+ true
+
+
+
+ dfs.permissions.enabled
+ true
+
+
+
+ dfs.permissions.superusergroup
+ hdfs
+
+
+
+ dfs.replication
+ 3
+
+
+
+ dfs.replication.max
+ 50
+
+
+
+ dfs.web.authentication.kerberos.keytab
+ /etc/security/keytabs/spnego.service.keytab
+
+
+
+ dfs.web.authentication.kerberos.principal
+ HTTP/_HOST@ECLD.COM
+
+
+
+ dfs.webhdfs.enabled
+ true
+ true
+
+
+
+ fs.permissions.umask-mode
+ 022
+
+
+
+ hadoop.caller.context.enabled
+ true
+
+
+
+ manage.include.files
+ false
+
+
+
+ nfs.exports.allowed.hosts
+ * rw
+
+
+
+ nfs.file.dump.dir
+ /tmp/.hdfs-nfs
+
+
+
+ dfs.client.datanode-restart.timeout
+ 30
+
+
+
+ dfs.client.failover.proxy.provider.a4
+ org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
+
+
+ dfs.ha.namenodes.a4
+ nn1,nn2
+
+
+ dfs.namenode.http-address.a4.nn1
+ a4m1.hdp.dc:50070
+
+
+ dfs.namenode.http-address.a4.nn2
+ a4m2.hdp.dc:50070
+
+
+ dfs.namenode.https-address.a4.nn1
+ a4m1.hdp.dc:50470
+
+
+ dfs.namenode.https-address.a4.nn2
+ a4m2.hdp.dc:50470
+
+
+ dfs.namenode.rpc-address.a4.nn1
+ a4m1.hdp.dc:8020
+
+
+ dfs.namenode.rpc-address.a4.nn2
+ a4m2.hdp.dc:8020
+
+
+
+ dfs.client.failover.proxy.provider.a3
+ org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
+
+
+ dfs.ha.namenodes.a3
+ nn1,nn2
+
+
+ dfs.namenode.http-address.a3.nn1
+ a3m2.hdp.dc:50070
+
+
+ dfs.namenode.http-address.a3.nn2
+ a3m3.hdp.dc:50070
+
+
+ dfs.namenode.https-address.a3.nn1
+ a3m2.hdp.dc:50470
+
+
+ dfs.namenode.https-address.a3.nn2
+ a3m3.hdp.dc:50470
+
+
+ dfs.namenode.rpc-address.a3.nn1
+ a3m2.hdp.dc:8020
+
+
+ dfs.namenode.rpc-address.a3.nn2
+ a3m3.hdp.dc:8020
+
+
+
+ dfs.client.failover.proxy.provider.b3
+ org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
+
+
+ dfs.ha.namenodes.b3
+ nn1,nn2
+
+
+ dfs.namenode.http-address.b3.nn1
+ b3m2.hdp.dc:50070
+
+
+ dfs.namenode.http-address.b3.nn2
+ b3m3.hdp.dc:50070
+
+
+ dfs.namenode.https-address.b3.nn1
+ b3m2.hdp.dc:50470
+
+
+ dfs.namenode.https-address.b3.nn2
+ b3m3.hdp.dc:50470
+
+
+ dfs.namenode.rpc-address.b3.nn1
+ b3m2.hdp.dc:8020
+
+
+ dfs.namenode.rpc-address.b3.nn2
+ b3m3.hdp.dc:8020
+
+
+
+ dfs.client.failover.proxy.provider.b4
+ org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
+
+
+ dfs.ha.namenodes.b4
+ nn1,nn2
+
+
+ dfs.namenode.http-address.b4.nn1
+ b4m2.hdp.dc:50070
+
+
+ dfs.namenode.http-address.b4.nn2
+ b4m3.hdp.dc:50070
+
+
+ dfs.namenode.https-address.b4.nn1
+ b4m2.hdp.dc:50470
+
+
+ dfs.namenode.https-address.b4.nn2
+ b4m3.hdp.dc:50470
+
+
+ dfs.namenode.rpc-address.b4.nn1
+ b4m2.hdp.dc:8020
+
+
+ dfs.namenode.rpc-address.b4.nn2
+ b4m3.hdp.dc:8020
+
+
+
+ dfs.client.failover.proxy.provider.f1
+ org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
+
+
+ dfs.ha.namenodes.f1
+ nn1,nn2
+
+
+ dfs.namenode.http-address.f1.nn1
+ f1m2.hdp.dc:50070
+
+
+ dfs.namenode.http-address.f1.nn2
+ f1m3.hdp.dc:50070
+
+
+ dfs.namenode.https-address.f1.nn1
+ f1m2.hdp.dc:50470
+
+
+ dfs.namenode.https-address.f1.nn2
+ f1m3.hdp.dc:50470
+
+
+ dfs.namenode.rpc-address.f1.nn1
+ f1m2.hdp.dc:8020
+
+
+ dfs.namenode.rpc-address.f1.nn2
+ f1m3.hdp.dc:8020
+
+
+
+ dfs.client.failover.proxy.provider.d2
+ org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
+
+
+ dfs.ha.namenodes.d2
+ nn1,nn2
+
+
+ dfs.namenode.http-address.d2.nn1
+ d2m2.hdp.dc:50070
+
+
+ dfs.namenode.http-address.d2.nn2
+ d2m3.hdp.dc:50070
+
+
+ dfs.namenode.https-address.d2.nn1
+ d2m2.hdp.dc:50470
+
+
+ dfs.namenode.https-address.d2.nn2
+ d2m3.hdp.dc:50470
+
+
+ dfs.namenode.rpc-address.d2.nn1
+ d2m2.hdp.dc:8020
+
+
+ dfs.namenode.rpc-address.d2.nn2
+ d2m3.hdp.dc:8020
+
+
+
+ dfs.client.failover.proxy.provider.e1
+ org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
+
+
+ dfs.ha.namenodes.e1
+ nn1,nn2
+
+
+ dfs.namenode.http-address.e1.nn1
+ e1m2.hdp.dc:50070
+
+
+ dfs.namenode.http-address.e1.nn2
+ e1m3.hdp.dc:50070
+
+
+ dfs.namenode.https-address.e1.nn1
+ e1m2.hdp.dc:50470
+
+
+ dfs.namenode.https-address.e1.nn2
+ e1m3.hdp.dc:50470
+
+
+ dfs.namenode.rpc-address.e1.nn1
+ e1m2.hdp.dc:8020
+
+
+ dfs.namenode.rpc-address.e1.nn2
+ e1m3.hdp.dc:8020
+
+
+
diff --git a/config/b1e11/yarn-site.xml b/config/b1e11/yarn-site.xml
new file mode 100644
index 0000000..01ee9e5
--- /dev/null
+++ b/config/b1e11/yarn-site.xml
@@ -0,0 +1,1021 @@
+
+
+
+ hadoop.http.cross-origin.allowed-origins
+ regex:.*[.]hdp[.]dc(:\d*)?
+
+
+
+ hadoop.registry.client.auth
+ kerberos
+
+
+
+ hadoop.registry.dns.bind-address
+ 0.0.0.0
+
+
+
+ hadoop.registry.dns.bind-port
+ 5354
+ true
+
+
+
+ hadoop.registry.dns.domain-name
+ ECLD.COM
+
+
+
+ hadoop.registry.dns.enabled
+ true
+
+
+
+ hadoop.registry.dns.zone-mask
+ 255.255.255.0
+
+
+
+ hadoop.registry.dns.zone-subnet
+ 172.17.0.0
+
+
+
+ hadoop.registry.jaas.context
+ Client
+
+
+
+ hadoop.registry.secure
+ true
+
+
+
+ hadoop.registry.system.accounts
+ sasl:yarn,sasl:jhs,sasl:hdfs-b1,sasl:rm,sasl:hive,sasl:spark
+
+
+
+ hadoop.registry.zk.quorum
+ b1m2.hdp.dc:2181,b1m3.hdp.dc:2181,b1m4.hdp.dc:2181,b1m5.hdp.dc:2181,b1m6.hdp.dc:2181
+
+
+
+ manage.include.files
+ false
+
+
+
+ yarn.acl.enable
+ true
+
+
+
+ yarn.admin.acl
+ *
+
+
+
+ yarn.application.classpath
+ $HADOOP_CONF_DIR,/usr/lib/edp/hadoop-3.2.2/share/hadoop/common/*, /usr/lib/edp/hadoop-3.2.2/share/hadoop/common/lib/*, /usr/lib/edp/hadoop-3.2.2/share/hadoop/hdfs/*, /usr/lib/edp/hadoop-3.2.2/share/hadoop/hdfs/lib/*, /usr/lib/edp/hadoop-3.2.2/share/hadoop/yarn/*, /usr/lib/edp/hadoop-3.2.2/share/hadoop/yarn/lib/*
+
+
+
+ yarn.client.nodemanager-connect.max-wait-ms
+ 60000
+
+
+
+ yarn.client.nodemanager-connect.retry-interval-ms
+ 10000
+
+
+
+ yarn.http.policy
+ HTTP_ONLY
+
+
+
+ yarn.log-aggregation-enable
+ true
+
+
+
+ yarn.log-aggregation.retain-seconds
+ 2592000
+
+
+
+ yarn.log.server.url
+ http://b1e1.hdp.dc:19888/jobhistory/logs
+
+
+
+ yarn.log.server.web-service.url
+ http://b1e1.hdp.dc:8188/ws/v1/applicationhistory
+
+
+
+ yarn.node-labels.enabled
+ false
+
+
+
+ yarn.node-labels.fs-store.retry-policy-spec
+ 2000, 500
+
+
+
+ yarn.node-labels.fs-store.root-dir
+ /system/yarn/node-labels
+
+
+
+ yarn.nodemanager.address
+ 0.0.0.0:45454
+
+
+
+ yarn.nodemanager.admin-env
+ MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX
+
+
+
+ yarn.nodemanager.aux-services
+ mapreduce_shuffle,spark_shuffle
+
+
+
+ yarn.nodemanager.aux-services.mapreduce_shuffle.class
+ org.apache.hadoop.mapred.ShuffleHandler
+
+
+
+ yarn.nodemanager.aux-services.spark_shuffle.class
+ org.apache.spark.network.yarn.YarnShuffleService
+
+
+
+ yarn.nodemanager.aux-services.spark_shuffle.classpath
+ /usr/lib/edp/spark-3.1.1-bin-hadoop3.2/yarn/*
+
+
+
+ yarn.nodemanager.aux-services.timeline_collector.class
+ org.apache.hadoop.yarn.server.timelineservice.collector.PerNodeTimelineCollectorsAuxService
+
+
+
+ yarn.nodemanager.aux-services.sparkv2_shuffle.class
+ org.apache.spark.network.yarn.YarnShuffleService
+
+
+
+ yarn.nodemanager.aux-services.sparkv2_shuffle.classpath
+ /usr/lib/edp/spark-2.4.7-bin-hadoop-3.1.2/yarn/*
+
+
+
+ yarn.nodemanager.bind-host
+ 0.0.0.0
+
+
+
+ yarn.nodemanager.container-executor.class
+ org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor
+
+
+
+ yarn.nodemanager.container-metrics.unregister-delay-ms
+ 60000
+
+
+
+ yarn.nodemanager.container-monitor.interval-ms
+ 3000
+
+
+
+ yarn.nodemanager.delete.debug-delay-sec
+ 86400
+
+
+
+ yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage
+ 90
+
+
+
+ yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb
+ 1000
+
+
+
+ yarn.nodemanager.disk-health-checker.min-healthy-disks
+ 0.25
+
+
+
+ yarn.nodemanager.health-checker.interval-ms
+ 135000
+
+
+
+ yarn.nodemanager.health-checker.script.timeout-ms
+ 60000
+
+
+
+ yarn.nodemanager.keytab
+ /etc/security/keytabs/nm.service.keytab
+
+
+
+ yarn.nodemanager.linux-container-executor.cgroups.strict-resource-usage
+ false
+
+
+
+ yarn.nodemanager.linux-container-executor.group
+ hadoop
+
+
+
+ yarn.nodemanager.linux-container-executor.nonsecure-mode.limit-users
+ true
+
+
+
+ yarn.nodemanager.local-dirs
+ /data1/hadoop/yarn/local,/data2/hadoop/yarn/local,/data3/hadoop/yarn/local,/data4/hadoop/yarn/local,/data5/hadoop/yarn/local,/data6/hadoop/yarn/local,/data7/hadoop/yarn/local,/data8/hadoop/yarn/local,/data9/hadoop/yarn/local,/data10/hadoop/yarn/local,/data11/hadoop/yarn/local,/data12/hadoop/yarn/local
+
+
+
+ yarn.nodemanager.log-aggregation.compression-type
+ gz
+
+
+
+ yarn.nodemanager.log-aggregation.debug-enabled
+ false
+
+
+
+ yarn.nodemanager.log-aggregation.num-log-files-per-app
+ 30
+
+
+
+ yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds
+ 3600
+
+
+
+ yarn.nodemanager.log-dirs
+ /data1/hadoop/yarn/log,/data2/hadoop/yarn/log,/data3/hadoop/yarn/log,/data4/hadoop/yarn/log,/data5/hadoop/yarn/log,/data6/hadoop/yarn/log,/data7/hadoop/yarn/log,/data8/hadoop/yarn/log,/data9/hadoop/yarn/log,/data10/hadoop/yarn/log,/data11/hadoop/yarn/log,/data12/hadoop/yarn/log
+
+
+
+ yarn.nodemanager.log.retain-seconds
+ 604800
+
+
+
+ yarn.nodemanager.principal
+ nm/_HOST@ECLD.COM
+
+
+
+ yarn.nodemanager.recovery.dir
+ /var/log/hadoop-yarn/nodemanager/recovery-state
+
+
+
+ yarn.nodemanager.recovery.enabled
+ true
+
+
+
+ yarn.nodemanager.recovery.supervised
+ true
+
+
+
+ yarn.nodemanager.remote-app-log-dir
+ /app-logs
+
+
+
+ yarn.nodemanager.remote-app-log-dir-suffix
+ logs
+
+
+
+ yarn.nodemanager.resource-plugins
+
+
+
+
+ yarn.nodemanager.resource-plugins.gpu.allowed-gpu-devices
+
+
+
+
+ yarn.nodemanager.resource-plugins.gpu.docker-plugin
+
+
+
+
+ yarn.nodemanager.resource-plugins.gpu.docker-plugin.nvidiadocker-v1.endpoint
+
+
+
+
+ yarn.nodemanager.resource-plugins.gpu.path-to-discovery-executables
+
+
+
+
+ yarn.nodemanager.resource.cpu-vcores
+ 39
+
+
+
+ yarn.nodemanager.resource.memory-mb
+ 102400
+
+
+
+ yarn.nodemanager.resource.percentage-physical-cpu-limit
+ 80
+
+
+
+ yarn.nodemanager.resourcemanager.connect.wait.secs
+ 1800
+
+
+
+ yarn.nodemanager.runtime.linux.allowed-runtimes
+ default,docker
+
+
+
+ yarn.nodemanager.runtime.linux.docker.allowed-container-networks
+ host,none,bridge
+
+
+
+ yarn.nodemanager.runtime.linux.docker.capabilities
+
+ CHOWN,DAC_OVERRIDE,FSETID,FOWNER,MKNOD,NET_RAW,SETGID,SETUID,SETFCAP,
+ SETPCAP,NET_BIND_SERVICE,SYS_CHROOT,KILL,AUDIT_WRITE
+
+
+
+ yarn.nodemanager.runtime.linux.docker.default-container-network
+ host
+
+
+
+ yarn.nodemanager.runtime.linux.docker.privileged-containers.acl
+
+
+
+
+ yarn.nodemanager.runtime.linux.docker.privileged-containers.allowed
+ false
+
+
+
+ yarn.nodemanager.vmem-check-enabled
+ false
+
+
+
+ yarn.nodemanager.vmem-pmem-ratio
+ 2.1
+
+
+
+ yarn.nodemanager.webapp.cross-origin.enabled
+ true
+
+
+
+ yarn.nodemanager.webapp.spnego-keytab-file
+ /etc/security/keytabs/spnego.service.keytab
+
+
+
+ yarn.nodemanager.webapp.spnego-principal
+ HTTP/_HOST@ECLD.COM
+
+
+
+ yarn.resourcemanager.address
+ b1e2.hdp.dc:8050
+
+
+
+ yarn.resourcemanager.admin.address
+ b1e2.hdp.dc:8141
+
+
+
+ yarn.resourcemanager.am.max-attempts
+ 2
+
+
+
+ yarn.resourcemanager.bind-host
+ 0.0.0.0
+
+
+
+ yarn.resourcemanager.cluster-id
+ yarn-cluster
+
+
+
+ yarn.resourcemanager.connect.max-wait.ms
+ 900000
+
+
+
+ yarn.resourcemanager.connect.retry-interval.ms
+ 30000
+
+
+
+ yarn.resourcemanager.display.per-user-apps
+ true
+
+
+
+ yarn.resourcemanager.fs.state-store.retry-policy-spec
+ 2000, 500
+
+
+
+ yarn.resourcemanager.fs.state-store.uri
+
+
+
+
+ yarn.resourcemanager.ha.automatic-failover.zk-base-path
+ /yarn-leader-election-b1e
+
+
+
+ yarn.resourcemanager.ha.enabled
+ true
+
+
+
+ yarn.resourcemanager.ha.rm-ids
+ rm1,rm2
+
+
+
+ yarn.resourcemanager.hostname
+ b1e2.hdp.dc
+
+
+
+ yarn.resourcemanager.hostname.rm1
+ b1e2.hdp.dc
+
+
+
+ yarn.resourcemanager.hostname.rm2
+ b1e3.hdp.dc
+
+
+
+ yarn.resourcemanager.keytab
+ /etc/security/keytabs/rm.service.keytab
+
+
+
+ yarn.resourcemanager.monitor.capacity.preemption.intra-queue-preemption.enabled
+ true
+
+
+
+ yarn.resourcemanager.monitor.capacity.preemption.monitoring_interval
+ 15000
+
+
+
+ yarn.resourcemanager.monitor.capacity.preemption.natural_termination_factor
+ 1
+
+
+
+ yarn.resourcemanager.monitor.capacity.preemption.total_preemption_per_round
+ 0.1
+
+
+
+ yarn.resourcemanager.nodes.exclude-path
+ /etc/hadoop/conf/yarn.exclude
+
+
+
+ yarn.resourcemanager.placement-constraints.handler
+ scheduler
+
+
+
+ yarn.resourcemanager.principal
+ rm/_HOST@ECLD.COM
+
+
+
+ yarn.resourcemanager.proxy-user-privileges.enabled
+ true
+
+
+
+ yarn.resourcemanager.proxyuser.*.groups
+
+
+
+
+ yarn.resourcemanager.proxyuser.*.hosts
+
+
+
+
+ yarn.resourcemanager.proxyuser.*.users
+
+
+
+
+ yarn.resourcemanager.recovery.enabled
+ true
+
+
+
+ yarn.resourcemanager.resource-tracker.address
+ b1e2.hdp.dc:8025
+
+
+
+ yarn.resourcemanager.resource-tracker.address.rm1
+ b1e2.hdp.dc:8025
+
+
+
+ yarn.resourcemanager.resource-tracker.address.rm2
+ b1e3.hdp.dc:8025
+
+
+
+ yarn.resourcemanager.scheduler.address
+ b1e2.hdp.dc:8030
+
+
+
+ yarn.resourcemanager.scheduler.class
+ org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler
+
+
+
+ yarn.resourcemanager.scheduler.monitor.enable
+ true
+
+
+
+ yarn.resourcemanager.state-store.max-completed-applications
+ ${yarn.resourcemanager.max-completed-applications}
+
+
+
+ yarn.resourcemanager.store.class
+ org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore
+
+
+
+ yarn.resourcemanager.system-metrics-publisher.dispatcher.pool-size
+ 10
+
+
+
+ yarn.resourcemanager.system-metrics-publisher.enabled
+ true
+
+
+
+ yarn.resourcemanager.webapp.address
+ b1e2.hdp.dc:8088
+
+
+
+ yarn.resourcemanager.webapp.address.rm1
+ b1e2.hdp.dc:8088
+
+
+
+ yarn.resourcemanager.webapp.address.rm2
+ b1e3.hdp.dc:8088
+
+
+
+ yarn.resourcemanager.webapp.cross-origin.enabled
+ true
+
+
+
+ yarn.resourcemanager.webapp.delegation-token-auth-filter.enabled
+ false
+
+
+
+ yarn.resourcemanager.webapp.https.address
+ b1e2.hdp.dc:8090
+
+
+
+ yarn.resourcemanager.webapp.https.address.rm1
+ b1e2.hdp.dc:8090
+
+
+
+ yarn.resourcemanager.webapp.https.address.rm2
+ b1e3.hdp.dc:8090
+
+
+
+ yarn.resourcemanager.webapp.spnego-keytab-file
+ /etc/security/keytabs/spnego.service.keytab
+
+
+
+ yarn.resourcemanager.webapp.spnego-principal
+ HTTP/_HOST@ECLD.COM
+
+
+
+ yarn.resourcemanager.work-preserving-recovery.enabled
+ true
+
+
+
+ yarn.resourcemanager.work-preserving-recovery.scheduling-wait-ms
+ 10000
+
+
+
+ yarn.resourcemanager.zk-acl
+ sasl:rm:rwcda
+
+
+
+ yarn.resourcemanager.zk-address
+ b1m2.hdp.dc:2181,b1m3.hdp.dc:2181,b1m4.hdp.dc:2181,b1m5.hdp.dc:2181,b1m6.hdp.dc:2181
+
+
+
+ yarn.resourcemanager.zk-num-retries
+ 1000
+
+
+
+ yarn.resourcemanager.zk-retry-interval-ms
+ 1000
+
+
+
+ yarn.resourcemanager.zk-state-store.parent-path
+ /rmstore-b1e
+
+
+
+ yarn.resourcemanager.zk-timeout-ms
+ 10000
+
+
+
+ yarn.rm.system-metricspublisher.emit-container-events
+ true
+
+
+
+ yarn.scheduler.capacity.ordering-policy.priority-utilization.underutilized-preemption.enabled
+ true
+
+
+
+ yarn.scheduler.maximum-allocation-mb
+ 2048000
+
+
+
+ yarn.scheduler.maximum-allocation-vcores
+ 39
+
+
+
+ yarn.scheduler.minimum-allocation-mb
+ 128
+
+
+
+ yarn.scheduler.minimum-allocation-vcores
+ 1
+
+
+
+ yarn.service.system-service.dir
+ /services
+
+
+
+ yarn.system-metricspublisher.enabled
+ true
+
+
+
+ yarn.timeline-service.address
+ b1e1.hdp.dc:10200
+
+
+
+ yarn.timeline-service.bind-host
+ 0.0.0.0
+
+
+
+ yarn.timeline-service.client.max-retries
+ 30
+
+
+
+ yarn.timeline-service.client.retry-interval-ms
+ 1000
+
+
+
+ yarn.timeline-service.enabled
+ false
+
+
+
+ yarn.timeline-service.entity-group-fs-store.active-dir
+ /ats/active/
+
+
+
+ yarn.timeline-service.entity-group-fs-store.app-cache-size
+ 10
+
+
+
+ yarn.timeline-service.entity-group-fs-store.cleaner-interval-seconds
+ 3600
+
+
+
+ yarn.timeline-service.entity-group-fs-store.done-dir
+ /ats/done/
+
+
+
+ yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes
+ org.apache.hadoop.yarn.applications.distributedshell.DistributedShellTimelinePlugin
+
+
+
+ yarn.timeline-service.entity-group-fs-store.group-id-plugin-classpath
+
+
+
+
+ yarn.timeline-service.entity-group-fs-store.retain-seconds
+ 604800
+
+
+
+ yarn.timeline-service.entity-group-fs-store.scan-interval-seconds
+ 60
+
+
+
+ yarn.timeline-service.entity-group-fs-store.summary-store
+ org.apache.hadoop.yarn.server.timeline.RollingLevelDBTimelineStore
+
+
+
+ yarn.timeline-service.generic-application-history.save-non-am-container-meta-info
+ false
+
+
+
+ yarn.timeline-service.generic-application-history.store-class
+ org.apache.hadoop.yarn.server.applicationhistoryservice.NullApplicationHistoryStore
+
+
+
+ yarn.timeline-service.hbase-schema.prefix
+ prod.
+
+
+
+ yarn.timeline-service.hbase.configuration.file
+ file:///etc/hadoop/conf/embedded-yarn-ats-hbase/hbase-site.xml
+
+
+
+ yarn.timeline-service.hbase.coprocessor.jar.hdfs.location
+ file:///usr/lib/edp/hadoop-3.2.2/lib/hadoop-yarn-server-timelineservice-3.2.2.jar
+
+
+
+ yarn.timeline-service.http-authentication.cookie.domain
+
+
+
+
+ yarn.timeline-service.http-authentication.cookie.path
+
+
+
+
+ yarn.timeline-service.http-authentication.kerberos.keytab
+ /etc/security/keytabs/spnego.service.keytab
+
+
+
+ yarn.timeline-service.http-authentication.kerberos.name.rules
+
+
+
+
+ yarn.timeline-service.http-authentication.kerberos.principal
+ HTTP/_HOST@ECLD.COM
+
+
+
+ yarn.timeline-service.http-authentication.proxyuser.*.groups
+
+
+
+
+ yarn.timeline-service.http-authentication.proxyuser.*.hosts
+
+
+
+
+ yarn.timeline-service.http-authentication.proxyuser.*.users
+
+
+
+
+ yarn.timeline-service.http-authentication.signature.secret
+
+
+
+
+ yarn.timeline-service.http-authentication.signature.secret.file
+
+
+
+
+ yarn.timeline-service.http-authentication.signer.secret.provider
+
+
+
+
+ yarn.timeline-service.http-authentication.signer.secret.provider.object
+
+
+
+
+ yarn.timeline-service.http-authentication.simple.anonymous.allowed
+ true
+
+
+
+ yarn.timeline-service.http-authentication.token.validity
+
+
+
+
+ yarn.timeline-service.http-authentication.type
+ simple
+
+
+
+ yarn.timeline-service.http-cross-origin.enabled
+ true
+
+
+
+ yarn.timeline-service.keytab
+ /etc/security/keytabs/yarn.service.keytab
+
+
+
+ yarn.timeline-service.leveldb-state-store.path
+ /data/hadoop/yarn/timeline
+
+
+
+ yarn.timeline-service.leveldb-timeline-store.path
+ /data/hadoop/yarn/timeline
+
+
+
+ yarn.timeline-service.leveldb-timeline-store.read-cache-size
+ 104857600
+
+
+
+ yarn.timeline-service.leveldb-timeline-store.start-time-read-cache-size
+ 10000
+
+
+
+ yarn.timeline-service.leveldb-timeline-store.start-time-write-cache-size
+ 10000
+
+
+
+ yarn.timeline-service.leveldb-timeline-store.ttl-interval-ms
+ 300000
+
+
+
+ yarn.timeline-service.principal
+ yarn/_HOST@ECLD.COM
+
+
+
+ yarn.timeline-service.reader.webapp.address
+ b1e1.hdp.dc:8198
+
+
+
+ yarn.timeline-service.reader.webapp.https.address
+ b1e1.hdp.dc:8199
+
+
+
+ yarn.timeline-service.recovery.enabled
+ true
+
+
+
+ yarn.timeline-service.state-store-class
+ org.apache.hadoop.yarn.server.timeline.recovery.LeveldbTimelineStateStore
+
+
+
+ yarn.timeline-service.store-class
+ org.apache.hadoop.yarn.server.timeline.EntityGroupFSTimelineStore
+
+
+
+ yarn.timeline-service.ttl-enable
+ true
+
+
+
+ yarn.timeline-service.ttl-ms
+ 2678400000
+
+
+
+ yarn.timeline-service.version
+ 2.0f
+
+
+
+ yarn.timeline-service.versions
+ 1.5f,2.0f
+
+
+
+ yarn.timeline-service.webapp.address
+ b1e1.hdp.dc:8188
+
+
+
+ yarn.timeline-service.webapp.https.address
+ b1e1.hdp.dc:8190
+
+
+
+ yarn.webapp.api-service.enable
+ true
+
+
+
+ yarn.webapp.ui2.enable
+ true
+
+
+
diff --git a/config/b2e1/core-site.xml b/config/b2e1/core-site.xml
new file mode 100644
index 0000000..dbdb62b
--- /dev/null
+++ b/config/b2e1/core-site.xml
@@ -0,0 +1,267 @@
+
+
+
+
+
+ fs.azure.user.agent.prefix
+ User-Agent: APN/1.0 Hortonworks/1.0 HDP/
+
+
+
+ fs.defaultFS
+ hdfs://b2
+ true
+
+
+
+ fs.s3a.fast.upload
+ true
+
+
+
+ fs.s3a.fast.upload.buffer
+ disk
+
+
+
+ fs.s3a.multipart.size
+ 67108864
+
+
+
+ fs.trash.interval
+ 4320
+
+
+
+ fs.trash.checkpoint.interval
+ 360
+
+
+
+ ha.failover-controller.active-standby-elector.zk.op.retries
+ 120
+
+
+
+ ha.zookeeper.acl
+ sasl:nn:rwcda
+
+
+
+ ha.zookeeper.quorum
+ b1m2.hdp.dc:2181,b1m3.hdp.dc:2181,b1m4.hdp.dc:2181,b1m5.hdp.dc:2181,b1m6.hdp.dc:2181
+
+
+
+ hadoop.http.authentication.kerberos.keytab
+ /etc/security/keytabs/spnego.service.keytab
+
+
+
+ hadoop.http.authentication.kerberos.principal
+ HTTP/_HOST@ECLD.COM
+
+
+
+ hadoop.http.authentication.signature.secret.file
+ /etc/security/http_secret
+
+
+
+ hadoop.http.authentication.simple.anonymous.allowed
+ true
+
+
+
+ hadoop.http.authentication.type
+ simple
+
+
+
+ hadoop.http.cross-origin.allowed-headers
+ X-Requested-With,Content-Type,Accept,Origin,WWW-Authenticate,Accept-Encoding,Transfer-Encoding
+
+
+
+ hadoop.http.cross-origin.allowed-methods
+ GET,PUT,POST,OPTIONS,HEAD,DELETE
+
+
+
+ hadoop.http.cross-origin.allowed-origins
+ *
+
+
+
+ hadoop.http.cross-origin.max-age
+ 1800
+
+
+
+ hadoop.http.filter.initializers
+ org.apache.hadoop.security.AuthenticationFilterInitializer,org.apache.hadoop.security.HttpCrossOriginFilterInitializer
+
+
+
+ hadoop.proxyuser.hdfs.groups
+ *
+
+
+
+ hadoop.proxyuser.hdfs.hosts
+ *
+
+
+
+ hadoop.proxyuser.hive.groups
+ *
+
+
+
+ hadoop.proxyuser.hive.hosts
+ *
+
+
+
+ hadoop.proxyuser.HTTP.groups
+ *
+
+
+
+ hadoop.proxyuser.HTTP.hosts
+ *
+
+
+
+ hadoop.proxyuser.iap.groups
+ *
+
+
+
+ hadoop.proxyuser.iap.hosts
+ *
+
+
+
+ hadoop.proxyuser.livy.groups
+ *
+
+
+
+ hadoop.proxyuser.livy.hosts
+ *
+
+
+
+ hadoop.proxyuser.yarn.groups
+ *
+
+
+
+ hadoop.proxyuser.yarn.hosts
+ *
+
+
+
+ hadoop.rpc.protection
+ authentication,privacy
+
+
+
+ hadoop.security.auth_to_local
+ RULE:[1:$1@$0](hbase-b1@ECLD.COM)s/.*/hbase/
+RULE:[1:$1@$0](hdfs-b1@ECLD.COM)s/.*/hdfs/
+RULE:[1:$1@$0](spark-b1@ECLD.COM)s/.*/spark/
+RULE:[1:$1@$0](yarn-ats-b1@ECLD.COM)s/.*/yarn-ats/
+RULE:[1:$1@$0](.*@ECLD.COM)s/@.*//
+RULE:[2:$1@$0](dn@ECLD.COM)s/.*/hdfs/
+RULE:[2:$1@$0](hbase@ECLD.COM)s/.*/hbase/
+RULE:[2:$1@$0](hive@ECLD.COM)s/.*/hive/
+RULE:[2:$1@$0](jhs@ECLD.COM)s/.*/mapred/
+RULE:[2:$1@$0](jn@ECLD.COM)s/.*/hdfs/
+RULE:[2:$1@$0](livy@ECLD.COM)s/.*/livy/
+RULE:[2:$1@$0](nm@ECLD.COM)s/.*/yarn/
+RULE:[2:$1@$0](nn@ECLD.COM)s/.*/hdfs/
+RULE:[2:$1@$0](rangeradmin@ECLD.COM)s/.*/ranger/
+RULE:[2:$1@$0](rangerlookup@ECLD.COM)s/.*/ranger/
+RULE:[2:$1@$0](rangertagsync@ECLD.COM)s/.*/rangertagsync/
+RULE:[2:$1@$0](rangerusersync@ECLD.COM)s/.*/rangerusersync/
+RULE:[2:$1@$0](rm@ECLD.COM)s/.*/yarn/
+RULE:[2:$1@$0](spark@ECLD.COM)s/.*/spark/
+RULE:[2:$1@$0](yarn@ECLD.COM)s/.*/yarn/
+RULE:[2:$1@$0](yarn-ats-hbase@ECLD.COM)s/.*/yarn-ats/
+DEFAULT
+
+
+
+ hadoop.security.authentication
+ kerberos
+
+
+
+ hadoop.security.authorization
+ true
+
+
+
+ hadoop.security.instrumentation.requires.admin
+ false
+
+
+
+ io.compression.codec.lzo.class
+ com.hadoop.compression.lzo.LzoCodec
+
+
+
+ io.compression.codecs
+ org.apache.hadoop.io.compress.GzipCodec,com.hadoop.compression.lzo.LzoCodec,com.hadoop.compression.lzo.LzopCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec
+
+
+
+ io.file.buffer.size
+ 131072
+
+
+
+ io.serializations
+ org.apache.hadoop.io.serializer.WritableSerialization
+
+
+
+ ipc.client.connect.max.retries
+ 50
+
+
+
+ ipc.client.connection.maxidletime
+ 30000
+
+
+
+ ipc.client.idlethreshold
+ 8000
+
+
+
+ ipc.server.tcpnodelay
+ true
+
+
+
+ mapreduce.jobtracker.webinterface.trusted
+ false
+
+
+
+ ipc.client.fallback-to-simple-auth-allowed
+ true
+
+
+
+ fs.hdfs.impl.disable.cache
+ true
+
+
+
diff --git a/config/b2e1/hdfs-site.xml b/config/b2e1/hdfs-site.xml
new file mode 100644
index 0000000..9b4eda4
--- /dev/null
+++ b/config/b2e1/hdfs-site.xml
@@ -0,0 +1,698 @@
+
+
+
+ dfs.block.access.token.enable
+ true
+
+
+
+ dfs.blockreport.initialDelay
+ 120
+
+
+
+ dfs.blocksize
+ 134217728
+
+
+
+ dfs.client.failover.proxy.provider.b1
+ org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
+
+
+
+ dfs.client.failover.proxy.provider.b2
+ org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
+
+
+
+ dfs.client.read.shortcircuit
+ true
+
+
+
+ dfs.client.read.shortcircuit.streams.cache.size
+ 4096
+
+
+
+ dfs.client.retry.policy.enabled
+ false
+
+
+
+ dfs.cluster.administrators
+ hdfs
+
+
+
+ dfs.content-summary.limit
+ 5000
+
+
+
+ dfs.data.transfer.protection
+ authentication,privacy
+
+
+
+ dfs.datanode.address
+ 0.0.0.0:1019
+
+
+
+ dfs.datanode.balance.bandwidthPerSec
+ 6250000
+
+
+
+ dfs.datanode.data.dir
+ [DISK]file:///data1/hadoop/hdfs/data,[DISK]file:///data2/hadoop/hdfs/data,[DISK]file:///data3/hadoop/hdfs/data,[DISK]file:///data4/hadoop/hdfs/data,[DISK]file:///data5/hadoop/hdfs/data,[DISK]file:///data6/hadoop/hdfs/data,[DISK]file:///data7/hadoop/hdfs/data,[DISK]file:///data8/hadoop/hdfs/data,[DISK]file:///data9/hadoop/hdfs/data,[DISK]file:///data10/hadoop/hdfs/data,[DISK]file:///data11/hadoop/hdfs/data,[DISK]file:///data12/hadoop/hdfs/data
+ true
+
+
+
+ dfs.datanode.data.dir.perm
+ 750
+
+
+
+ dfs.datanode.du.reserved
+ 26405499904
+
+
+
+ dfs.datanode.failed.volumes.tolerated
+ 2
+ true
+
+
+
+ dfs.datanode.http.address
+ 0.0.0.0:1022
+
+
+
+ dfs.datanode.https.address
+ 0.0.0.0:50475
+
+
+
+ dfs.datanode.ipc.address
+ 0.0.0.0:8010
+
+
+
+ dfs.datanode.kerberos.principal
+ dn/_HOST@ECLD.COM
+
+
+
+ dfs.datanode.keytab.file
+ /etc/security/keytabs/dn.service.keytab
+
+
+
+ dfs.datanode.max.transfer.threads
+ 16384
+
+
+
+ dfs.domain.socket.path
+ /var/lib/hadoop-hdfs/dn_socket
+
+
+
+ dfs.encrypt.data.transfer.cipher.suites
+ AES/CTR/NoPadding
+
+
+
+ dfs.ha.automatic-failover.enabled
+ true
+
+
+
+ dfs.ha.fencing.methods
+ shell(/bin/true)
+
+
+
+ dfs.ha.namenodes.b1
+ nn1,nn2
+
+
+
+ dfs.ha.namenodes.b2
+ nn3,nn4
+
+
+
+ dfs.heartbeat.interval
+ 3
+
+
+
+ dfs.hosts.exclude
+ /etc/hadoop/conf/dfs.exclude
+
+
+
+ dfs.http.policy
+ HTTP_ONLY
+
+
+
+ dfs.https.port
+ 50470
+
+
+
+ dfs.internal.nameservices
+ b1,b2
+
+
+
+ dfs.journalnode.edits.dir.b1
+ /data2/hadoop/hdfs/journal
+
+
+
+ dfs.journalnode.edits.dir.b2
+ /data2/hadoop/hdfs/journal
+
+
+
+ dfs.journalnode.http-address
+ 0.0.0.0:8480
+
+
+
+ dfs.journalnode.https-address
+ 0.0.0.0:8481
+
+
+
+ dfs.journalnode.kerberos.internal.spnego.principal
+ HTTP/_HOST@ECLD.COM
+
+
+
+ dfs.journalnode.kerberos.principal
+ jn/_HOST@ECLD.COM
+
+
+
+ dfs.journalnode.keytab.file
+ /etc/security/keytabs/jn.service.keytab
+
+
+
+ dfs.namenode.accesstime.precision
+ 0
+
+
+
+ dfs.namenode.acls.enabled
+ true
+
+
+
+ dfs.namenode.audit.log.async
+ true
+
+
+
+ dfs.namenode.avoid.read.stale.datanode
+ true
+
+
+
+ dfs.namenode.avoid.write.stale.datanode
+ true
+
+
+
+ dfs.namenode.checkpoint.dir
+ /data/hadoop/hdfs/namesecondary
+
+
+
+ dfs.namenode.checkpoint.edits.dir
+ ${dfs.namenode.checkpoint.dir}
+
+
+
+ dfs.namenode.checkpoint.period
+ 21600
+
+
+
+ dfs.namenode.checkpoint.txns
+ 1000000
+
+
+
+ dfs.namenode.fslock.fair
+ false
+
+
+
+ dfs.namenode.handler.count
+ 200
+
+
+
+ dfs.namenode.http-address.b1.nn1
+ b1m2.hdp.dc:50070
+
+
+
+ dfs.namenode.http-address.b1.nn2
+ b1m3.hdp.dc:50070
+
+
+
+ dfs.namenode.http-address.b2.nn3
+ b1m5.hdp.dc:50070
+
+
+
+ dfs.namenode.http-address.b2.nn4
+ b1m6.hdp.dc:50070
+
+
+
+ dfs.namenode.https-address.b1.nn1
+ b1m2.hdp.dc:50470
+
+
+
+ dfs.namenode.https-address.b1.nn2
+ b1m3.hdp.dc:50470
+
+
+
+ dfs.namenode.https-address.b2.nn3
+ b1m5.hdp.dc:50470
+
+
+
+ dfs.namenode.https-address.b2.nn4
+ b1m6.hdp.dc:50470
+
+
+
+ dfs.namenode.kerberos.internal.spnego.principal
+ HTTP/_HOST@ECLD.COM
+
+
+
+ dfs.namenode.kerberos.principal
+ nn/_HOST@ECLD.COM
+
+
+
+ dfs.namenode.keytab.file
+ /etc/security/keytabs/nn.service.keytab
+
+
+
+ dfs.namenode.max.extra.edits.segments.retained
+ 180
+
+
+
+ dfs.namenode.name.dir
+ /data1/hadoop/hdfs/namenode,/data2/hadoop/hdfs/namenode
+ true
+
+
+
+ dfs.namenode.name.dir.restore
+ true
+
+
+
+ dfs.namenode.num.extra.edits.retained
+ 18000
+
+
+
+ dfs.namenode.rpc-address.b1.nn1
+ b1m2.hdp.dc:8020
+
+
+
+ dfs.namenode.rpc-address.b1.nn2
+ b1m3.hdp.dc:8020
+
+
+
+ dfs.namenode.rpc-address.b2.nn3
+ b1m5.hdp.dc:8020
+
+
+
+ dfs.namenode.rpc-address.b2.nn4
+ b1m6.hdp.dc:8020
+
+
+
+ dfs.namenode.safemode.threshold-pct
+ 0.99
+
+
+
+ dfs.namenode.shared.edits.dir.b1
+ qjournal://b1m2.hdp.dc:8485;b1m3.hdp.dc:8485;b1m5.hdp.dc:8485/b1
+
+
+
+ dfs.namenode.shared.edits.dir.b2
+ qjournal://b1m2.hdp.dc:8485;b1m3.hdp.dc:8485;b1m5.hdp.dc:8485/b2
+
+
+
+ dfs.namenode.stale.datanode.interval
+ 30000
+
+
+
+ dfs.namenode.startup.delay.block.deletion.sec
+ 3600
+
+
+
+ dfs.namenode.write.stale.datanode.ratio
+ 1.0f
+
+
+
+ dfs.nameservices
+ b1,b2,b3,b4,a3,a4,f1,d2,e1
+
+
+
+ dfs.permissions.ContentSummary.subAccess
+ true
+
+
+
+ dfs.permissions.enabled
+ true
+
+
+
+ dfs.permissions.superusergroup
+ hdfs
+
+
+
+ dfs.replication
+ 3
+
+
+
+ dfs.replication.max
+ 50
+
+
+
+ dfs.web.authentication.kerberos.keytab
+ /etc/security/keytabs/spnego.service.keytab
+
+
+
+ dfs.web.authentication.kerberos.principal
+ HTTP/_HOST@ECLD.COM
+
+
+
+ dfs.webhdfs.enabled
+ true
+ true
+
+
+
+ fs.permissions.umask-mode
+ 022
+
+
+
+ hadoop.caller.context.enabled
+ true
+
+
+
+ manage.include.files
+ false
+
+
+
+ nfs.exports.allowed.hosts
+ * rw
+
+
+
+ nfs.file.dump.dir
+ /tmp/.hdfs-nfs
+
+
+
+ dfs.client.datanode-restart.timeout
+ 30
+
+
+
+ dfs.client.failover.proxy.provider.a4
+ org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
+
+
+ dfs.ha.namenodes.a4
+ nn1,nn2
+
+
+ dfs.namenode.http-address.a4.nn1
+ a4m1.hdp.dc:50070
+
+
+ dfs.namenode.http-address.a4.nn2
+ a4m2.hdp.dc:50070
+
+
+ dfs.namenode.https-address.a4.nn1
+ a4m1.hdp.dc:50470
+
+
+ dfs.namenode.https-address.a4.nn2
+ a4m2.hdp.dc:50470
+
+
+ dfs.namenode.rpc-address.a4.nn1
+ a4m1.hdp.dc:8020
+
+
+ dfs.namenode.rpc-address.a4.nn2
+ a4m2.hdp.dc:8020
+
+
+
+ dfs.client.failover.proxy.provider.a3
+ org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
+
+
+ dfs.ha.namenodes.a3
+ nn1,nn2
+
+
+ dfs.namenode.http-address.a3.nn1
+ a3m2.hdp.dc:50070
+
+
+ dfs.namenode.http-address.a3.nn2
+ a3m3.hdp.dc:50070
+
+
+ dfs.namenode.https-address.a3.nn1
+ a3m2.hdp.dc:50470
+
+
+ dfs.namenode.https-address.a3.nn2
+ a3m3.hdp.dc:50470
+
+
+ dfs.namenode.rpc-address.a3.nn1
+ a3m2.hdp.dc:8020
+
+
+ dfs.namenode.rpc-address.a3.nn2
+ a3m3.hdp.dc:8020
+
+
+
+ dfs.client.failover.proxy.provider.b3
+ org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
+
+
+ dfs.ha.namenodes.b3
+ nn1,nn2
+
+
+ dfs.namenode.http-address.b3.nn1
+ b3m2.hdp.dc:50070
+
+
+ dfs.namenode.http-address.b3.nn2
+ b3m3.hdp.dc:50070
+
+
+ dfs.namenode.https-address.b3.nn1
+ b3m2.hdp.dc:50470
+
+
+ dfs.namenode.https-address.b3.nn2
+ b3m3.hdp.dc:50470
+
+
+ dfs.namenode.rpc-address.b3.nn1
+ b3m2.hdp.dc:8020
+
+
+ dfs.namenode.rpc-address.b3.nn2
+ b3m3.hdp.dc:8020
+
+
+
+ dfs.client.failover.proxy.provider.b4
+ org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
+
+
+ dfs.ha.namenodes.b4
+ nn1,nn2
+
+
+ dfs.namenode.http-address.b4.nn1
+ b4m2.hdp.dc:50070
+
+
+ dfs.namenode.http-address.b4.nn2
+ b4m3.hdp.dc:50070
+
+
+ dfs.namenode.https-address.b4.nn1
+ b4m2.hdp.dc:50470
+
+
+ dfs.namenode.https-address.b4.nn2
+ b4m3.hdp.dc:50470
+
+
+ dfs.namenode.rpc-address.b4.nn1
+ b4m2.hdp.dc:8020
+
+
+ dfs.namenode.rpc-address.b4.nn2
+ b4m3.hdp.dc:8020
+
+
+
+ dfs.client.failover.proxy.provider.f1
+ org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
+
+
+ dfs.ha.namenodes.f1
+ nn1,nn2
+
+
+ dfs.namenode.http-address.f1.nn1
+ f1m2.hdp.dc:50070
+
+
+ dfs.namenode.http-address.f1.nn2
+ f1m3.hdp.dc:50070
+
+
+ dfs.namenode.https-address.f1.nn1
+ f1m2.hdp.dc:50470
+
+
+ dfs.namenode.https-address.f1.nn2
+ f1m3.hdp.dc:50470
+
+
+ dfs.namenode.rpc-address.f1.nn1
+ f1m2.hdp.dc:8020
+
+
+ dfs.namenode.rpc-address.f1.nn2
+ f1m3.hdp.dc:8020
+
+
+
+ dfs.client.failover.proxy.provider.d2
+ org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
+
+
+ dfs.ha.namenodes.d2
+ nn1,nn2
+
+
+ dfs.namenode.http-address.d2.nn1
+ d2m2.hdp.dc:50070
+
+
+ dfs.namenode.http-address.d2.nn2
+ d2m3.hdp.dc:50070
+
+
+ dfs.namenode.https-address.d2.nn1
+ d2m2.hdp.dc:50470
+
+
+ dfs.namenode.https-address.d2.nn2
+ d2m3.hdp.dc:50470
+
+
+ dfs.namenode.rpc-address.d2.nn1
+ d2m2.hdp.dc:8020
+
+
+ dfs.namenode.rpc-address.d2.nn2
+ d2m3.hdp.dc:8020
+
+
+
+ dfs.client.failover.proxy.provider.e1
+ org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
+
+
+ dfs.ha.namenodes.e1
+ nn1,nn2
+
+
+ dfs.namenode.http-address.e1.nn1
+ e1m2.hdp.dc:50070
+
+
+ dfs.namenode.http-address.e1.nn2
+ e1m3.hdp.dc:50070
+
+
+ dfs.namenode.https-address.e1.nn1
+ e1m2.hdp.dc:50470
+
+
+ dfs.namenode.https-address.e1.nn2
+ e1m3.hdp.dc:50470
+
+
+ dfs.namenode.rpc-address.e1.nn1
+ e1m2.hdp.dc:8020
+
+
+ dfs.namenode.rpc-address.e1.nn2
+ e1m3.hdp.dc:8020
+
+
+
diff --git a/config/b2e1/yarn-site.xml b/config/b2e1/yarn-site.xml
new file mode 100644
index 0000000..407d1f5
--- /dev/null
+++ b/config/b2e1/yarn-site.xml
@@ -0,0 +1,1026 @@
+
+
+
+ hadoop.http.cross-origin.allowed-origins
+ regex:.*[.]hdp[.]dc(:\d*)?
+
+
+
+ hadoop.registry.client.auth
+ kerberos
+
+
+
+ hadoop.registry.dns.bind-address
+ 0.0.0.0
+
+
+
+ hadoop.registry.dns.bind-port
+ 5354
+ true
+
+
+
+ hadoop.registry.dns.domain-name
+ ECLD.COM
+
+
+
+ hadoop.registry.dns.enabled
+ true
+
+
+
+ hadoop.registry.dns.zone-mask
+ 255.255.255.0
+
+
+
+ hadoop.registry.dns.zone-subnet
+ 172.17.0.0
+
+
+
+ hadoop.registry.jaas.context
+ Client
+
+
+
+ hadoop.registry.secure
+ true
+
+
+
+ hadoop.registry.system.accounts
+ sasl:yarn,sasl:jhs,sasl:hdfs-b1,sasl:rm,sasl:hive,sasl:spark
+
+
+
+ hadoop.registry.zk.quorum
+ b1m2.hdp.dc:2181,b1m3.hdp.dc:2181,b1m4.hdp.dc:2181,b1m5.hdp.dc:2181,b1m6.hdp.dc:2181
+
+
+
+ manage.include.files
+ false
+
+
+
+ yarn.acl.enable
+ true
+
+
+
+ yarn.admin.acl
+ *
+
+
+
+ yarn.application.classpath
+ $HADOOP_CONF_DIR,/usr/lib/edp/hadoop-3.2.2/share/hadoop/common/*, /usr/lib/edp/hadoop-3.2.2/share/hadoop/common/lib/*, /usr/lib/edp/hadoop-3.2.2/share/hadoop/hdfs/*, /usr/lib/edp/hadoop-3.2.2/share/hadoop/hdfs/lib/*, /usr/lib/edp/hadoop-3.2.2/share/hadoop/yarn/*, /usr/lib/edp/hadoop-3.2.2/share/hadoop/yarn/lib/*
+
+
+
+ yarn.client.nodemanager-connect.max-wait-ms
+ 60000
+
+
+
+ yarn.client.nodemanager-connect.retry-interval-ms
+ 10000
+
+
+
+ yarn.http.policy
+ HTTP_ONLY
+
+
+
+ yarn.log-aggregation-enable
+ true
+
+
+
+ yarn.log-aggregation.retain-seconds
+ 2592000
+
+
+
+ yarn.log.server.url
+ http://b1m4.hdp.dc:19888/jobhistory/logs
+
+
+
+ yarn.log.server.web-service.url
+ http://b1m4.hdp.dc:8188/ws/v1/applicationhistory
+
+
+
+ yarn.node-labels.enabled
+ false
+
+
+
+ yarn.node-labels.fs-store.retry-policy-spec
+ 2000, 500
+
+
+
+ yarn.node-labels.fs-store.root-dir
+ /system/yarn/node-labels
+
+
+
+ yarn.nodemanager.address
+ 0.0.0.0:45454
+
+
+
+ yarn.nodemanager.admin-env
+ MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX
+
+
+
+ yarn.nodemanager.aux-services
+ mapreduce_shuffle,spark_shuffle,timeline_collector,sparkv2_shuffle
+
+
+
+ yarn.nodemanager.aux-services.mapreduce_shuffle.class
+ org.apache.hadoop.mapred.ShuffleHandler
+
+
+
+ yarn.nodemanager.aux-services.spark_shuffle.class
+ org.apache.spark.network.yarn.YarnShuffleService
+
+
+
+ yarn.nodemanager.aux-services.spark_shuffle.classpath
+ /usr/lib/edp/spark-3.1.1-bin-hadoop3.2/yarn/*
+
+
+
+ yarn.nodemanager.aux-services.timeline_collector.class
+ org.apache.hadoop.yarn.server.timelineservice.collector.PerNodeTimelineCollectorsAuxService
+
+
+
+ yarn.nodemanager.aux-services.sparkv2_shuffle.class
+ org.apache.spark.network.yarn.YarnShuffleService
+
+
+
+ yarn.nodemanager.aux-services.sparkv2_shuffle.classpath
+ /usr/lib/edp/spark-2.4.7-bin-hadoop-3.1.2/yarn/*
+
+
+
+ yarn.nodemanager.bind-host
+ 0.0.0.0
+
+
+
+ yarn.nodemanager.container-executor.class
+ org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor
+
+
+
+ yarn.nodemanager.container-metrics.unregister-delay-ms
+ 60000
+
+
+
+ yarn.nodemanager.container-monitor.interval-ms
+ 3000
+
+
+
+ yarn.nodemanager.delete.debug-delay-sec
+ 86400
+
+
+
+ yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage
+ 90
+
+
+
+ yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb
+ 1000
+
+
+
+ yarn.nodemanager.disk-health-checker.min-healthy-disks
+ 0.25
+
+
+
+ yarn.nodemanager.health-checker.interval-ms
+ 135000
+
+
+
+ yarn.nodemanager.health-checker.script.timeout-ms
+ 60000
+
+
+
+ yarn.nodemanager.keytab
+ /etc/security/keytabs/nm.service.keytab
+
+
+
+ yarn.nodemanager.linux-container-executor.cgroups.strict-resource-usage
+ false
+
+
+
+ yarn.nodemanager.linux-container-executor.group
+ hadoop
+
+
+
+ yarn.nodemanager.linux-container-executor.nonsecure-mode.limit-users
+ true
+
+
+
+ yarn.nodemanager.local-dirs
+ /data1/hadoop/yarn/local,/data2/hadoop/yarn/local,/data3/hadoop/yarn/local,/data4/hadoop/yarn/local,/data5/hadoop/yarn/local,/data6/hadoop/yarn/local,/data7/hadoop/yarn/local,/data8/hadoop/yarn/local,/data9/hadoop/yarn/local,/data10/hadoop/yarn/local,/data11/hadoop/yarn/local,/data12/hadoop/yarn/local
+
+
+
+ yarn.nodemanager.log-aggregation.compression-type
+ gz
+
+
+
+ yarn.nodemanager.log-aggregation.debug-enabled
+ false
+
+
+
+ yarn.nodemanager.log-aggregation.num-log-files-per-app
+ 30
+
+
+
+ yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds
+ 3600
+
+
+
+ yarn.nodemanager.log-dirs
+ /data1/hadoop/yarn/log,/data2/hadoop/yarn/log,/data3/hadoop/yarn/log,/data4/hadoop/yarn/log,/data5/hadoop/yarn/log,/data6/hadoop/yarn/log,/data7/hadoop/yarn/log,/data8/hadoop/yarn/log,/data9/hadoop/yarn/log,/data10/hadoop/yarn/log,/data11/hadoop/yarn/log,/data12/hadoop/yarn/log
+
+
+
+ yarn.nodemanager.log.retain-seconds
+ 604800
+
+
+
+ yarn.nodemanager.principal
+ nm/_HOST@ECLD.COM
+
+
+
+ yarn.nodemanager.recovery.dir
+ /var/log/hadoop-yarn/nodemanager/recovery-state
+
+
+
+ yarn.nodemanager.recovery.enabled
+ true
+
+
+
+ yarn.nodemanager.recovery.supervised
+ true
+
+
+
+ yarn.nodemanager.remote-app-log-dir
+ /app-logs
+
+
+
+ yarn.nodemanager.remote-app-log-dir-suffix
+ logs
+
+
+
+ yarn.nodemanager.resource-plugins
+
+
+
+
+ yarn.nodemanager.resource-plugins.gpu.allowed-gpu-devices
+
+
+
+
+ yarn.nodemanager.resource-plugins.gpu.docker-plugin
+
+
+
+
+ yarn.nodemanager.resource-plugins.gpu.docker-plugin.nvidiadocker-v1.endpoint
+
+
+
+
+ yarn.nodemanager.resource-plugins.gpu.path-to-discovery-executables
+
+
+
+
+ yarn.nodemanager.resource.cpu-vcores
+ 39
+
+
+
+ yarn.nodemanager.resource.memory-mb
+ 184320
+
+
+
+ yarn.nodemanager.resource.percentage-physical-cpu-limit
+ 80
+
+
+
+ yarn.nodemanager.resourcemanager.connect.wait.secs
+ 1800
+
+
+
+ yarn.nodemanager.runtime.linux.allowed-runtimes
+ default,docker
+
+
+
+ yarn.nodemanager.runtime.linux.docker.allowed-container-networks
+ host,none,bridge
+
+
+
+ yarn.nodemanager.runtime.linux.docker.capabilities
+
+ CHOWN,DAC_OVERRIDE,FSETID,FOWNER,MKNOD,NET_RAW,SETGID,SETUID,SETFCAP,
+ SETPCAP,NET_BIND_SERVICE,SYS_CHROOT,KILL,AUDIT_WRITE
+
+
+
+ yarn.nodemanager.runtime.linux.docker.default-container-network
+ host
+
+
+
+ yarn.nodemanager.runtime.linux.docker.privileged-containers.acl
+
+
+
+
+ yarn.nodemanager.runtime.linux.docker.privileged-containers.allowed
+ false
+
+
+
+ yarn.nodemanager.vmem-check-enabled
+ false
+
+
+
+ yarn.nodemanager.vmem-pmem-ratio
+ 2.1
+
+
+
+ yarn.nodemanager.webapp.cross-origin.enabled
+ true
+
+
+
+ yarn.nodemanager.webapp.spnego-keytab-file
+ /etc/security/keytabs/spnego.service.keytab
+
+
+
+ yarn.nodemanager.webapp.spnego-principal
+ HTTP/_HOST@ECLD.COM
+
+
+
+ yarn.resourcemanager.address
+ b1m2.hdp.dc:8050
+
+
+
+ yarn.resourcemanager.admin.address
+ b1m2.hdp.dc:8141
+
+
+
+ yarn.resourcemanager.am.max-attempts
+ 2
+
+
+
+ yarn.resourcemanager.bind-host
+ 0.0.0.0
+
+
+
+ yarn.resourcemanager.cluster-id
+ yarn-cluster
+
+
+
+ yarn.resourcemanager.connect.max-wait.ms
+ 900000
+
+
+
+ yarn.resourcemanager.connect.retry-interval.ms
+ 30000
+
+
+
+ yarn.resourcemanager.display.per-user-apps
+ true
+
+
+
+ yarn.resourcemanager.fs.state-store.retry-policy-spec
+ 2000, 500
+
+
+
+ yarn.resourcemanager.fs.state-store.uri
+
+
+
+
+ yarn.resourcemanager.ha.automatic-failover.zk-base-path
+ /yarn-leader-election
+
+
+
+ yarn.resourcemanager.ha.enabled
+ true
+
+
+
+ yarn.resourcemanager.ha.rm-ids
+ rm1,rm2
+
+
+
+ yarn.resourcemanager.hostname
+ b1m2.hdp.dc
+
+
+
+ yarn.resourcemanager.hostname.rm1
+ b1m2.hdp.dc
+
+
+
+ yarn.resourcemanager.hostname.rm2
+ b1m3.hdp.dc
+
+
+
+ yarn.resourcemanager.keytab
+ /etc/security/keytabs/rm.service.keytab
+
+
+
+ yarn.resourcemanager.monitor.capacity.preemption.intra-queue-preemption.enabled
+ true
+
+
+
+ yarn.resourcemanager.monitor.capacity.preemption.monitoring_interval
+ 15000
+
+
+
+ yarn.resourcemanager.monitor.capacity.preemption.natural_termination_factor
+ 1
+
+
+
+ yarn.resourcemanager.monitor.capacity.preemption.total_preemption_per_round
+ 0.1
+
+
+
+ yarn.resourcemanager.nodes.exclude-path
+ /etc/hadoop/conf/yarn.exclude
+
+
+
+ yarn.resourcemanager.placement-constraints.handler
+ scheduler
+
+
+
+ yarn.resourcemanager.principal
+ rm/_HOST@ECLD.COM
+
+
+
+ yarn.resourcemanager.proxy-user-privileges.enabled
+ true
+
+
+
+ yarn.resourcemanager.proxyuser.*.groups
+
+
+
+
+ yarn.resourcemanager.proxyuser.*.hosts
+
+
+
+
+ yarn.resourcemanager.proxyuser.*.users
+
+
+
+
+ yarn.resourcemanager.recovery.enabled
+ true
+
+
+
+ yarn.resourcemanager.resource-tracker.address
+ b1m2.hdp.dc:8025
+
+
+
+ yarn.resourcemanager.resource-tracker.address.rm1
+ b1m2.hdp.dc:8025
+
+
+
+ yarn.resourcemanager.resource-tracker.address.rm2
+ b1m3.hdp.dc:8025
+
+
+
+ yarn.resourcemanager.scheduler.address
+ b1m2.hdp.dc:8030
+
+
+
+ yarn.resourcemanager.scheduler.class
+ org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler
+
+
+
+ yarn.resourcemanager.scheduler.monitor.enable
+ true
+
+
+
+ yarn.resourcemanager.state-store.max-completed-applications
+ ${yarn.resourcemanager.max-completed-applications}
+
+
+
+ yarn.resourcemanager.store.class
+ org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore
+
+
+
+ yarn.resourcemanager.system-metrics-publisher.dispatcher.pool-size
+ 10
+
+
+
+ yarn.resourcemanager.system-metrics-publisher.enabled
+ true
+
+
+
+ yarn.resourcemanager.webapp.address
+ b1m2.hdp.dc:8088
+
+
+
+ yarn.resourcemanager.webapp.address.rm1
+ b1m2.hdp.dc:8088
+
+
+
+ yarn.resourcemanager.webapp.address.rm2
+ b1m3.hdp.dc:8088
+
+
+
+ yarn.resourcemanager.webapp.cross-origin.enabled
+ true
+
+
+
+ yarn.resourcemanager.webapp.delegation-token-auth-filter.enabled
+ false
+
+
+
+ yarn.resourcemanager.webapp.https.address
+ b1m2.hdp.dc:8090
+
+
+
+ yarn.resourcemanager.webapp.https.address.rm1
+ b1m2.hdp.dc:8090
+
+
+
+ yarn.resourcemanager.webapp.https.address.rm2
+ b1m3.hdp.dc:8090
+
+
+
+ yarn.resourcemanager.webapp.spnego-keytab-file
+ /etc/security/keytabs/spnego.service.keytab
+
+
+
+ yarn.resourcemanager.webapp.spnego-principal
+ HTTP/_HOST@ECLD.COM
+
+
+
+ yarn.resourcemanager.work-preserving-recovery.enabled
+ true
+
+
+
+ yarn.resourcemanager.work-preserving-recovery.scheduling-wait-ms
+ 10000
+
+
+
+ yarn.resourcemanager.zk-acl
+ sasl:rm:rwcda
+
+
+
+ yarn.resourcemanager.zk-address
+ b1m2.hdp.dc:2181,b1m3.hdp.dc:2181,b1m4.hdp.dc:2181,b1m5.hdp.dc:2181,b1m6.hdp.dc:2181
+
+
+
+ yarn.resourcemanager.zk-num-retries
+ 1000
+
+
+
+ yarn.resourcemanager.zk-retry-interval-ms
+ 1000
+
+
+
+ yarn.resourcemanager.zk-state-store.parent-path
+ /rmstore
+
+
+
+ yarn.resourcemanager.zk-timeout-ms
+ 10000
+
+
+
+ yarn.rm.system-metricspublisher.emit-container-events
+ true
+
+
+
+ yarn.scheduler.capacity.ordering-policy.priority-utilization.underutilized-preemption.enabled
+ true
+
+
+
+ yarn.scheduler.maximum-allocation-mb
+ 40960
+
+
+
+ yarn.scheduler.maximum-allocation-vcores
+ 51
+
+
+
+ yarn.scheduler.minimum-allocation-mb
+ 1024
+
+
+
+ yarn.scheduler.minimum-allocation-vcores
+ 1
+
+
+
+ yarn.service.system-service.dir
+ /services
+
+
+
+ yarn.system-metricspublisher.enabled
+ true
+
+
+
+ yarn.timeline-service.address
+ b1m4.hdp.dc:10200
+
+
+
+ yarn.timeline-service.bind-host
+ 0.0.0.0
+
+
+
+ yarn.timeline-service.client.max-retries
+ 30
+
+
+
+ yarn.timeline-service.client.retry-interval-ms
+ 1000
+
+
+
+ yarn.timeline-service.enabled
+ false
+
+
+
+ yarn.timeline-service.entity-group-fs-store.active-dir
+ /ats/active/
+
+
+
+ yarn.timeline-service.entity-group-fs-store.app-cache-size
+ 10
+
+
+
+ yarn.timeline-service.entity-group-fs-store.cleaner-interval-seconds
+ 3600
+
+
+
+ yarn.timeline-service.entity-group-fs-store.done-dir
+ /ats/done/
+
+
+
+ yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes
+ org.apache.hadoop.yarn.applications.distributedshell.DistributedShellTimelinePlugin
+
+
+
+ yarn.timeline-service.entity-group-fs-store.group-id-plugin-classpath
+
+
+
+
+ yarn.timeline-service.entity-group-fs-store.retain-seconds
+ 604800
+
+
+
+ yarn.timeline-service.entity-group-fs-store.scan-interval-seconds
+ 60
+
+
+
+ yarn.timeline-service.entity-group-fs-store.summary-store
+ org.apache.hadoop.yarn.server.timeline.RollingLevelDBTimelineStore
+
+
+
+ yarn.timeline-service.generic-application-history.save-non-am-container-meta-info
+ false
+
+
+
+ yarn.timeline-service.generic-application-history.store-class
+ org.apache.hadoop.yarn.server.applicationhistoryservice.NullApplicationHistoryStore
+
+
+
+ yarn.timeline-service.hbase-schema.prefix
+ prod.
+
+
+
+ yarn.timeline-service.hbase.configuration.file
+ file:///etc/hadoop/conf/embedded-yarn-ats-hbase/hbase-site.xml
+
+
+
+ yarn.timeline-service.hbase.coprocessor.jar.hdfs.location
+ file:///usr/lib/edp/hadoop-3.2.2/lib/hadoop-yarn-server-timelineservice-3.2.2.jar
+
+
+
+ yarn.timeline-service.http-authentication.cookie.domain
+
+
+
+
+ yarn.timeline-service.http-authentication.cookie.path
+
+
+
+
+ yarn.timeline-service.http-authentication.kerberos.keytab
+ /etc/security/keytabs/spnego.service.keytab
+
+
+
+ yarn.timeline-service.http-authentication.kerberos.name.rules
+
+
+
+
+ yarn.timeline-service.http-authentication.kerberos.principal
+ HTTP/_HOST@ECLD.COM
+
+
+
+ yarn.timeline-service.http-authentication.proxyuser.*.groups
+
+
+
+
+ yarn.timeline-service.http-authentication.proxyuser.*.hosts
+
+
+
+
+ yarn.timeline-service.http-authentication.proxyuser.*.users
+
+
+
+
+ yarn.timeline-service.http-authentication.signature.secret
+
+
+
+
+ yarn.timeline-service.http-authentication.signature.secret.file
+
+
+
+
+ yarn.timeline-service.http-authentication.signer.secret.provider
+
+
+
+
+ yarn.timeline-service.http-authentication.signer.secret.provider.object
+
+
+
+
+ yarn.timeline-service.http-authentication.simple.anonymous.allowed
+ true
+
+
+
+ yarn.timeline-service.http-authentication.token.validity
+
+
+
+
+ yarn.timeline-service.http-authentication.type
+ simple
+
+
+
+ yarn.timeline-service.http-cross-origin.enabled
+ true
+
+
+
+ yarn.timeline-service.keytab
+ /etc/security/keytabs/yarn.service.keytab
+
+
+
+ yarn.timeline-service.leveldb-state-store.path
+ /data/hadoop/yarn/timeline
+
+
+
+ yarn.timeline-service.leveldb-timeline-store.path
+ /data/hadoop/yarn/timeline
+
+
+
+ yarn.timeline-service.leveldb-timeline-store.read-cache-size
+ 104857600
+
+
+
+ yarn.timeline-service.leveldb-timeline-store.start-time-read-cache-size
+ 10000
+
+
+
+ yarn.timeline-service.leveldb-timeline-store.start-time-write-cache-size
+ 10000
+
+
+
+ yarn.timeline-service.leveldb-timeline-store.ttl-interval-ms
+ 300000
+
+
+
+ yarn.timeline-service.principal
+ yarn/_HOST@ECLD.COM
+
+
+
+ yarn.timeline-service.reader.webapp.address
+ b1m4.hdp.dc:8198
+
+
+
+ yarn.timeline-service.reader.webapp.https.address
+ b1m4.hdp.dc:8199
+
+
+
+ yarn.timeline-service.recovery.enabled
+ true
+
+
+
+ yarn.timeline-service.state-store-class
+ org.apache.hadoop.yarn.server.timeline.recovery.LeveldbTimelineStateStore
+
+
+
+ yarn.timeline-service.store-class
+ org.apache.hadoop.yarn.server.timeline.EntityGroupFSTimelineStore
+
+
+
+ yarn.timeline-service.ttl-enable
+ true
+
+
+
+ yarn.timeline-service.ttl-ms
+ 2678400000
+
+
+
+ yarn.timeline-service.version
+ 2.0f
+
+
+
+ yarn.timeline-service.versions
+ 1.5f,2.0f
+
+
+
+ yarn.timeline-service.webapp.address
+ b1m4.hdp.dc:8188
+
+
+
+ yarn.timeline-service.webapp.https.address
+ b1m4.hdp.dc:8190
+
+
+
+ yarn.webapp.api-service.enable
+ true
+
+
+
+ yarn.webapp.ui2.enable
+ true
+
+
+
+ yarn.resourcemanager.max-completed-applications
+ 10000
+
+
+
diff --git a/config/b2s119/core-site.xml b/config/b2s119/core-site.xml
new file mode 100644
index 0000000..fecd70c
--- /dev/null
+++ b/config/b2s119/core-site.xml
@@ -0,0 +1,267 @@
+
+
+
+
+
+ fs.azure.user.agent.prefix
+ User-Agent: APN/1.0 Hortonworks/1.0 HDP/
+
+
+
+ fs.defaultFS
+ hdfs://b2
+ true
+
+
+
+ fs.s3a.fast.upload
+ true
+
+
+
+ fs.s3a.fast.upload.buffer
+ disk
+
+
+
+ fs.s3a.multipart.size
+ 67108864
+
+
+
+ fs.trash.interval
+ 4320
+
+
+
+ fs.trash.checkpoint.interval
+ 360
+
+
+
+ ha.failover-controller.active-standby-elector.zk.op.retries
+ 120
+
+
+
+ ha.zookeeper.acl
+ sasl:nn:rwcda
+
+
+
+ ha.zookeeper.quorum
+ b5m1.hdp.dc:2181,b5m2.hdp.dc:2181,b5m3.hdp.dc:2181
+
+
+
+ hadoop.http.authentication.kerberos.keytab
+ /etc/security/keytabs/spnego.service.keytab
+
+
+
+ hadoop.http.authentication.kerberos.principal
+ HTTP/_HOST@ECLD.COM
+
+
+
+ hadoop.http.authentication.signature.secret.file
+ /etc/security/http_secret
+
+
+
+ hadoop.http.authentication.simple.anonymous.allowed
+ true
+
+
+
+ hadoop.http.authentication.type
+ simple
+
+
+
+ hadoop.http.cross-origin.allowed-headers
+ X-Requested-With,Content-Type,Accept,Origin,WWW-Authenticate,Accept-Encoding,Transfer-Encoding
+
+
+
+ hadoop.http.cross-origin.allowed-methods
+ GET,PUT,POST,OPTIONS,HEAD,DELETE
+
+
+
+ hadoop.http.cross-origin.allowed-origins
+ *
+
+
+
+ hadoop.http.cross-origin.max-age
+ 1800
+
+
+
+ hadoop.http.filter.initializers
+ org.apache.hadoop.security.AuthenticationFilterInitializer,org.apache.hadoop.security.HttpCrossOriginFilterInitializer
+
+
+
+ hadoop.proxyuser.hdfs.groups
+ *
+
+
+
+ hadoop.proxyuser.hdfs.hosts
+ *
+
+
+
+ hadoop.proxyuser.hive.groups
+ *
+
+
+
+ hadoop.proxyuser.hive.hosts
+ *
+
+
+
+ hadoop.proxyuser.HTTP.groups
+ *
+
+
+
+ hadoop.proxyuser.HTTP.hosts
+ *
+
+
+
+ hadoop.proxyuser.iap.groups
+ *
+
+
+
+ hadoop.proxyuser.iap.hosts
+ *
+
+
+
+ hadoop.proxyuser.livy.groups
+ *
+
+
+
+ hadoop.proxyuser.livy.hosts
+ *
+
+
+
+ hadoop.proxyuser.yarn.groups
+ *
+
+
+
+ hadoop.proxyuser.yarn.hosts
+ *
+
+
+
+ hadoop.rpc.protection
+ authentication,privacy
+
+
+
+ hadoop.security.auth_to_local
+ RULE:[1:$1@$0](hbase-b5@ECLD.COM)s/.*/hbase/
+RULE:[1:$1@$0](hdfs-b5@ECLD.COM)s/.*/hdfs/
+RULE:[1:$1@$0](spark-b5@ECLD.COM)s/.*/spark/
+RULE:[1:$1@$0](yarn-ats-b5@ECLD.COM)s/.*/yarn-ats/
+RULE:[1:$1@$0](.*@ECLD.COM)s/@.*//
+RULE:[2:$1@$0](dn@ECLD.COM)s/.*/hdfs/
+RULE:[2:$1@$0](hbase@ECLD.COM)s/.*/hbase/
+RULE:[2:$1@$0](hive@ECLD.COM)s/.*/hive/
+RULE:[2:$1@$0](jhs@ECLD.COM)s/.*/mapred/
+RULE:[2:$1@$0](jn@ECLD.COM)s/.*/hdfs/
+RULE:[2:$1@$0](livy@ECLD.COM)s/.*/livy/
+RULE:[2:$1@$0](nm@ECLD.COM)s/.*/yarn/
+RULE:[2:$1@$0](nn@ECLD.COM)s/.*/hdfs/
+RULE:[2:$1@$0](rangeradmin@ECLD.COM)s/.*/ranger/
+RULE:[2:$1@$0](rangerlookup@ECLD.COM)s/.*/ranger/
+RULE:[2:$1@$0](rangertagsync@ECLD.COM)s/.*/rangertagsync/
+RULE:[2:$1@$0](rangerusersync@ECLD.COM)s/.*/rangerusersync/
+RULE:[2:$1@$0](rm@ECLD.COM)s/.*/yarn/
+RULE:[2:$1@$0](spark@ECLD.COM)s/.*/spark/
+RULE:[2:$1@$0](yarn@ECLD.COM)s/.*/yarn/
+RULE:[2:$1@$0](yarn-ats-hbase@ECLD.COM)s/.*/yarn-ats/
+DEFAULT
+
+
+
+ hadoop.security.authentication
+ kerberos
+
+
+
+ hadoop.security.authorization
+ true
+
+
+
+ hadoop.security.instrumentation.requires.admin
+ false
+
+
+
+ io.compression.codec.lzo.class
+ com.hadoop.compression.lzo.LzoCodec
+
+
+
+ io.compression.codecs
+ org.apache.hadoop.io.compress.GzipCodec,com.hadoop.compression.lzo.LzoCodec,com.hadoop.compression.lzo.LzopCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec
+
+
+
+ io.file.buffer.size
+ 131072
+
+
+
+ io.serializations
+ org.apache.hadoop.io.serializer.WritableSerialization
+
+
+
+ ipc.client.connect.max.retries
+ 50
+
+
+
+ ipc.client.connection.maxidletime
+ 30000
+
+
+
+ ipc.client.idlethreshold
+ 8000
+
+
+
+ ipc.server.tcpnodelay
+ true
+
+
+
+ mapreduce.jobtracker.webinterface.trusted
+ false
+
+
+
+ ipc.client.fallback-to-simple-auth-allowed
+ true
+
+
+
+ fs.hdfs.impl.disable.cache
+ true
+
+
+
diff --git a/config/b2s119/hdfs-site.xml b/config/b2s119/hdfs-site.xml
new file mode 100644
index 0000000..089155b
--- /dev/null
+++ b/config/b2s119/hdfs-site.xml
@@ -0,0 +1,713 @@
+
+
+
+ dfs.block.access.token.enable
+ true
+
+
+
+ dfs.blockreport.initialDelay
+ 120
+
+
+
+ dfs.blocksize
+ 134217728
+
+
+
+ dfs.client.failover.proxy.provider.b5
+ org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
+
+
+
+ dfs.client.read.shortcircuit
+ true
+
+
+
+ dfs.client.read.shortcircuit.streams.cache.size
+ 4096
+
+
+
+ dfs.client.retry.policy.enabled
+ false
+
+
+
+ dfs.cluster.administrators
+ hdfs
+
+
+
+ dfs.content-summary.limit
+ 5000
+
+
+
+ dfs.data.transfer.protection
+ authentication,privacy
+
+
+
+ dfs.datanode.address
+ 0.0.0.0:1019
+
+
+
+ dfs.datanode.balance.bandwidthPerSec
+ 6250000
+
+
+
+ dfs.datanode.data.dir
+ [DISK]file:///data1/hadoop/hdfs/data,[DISK]file:///data2/hadoop/hdfs/data,[DISK]file:///data3/hadoop/hdfs/data,[DISK]file:///data4/hadoop/hdfs/data,[DISK]file:///data5/hadoop/hdfs/data,[DISK]file:///data6/hadoop/hdfs/data,[DISK]file:///data7/hadoop/hdfs/data,[DISK]file:///data8/hadoop/hdfs/data,[DISK]file:///data9/hadoop/hdfs/data,[DISK]file:///data10/hadoop/hdfs/data,[DISK]file:///data11/hadoop/hdfs/data,[DISK]file:///data12/hadoop/hdfs/data,[DISK]file:///data13/hadoop/hdfs/data,[DISK]file:///data14/hadoop/hdfs/data,[DISK]file:///data15/hadoop/hdfs/data,[DISK]file:///data16/hadoop/hdfs/data,[DISK]file:///data17/hadoop/hdfs/data,[DISK]file:///data18/hadoop/hdfs/data,[DISK]file:///data19/hadoop/hdfs/data,[DISK]file:///data20/hadoop/hdfs/data,[DISK]file:///data21/hadoop/hdfs/data,[DISK]file:///data22/hadoop/hdfs/data,[DISK]file:///data23/hadoop/hdfs/data,[DISK]file:///data24/hadoop/hdfs/data
+ true
+
+
+
+ dfs.datanode.data.dir.perm
+ 750
+
+
+
+ dfs.datanode.du.reserved
+ 26405499904
+
+
+
+ dfs.datanode.failed.volumes.tolerated
+ 2
+ true
+
+
+
+ dfs.datanode.http.address
+ 0.0.0.0:1022
+
+
+
+ dfs.datanode.https.address
+ 0.0.0.0:50475
+
+
+
+ dfs.datanode.ipc.address
+ 0.0.0.0:8010
+
+
+
+ dfs.datanode.kerberos.principal
+ dn/_HOST@ECLD.COM
+
+
+
+ dfs.datanode.keytab.file
+ /etc/security/keytabs/dn.service.keytab
+
+
+
+ dfs.datanode.max.transfer.threads
+ 16384
+
+
+
+ dfs.domain.socket.path
+ /var/lib/hadoop-hdfs/dn_socket
+
+
+
+ dfs.encrypt.data.transfer.cipher.suites
+ AES/CTR/NoPadding
+
+
+
+ dfs.ha.automatic-failover.enabled
+ true
+
+
+
+ dfs.ha.fencing.methods
+ shell(/bin/true)
+
+
+
+ dfs.ha.namenodes.b5
+ nn1,nn2
+
+
+
+ dfs.heartbeat.interval
+ 3
+
+
+
+ dfs.hosts.exclude
+ /etc/hadoop/conf/dfs.exclude
+
+
+
+ dfs.http.policy
+ HTTP_ONLY
+
+
+
+ dfs.https.port
+ 50470
+
+
+
+ dfs.internal.nameservices
+ b5
+
+
+
+ dfs.journalnode.edits.dir.b5
+ /data2/hadoop/hdfs/journal
+
+
+
+ dfs.journalnode.http-address
+ 0.0.0.0:8480
+
+
+
+ dfs.journalnode.https-address
+ 0.0.0.0:8481
+
+
+
+ dfs.journalnode.kerberos.internal.spnego.principal
+ HTTP/_HOST@ECLD.COM
+
+
+
+ dfs.journalnode.kerberos.principal
+ jn/_HOST@ECLD.COM
+
+
+
+ dfs.journalnode.keytab.file
+ /etc/security/keytabs/jn.service.keytab
+
+
+
+ dfs.namenode.accesstime.precision
+ 0
+
+
+
+ dfs.namenode.acls.enabled
+ true
+
+
+
+ dfs.namenode.audit.log.async
+ true
+
+
+
+ dfs.namenode.avoid.read.stale.datanode
+ true
+
+
+
+ dfs.namenode.avoid.write.stale.datanode
+ true
+
+
+
+ dfs.namenode.checkpoint.dir
+ /data/hadoop/hdfs/namesecondary
+
+
+
+ dfs.namenode.checkpoint.edits.dir
+ ${dfs.namenode.checkpoint.dir}
+
+
+
+ dfs.namenode.checkpoint.period
+ 21600
+
+
+
+ dfs.namenode.checkpoint.txns
+ 1000000
+
+
+
+ dfs.namenode.fslock.fair
+ false
+
+
+
+ dfs.namenode.handler.count
+ 100
+
+
+
+ dfs.namenode.http-address.b5.nn1
+ b5m2.hdp.dc:50070
+
+
+
+ dfs.namenode.http-address.b5.nn2
+ b5m3.hdp.dc:50070
+
+
+
+ dfs.namenode.https-address.b5.nn1
+ b5m2.hdp.dc:50470
+
+
+
+ dfs.namenode.https-address.b5.nn2
+ b5m3.hdp.dc:50470
+
+
+
+ dfs.namenode.kerberos.internal.spnego.principal
+ HTTP/_HOST@ECLD.COM
+
+
+
+ dfs.namenode.kerberos.principal
+ nn/_HOST@ECLD.COM
+
+
+
+ dfs.namenode.keytab.file
+ /etc/security/keytabs/nn.service.keytab
+
+
+
+ dfs.namenode.max.extra.edits.segments.retained
+ 180
+
+
+
+ dfs.namenode.name.dir
+ /data1/hadoop/hdfs/namenode,/data2/hadoop/hdfs/namenode
+ true
+
+
+
+ dfs.namenode.name.dir.restore
+ true
+
+
+
+ dfs.namenode.num.extra.edits.retained
+ 18000
+
+
+
+ dfs.namenode.rpc-address.b5.nn1
+ b5m2.hdp.dc:8020
+
+
+
+ dfs.namenode.rpc-address.b5.nn2
+ b5m3.hdp.dc:8020
+
+
+
+ dfs.namenode.safemode.threshold-pct
+ 0.99
+
+
+
+ dfs.namenode.shared.edits.dir.b5
+ qjournal://b5m1.hdp.dc:8485;b5m2.hdp.dc:8485;b5m3.hdp.dc:8485/b5
+
+
+
+ dfs.namenode.stale.datanode.interval
+ 30000
+
+
+
+ dfs.namenode.startup.delay.block.deletion.sec
+ 3600
+
+
+
+ dfs.namenode.write.stale.datanode.ratio
+ 1.0f
+
+
+
+ dfs.nameservices
+ b5,b1,b2,b3,b4,a3,a4,f1,e1,d2
+
+
+
+ dfs.permissions.ContentSummary.subAccess
+ true
+
+
+
+ dfs.permissions.enabled
+ true
+
+
+
+ dfs.permissions.superusergroup
+ hdfs
+
+
+
+ dfs.replication
+ 3
+
+
+
+ dfs.replication.max
+ 50
+
+
+
+ dfs.web.authentication.kerberos.keytab
+ /etc/security/keytabs/spnego.service.keytab
+
+
+
+ dfs.web.authentication.kerberos.principal
+ HTTP/_HOST@ECLD.COM
+
+
+
+ dfs.webhdfs.enabled
+ true
+ true
+
+
+
+ fs.permissions.umask-mode
+ 022
+
+
+
+ hadoop.caller.context.enabled
+ true
+
+
+
+ manage.include.files
+ false
+
+
+
+ nfs.exports.allowed.hosts
+ * rw
+
+
+
+ nfs.file.dump.dir
+ /tmp/.hdfs-nfs
+
+
+
+ dfs.client.datanode-restart.timeout
+ 30
+
+
+
+ dfs.client.failover.proxy.provider.a4
+ org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
+
+
+ dfs.ha.namenodes.a4
+ nn1,nn2
+
+
+ dfs.namenode.http-address.a4.nn1
+ a4m1.hdp.dc:50070
+
+
+ dfs.namenode.http-address.a4.nn2
+ a4m2.hdp.dc:50070
+
+
+ dfs.namenode.https-address.a4.nn1
+ a4m1.hdp.dc:50470
+
+
+ dfs.namenode.https-address.a4.nn2
+ a4m2.hdp.dc:50470
+
+
+ dfs.namenode.rpc-address.a4.nn1
+ a4m1.hdp.dc:8020
+
+
+ dfs.namenode.rpc-address.a4.nn2
+ a4m2.hdp.dc:8020
+
+
+
+ dfs.client.failover.proxy.provider.a3
+ org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
+
+
+ dfs.ha.namenodes.a3
+ nn1,nn2
+
+
+ dfs.namenode.http-address.a3.nn1
+ a3m2.hdp.dc:50070
+
+
+ dfs.namenode.http-address.a3.nn2
+ a3m3.hdp.dc:50070
+
+
+ dfs.namenode.https-address.a3.nn1
+ a3m2.hdp.dc:50470
+
+
+ dfs.namenode.https-address.a3.nn2
+ a3m3.hdp.dc:50470
+
+
+ dfs.namenode.rpc-address.a3.nn1
+ a3m2.hdp.dc:8020
+
+
+ dfs.namenode.rpc-address.a3.nn2
+ a3m3.hdp.dc:8020
+
+
+
+ dfs.client.failover.proxy.provider.b3
+ org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
+
+
+ dfs.ha.namenodes.b3
+ nn1,nn2
+
+
+ dfs.namenode.http-address.b3.nn1
+ b3m2.hdp.dc:50070
+
+
+ dfs.namenode.http-address.b3.nn2
+ b3m3.hdp.dc:50070
+
+
+ dfs.namenode.https-address.b3.nn1
+ b3m2.hdp.dc:50470
+
+
+ dfs.namenode.https-address.b3.nn2
+ b3m3.hdp.dc:50470
+
+
+ dfs.namenode.rpc-address.b3.nn1
+ b3m2.hdp.dc:8020
+
+
+ dfs.namenode.rpc-address.b3.nn2
+ b3m3.hdp.dc:8020
+
+
+
+ dfs.client.failover.proxy.provider.b1
+ org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
+
+
+ dfs.client.failover.proxy.provider.b2
+ org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
+
+
+ dfs.ha.namenodes.b1
+ nn1,nn2
+
+
+ dfs.ha.namenodes.b2
+ nn3,nn4
+
+
+ dfs.namenode.http-address.b1.nn1
+ b1m2.hdp.dc:50070
+
+
+ dfs.namenode.http-address.b1.nn2
+ b1m3.hdp.dc:50070
+
+
+ dfs.namenode.https-address.b1.nn1
+ b1m2.hdp.dc:50470
+
+
+ dfs.namenode.https-address.b1.nn2
+ b1m3.hdp.dc:50470
+
+
+ dfs.namenode.rpc-address.b1.nn1
+ b1m2.hdp.dc:8020
+
+
+ dfs.namenode.rpc-address.b1.nn2
+ b1m3.hdp.dc:8020
+
+
+ dfs.namenode.http-address.b2.nn3
+ b1m5.hdp.dc:50070
+
+
+ dfs.namenode.http-address.b2.nn4
+ b1m6.hdp.dc:50070
+
+
+ dfs.namenode.https-address.b2.nn3
+ b1m5.hdp.dc:50470
+
+
+ dfs.namenode.https-address.b2.nn4
+ b1m6.hdp.dc:50470
+
+
+ dfs.namenode.rpc-address.b2.nn3
+ b1m5.hdp.dc:8020
+
+
+ dfs.namenode.rpc-address.b2.nn4
+ b1m6.hdp.dc:8020
+
+
+
+ dfs.client.failover.proxy.provider.f1
+ org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
+
+
+ dfs.ha.namenodes.f1
+ nn1,nn2
+
+
+ dfs.namenode.http-address.f1.nn1
+ f1m2.hdp.dc:50070
+
+
+ dfs.namenode.http-address.f1.nn2
+ f1m3.hdp.dc:50070
+
+
+ dfs.namenode.https-address.f1.nn1
+ f1m2.hdp.dc:50470
+
+
+ dfs.namenode.https-address.f1.nn2
+ f1m3.hdp.dc:50470
+
+
+ dfs.namenode.rpc-address.f1.nn1
+ f1m2.hdp.dc:8020
+
+
+ dfs.namenode.rpc-address.f1.nn2
+ f1m3.hdp.dc:8020
+
+
+
+ dfs.client.failover.proxy.provider.d2
+ org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
+
+
+ dfs.ha.namenodes.d2
+ nn1,nn2
+
+
+ dfs.namenode.http-address.d2.nn1
+ d2m2.hdp.dc:50070
+
+
+ dfs.namenode.http-address.d2.nn2
+ d2m3.hdp.dc:50070
+
+
+ dfs.namenode.https-address.d2.nn1
+ d2m2.hdp.dc:50470
+
+
+ dfs.namenode.https-address.d2.nn2
+ d2m3.hdp.dc:50470
+
+
+ dfs.namenode.rpc-address.d2.nn1
+ d2m2.hdp.dc:8020
+
+
+ dfs.namenode.rpc-address.d2.nn2
+ d2m3.hdp.dc:8020
+
+
+
+ dfs.client.failover.proxy.provider.e1
+ org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
+
+
+ dfs.ha.namenodes.e1
+ nn1,nn2
+
+
+ dfs.namenode.http-address.e1.nn1
+ e1m2.hdp.dc:50070
+
+
+ dfs.namenode.http-address.e1.nn2
+ e1m3.hdp.dc:50070
+
+
+ dfs.namenode.https-address.e1.nn1
+ e1m2.hdp.dc:50470
+
+
+ dfs.namenode.https-address.e1.nn2
+ e1m3.hdp.dc:50470
+
+
+ dfs.namenode.rpc-address.e1.nn1
+ e1m2.hdp.dc:8020
+
+
+ dfs.namenode.rpc-address.e1.nn2
+ e1m3.hdp.dc:8020
+
+
+
+ dfs.client.failover.proxy.provider.b4
+ org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
+
+
+ dfs.ha.namenodes.b4
+ nn1,nn2
+
+
+ dfs.namenode.http-address.b4.nn1
+ b4m2.hdp.dc:50070
+
+
+ dfs.namenode.http-address.b4.nn2
+ b4m3.hdp.dc:50070
+
+
+ dfs.namenode.https-address.b4.nn1
+ b4m2.hdp.dc:50470
+
+
+ dfs.namenode.https-address.b4.nn2
+ b4m3.hdp.dc:50470
+
+
+ dfs.namenode.rpc-address.b4.nn1
+ b4m2.hdp.dc:8020
+
+
+ dfs.namenode.rpc-address.b4.nn2
+ b4m3.hdp.dc:8020
+
+
+
diff --git a/config/b2s119/yarn-site.xml b/config/b2s119/yarn-site.xml
new file mode 100644
index 0000000..e2ab884
--- /dev/null
+++ b/config/b2s119/yarn-site.xml
@@ -0,0 +1,1026 @@
+
+
+
+ hadoop.http.cross-origin.allowed-origins
+ regex:.*[.]hdp[.]dc(:\d*)?
+
+
+
+ hadoop.registry.client.auth
+ kerberos
+
+
+
+ hadoop.registry.dns.bind-address
+ 0.0.0.0
+
+
+
+ hadoop.registry.dns.bind-port
+ 5354
+ true
+
+
+
+ hadoop.registry.dns.domain-name
+ ECLD.COM
+
+
+
+ hadoop.registry.dns.enabled
+ true
+
+
+
+ hadoop.registry.dns.zone-mask
+ 255.255.255.0
+
+
+
+ hadoop.registry.dns.zone-subnet
+ 172.17.0.0
+
+
+
+ hadoop.registry.jaas.context
+ Client
+
+
+
+ hadoop.registry.secure
+ true
+
+
+
+ hadoop.registry.system.accounts
+ sasl:yarn,sasl:jhs,sasl:hdfs-b5,sasl:rm,sasl:hive,sasl:spark
+
+
+
+ hadoop.registry.zk.quorum
+ b5m1.hdp.dc:2181,b5m2.hdp.dc:2181,b5m3.hdp.dc:2181
+
+
+
+ manage.include.files
+ false
+
+
+
+ yarn.acl.enable
+ true
+
+
+
+ yarn.admin.acl
+ *
+
+
+
+ yarn.application.classpath
+ $HADOOP_CONF_DIR,/usr/lib/edp/hadoop-3.2.2/share/hadoop/common/*, /usr/lib/edp/hadoop-3.2.2/share/hadoop/common/lib/*, /usr/lib/edp/hadoop-3.2.2/share/hadoop/hdfs/*, /usr/lib/edp/hadoop-3.2.2/share/hadoop/hdfs/lib/*, /usr/lib/edp/hadoop-3.2.2/share/hadoop/yarn/*, /usr/lib/edp/hadoop-3.2.2/share/hadoop/yarn/lib/*
+
+
+
+ yarn.client.nodemanager-connect.max-wait-ms
+ 60000
+
+
+
+ yarn.client.nodemanager-connect.retry-interval-ms
+ 10000
+
+
+
+ yarn.http.policy
+ HTTP_ONLY
+
+
+
+ yarn.log-aggregation-enable
+ true
+
+
+
+ yarn.log-aggregation.retain-seconds
+ 2592000
+
+
+
+ yarn.log.server.url
+ http://b5s119.hdp.dc:19888/jobhistory/logs
+
+
+
+ yarn.log.server.web-service.url
+ http://b5s119.hdp.dc:8188/ws/v1/applicationhistory
+
+
+
+ yarn.node-labels.enabled
+ false
+
+
+
+ yarn.node-labels.fs-store.retry-policy-spec
+ 2000, 500
+
+
+
+ yarn.node-labels.fs-store.root-dir
+ /system/yarn/node-labels
+
+
+
+ yarn.nodemanager.address
+ 0.0.0.0:45454
+
+
+
+ yarn.nodemanager.admin-env
+ MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX
+
+
+
+ yarn.nodemanager.aux-services
+ mapreduce_shuffle,spark_shuffle
+
+
+
+ yarn.nodemanager.aux-services.mapreduce_shuffle.class
+ org.apache.hadoop.mapred.ShuffleHandler
+
+
+
+ yarn.nodemanager.aux-services.spark_shuffle.class
+ org.apache.spark.network.yarn.YarnShuffleService
+
+
+
+ yarn.nodemanager.aux-services.spark_shuffle.classpath
+ /usr/lib/edp/spark-3.1.1-bin-hadoop3.2/yarn/*
+
+
+
+ yarn.nodemanager.aux-services.timeline_collector.class
+ org.apache.hadoop.yarn.server.timelineservice.collector.PerNodeTimelineCollectorsAuxService
+
+
+
+ yarn.nodemanager.aux-services.sparkv2_shuffle.class
+ org.apache.spark.network.yarn.YarnShuffleService
+
+
+
+ yarn.nodemanager.aux-services.sparkv2_shuffle.classpath
+ /usr/lib/edp/spark-2.4.7-bin-hadoop-3.1.2/yarn/*
+
+
+
+ yarn.nodemanager.bind-host
+ 0.0.0.0
+
+
+
+ yarn.nodemanager.container-executor.class
+ org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor
+
+
+
+ yarn.nodemanager.container-metrics.unregister-delay-ms
+ 60000
+
+
+
+ yarn.nodemanager.container-monitor.interval-ms
+ 3000
+
+
+
+ yarn.nodemanager.delete.debug-delay-sec
+ 86400
+
+
+
+ yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage
+ 90
+
+
+
+ yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb
+ 1000
+
+
+
+ yarn.nodemanager.disk-health-checker.min-healthy-disks
+ 0.25
+
+
+
+ yarn.nodemanager.health-checker.interval-ms
+ 135000
+
+
+
+ yarn.nodemanager.health-checker.script.timeout-ms
+ 60000
+
+
+
+ yarn.nodemanager.keytab
+ /etc/security/keytabs/nm.service.keytab
+
+
+
+ yarn.nodemanager.linux-container-executor.cgroups.strict-resource-usage
+ false
+
+
+
+ yarn.nodemanager.linux-container-executor.group
+ hadoop
+
+
+
+ yarn.nodemanager.linux-container-executor.nonsecure-mode.limit-users
+ true
+
+
+
+ yarn.nodemanager.local-dirs
+ /data1/hadoop/yarn/local,/data2/hadoop/yarn/local,/data3/hadoop/yarn/local,/data4/hadoop/yarn/local,/data5/hadoop/yarn/local,/data6/hadoop/yarn/local,/data7/hadoop/yarn/local,/data8/hadoop/yarn/local,/data9/hadoop/yarn/local,/data10/hadoop/yarn/local,/data11/hadoop/yarn/local,/data12/hadoop/yarn/local,/data13/hadoop/yarn/local,/data14/hadoop/yarn/local,/data15/hadoop/yarn/local,/data16/hadoop/yarn/local,/data17/hadoop/yarn/local,/data18/hadoop/yarn/local,/data19/hadoop/yarn/local,/data20/hadoop/yarn/local,/data21/hadoop/yarn/local,/data22/hadoop/yarn/local,/data23/hadoop/yarn/local,/data24/hadoop/yarn/local
+
+
+
+ yarn.nodemanager.log-aggregation.compression-type
+ gz
+
+
+
+ yarn.nodemanager.log-aggregation.debug-enabled
+ false
+
+
+
+ yarn.nodemanager.log-aggregation.num-log-files-per-app
+ 30
+
+
+
+ yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds
+ 3600
+
+
+
+ yarn.nodemanager.log-dirs
+ /data1/hadoop/yarn/log,/data2/hadoop/yarn/log,/data3/hadoop/yarn/log,/data4/hadoop/yarn/log,/data5/hadoop/yarn/log,/data6/hadoop/yarn/log,/data7/hadoop/yarn/log,/data8/hadoop/yarn/log,/data9/hadoop/yarn/log,/data10/hadoop/yarn/log,/data11/hadoop/yarn/log,/data12/hadoop/yarn/log,/data13/hadoop/yarn/log,/data14/hadoop/yarn/log,/data15/hadoop/yarn/log,/data16/hadoop/yarn/log,/data17/hadoop/yarn/log,/data18/hadoop/yarn/log,/data19/hadoop/yarn/log,/data20/hadoop/yarn/log,/data21/hadoop/yarn/log,/data22/hadoop/yarn/log,/data23/hadoop/yarn/log,/data24/hadoop/yarn/log
+
+
+
+ yarn.nodemanager.log.retain-seconds
+ 604800
+
+
+
+ yarn.nodemanager.principal
+ nm/_HOST@ECLD.COM
+
+
+
+ yarn.nodemanager.recovery.dir
+ /var/log/hadoop-yarn/nodemanager/recovery-state
+
+
+
+ yarn.nodemanager.recovery.enabled
+ true
+
+
+
+ yarn.nodemanager.recovery.supervised
+ true
+
+
+
+ yarn.nodemanager.remote-app-log-dir
+ /app-logs
+
+
+
+ yarn.nodemanager.remote-app-log-dir-suffix
+ logs
+
+
+
+ yarn.nodemanager.resource-plugins
+
+
+
+
+ yarn.nodemanager.resource-plugins.gpu.allowed-gpu-devices
+
+
+
+
+ yarn.nodemanager.resource-plugins.gpu.docker-plugin
+
+
+
+
+ yarn.nodemanager.resource-plugins.gpu.docker-plugin.nvidiadocker-v1.endpoint
+
+
+
+
+ yarn.nodemanager.resource-plugins.gpu.path-to-discovery-executables
+
+
+
+
+ yarn.nodemanager.resource.cpu-vcores
+ 70
+
+
+
+ yarn.nodemanager.resource.memory-mb
+ 348160
+
+
+
+ yarn.nodemanager.resource.percentage-physical-cpu-limit
+ 80
+
+
+
+ yarn.nodemanager.resourcemanager.connect.wait.secs
+ 1800
+
+
+
+ yarn.nodemanager.runtime.linux.allowed-runtimes
+ default,docker
+
+
+
+ yarn.nodemanager.runtime.linux.docker.allowed-container-networks
+ host,none,bridge
+
+
+
+ yarn.nodemanager.runtime.linux.docker.capabilities
+
+ CHOWN,DAC_OVERRIDE,FSETID,FOWNER,MKNOD,NET_RAW,SETGID,SETUID,SETFCAP,
+ SETPCAP,NET_BIND_SERVICE,SYS_CHROOT,KILL,AUDIT_WRITE
+
+
+
+ yarn.nodemanager.runtime.linux.docker.default-container-network
+ host
+
+
+
+ yarn.nodemanager.runtime.linux.docker.privileged-containers.acl
+
+
+
+
+ yarn.nodemanager.runtime.linux.docker.privileged-containers.allowed
+ false
+
+
+
+ yarn.nodemanager.vmem-check-enabled
+ false
+
+
+
+ yarn.nodemanager.vmem-pmem-ratio
+ 2.1
+
+
+
+ yarn.nodemanager.webapp.cross-origin.enabled
+ true
+
+
+
+ yarn.nodemanager.webapp.spnego-keytab-file
+ /etc/security/keytabs/spnego.service.keytab
+
+
+
+ yarn.nodemanager.webapp.spnego-principal
+ HTTP/_HOST@ECLD.COM
+
+
+
+ yarn.resourcemanager.address
+ b5s120.hdp.dc:8050
+
+
+
+ yarn.resourcemanager.admin.address
+ b5s120.hdp.dc:8141
+
+
+
+ yarn.resourcemanager.am.max-attempts
+ 2
+
+
+
+ yarn.resourcemanager.bind-host
+ 0.0.0.0
+
+
+
+ yarn.resourcemanager.cluster-id
+ yarn-cluster
+
+
+
+ yarn.resourcemanager.connect.max-wait.ms
+ 900000
+
+
+
+ yarn.resourcemanager.connect.retry-interval.ms
+ 30000
+
+
+
+ yarn.resourcemanager.display.per-user-apps
+ true
+
+
+
+ yarn.resourcemanager.fs.state-store.retry-policy-spec
+ 2000, 500
+
+
+
+ yarn.resourcemanager.fs.state-store.uri
+
+
+
+
+ yarn.resourcemanager.ha.automatic-failover.zk-base-path
+ /yarn-leader-election-b5hudi
+
+
+
+ yarn.resourcemanager.ha.enabled
+ true
+
+
+
+ yarn.resourcemanager.ha.rm-ids
+ rm1,rm2
+
+
+
+ yarn.resourcemanager.hostname
+ b5s120.hdp.dc
+
+
+
+ yarn.resourcemanager.hostname.rm1
+ b5s120.hdp.dc
+
+
+
+ yarn.resourcemanager.hostname.rm2
+ b5s121.hdp.dc
+
+
+
+ yarn.resourcemanager.keytab
+ /etc/security/keytabs/rm.service.keytab
+
+
+
+ yarn.resourcemanager.monitor.capacity.preemption.intra-queue-preemption.enabled
+ true
+
+
+
+ yarn.resourcemanager.monitor.capacity.preemption.monitoring_interval
+ 15000
+
+
+
+ yarn.resourcemanager.monitor.capacity.preemption.natural_termination_factor
+ 1
+
+
+
+ yarn.resourcemanager.monitor.capacity.preemption.total_preemption_per_round
+ 0.1
+
+
+
+ yarn.resourcemanager.nodes.exclude-path
+ /etc/hadoop/conf/yarn.exclude
+
+
+
+ yarn.resourcemanager.placement-constraints.handler
+ scheduler
+
+
+
+ yarn.resourcemanager.principal
+ rm/_HOST@ECLD.COM
+
+
+
+ yarn.resourcemanager.proxy-user-privileges.enabled
+ true
+
+
+
+ yarn.resourcemanager.proxyuser.*.groups
+
+
+
+
+ yarn.resourcemanager.proxyuser.*.hosts
+
+
+
+
+ yarn.resourcemanager.proxyuser.*.users
+
+
+
+
+ yarn.resourcemanager.recovery.enabled
+ true
+
+
+
+ yarn.resourcemanager.resource-tracker.address
+ b5s120.hdp.dc:8025
+
+
+
+ yarn.resourcemanager.resource-tracker.address.rm1
+ b5s120.hdp.dc:8025
+
+
+
+ yarn.resourcemanager.resource-tracker.address.rm2
+ b5s121.hdp.dc:8025
+
+
+
+ yarn.resourcemanager.scheduler.address
+ b5s120.hdp.dc:8030
+
+
+
+ yarn.resourcemanager.scheduler.class
+ org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler
+
+
+
+ yarn.resourcemanager.scheduler.monitor.enable
+ true
+
+
+
+ yarn.resourcemanager.max-completed-applications
+ 10000
+
+
+
+ yarn.resourcemanager.state-store.max-completed-applications
+ ${yarn.resourcemanager.max-completed-applications}
+
+
+
+ yarn.resourcemanager.store.class
+ org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore
+
+
+
+ yarn.resourcemanager.system-metrics-publisher.dispatcher.pool-size
+ 10
+
+
+
+ yarn.resourcemanager.system-metrics-publisher.enabled
+ true
+
+
+
+ yarn.resourcemanager.webapp.address
+ b5s120.hdp.dc:8088
+
+
+
+ yarn.resourcemanager.webapp.address.rm1
+ b5s120.hdp.dc:8088
+
+
+
+ yarn.resourcemanager.webapp.address.rm2
+ b5s121.hdp.dc:8088
+
+
+
+ yarn.resourcemanager.webapp.cross-origin.enabled
+ true
+
+
+
+ yarn.resourcemanager.webapp.delegation-token-auth-filter.enabled
+ false
+
+
+
+ yarn.resourcemanager.webapp.https.address
+ b5s120.hdp.dc:8090
+
+
+
+ yarn.resourcemanager.webapp.https.address.rm1
+ b5s120.hdp.dc:8090
+
+
+
+ yarn.resourcemanager.webapp.https.address.rm2
+ b5s121.hdp.dc:8090
+
+
+
+ yarn.resourcemanager.webapp.spnego-keytab-file
+ /etc/security/keytabs/spnego.service.keytab
+
+
+
+ yarn.resourcemanager.webapp.spnego-principal
+ HTTP/_HOST@ECLD.COM
+
+
+
+ yarn.resourcemanager.work-preserving-recovery.enabled
+ true
+
+
+
+ yarn.resourcemanager.work-preserving-recovery.scheduling-wait-ms
+ 10000
+
+
+
+ yarn.resourcemanager.zk-acl
+ sasl:rm:rwcda
+
+
+
+ yarn.resourcemanager.zk-address
+ b5m1.hdp.dc:2181,b5m2.hdp.dc:2181,b5m3.hdp.dc:2181
+
+
+
+ yarn.resourcemanager.zk-num-retries
+ 1000
+
+
+
+ yarn.resourcemanager.zk-retry-interval-ms
+ 1000
+
+
+
+ yarn.resourcemanager.zk-state-store.parent-path
+ /rmstore-b5hudi
+
+
+
+ yarn.resourcemanager.zk-timeout-ms
+ 10000
+
+
+
+ yarn.rm.system-metricspublisher.emit-container-events
+ true
+
+
+
+ yarn.scheduler.capacity.ordering-policy.priority-utilization.underutilized-preemption.enabled
+ true
+
+
+
+ yarn.scheduler.maximum-allocation-mb
+ 81920
+
+
+
+ yarn.scheduler.maximum-allocation-vcores
+ 70
+
+
+
+ yarn.scheduler.minimum-allocation-mb
+ 1024
+
+
+
+ yarn.scheduler.minimum-allocation-vcores
+ 1
+
+
+
+ yarn.service.system-service.dir
+ /services
+
+
+
+ yarn.system-metricspublisher.enabled
+ true
+
+
+
+ yarn.timeline-service.address
+ b5s119.hdp.dc:10200
+
+
+
+ yarn.timeline-service.bind-host
+ 0.0.0.0
+
+
+
+ yarn.timeline-service.client.max-retries
+ 30
+
+
+
+ yarn.timeline-service.client.retry-interval-ms
+ 1000
+
+
+
+ yarn.timeline-service.enabled
+ false
+
+
+
+ yarn.timeline-service.entity-group-fs-store.active-dir
+ /ats/active/
+
+
+
+ yarn.timeline-service.entity-group-fs-store.app-cache-size
+ 10
+
+
+
+ yarn.timeline-service.entity-group-fs-store.cleaner-interval-seconds
+ 3600
+
+
+
+ yarn.timeline-service.entity-group-fs-store.done-dir
+ /ats/done/
+
+
+
+ yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes
+ org.apache.hadoop.yarn.applications.distributedshell.DistributedShellTimelinePlugin
+
+
+
+ yarn.timeline-service.entity-group-fs-store.group-id-plugin-classpath
+
+
+
+
+ yarn.timeline-service.entity-group-fs-store.retain-seconds
+ 604800
+
+
+
+ yarn.timeline-service.entity-group-fs-store.scan-interval-seconds
+ 60
+
+
+
+ yarn.timeline-service.entity-group-fs-store.summary-store
+ org.apache.hadoop.yarn.server.timeline.RollingLevelDBTimelineStore
+
+
+
+ yarn.timeline-service.generic-application-history.save-non-am-container-meta-info
+ false
+
+
+
+ yarn.timeline-service.generic-application-history.store-class
+ org.apache.hadoop.yarn.server.applicationhistoryservice.NullApplicationHistoryStore
+
+
+
+ yarn.timeline-service.hbase-schema.prefix
+ prod.
+
+
+
+ yarn.timeline-service.hbase.configuration.file
+ file:///etc/hadoop/conf/embedded-yarn-ats-hbase/hbase-site.xml
+
+
+
+ yarn.timeline-service.hbase.coprocessor.jar.hdfs.location
+ file:///usr/lib/edp/hadoop-3.2.2/lib/hadoop-yarn-server-timelineservice-3.2.2.jar
+
+
+
+ yarn.timeline-service.http-authentication.cookie.domain
+
+
+
+
+ yarn.timeline-service.http-authentication.cookie.path
+
+
+
+
+ yarn.timeline-service.http-authentication.kerberos.keytab
+ /etc/security/keytabs/spnego.service.keytab
+
+
+
+ yarn.timeline-service.http-authentication.kerberos.name.rules
+
+
+
+
+ yarn.timeline-service.http-authentication.kerberos.principal
+ HTTP/_HOST@ECLD.COM
+
+
+
+ yarn.timeline-service.http-authentication.proxyuser.*.groups
+
+
+
+
+ yarn.timeline-service.http-authentication.proxyuser.*.hosts
+
+
+
+
+ yarn.timeline-service.http-authentication.proxyuser.*.users
+
+
+
+
+ yarn.timeline-service.http-authentication.signature.secret
+
+
+
+
+ yarn.timeline-service.http-authentication.signature.secret.file
+
+
+
+
+ yarn.timeline-service.http-authentication.signer.secret.provider
+
+
+
+
+ yarn.timeline-service.http-authentication.signer.secret.provider.object
+
+
+
+
+ yarn.timeline-service.http-authentication.simple.anonymous.allowed
+ true
+
+
+
+ yarn.timeline-service.http-authentication.token.validity
+
+
+
+
+ yarn.timeline-service.http-authentication.type
+ simple
+
+
+
+ yarn.timeline-service.http-cross-origin.enabled
+ true
+
+
+
+ yarn.timeline-service.keytab
+ /etc/security/keytabs/yarn.service.keytab
+
+
+
+ yarn.timeline-service.leveldb-state-store.path
+ /data1/hadoop/yarn/timeline
+
+
+
+ yarn.timeline-service.leveldb-timeline-store.path
+ /data1/hadoop/yarn/timeline
+
+
+
+ yarn.timeline-service.leveldb-timeline-store.read-cache-size
+ 104857600
+
+
+
+ yarn.timeline-service.leveldb-timeline-store.start-time-read-cache-size
+ 10000
+
+
+
+ yarn.timeline-service.leveldb-timeline-store.start-time-write-cache-size
+ 10000
+
+
+
+ yarn.timeline-service.leveldb-timeline-store.ttl-interval-ms
+ 300000
+
+
+
+ yarn.timeline-service.principal
+ yarn/_HOST@ECLD.COM
+
+
+
+ yarn.timeline-service.reader.webapp.address
+ b5s119.hdp.dc:8198
+
+
+
+ yarn.timeline-service.reader.webapp.https.address
+ b5s119.hdp.dc:8199
+
+
+
+ yarn.timeline-service.recovery.enabled
+ true
+
+
+
+ yarn.timeline-service.state-store-class
+ org.apache.hadoop.yarn.server.timeline.recovery.LeveldbTimelineStateStore
+
+
+
+ yarn.timeline-service.store-class
+ org.apache.hadoop.yarn.server.timeline.EntityGroupFSTimelineStore
+
+
+
+ yarn.timeline-service.ttl-enable
+ true
+
+
+
+ yarn.timeline-service.ttl-ms
+ 2678400000
+
+
+
+ yarn.timeline-service.version
+ 2.0f
+
+
+
+ yarn.timeline-service.versions
+ 1.5f,2.0f
+
+
+
+ yarn.timeline-service.webapp.address
+ b5s119.hdp.dc:8188
+
+
+
+ yarn.timeline-service.webapp.https.address
+ b5s119.hdp.dc:8190
+
+
+
+ yarn.webapp.api-service.enable
+ true
+
+
+
+ yarn.webapp.ui2.enable
+ true
+
+
+
diff --git a/config/b5s119/core-site.xml b/config/b5s119/core-site.xml
new file mode 100644
index 0000000..9d72427
--- /dev/null
+++ b/config/b5s119/core-site.xml
@@ -0,0 +1,262 @@
+
+
+
+
+
+ fs.azure.user.agent.prefix
+ User-Agent: APN/1.0 Hortonworks/1.0 HDP/
+
+
+
+ fs.defaultFS
+ hdfs://b1
+ true
+
+
+
+ fs.s3a.fast.upload
+ true
+
+
+
+ fs.s3a.fast.upload.buffer
+ disk
+
+
+
+ fs.s3a.multipart.size
+ 67108864
+
+
+
+ fs.trash.interval
+ 360
+
+
+
+ ha.failover-controller.active-standby-elector.zk.op.retries
+ 120
+
+
+
+ ha.zookeeper.acl
+ sasl:nn:rwcda
+
+
+
+ ha.zookeeper.quorum
+ b1m2.hdp.dc:2181,b1m3.hdp.dc:2181,b1m4.hdp.dc:2181,b1m5.hdp.dc:2181,b1m6.hdp.dc:2181
+
+
+
+ hadoop.http.authentication.kerberos.keytab
+ /etc/security/keytabs/spnego.service.keytab
+
+
+
+ hadoop.http.authentication.kerberos.principal
+ HTTP/_HOST@ECLD.COM
+
+
+
+ hadoop.http.authentication.signature.secret.file
+ /etc/security/http_secret
+
+
+
+ hadoop.http.authentication.simple.anonymous.allowed
+ true
+
+
+
+ hadoop.http.authentication.type
+ simple
+
+
+
+ hadoop.http.cross-origin.allowed-headers
+ X-Requested-With,Content-Type,Accept,Origin,WWW-Authenticate,Accept-Encoding,Transfer-Encoding
+
+
+
+ hadoop.http.cross-origin.allowed-methods
+ GET,PUT,POST,OPTIONS,HEAD,DELETE
+
+
+
+ hadoop.http.cross-origin.allowed-origins
+ *
+
+
+
+ hadoop.http.cross-origin.max-age
+ 1800
+
+
+
+ hadoop.http.filter.initializers
+ org.apache.hadoop.security.AuthenticationFilterInitializer,org.apache.hadoop.security.HttpCrossOriginFilterInitializer
+
+
+
+ hadoop.proxyuser.hdfs.groups
+ *
+
+
+
+ hadoop.proxyuser.hdfs.hosts
+ *
+
+
+
+ hadoop.proxyuser.hive.groups
+ *
+
+
+
+ hadoop.proxyuser.hive.hosts
+ *
+
+
+
+ hadoop.proxyuser.HTTP.groups
+ *
+
+
+
+ hadoop.proxyuser.HTTP.hosts
+ *
+
+
+
+ hadoop.proxyuser.iap.groups
+ *
+
+
+
+ hadoop.proxyuser.iap.hosts
+ *
+
+
+
+ hadoop.proxyuser.livy.groups
+ *
+
+
+
+ hadoop.proxyuser.livy.hosts
+ *
+
+
+
+ hadoop.proxyuser.yarn.groups
+ *
+
+
+
+ hadoop.proxyuser.yarn.hosts
+ *
+
+
+
+ hadoop.rpc.protection
+ authentication,privacy
+
+
+
+ hadoop.security.auth_to_local
+ RULE:[1:$1@$0](hbase-b1@ECLD.COM)s/.*/hbase/
+RULE:[1:$1@$0](hdfs-b1@ECLD.COM)s/.*/hdfs/
+RULE:[1:$1@$0](spark-b1@ECLD.COM)s/.*/spark/
+RULE:[1:$1@$0](yarn-ats-b1@ECLD.COM)s/.*/yarn-ats/
+RULE:[1:$1@$0](.*@ECLD.COM)s/@.*//
+RULE:[2:$1@$0](dn@ECLD.COM)s/.*/hdfs/
+RULE:[2:$1@$0](hbase@ECLD.COM)s/.*/hbase/
+RULE:[2:$1@$0](hive@ECLD.COM)s/.*/hive/
+RULE:[2:$1@$0](jhs@ECLD.COM)s/.*/mapred/
+RULE:[2:$1@$0](jn@ECLD.COM)s/.*/hdfs/
+RULE:[2:$1@$0](livy@ECLD.COM)s/.*/livy/
+RULE:[2:$1@$0](nm@ECLD.COM)s/.*/yarn/
+RULE:[2:$1@$0](nn@ECLD.COM)s/.*/hdfs/
+RULE:[2:$1@$0](rangeradmin@ECLD.COM)s/.*/ranger/
+RULE:[2:$1@$0](rangerlookup@ECLD.COM)s/.*/ranger/
+RULE:[2:$1@$0](rangertagsync@ECLD.COM)s/.*/rangertagsync/
+RULE:[2:$1@$0](rangerusersync@ECLD.COM)s/.*/rangerusersync/
+RULE:[2:$1@$0](rm@ECLD.COM)s/.*/yarn/
+RULE:[2:$1@$0](spark@ECLD.COM)s/.*/spark/
+RULE:[2:$1@$0](yarn@ECLD.COM)s/.*/yarn/
+RULE:[2:$1@$0](yarn-ats-hbase@ECLD.COM)s/.*/yarn-ats/
+DEFAULT
+
+
+
+ hadoop.security.authentication
+ kerberos
+
+
+
+ hadoop.security.authorization
+ true
+
+
+
+ hadoop.security.instrumentation.requires.admin
+ false
+
+
+
+ io.compression.codec.lzo.class
+ com.hadoop.compression.lzo.LzoCodec
+
+
+
+ io.compression.codecs
+ org.apache.hadoop.io.compress.GzipCodec,com.hadoop.compression.lzo.LzoCodec,com.hadoop.compression.lzo.LzopCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec
+
+
+
+ io.file.buffer.size
+ 131072
+
+
+
+ io.serializations
+ org.apache.hadoop.io.serializer.WritableSerialization
+
+
+
+ ipc.client.connect.max.retries
+ 50
+
+
+
+ ipc.client.connection.maxidletime
+ 30000
+
+
+
+ ipc.client.idlethreshold
+ 8000
+
+
+
+ ipc.server.tcpnodelay
+ true
+
+
+
+ mapreduce.jobtracker.webinterface.trusted
+ false
+
+
+
+ ipc.client.fallback-to-simple-auth-allowed
+ true
+
+
+
+ fs.hdfs.impl.disable.cache
+ true
+
+
+
diff --git a/config/b5s119/hdfs-site.xml b/config/b5s119/hdfs-site.xml
new file mode 100644
index 0000000..9b4eda4
--- /dev/null
+++ b/config/b5s119/hdfs-site.xml
@@ -0,0 +1,698 @@
+
+
+
+ dfs.block.access.token.enable
+ true
+
+
+
+ dfs.blockreport.initialDelay
+ 120
+
+
+
+ dfs.blocksize
+ 134217728
+
+
+
+ dfs.client.failover.proxy.provider.b1
+ org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
+
+
+
+ dfs.client.failover.proxy.provider.b2
+ org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
+
+
+
+ dfs.client.read.shortcircuit
+ true
+
+
+
+ dfs.client.read.shortcircuit.streams.cache.size
+ 4096
+
+
+
+ dfs.client.retry.policy.enabled
+ false
+
+
+
+ dfs.cluster.administrators
+ hdfs
+
+
+
+ dfs.content-summary.limit
+ 5000
+
+
+
+ dfs.data.transfer.protection
+ authentication,privacy
+
+
+
+ dfs.datanode.address
+ 0.0.0.0:1019
+
+
+
+ dfs.datanode.balance.bandwidthPerSec
+ 6250000
+
+
+
+ dfs.datanode.data.dir
+ [DISK]file:///data1/hadoop/hdfs/data,[DISK]file:///data2/hadoop/hdfs/data,[DISK]file:///data3/hadoop/hdfs/data,[DISK]file:///data4/hadoop/hdfs/data,[DISK]file:///data5/hadoop/hdfs/data,[DISK]file:///data6/hadoop/hdfs/data,[DISK]file:///data7/hadoop/hdfs/data,[DISK]file:///data8/hadoop/hdfs/data,[DISK]file:///data9/hadoop/hdfs/data,[DISK]file:///data10/hadoop/hdfs/data,[DISK]file:///data11/hadoop/hdfs/data,[DISK]file:///data12/hadoop/hdfs/data
+ true
+
+
+
+ dfs.datanode.data.dir.perm
+ 750
+
+
+
+ dfs.datanode.du.reserved
+ 26405499904
+
+
+
+ dfs.datanode.failed.volumes.tolerated
+ 2
+ true
+
+
+
+ dfs.datanode.http.address
+ 0.0.0.0:1022
+
+
+
+ dfs.datanode.https.address
+ 0.0.0.0:50475
+
+
+
+ dfs.datanode.ipc.address
+ 0.0.0.0:8010
+
+
+
+ dfs.datanode.kerberos.principal
+ dn/_HOST@ECLD.COM
+
+
+
+ dfs.datanode.keytab.file
+ /etc/security/keytabs/dn.service.keytab
+
+
+
+ dfs.datanode.max.transfer.threads
+ 16384
+
+
+
+ dfs.domain.socket.path
+ /var/lib/hadoop-hdfs/dn_socket
+
+
+
+ dfs.encrypt.data.transfer.cipher.suites
+ AES/CTR/NoPadding
+
+
+
+ dfs.ha.automatic-failover.enabled
+ true
+
+
+
+ dfs.ha.fencing.methods
+ shell(/bin/true)
+
+
+
+ dfs.ha.namenodes.b1
+ nn1,nn2
+
+
+
+ dfs.ha.namenodes.b2
+ nn3,nn4
+
+
+
+ dfs.heartbeat.interval
+ 3
+
+
+
+ dfs.hosts.exclude
+ /etc/hadoop/conf/dfs.exclude
+
+
+
+ dfs.http.policy
+ HTTP_ONLY
+
+
+
+ dfs.https.port
+ 50470
+
+
+
+ dfs.internal.nameservices
+ b1,b2
+
+
+
+ dfs.journalnode.edits.dir.b1
+ /data2/hadoop/hdfs/journal
+
+
+
+ dfs.journalnode.edits.dir.b2
+ /data2/hadoop/hdfs/journal
+
+
+
+ dfs.journalnode.http-address
+ 0.0.0.0:8480
+
+
+
+ dfs.journalnode.https-address
+ 0.0.0.0:8481
+
+
+
+ dfs.journalnode.kerberos.internal.spnego.principal
+ HTTP/_HOST@ECLD.COM
+
+
+
+ dfs.journalnode.kerberos.principal
+ jn/_HOST@ECLD.COM
+
+
+
+ dfs.journalnode.keytab.file
+ /etc/security/keytabs/jn.service.keytab
+
+
+
+ dfs.namenode.accesstime.precision
+ 0
+
+
+
+ dfs.namenode.acls.enabled
+ true
+
+
+
+ dfs.namenode.audit.log.async
+ true
+
+
+
+ dfs.namenode.avoid.read.stale.datanode
+ true
+
+
+
+ dfs.namenode.avoid.write.stale.datanode
+ true
+
+
+
+ dfs.namenode.checkpoint.dir
+ /data/hadoop/hdfs/namesecondary
+
+
+
+ dfs.namenode.checkpoint.edits.dir
+ ${dfs.namenode.checkpoint.dir}
+
+
+
+ dfs.namenode.checkpoint.period
+ 21600
+
+
+
+ dfs.namenode.checkpoint.txns
+ 1000000
+
+
+
+ dfs.namenode.fslock.fair
+ false
+
+
+
+ dfs.namenode.handler.count
+ 200
+
+
+
+ dfs.namenode.http-address.b1.nn1
+ b1m2.hdp.dc:50070
+
+
+
+ dfs.namenode.http-address.b1.nn2
+ b1m3.hdp.dc:50070
+
+
+
+ dfs.namenode.http-address.b2.nn3
+ b1m5.hdp.dc:50070
+
+
+
+ dfs.namenode.http-address.b2.nn4
+ b1m6.hdp.dc:50070
+
+
+
+ dfs.namenode.https-address.b1.nn1
+ b1m2.hdp.dc:50470
+
+
+
+ dfs.namenode.https-address.b1.nn2
+ b1m3.hdp.dc:50470
+
+
+
+ dfs.namenode.https-address.b2.nn3
+ b1m5.hdp.dc:50470
+
+
+
+ dfs.namenode.https-address.b2.nn4
+ b1m6.hdp.dc:50470
+
+
+
+ dfs.namenode.kerberos.internal.spnego.principal
+ HTTP/_HOST@ECLD.COM
+
+
+
+ dfs.namenode.kerberos.principal
+ nn/_HOST@ECLD.COM
+
+
+
+ dfs.namenode.keytab.file
+ /etc/security/keytabs/nn.service.keytab
+
+
+
+ dfs.namenode.max.extra.edits.segments.retained
+ 180
+
+
+
+ dfs.namenode.name.dir
+ /data1/hadoop/hdfs/namenode,/data2/hadoop/hdfs/namenode
+ true
+
+
+
+ dfs.namenode.name.dir.restore
+ true
+
+
+
+ dfs.namenode.num.extra.edits.retained
+ 18000
+
+
+
+ dfs.namenode.rpc-address.b1.nn1
+ b1m2.hdp.dc:8020
+
+
+
+ dfs.namenode.rpc-address.b1.nn2
+ b1m3.hdp.dc:8020
+
+
+
+ dfs.namenode.rpc-address.b2.nn3
+ b1m5.hdp.dc:8020
+
+
+
+ dfs.namenode.rpc-address.b2.nn4
+ b1m6.hdp.dc:8020
+
+
+
+ dfs.namenode.safemode.threshold-pct
+ 0.99
+
+
+
+ dfs.namenode.shared.edits.dir.b1
+ qjournal://b1m2.hdp.dc:8485;b1m3.hdp.dc:8485;b1m5.hdp.dc:8485/b1
+
+
+
+ dfs.namenode.shared.edits.dir.b2
+ qjournal://b1m2.hdp.dc:8485;b1m3.hdp.dc:8485;b1m5.hdp.dc:8485/b2
+
+
+
+ dfs.namenode.stale.datanode.interval
+ 30000
+
+
+
+ dfs.namenode.startup.delay.block.deletion.sec
+ 3600
+
+
+
+ dfs.namenode.write.stale.datanode.ratio
+ 1.0f
+
+
+
+ dfs.nameservices
+ b1,b2,b3,b4,a3,a4,f1,d2,e1
+
+
+
+ dfs.permissions.ContentSummary.subAccess
+ true
+
+
+
+ dfs.permissions.enabled
+ true
+
+
+
+ dfs.permissions.superusergroup
+ hdfs
+
+
+
+ dfs.replication
+ 3
+
+
+
+ dfs.replication.max
+ 50
+
+
+
+ dfs.web.authentication.kerberos.keytab
+ /etc/security/keytabs/spnego.service.keytab
+
+
+
+ dfs.web.authentication.kerberos.principal
+ HTTP/_HOST@ECLD.COM
+
+
+
+ dfs.webhdfs.enabled
+ true
+ true
+
+
+
+ fs.permissions.umask-mode
+ 022
+
+
+
+ hadoop.caller.context.enabled
+ true
+
+
+
+ manage.include.files
+ false
+
+
+
+ nfs.exports.allowed.hosts
+ * rw
+
+
+
+ nfs.file.dump.dir
+ /tmp/.hdfs-nfs
+
+
+
+ dfs.client.datanode-restart.timeout
+ 30
+
+
+
+ dfs.client.failover.proxy.provider.a4
+ org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
+
+
+ dfs.ha.namenodes.a4
+ nn1,nn2
+
+
+ dfs.namenode.http-address.a4.nn1
+ a4m1.hdp.dc:50070
+
+
+ dfs.namenode.http-address.a4.nn2
+ a4m2.hdp.dc:50070
+
+
+ dfs.namenode.https-address.a4.nn1
+ a4m1.hdp.dc:50470
+
+
+ dfs.namenode.https-address.a4.nn2
+ a4m2.hdp.dc:50470
+
+
+ dfs.namenode.rpc-address.a4.nn1
+ a4m1.hdp.dc:8020
+
+
+ dfs.namenode.rpc-address.a4.nn2
+ a4m2.hdp.dc:8020
+
+
+
+ dfs.client.failover.proxy.provider.a3
+ org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
+
+
+ dfs.ha.namenodes.a3
+ nn1,nn2
+
+
+ dfs.namenode.http-address.a3.nn1
+ a3m2.hdp.dc:50070
+
+
+ dfs.namenode.http-address.a3.nn2
+ a3m3.hdp.dc:50070
+
+
+ dfs.namenode.https-address.a3.nn1
+ a3m2.hdp.dc:50470
+
+
+ dfs.namenode.https-address.a3.nn2
+ a3m3.hdp.dc:50470
+
+
+ dfs.namenode.rpc-address.a3.nn1
+ a3m2.hdp.dc:8020
+
+
+ dfs.namenode.rpc-address.a3.nn2
+ a3m3.hdp.dc:8020
+
+
+
+ dfs.client.failover.proxy.provider.b3
+ org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
+
+
+ dfs.ha.namenodes.b3
+ nn1,nn2
+
+
+ dfs.namenode.http-address.b3.nn1
+ b3m2.hdp.dc:50070
+
+
+ dfs.namenode.http-address.b3.nn2
+ b3m3.hdp.dc:50070
+
+
+ dfs.namenode.https-address.b3.nn1
+ b3m2.hdp.dc:50470
+
+
+ dfs.namenode.https-address.b3.nn2
+ b3m3.hdp.dc:50470
+
+
+ dfs.namenode.rpc-address.b3.nn1
+ b3m2.hdp.dc:8020
+
+
+ dfs.namenode.rpc-address.b3.nn2
+ b3m3.hdp.dc:8020
+
+
+
+ dfs.client.failover.proxy.provider.b4
+ org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
+
+
+ dfs.ha.namenodes.b4
+ nn1,nn2
+
+
+ dfs.namenode.http-address.b4.nn1
+ b4m2.hdp.dc:50070
+
+
+ dfs.namenode.http-address.b4.nn2
+ b4m3.hdp.dc:50070
+
+
+ dfs.namenode.https-address.b4.nn1
+ b4m2.hdp.dc:50470
+
+
+ dfs.namenode.https-address.b4.nn2
+ b4m3.hdp.dc:50470
+
+
+ dfs.namenode.rpc-address.b4.nn1
+ b4m2.hdp.dc:8020
+
+
+ dfs.namenode.rpc-address.b4.nn2
+ b4m3.hdp.dc:8020
+
+
+
+ dfs.client.failover.proxy.provider.f1
+ org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
+
+
+ dfs.ha.namenodes.f1
+ nn1,nn2
+
+
+ dfs.namenode.http-address.f1.nn1
+ f1m2.hdp.dc:50070
+
+
+ dfs.namenode.http-address.f1.nn2
+ f1m3.hdp.dc:50070
+
+
+ dfs.namenode.https-address.f1.nn1
+ f1m2.hdp.dc:50470
+
+
+ dfs.namenode.https-address.f1.nn2
+ f1m3.hdp.dc:50470
+
+
+ dfs.namenode.rpc-address.f1.nn1
+ f1m2.hdp.dc:8020
+
+
+ dfs.namenode.rpc-address.f1.nn2
+ f1m3.hdp.dc:8020
+
+
+
+ dfs.client.failover.proxy.provider.d2
+ org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
+
+
+ dfs.ha.namenodes.d2
+ nn1,nn2
+
+
+ dfs.namenode.http-address.d2.nn1
+ d2m2.hdp.dc:50070
+
+
+ dfs.namenode.http-address.d2.nn2
+ d2m3.hdp.dc:50070
+
+
+ dfs.namenode.https-address.d2.nn1
+ d2m2.hdp.dc:50470
+
+
+ dfs.namenode.https-address.d2.nn2
+ d2m3.hdp.dc:50470
+
+
+ dfs.namenode.rpc-address.d2.nn1
+ d2m2.hdp.dc:8020
+
+
+ dfs.namenode.rpc-address.d2.nn2
+ d2m3.hdp.dc:8020
+
+
+
+ dfs.client.failover.proxy.provider.e1
+ org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
+
+
+ dfs.ha.namenodes.e1
+ nn1,nn2
+
+
+ dfs.namenode.http-address.e1.nn1
+ e1m2.hdp.dc:50070
+
+
+ dfs.namenode.http-address.e1.nn2
+ e1m3.hdp.dc:50070
+
+
+ dfs.namenode.https-address.e1.nn1
+ e1m2.hdp.dc:50470
+
+
+ dfs.namenode.https-address.e1.nn2
+ e1m3.hdp.dc:50470
+
+
+ dfs.namenode.rpc-address.e1.nn1
+ e1m2.hdp.dc:8020
+
+
+ dfs.namenode.rpc-address.e1.nn2
+ e1m3.hdp.dc:8020
+
+
+
diff --git a/config/b5s119/yarn-site.xml b/config/b5s119/yarn-site.xml
new file mode 100644
index 0000000..e2ab884
--- /dev/null
+++ b/config/b5s119/yarn-site.xml
@@ -0,0 +1,1026 @@
+
+
+
+ hadoop.http.cross-origin.allowed-origins
+ regex:.*[.]hdp[.]dc(:\d*)?
+
+
+  <property>
+    <name>hadoop.registry.client.auth</name>
+    <value>kerberos</value>
+  </property>
+
+
+ hadoop.registry.dns.bind-address
+ 0.0.0.0
+
+
+
+ hadoop.registry.dns.bind-port
+ 5354
+ true
+
+
+
+ hadoop.registry.dns.domain-name
+ ECLD.COM
+
+
+
+ hadoop.registry.dns.enabled
+ true
+
+
+
+ hadoop.registry.dns.zone-mask
+ 255.255.255.0
+
+
+
+ hadoop.registry.dns.zone-subnet
+ 172.17.0.0
+
+
+
+ hadoop.registry.jaas.context
+ Client
+
+
+
+ hadoop.registry.secure
+ true
+
+
+
+ hadoop.registry.system.accounts
+ sasl:yarn,sasl:jhs,sasl:hdfs-b5,sasl:rm,sasl:hive,sasl:spark
+
+
+
+ hadoop.registry.zk.quorum
+ b5m1.hdp.dc:2181,b5m2.hdp.dc:2181,b5m3.hdp.dc:2181
+
+
+
+ manage.include.files
+ false
+
+
+
+ yarn.acl.enable
+ true
+
+
+
+ yarn.admin.acl
+ *
+
+
+
+ yarn.application.classpath
+ $HADOOP_CONF_DIR,/usr/lib/edp/hadoop-3.2.2/share/hadoop/common/*, /usr/lib/edp/hadoop-3.2.2/share/hadoop/common/lib/*, /usr/lib/edp/hadoop-3.2.2/share/hadoop/hdfs/*, /usr/lib/edp/hadoop-3.2.2/share/hadoop/hdfs/lib/*, /usr/lib/edp/hadoop-3.2.2/share/hadoop/yarn/*, /usr/lib/edp/hadoop-3.2.2/share/hadoop/yarn/lib/*
+
+
+
+ yarn.client.nodemanager-connect.max-wait-ms
+ 60000
+
+
+
+ yarn.client.nodemanager-connect.retry-interval-ms
+ 10000
+
+
+
+ yarn.http.policy
+ HTTP_ONLY
+
+
+
+ yarn.log-aggregation-enable
+ true
+
+
+
+ yarn.log-aggregation.retain-seconds
+ 2592000
+
+
+
+ yarn.log.server.url
+ http://b5s119.hdp.dc:19888/jobhistory/logs
+
+
+
+ yarn.log.server.web-service.url
+ http://b5s119.hdp.dc:8188/ws/v1/applicationhistory
+
+
+
+ yarn.node-labels.enabled
+ false
+
+
+
+ yarn.node-labels.fs-store.retry-policy-spec
+ 2000, 500
+
+
+
+ yarn.node-labels.fs-store.root-dir
+ /system/yarn/node-labels
+
+
+
+ yarn.nodemanager.address
+ 0.0.0.0:45454
+
+
+
+ yarn.nodemanager.admin-env
+ MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX
+
+
+
+ yarn.nodemanager.aux-services
+ mapreduce_shuffle,spark_shuffle
+
+
+
+ yarn.nodemanager.aux-services.mapreduce_shuffle.class
+ org.apache.hadoop.mapred.ShuffleHandler
+
+
+
+ yarn.nodemanager.aux-services.spark_shuffle.class
+ org.apache.spark.network.yarn.YarnShuffleService
+
+
+
+ yarn.nodemanager.aux-services.spark_shuffle.classpath
+ /usr/lib/edp/spark-3.1.1-bin-hadoop3.2/yarn/*
+
+
+
+ yarn.nodemanager.aux-services.timeline_collector.class
+ org.apache.hadoop.yarn.server.timelineservice.collector.PerNodeTimelineCollectorsAuxService
+
+
+
+ yarn.nodemanager.aux-services.sparkv2_shuffle.class
+ org.apache.spark.network.yarn.YarnShuffleService
+
+
+
+ yarn.nodemanager.aux-services.sparkv2_shuffle.classpath
+ /usr/lib/edp/spark-2.4.7-bin-hadoop-3.1.2/yarn/*
+
+
+
+ yarn.nodemanager.bind-host
+ 0.0.0.0
+
+
+
+ yarn.nodemanager.container-executor.class
+ org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor
+
+
+
+ yarn.nodemanager.container-metrics.unregister-delay-ms
+ 60000
+
+
+
+ yarn.nodemanager.container-monitor.interval-ms
+ 3000
+
+
+
+ yarn.nodemanager.delete.debug-delay-sec
+ 86400
+
+
+
+ yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage
+ 90
+
+
+
+ yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb
+ 1000
+
+
+
+ yarn.nodemanager.disk-health-checker.min-healthy-disks
+ 0.25
+
+
+
+ yarn.nodemanager.health-checker.interval-ms
+ 135000
+
+
+
+ yarn.nodemanager.health-checker.script.timeout-ms
+ 60000
+
+
+
+ yarn.nodemanager.keytab
+ /etc/security/keytabs/nm.service.keytab
+
+
+
+ yarn.nodemanager.linux-container-executor.cgroups.strict-resource-usage
+ false
+
+
+
+ yarn.nodemanager.linux-container-executor.group
+ hadoop
+
+
+
+ yarn.nodemanager.linux-container-executor.nonsecure-mode.limit-users
+ true
+
+
+
+ yarn.nodemanager.local-dirs
+ /data1/hadoop/yarn/local,/data2/hadoop/yarn/local,/data3/hadoop/yarn/local,/data4/hadoop/yarn/local,/data5/hadoop/yarn/local,/data6/hadoop/yarn/local,/data7/hadoop/yarn/local,/data8/hadoop/yarn/local,/data9/hadoop/yarn/local,/data10/hadoop/yarn/local,/data11/hadoop/yarn/local,/data12/hadoop/yarn/local,/data13/hadoop/yarn/local,/data14/hadoop/yarn/local,/data15/hadoop/yarn/local,/data16/hadoop/yarn/local,/data17/hadoop/yarn/local,/data18/hadoop/yarn/local,/data19/hadoop/yarn/local,/data20/hadoop/yarn/local,/data21/hadoop/yarn/local,/data22/hadoop/yarn/local,/data23/hadoop/yarn/local,/data24/hadoop/yarn/local
+
+
+
+ yarn.nodemanager.log-aggregation.compression-type
+ gz
+
+
+
+ yarn.nodemanager.log-aggregation.debug-enabled
+ false
+
+
+
+ yarn.nodemanager.log-aggregation.num-log-files-per-app
+ 30
+
+
+
+ yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds
+ 3600
+
+
+
+ yarn.nodemanager.log-dirs
+ /data1/hadoop/yarn/log,/data2/hadoop/yarn/log,/data3/hadoop/yarn/log,/data4/hadoop/yarn/log,/data5/hadoop/yarn/log,/data6/hadoop/yarn/log,/data7/hadoop/yarn/log,/data8/hadoop/yarn/log,/data9/hadoop/yarn/log,/data10/hadoop/yarn/log,/data11/hadoop/yarn/log,/data12/hadoop/yarn/log,/data13/hadoop/yarn/log,/data14/hadoop/yarn/log,/data15/hadoop/yarn/log,/data16/hadoop/yarn/log,/data17/hadoop/yarn/log,/data18/hadoop/yarn/log,/data19/hadoop/yarn/log,/data20/hadoop/yarn/log,/data21/hadoop/yarn/log,/data22/hadoop/yarn/log,/data23/hadoop/yarn/log,/data24/hadoop/yarn/log
+
+
+
+ yarn.nodemanager.log.retain-seconds
+ 604800
+
+
+
+ yarn.nodemanager.principal
+ nm/_HOST@ECLD.COM
+
+
+
+ yarn.nodemanager.recovery.dir
+ /var/log/hadoop-yarn/nodemanager/recovery-state
+
+
+
+ yarn.nodemanager.recovery.enabled
+ true
+
+
+
+ yarn.nodemanager.recovery.supervised
+ true
+
+
+
+ yarn.nodemanager.remote-app-log-dir
+ /app-logs
+
+
+
+ yarn.nodemanager.remote-app-log-dir-suffix
+ logs
+
+
+
+ yarn.nodemanager.resource-plugins
+
+
+
+
+ yarn.nodemanager.resource-plugins.gpu.allowed-gpu-devices
+
+
+
+
+ yarn.nodemanager.resource-plugins.gpu.docker-plugin
+
+
+
+
+ yarn.nodemanager.resource-plugins.gpu.docker-plugin.nvidiadocker-v1.endpoint
+
+
+
+
+ yarn.nodemanager.resource-plugins.gpu.path-to-discovery-executables
+
+
+
+
+ yarn.nodemanager.resource.cpu-vcores
+ 70
+
+
+
+ yarn.nodemanager.resource.memory-mb
+ 348160
+
+
+
+ yarn.nodemanager.resource.percentage-physical-cpu-limit
+ 80
+
+
+
+ yarn.nodemanager.resourcemanager.connect.wait.secs
+ 1800
+
+
+
+ yarn.nodemanager.runtime.linux.allowed-runtimes
+ default,docker
+
+
+
+ yarn.nodemanager.runtime.linux.docker.allowed-container-networks
+ host,none,bridge
+
+
+
+ yarn.nodemanager.runtime.linux.docker.capabilities
+
+ CHOWN,DAC_OVERRIDE,FSETID,FOWNER,MKNOD,NET_RAW,SETGID,SETUID,SETFCAP,
+ SETPCAP,NET_BIND_SERVICE,SYS_CHROOT,KILL,AUDIT_WRITE
+
+
+
+ yarn.nodemanager.runtime.linux.docker.default-container-network
+ host
+
+
+
+ yarn.nodemanager.runtime.linux.docker.privileged-containers.acl
+
+
+
+
+ yarn.nodemanager.runtime.linux.docker.privileged-containers.allowed
+ false
+
+
+
+ yarn.nodemanager.vmem-check-enabled
+ false
+
+
+
+ yarn.nodemanager.vmem-pmem-ratio
+ 2.1
+
+
+
+ yarn.nodemanager.webapp.cross-origin.enabled
+ true
+
+
+
+ yarn.nodemanager.webapp.spnego-keytab-file
+ /etc/security/keytabs/spnego.service.keytab
+
+
+
+ yarn.nodemanager.webapp.spnego-principal
+ HTTP/_HOST@ECLD.COM
+
+
+
+ yarn.resourcemanager.address
+ b5s120.hdp.dc:8050
+
+
+
+ yarn.resourcemanager.admin.address
+ b5s120.hdp.dc:8141
+
+
+
+ yarn.resourcemanager.am.max-attempts
+ 2
+
+
+
+ yarn.resourcemanager.bind-host
+ 0.0.0.0
+
+
+
+ yarn.resourcemanager.cluster-id
+ yarn-cluster
+
+
+
+ yarn.resourcemanager.connect.max-wait.ms
+ 900000
+
+
+
+ yarn.resourcemanager.connect.retry-interval.ms
+ 30000
+
+
+
+ yarn.resourcemanager.display.per-user-apps
+ true
+
+
+
+ yarn.resourcemanager.fs.state-store.retry-policy-spec
+ 2000, 500
+
+
+
+ yarn.resourcemanager.fs.state-store.uri
+
+
+
+
+ yarn.resourcemanager.ha.automatic-failover.zk-base-path
+ /yarn-leader-election-b5hudi
+
+
+
+ yarn.resourcemanager.ha.enabled
+ true
+
+
+
+ yarn.resourcemanager.ha.rm-ids
+ rm1,rm2
+
+
+
+ yarn.resourcemanager.hostname
+ b5s120.hdp.dc
+
+
+
+ yarn.resourcemanager.hostname.rm1
+ b5s120.hdp.dc
+
+
+
+ yarn.resourcemanager.hostname.rm2
+ b5s121.hdp.dc
+
+
+
+ yarn.resourcemanager.keytab
+ /etc/security/keytabs/rm.service.keytab
+
+
+
+ yarn.resourcemanager.monitor.capacity.preemption.intra-queue-preemption.enabled
+ true
+
+
+
+ yarn.resourcemanager.monitor.capacity.preemption.monitoring_interval
+ 15000
+
+
+
+ yarn.resourcemanager.monitor.capacity.preemption.natural_termination_factor
+ 1
+
+
+
+ yarn.resourcemanager.monitor.capacity.preemption.total_preemption_per_round
+ 0.1
+
+
+
+ yarn.resourcemanager.nodes.exclude-path
+ /etc/hadoop/conf/yarn.exclude
+
+
+
+ yarn.resourcemanager.placement-constraints.handler
+ scheduler
+
+
+
+ yarn.resourcemanager.principal
+ rm/_HOST@ECLD.COM
+
+
+
+ yarn.resourcemanager.proxy-user-privileges.enabled
+ true
+
+
+
+ yarn.resourcemanager.proxyuser.*.groups
+
+
+
+
+ yarn.resourcemanager.proxyuser.*.hosts
+
+
+
+
+ yarn.resourcemanager.proxyuser.*.users
+
+
+
+
+ yarn.resourcemanager.recovery.enabled
+ true
+
+
+
+ yarn.resourcemanager.resource-tracker.address
+ b5s120.hdp.dc:8025
+
+
+
+ yarn.resourcemanager.resource-tracker.address.rm1
+ b5s120.hdp.dc:8025
+
+
+
+ yarn.resourcemanager.resource-tracker.address.rm2
+ b5s121.hdp.dc:8025
+
+
+
+ yarn.resourcemanager.scheduler.address
+ b5s120.hdp.dc:8030
+
+
+
+ yarn.resourcemanager.scheduler.class
+ org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler
+
+
+
+ yarn.resourcemanager.scheduler.monitor.enable
+ true
+
+
+
+ yarn.resourcemanager.max-completed-applications
+ 10000
+
+
+
+ yarn.resourcemanager.state-store.max-completed-applications
+ ${yarn.resourcemanager.max-completed-applications}
+
+
+
+ yarn.resourcemanager.store.class
+ org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore
+
+
+
+ yarn.resourcemanager.system-metrics-publisher.dispatcher.pool-size
+ 10
+
+
+
+ yarn.resourcemanager.system-metrics-publisher.enabled
+ true
+
+
+
+ yarn.resourcemanager.webapp.address
+ b5s120.hdp.dc:8088
+
+
+
+ yarn.resourcemanager.webapp.address.rm1
+ b5s120.hdp.dc:8088
+
+
+
+ yarn.resourcemanager.webapp.address.rm2
+ b5s121.hdp.dc:8088
+
+
+
+ yarn.resourcemanager.webapp.cross-origin.enabled
+ true
+
+
+
+ yarn.resourcemanager.webapp.delegation-token-auth-filter.enabled
+ false
+
+
+
+ yarn.resourcemanager.webapp.https.address
+ b5s120.hdp.dc:8090
+
+
+
+ yarn.resourcemanager.webapp.https.address.rm1
+ b5s120.hdp.dc:8090
+
+
+
+ yarn.resourcemanager.webapp.https.address.rm2
+ b5s121.hdp.dc:8090
+
+
+
+ yarn.resourcemanager.webapp.spnego-keytab-file
+ /etc/security/keytabs/spnego.service.keytab
+
+
+
+ yarn.resourcemanager.webapp.spnego-principal
+ HTTP/_HOST@ECLD.COM
+
+
+
+ yarn.resourcemanager.work-preserving-recovery.enabled
+ true
+
+
+
+ yarn.resourcemanager.work-preserving-recovery.scheduling-wait-ms
+ 10000
+
+
+
+ yarn.resourcemanager.zk-acl
+ sasl:rm:rwcda
+
+
+
+ yarn.resourcemanager.zk-address
+ b5m1.hdp.dc:2181,b5m2.hdp.dc:2181,b5m3.hdp.dc:2181
+
+
+
+ yarn.resourcemanager.zk-num-retries
+ 1000
+
+
+
+ yarn.resourcemanager.zk-retry-interval-ms
+ 1000
+
+
+
+ yarn.resourcemanager.zk-state-store.parent-path
+ /rmstore-b5hudi
+
+
+
+ yarn.resourcemanager.zk-timeout-ms
+ 10000
+
+
+
+ yarn.rm.system-metricspublisher.emit-container-events
+ true
+
+
+
+ yarn.scheduler.capacity.ordering-policy.priority-utilization.underutilized-preemption.enabled
+ true
+
+
+
+ yarn.scheduler.maximum-allocation-mb
+ 81920
+
+
+
+ yarn.scheduler.maximum-allocation-vcores
+ 70
+
+
+
+ yarn.scheduler.minimum-allocation-mb
+ 1024
+
+
+
+ yarn.scheduler.minimum-allocation-vcores
+ 1
+
+
+
+ yarn.service.system-service.dir
+ /services
+
+
+
+ yarn.system-metricspublisher.enabled
+ true
+
+
+
+ yarn.timeline-service.address
+ b5s119.hdp.dc:10200
+
+
+
+ yarn.timeline-service.bind-host
+ 0.0.0.0
+
+
+
+ yarn.timeline-service.client.max-retries
+ 30
+
+
+
+ yarn.timeline-service.client.retry-interval-ms
+ 1000
+
+
+
+ yarn.timeline-service.enabled
+ false
+
+
+
+ yarn.timeline-service.entity-group-fs-store.active-dir
+ /ats/active/
+
+
+
+ yarn.timeline-service.entity-group-fs-store.app-cache-size
+ 10
+
+
+
+ yarn.timeline-service.entity-group-fs-store.cleaner-interval-seconds
+ 3600
+
+
+
+ yarn.timeline-service.entity-group-fs-store.done-dir
+ /ats/done/
+
+
+
+ yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes
+ org.apache.hadoop.yarn.applications.distributedshell.DistributedShellTimelinePlugin
+
+
+
+ yarn.timeline-service.entity-group-fs-store.group-id-plugin-classpath
+
+
+
+
+ yarn.timeline-service.entity-group-fs-store.retain-seconds
+ 604800
+
+
+
+ yarn.timeline-service.entity-group-fs-store.scan-interval-seconds
+ 60
+
+
+
+ yarn.timeline-service.entity-group-fs-store.summary-store
+ org.apache.hadoop.yarn.server.timeline.RollingLevelDBTimelineStore
+
+
+
+ yarn.timeline-service.generic-application-history.save-non-am-container-meta-info
+ false
+
+
+
+ yarn.timeline-service.generic-application-history.store-class
+ org.apache.hadoop.yarn.server.applicationhistoryservice.NullApplicationHistoryStore
+
+
+
+ yarn.timeline-service.hbase-schema.prefix
+ prod.
+
+
+
+ yarn.timeline-service.hbase.configuration.file
+ file:///etc/hadoop/conf/embedded-yarn-ats-hbase/hbase-site.xml
+
+
+
+ yarn.timeline-service.hbase.coprocessor.jar.hdfs.location
+ file:///usr/lib/edp/hadoop-3.2.2/lib/hadoop-yarn-server-timelineservice-3.2.2.jar
+
+
+
+ yarn.timeline-service.http-authentication.cookie.domain
+
+
+
+
+ yarn.timeline-service.http-authentication.cookie.path
+
+
+
+
+ yarn.timeline-service.http-authentication.kerberos.keytab
+ /etc/security/keytabs/spnego.service.keytab
+
+
+
+ yarn.timeline-service.http-authentication.kerberos.name.rules
+
+
+
+
+ yarn.timeline-service.http-authentication.kerberos.principal
+ HTTP/_HOST@ECLD.COM
+
+
+
+ yarn.timeline-service.http-authentication.proxyuser.*.groups
+
+
+
+
+ yarn.timeline-service.http-authentication.proxyuser.*.hosts
+
+
+
+
+ yarn.timeline-service.http-authentication.proxyuser.*.users
+
+
+
+
+ yarn.timeline-service.http-authentication.signature.secret
+
+
+
+
+ yarn.timeline-service.http-authentication.signature.secret.file
+
+
+
+
+ yarn.timeline-service.http-authentication.signer.secret.provider
+
+
+
+
+ yarn.timeline-service.http-authentication.signer.secret.provider.object
+
+
+
+
+ yarn.timeline-service.http-authentication.simple.anonymous.allowed
+ true
+
+
+
+ yarn.timeline-service.http-authentication.token.validity
+
+
+
+
+ yarn.timeline-service.http-authentication.type
+ simple
+
+
+
+ yarn.timeline-service.http-cross-origin.enabled
+ true
+
+
+
+ yarn.timeline-service.keytab
+ /etc/security/keytabs/yarn.service.keytab
+
+
+
+ yarn.timeline-service.leveldb-state-store.path
+ /data1/hadoop/yarn/timeline
+
+
+
+ yarn.timeline-service.leveldb-timeline-store.path
+ /data1/hadoop/yarn/timeline
+
+
+
+ yarn.timeline-service.leveldb-timeline-store.read-cache-size
+ 104857600
+
+
+
+ yarn.timeline-service.leveldb-timeline-store.start-time-read-cache-size
+ 10000
+
+
+
+ yarn.timeline-service.leveldb-timeline-store.start-time-write-cache-size
+ 10000
+
+
+
+ yarn.timeline-service.leveldb-timeline-store.ttl-interval-ms
+ 300000
+
+
+
+ yarn.timeline-service.principal
+ yarn/_HOST@ECLD.COM
+
+
+
+ yarn.timeline-service.reader.webapp.address
+ b5s119.hdp.dc:8198
+
+
+
+ yarn.timeline-service.reader.webapp.https.address
+ b5s119.hdp.dc:8199
+
+
+
+ yarn.timeline-service.recovery.enabled
+ true
+
+
+
+ yarn.timeline-service.state-store-class
+ org.apache.hadoop.yarn.server.timeline.recovery.LeveldbTimelineStateStore
+
+
+
+ yarn.timeline-service.store-class
+ org.apache.hadoop.yarn.server.timeline.EntityGroupFSTimelineStore
+
+
+
+ yarn.timeline-service.ttl-enable
+ true
+
+
+
+ yarn.timeline-service.ttl-ms
+ 2678400000
+
+
+
+ yarn.timeline-service.version
+ 2.0f
+
+
+
+ yarn.timeline-service.versions
+ 1.5f,2.0f
+
+
+
+ yarn.timeline-service.webapp.address
+ b5s119.hdp.dc:8188
+
+
+
+ yarn.timeline-service.webapp.https.address
+ b5s119.hdp.dc:8190
+
+
+
+ yarn.webapp.api-service.enable
+ true
+
+
+
+ yarn.webapp.ui2.enable
+ true
+
+
+