refactor(all): b9配置从主分支移除

This commit is contained in:
2024-02-29 20:23:32 +08:00
parent 7d0511bd4c
commit c6c6919f11
14 changed files with 4 additions and 2347 deletions

View File

@@ -1,333 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:include href="viewfs-mount-table.xml"/>
<property>
<name>fs.azure.user.agent.prefix</name>
<value>User-Agent: APN/1.0 Hortonworks/1.0 HDP/</value>
</property>
<property>
<name>fs.defaultFS</name>
<!--<value>viewfs://datalake</value>-->
<value>jfs://tdsc</value>
<final>true</final>
</property>
<property>
<name>fs.s3a.fast.upload</name>
<value>true</value>
</property>
<property>
<name>fs.s3a.fast.upload.buffer</name>
<value>disk</value>
</property>
<property>
<name>fs.s3a.multipart.size</name>
<value>67108864</value>
</property>
<property>
<name>fs.trash.interval</name>
<value>4320</value>
</property>
<property>
<name>ha.failover-controller.active-standby-elector.zk.op.retries</name>
<value>120</value>
</property>
<property>
<name>ha.zookeeper.acl</name>
<value>sasl:nn:rwcda</value>
</property>
<property>
<name>ha.zookeeper.quorum</name>
<value>b9m1.hdp.dc:2181,b9m2.hdp.dc:2181,b9m3.hdp.dc:2181</value>
</property>
<property>
<name>hadoop.http.authentication.kerberos.keytab</name>
<value>/etc/security/keytabs/spnego.service.keytab</value>
</property>
<property>
<name>hadoop.http.authentication.kerberos.principal</name>
<value>HTTP/_HOST@ECLD.COM</value>
</property>
<property>
<name>hadoop.http.authentication.signature.secret.file</name>
<value>/etc/security/http_secret</value>
</property>
<property>
<name>hadoop.http.authentication.simple.anonymous.allowed</name>
<value>true</value>
</property>
<property>
<name>hadoop.http.authentication.type</name>
<value>simple</value>
</property>
<property>
<name>hadoop.http.cross-origin.allowed-headers</name>
<value>X-Requested-With,Content-Type,Accept,Origin,WWW-Authenticate,Accept-Encoding,Transfer-Encoding</value>
</property>
<property>
<name>hadoop.http.cross-origin.allowed-methods</name>
<value>GET,PUT,POST,OPTIONS,HEAD,DELETE</value>
</property>
<property>
<name>hadoop.http.cross-origin.allowed-origins</name>
<value>*</value>
</property>
<property>
<name>hadoop.http.cross-origin.max-age</name>
<value>1800</value>
</property>
<property>
<name>hadoop.http.filter.initializers</name>
<value>org.apache.hadoop.security.AuthenticationFilterInitializer,org.apache.hadoop.security.HttpCrossOriginFilterInitializer</value>
</property>
<property>
<name>hadoop.proxyuser.hdfs.groups</name>
<value>*</value>
</property>
<property>
<name>hadoop.proxyuser.hdfs.hosts</name>
<value>*</value>
</property>
<property>
<name>hadoop.proxyuser.hive.groups</name>
<value>*</value>
</property>
<property>
<name>hadoop.proxyuser.hive.hosts</name>
<value>*</value>
</property>
<property>
<name>hadoop.proxyuser.HTTP.groups</name>
<value>*</value>
</property>
<property>
<name>hadoop.proxyuser.HTTP.hosts</name>
<value>*</value>
</property>
<property>
<name>hadoop.proxyuser.iap.groups</name>
<value>*</value>
</property>
<property>
<name>hadoop.proxyuser.iap.hosts</name>
<value>*</value>
</property>
<property>
<name>hadoop.proxyuser.livy.groups</name>
<value>*</value>
</property>
<property>
<name>hadoop.proxyuser.livy.hosts</name>
<value>*</value>
</property>
<property>
<name>hadoop.proxyuser.yarn.groups</name>
<value>*</value>
</property>
<property>
<name>hadoop.proxyuser.yarn.hosts</name>
<value>b9m2.hdp.dc,b9m3.hdp.dc</value>
</property>
<property>
<name>hadoop.rpc.protection</name>
<value>authentication,privacy</value>
</property>
<property>
<name>hadoop.security.auth_to_local</name>
<value>RULE:[1:$1@$0](hbase-b9@ECLD.COM)s/.*/hbase/
RULE:[1:$1@$0](hdfs-b9@ECLD.COM)s/.*/hdfs/
RULE:[1:$1@$0](spark-b9@ECLD.COM)s/.*/spark/
RULE:[1:$1@$0](yarn-ats-b9@ECLD.COM)s/.*/yarn-ats/
RULE:[1:$1@$0](.*@ECLD.COM)s/@.*//
RULE:[2:$1@$0](dn@ECLD.COM)s/.*/hdfs/
RULE:[2:$1@$0](hbase@ECLD.COM)s/.*/hbase/
RULE:[2:$1@$0](hive@ECLD.COM)s/.*/hive/
RULE:[2:$1@$0](jhs@ECLD.COM)s/.*/mapred/
RULE:[2:$1@$0](jn@ECLD.COM)s/.*/hdfs/
RULE:[2:$1@$0](livy@ECLD.COM)s/.*/livy/
RULE:[2:$1@$0](nm@ECLD.COM)s/.*/yarn/
RULE:[2:$1@$0](nn@ECLD.COM)s/.*/hdfs/
RULE:[2:$1@$0](rangeradmin@ECLD.COM)s/.*/ranger/
RULE:[2:$1@$0](rangerlookup@ECLD.COM)s/.*/ranger/
RULE:[2:$1@$0](rangertagsync@ECLD.COM)s/.*/rangertagsync/
RULE:[2:$1@$0](rangerusersync@ECLD.COM)s/.*/rangerusersync/
RULE:[2:$1@$0](rm@ECLD.COM)s/.*/yarn/
RULE:[2:$1@$0](spark@ECLD.COM)s/.*/spark/
RULE:[2:$1@$0](yarn@ECLD.COM)s/.*/yarn/
RULE:[2:$1@$0](yarn-ats-hbase@ECLD.COM)s/.*/yarn-ats/
DEFAULT</value>
</property>
<property>
<name>hadoop.security.authentication</name>
<value>kerberos</value>
</property>
<property>
<name>hadoop.security.authorization</name>
<value>true</value>
</property>
<property>
<name>hadoop.security.instrumentation.requires.admin</name>
<value>false</value>
</property>
<property>
<name>io.compression.codec.lzo.class</name>
<value>com.hadoop.compression.lzo.LzoCodec</value>
</property>
<property>
<name>io.compression.codecs</name>
<value>org.apache.hadoop.io.compress.GzipCodec,com.hadoop.compression.lzo.LzoCodec,com.hadoop.compression.lzo.LzopCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec</value>
</property>
<property>
<name>io.file.buffer.size</name>
<value>131072</value>
</property>
<property>
<name>io.serializations</name>
<value>org.apache.hadoop.io.serializer.WritableSerialization</value>
</property>
<property>
<name>ipc.client.connect.max.retries</name>
<value>50</value>
</property>
<property>
<name>ipc.client.connection.maxidletime</name>
<value>30000</value>
</property>
<property>
<name>ipc.client.idlethreshold</name>
<value>8000</value>
</property>
<property>
<name>ipc.server.tcpnodelay</name>
<value>true</value>
</property>
<property>
<name>mapreduce.jobtracker.webinterface.trusted</name>
<value>false</value>
</property>
<!--juicefs-->
<property>
<name>fs.jfs.impl</name>
<value>io.juicefs.JuiceFileSystem</value>
</property>
<property>
<name>fs.AbstractFileSystem.jfs.impl</name>
<value>io.juicefs.JuiceFS</value>
</property>
<property>
<name>juicefs.cache-dir</name>
<value>/var/jfsCache</value>
</property>
<property>
<name>juicefs.cache-size</name>
<value>0</value>
</property>
<property>
<name>juicefs.cache-full-block</name>
<value>true</value>
</property>
<property>
<name>juicefs.free-space</name>
<value>0.05</value>
</property>
<property>
<name>juicefs.attr-cache</name>
<value>86400</value>
</property>
<property>
<name>juicefs.entry-cache</name>
<value>86400</value>
</property>
<property>
<name>juicefs.dir-entry-cache</name>
<value>86400</value>
</property>
<property>
<name>juicefs.get-timeout</name>
<value>120</value>
</property>
<property>
<name>juicefs.put-timeout</name>
<value>120</value>
</property>
<property>
<name>juicefs.no-usage-report</name>
<value>true</value>
</property>
<property>
<name>juicefs.tdsc.meta</name>
<value>tikv://132.126.207.85:2379,132.126.207.86:2379,132.126.207.87:2379/tdsc?ca=/etc/tikv/ca.crt&amp;cert=/etc/tikv/client.crt&amp;key=/etc/tikv/client.pem</value>
</property>
<property>
<name>juicefs.backup-meta</name>
<value>0</value>
</property>
<property>
<name>juicefs.block.size</name>
<value>268435456</value>
</property>
<property>
<name>juicefs.prefetch</name>
<value>0</value>
</property>
<property>
<name>juicefs.debug</name>
<value>false</value>
</property>
<property>
<name>juicefs.no-bgjob</name>
<value>true</value>
</property>
<property>
<name>jfs.kerberos.token-servers</name>
<value>hdfs://b9</value>
</property>
<property>
<name>mapreduce.job.hdfs-servers</name>
<value>hdfs://b9</value>
</property>
</configuration>

View File

@@ -1,847 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
<property>
<name>dfs.block.access.token.enable</name>
<value>true</value>
</property>
<property>
<name>dfs.blockreport.initialDelay</name>
<value>120</value>
</property>
<property>
<name>dfs.blocksize</name>
<value>134217728</value>
</property>
<property>
<name>dfs.client.failover.proxy.provider.b9</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<property>
<name>dfs.client.failover.proxy.provider.b2</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<property>
<name>dfs.client.failover.proxy.provider.b1</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<property>
<name>dfs.client.read.shortcircuit</name>
<value>true</value>
</property>
<property>
<name>dfs.client.read.shortcircuit.streams.cache.size</name>
<value>4096</value>
</property>
<property>
<name>dfs.client.retry.policy.enabled</name>
<value>false</value>
</property>
<property>
<name>dfs.cluster.administrators</name>
<!-- NOTE: the leading space in the value is intentional. The ACL format is
     "users groups", so " hdfs" grants admin rights to the hdfs *group* only,
     with no individual users listed. Do not trim it. -->
<value> hdfs</value>
</property>
<property>
<name>dfs.content-summary.limit</name>
<value>5000</value>
</property>
<property>
<name>dfs.data.transfer.protection</name>
<value>authentication,privacy</value>
</property>
<property>
<name>dfs.datanode.address</name>
<value>0.0.0.0:1019</value>
</property>
<property>
<name>dfs.datanode.balance.bandwidthPerSec</name>
<value>6250000</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>[DISK]file:///data1/hadoop/hdfs/data,[DISK]file:///data2/hadoop/hdfs/data,[DISK]file:///data3/hadoop/hdfs/data,[DISK]file:///data4/hadoop/hdfs/data,[DISK]file:///data5/hadoop/hdfs/data,[DISK]file:///data6/hadoop/hdfs/data,[DISK]file:///data7/hadoop/hdfs/data,[DISK]file:///data8/hadoop/hdfs/data,[DISK]file:///data9/hadoop/hdfs/data,[DISK]file:///data10/hadoop/hdfs/data,[DISK]file:///data11/hadoop/hdfs/data,[DISK]file:///data12/hadoop/hdfs/data</value>
<final>true</final>
</property>
<property>
<name>dfs.datanode.data.dir.perm</name>
<value>750</value>
</property>
<property>
<name>dfs.datanode.du.reserved</name>
<value>26405499904</value>
</property>
<property>
<name>dfs.datanode.failed.volumes.tolerated</name>
<value>2</value>
<final>true</final>
</property>
<property>
<name>dfs.datanode.http.address</name>
<value>0.0.0.0:1022</value>
</property>
<property>
<name>dfs.datanode.https.address</name>
<value>0.0.0.0:50475</value>
</property>
<property>
<name>dfs.datanode.ipc.address</name>
<value>0.0.0.0:8010</value>
</property>
<property>
<name>dfs.datanode.kerberos.principal</name>
<value>dn/_HOST@ECLD.COM</value>
</property>
<property>
<name>dfs.datanode.keytab.file</name>
<value>/etc/security/keytabs/dn.service.keytab</value>
</property>
<property>
<name>dfs.datanode.max.transfer.threads</name>
<value>16384</value>
</property>
<property>
<name>dfs.domain.socket.path</name>
<value>/var/lib/hadoop-hdfs/dn_socket</value>
</property>
<property>
<name>dfs.encrypt.data.transfer.cipher.suites</name>
<value>AES/CTR/NoPadding</value>
</property>
<property>
<name>dfs.ha.automatic-failover.enabled</name>
<value>true</value>
</property>
<property>
<name>dfs.ha.fencing.methods</name>
<value>shell(/bin/true)</value>
</property>
<property>
<name>dfs.ha.namenodes.b9</name>
<value>nn1,nn2</value>
</property>
<property>
<name>dfs.ha.namenodes.b1</name>
<value>nn1,nn2</value>
</property>
<property>
<name>dfs.ha.namenodes.b2</name>
<value>nn3,nn4</value>
</property>
<property>
<name>dfs.heartbeat.interval</name>
<value>3</value>
</property>
<property>
<name>dfs.hosts.exclude</name>
<value>/etc/hadoop/conf/dfs.exclude</value>
</property>
<property>
<name>dfs.http.policy</name>
<value>HTTP_ONLY</value>
</property>
<property>
<name>dfs.https.port</name>
<value>50470</value>
</property>
<property>
<name>dfs.internal.nameservices</name>
<value>b9</value>
</property>
<property>
<name>dfs.journalnode.edits.dir</name>
<value>/data2/hadoop/hdfs/journal</value>
</property>
<property>
<name>dfs.journalnode.edits.dir.b9</name>
<value>/data2/hadoop/hdfs/journal</value>
</property>
<property>
<name>dfs.journalnode.http-address</name>
<value>0.0.0.0:8480</value>
</property>
<property>
<name>dfs.journalnode.https-address</name>
<value>0.0.0.0:8481</value>
</property>
<property>
<name>dfs.journalnode.kerberos.internal.spnego.principal</name>
<value>HTTP/_HOST@ECLD.COM</value>
</property>
<property>
<name>dfs.journalnode.kerberos.principal</name>
<value>jn/_HOST@ECLD.COM</value>
</property>
<property>
<name>dfs.journalnode.keytab.file</name>
<value>/etc/security/keytabs/jn.service.keytab</value>
</property>
<property>
<name>dfs.namenode.accesstime.precision</name>
<value>0</value>
</property>
<property>
<name>dfs.namenode.acls.enabled</name>
<value>true</value>
</property>
<property>
<name>dfs.namenode.audit.log.async</name>
<value>true</value>
</property>
<property>
<name>dfs.namenode.avoid.read.stale.datanode</name>
<value>true</value>
</property>
<property>
<name>dfs.namenode.avoid.write.stale.datanode</name>
<value>true</value>
</property>
<property>
<name>dfs.namenode.checkpoint.dir</name>
<value>/data/hadoop/hdfs/namesecondary</value>
</property>
<property>
<name>dfs.namenode.checkpoint.edits.dir</name>
<value>${dfs.namenode.checkpoint.dir}</value>
</property>
<property>
<name>dfs.namenode.checkpoint.period</name>
<value>21600</value>
</property>
<property>
<name>dfs.namenode.checkpoint.txns</name>
<value>1000000</value>
</property>
<property>
<name>dfs.namenode.fslock.fair</name>
<value>false</value>
</property>
<property>
<name>dfs.namenode.handler.count</name>
<value>100</value>
</property>
<property>
<name>dfs.namenode.http-address.b9.nn1</name>
<value>b9m2.hdp.dc:50070</value>
</property>
<property>
<name>dfs.namenode.http-address.b9.nn2</name>
<value>b9m3.hdp.dc:50070</value>
</property>
<property>
<name>dfs.namenode.http-address.b1.nn1</name>
<value>b1m2.hdp.dc:50070</value>
</property>
<property>
<name>dfs.namenode.http-address.b1.nn2</name>
<value>b1m3.hdp.dc:50070</value>
</property>
<property>
<name>dfs.namenode.http-address.b2.nn3</name>
<value>b1m5.hdp.dc:50070</value>
</property>
<property>
<name>dfs.namenode.http-address.b2.nn4</name>
<value>b1m6.hdp.dc:50070</value>
</property>
<property>
<name>dfs.namenode.https-address.b9.nn1</name>
<value>b9m2.hdp.dc:50470</value>
</property>
<property>
<name>dfs.namenode.https-address.b9.nn2</name>
<value>b9m3.hdp.dc:50470</value>
</property>
<property>
<name>dfs.namenode.https-address.b1.nn1</name>
<value>b1m2.hdp.dc:50470</value>
</property>
<property>
<name>dfs.namenode.https-address.b1.nn2</name>
<value>b1m3.hdp.dc:50470</value>
</property>
<property>
<name>dfs.namenode.https-address.b2.nn3</name>
<value>b1m5.hdp.dc:50470</value>
</property>
<property>
<name>dfs.namenode.https-address.b2.nn4</name>
<value>b1m6.hdp.dc:50470</value>
</property>
<property>
<name>dfs.namenode.kerberos.internal.spnego.principal</name>
<value>HTTP/_HOST@ECLD.COM</value>
</property>
<property>
<name>dfs.namenode.kerberos.principal</name>
<value>nn/_HOST@ECLD.COM</value>
</property>
<property>
<name>dfs.namenode.keytab.file</name>
<value>/etc/security/keytabs/nn.service.keytab</value>
</property>
<property>
<name>dfs.namenode.max.extra.edits.segments.retained</name>
<value>180</value>
</property>
<property>
<name>dfs.namenode.name.dir</name>
<value>/data1/hadoop/hdfs/namenode,/data2/hadoop/hdfs/namenode</value>
<final>true</final>
</property>
<property>
<name>dfs.namenode.name.dir.restore</name>
<value>true</value>
</property>
<property>
<name>dfs.namenode.num.extra.edits.retained</name>
<value>18000</value>
</property>
<property>
<name>dfs.namenode.rpc-address.b9.nn1</name>
<value>b9m2.hdp.dc:8020</value>
</property>
<property>
<name>dfs.namenode.rpc-address.b9.nn2</name>
<value>b9m3.hdp.dc:8020</value>
</property>
<property>
<name>dfs.namenode.rpc-address.b1.nn1</name>
<value>b1m2.hdp.dc:8020</value>
</property>
<property>
<name>dfs.namenode.rpc-address.b1.nn2</name>
<value>b1m3.hdp.dc:8020</value>
</property>
<property>
<name>dfs.namenode.rpc-address.b2.nn3</name>
<value>b1m5.hdp.dc:8020</value>
</property>
<property>
<name>dfs.namenode.rpc-address.b2.nn4</name>
<value>b1m6.hdp.dc:8020</value>
</property>
<property>
<name>dfs.namenode.safemode.threshold-pct</name>
<value>0.99</value>
</property>
<property>
<name>dfs.namenode.shared.edits.dir</name>
<value>qjournal://b9m1.hdp.dc:8485;b9m2.hdp.dc:8485;b9m3.hdp.dc:8485/b9</value>
</property>
<property>
<name>dfs.namenode.shared.edits.dir.b9</name>
<value>qjournal://b9m1.hdp.dc:8485;b9m2.hdp.dc:8485;b9m3.hdp.dc:8485/b9</value>
</property>
<property>
<name>dfs.namenode.stale.datanode.interval</name>
<value>30000</value>
</property>
<property>
<name>dfs.namenode.startup.delay.block.deletion.sec</name>
<value>3600</value>
</property>
<property>
<name>dfs.namenode.write.stale.datanode.ratio</name>
<value>1.0f</value>
</property>
<property>
<name>dfs.nameservices</name>
<value>b9,b7,b5,b1,b2,b3,b4,a3,a4,a6,f1,e1,d2</value>
</property>
<property>
<name>dfs.permissions.ContentSummary.subAccess</name>
<value>true</value>
</property>
<property>
<name>dfs.permissions.enabled</name>
<value>true</value>
</property>
<property>
<name>dfs.permissions.superusergroup</name>
<value>hdfs</value>
</property>
<property>
<name>dfs.replication</name>
<value>3</value>
</property>
<property>
<name>dfs.replication.max</name>
<value>50</value>
</property>
<property>
<name>dfs.web.authentication.kerberos.keytab</name>
<value>/etc/security/keytabs/spnego.service.keytab</value>
</property>
<property>
<name>dfs.web.authentication.kerberos.principal</name>
<value>HTTP/_HOST@ECLD.COM</value>
</property>
<property>
<name>dfs.webhdfs.enabled</name>
<value>true</value>
<final>true</final>
</property>
<property>
<name>fs.permissions.umask-mode</name>
<value>022</value>
</property>
<property>
<name>hadoop.caller.context.enabled</name>
<value>true</value>
</property>
<property>
<name>manage.include.files</name>
<value>false</value>
</property>
<property>
<name>nfs.exports.allowed.hosts</name>
<value>* rw</value>
</property>
<property>
<name>nfs.file.dump.dir</name>
<value>/tmp/.hdfs-nfs</value>
</property>
<property>
<name>dfs.client.datanode-restart.timeout</name>
<value>30</value>
</property>
<property>
<name>dfs.client.failover.proxy.provider.f1</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<property>
<name>dfs.ha.namenodes.f1</name>
<value>nn1,nn2</value>
</property>
<property>
<name>dfs.namenode.http-address.f1.nn1</name>
<value>f1m2.hdp.dc:50070</value>
</property>
<property>
<name>dfs.namenode.http-address.f1.nn2</name>
<value>f1m3.hdp.dc:50070</value>
</property>
<property>
<name>dfs.namenode.https-address.f1.nn1</name>
<value>f1m2.hdp.dc:50470</value>
</property>
<property>
<name>dfs.namenode.https-address.f1.nn2</name>
<value>f1m3.hdp.dc:50470</value>
</property>
<property>
<name>dfs.namenode.rpc-address.f1.nn1</name>
<value>f1m2.hdp.dc:8020</value>
</property>
<property>
<name>dfs.namenode.rpc-address.f1.nn2</name>
<value>f1m3.hdp.dc:8020</value>
</property>
<property>
<name>dfs.client.failover.proxy.provider.d2</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<property>
<name>dfs.ha.namenodes.d2</name>
<value>nn1,nn2</value>
</property>
<property>
<name>dfs.namenode.http-address.d2.nn1</name>
<value>d2m2.hdp.dc:50070</value>
</property>
<property>
<name>dfs.namenode.http-address.d2.nn2</name>
<value>d2m3.hdp.dc:50070</value>
</property>
<property>
<name>dfs.namenode.https-address.d2.nn1</name>
<value>d2m2.hdp.dc:50470</value>
</property>
<property>
<name>dfs.namenode.https-address.d2.nn2</name>
<value>d2m3.hdp.dc:50470</value>
</property>
<property>
<name>dfs.namenode.rpc-address.d2.nn1</name>
<value>d2m2.hdp.dc:8020</value>
</property>
<property>
<name>dfs.namenode.rpc-address.d2.nn2</name>
<value>d2m3.hdp.dc:8020</value>
</property>
<property>
<name>dfs.client.failover.proxy.provider.e1</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<property>
<name>dfs.ha.namenodes.e1</name>
<value>nn1,nn2</value>
</property>
<property>
<name>dfs.namenode.http-address.e1.nn1</name>
<value>e1m2.hdp.dc:50070</value>
</property>
<property>
<name>dfs.namenode.http-address.e1.nn2</name>
<value>e1m3.hdp.dc:50070</value>
</property>
<property>
<name>dfs.namenode.https-address.e1.nn1</name>
<value>e1m2.hdp.dc:50470</value>
</property>
<property>
<name>dfs.namenode.https-address.e1.nn2</name>
<value>e1m3.hdp.dc:50470</value>
</property>
<property>
<name>dfs.namenode.rpc-address.e1.nn1</name>
<value>e1m2.hdp.dc:8020</value>
</property>
<property>
<name>dfs.namenode.rpc-address.e1.nn2</name>
<value>e1m3.hdp.dc:8020</value>
</property>
<property>
<name>dfs.client.failover.proxy.provider.b4</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<property>
<name>dfs.ha.namenodes.b4</name>
<value>nn1,nn2</value>
</property>
<property>
<name>dfs.namenode.http-address.b4.nn1</name>
<value>b4m2.hdp.dc:50070</value>
</property>
<property>
<name>dfs.namenode.http-address.b4.nn2</name>
<value>b4m3.hdp.dc:50070</value>
</property>
<property>
<name>dfs.namenode.https-address.b4.nn1</name>
<value>b4m2.hdp.dc:50470</value>
</property>
<property>
<name>dfs.namenode.https-address.b4.nn2</name>
<value>b4m3.hdp.dc:50470</value>
</property>
<property>
<name>dfs.namenode.rpc-address.b4.nn1</name>
<value>b4m2.hdp.dc:8020</value>
</property>
<property>
<name>dfs.namenode.rpc-address.b4.nn2</name>
<value>b4m3.hdp.dc:8020</value>
</property>
<property>
<name>dfs.client.failover.proxy.provider.a6</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<property>
<name>dfs.ha.namenodes.a6</name>
<value>nn1,nn2</value>
</property>
<property>
<name>dfs.namenode.http-address.a6.nn1</name>
<value>a6m2.hdp.dc:50070</value>
</property>
<property>
<name>dfs.namenode.http-address.a6.nn2</name>
<value>a6m3.hdp.dc:50070</value>
</property>
<property>
<name>dfs.namenode.https-address.a6.nn1</name>
<value>a6m2.hdp.dc:50470</value>
</property>
<property>
<name>dfs.namenode.https-address.a6.nn2</name>
<value>a6m3.hdp.dc:50470</value>
</property>
<property>
<name>dfs.namenode.rpc-address.a6.nn1</name>
<value>a6m2.hdp.dc:8020</value>
</property>
<property>
<name>dfs.namenode.rpc-address.a6.nn2</name>
<value>a6m3.hdp.dc:8020</value>
</property>
<property>
<name>dfs.client.failover.proxy.provider.b5</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<property>
<name>dfs.ha.namenodes.b5</name>
<value>nn1,nn2</value>
</property>
<property>
<name>dfs.namenode.http-address.b5.nn1</name>
<value>b5m1.hdp.dc:50070</value>
</property>
<property>
<name>dfs.namenode.http-address.b5.nn2</name>
<value>b5m2.hdp.dc:50070</value>
</property>
<property>
<name>dfs.namenode.https-address.b5.nn1</name>
<value>b5m1.hdp.dc:50470</value>
</property>
<property>
<name>dfs.namenode.https-address.b5.nn2</name>
<value>b5m2.hdp.dc:50470</value>
</property>
<property>
<name>dfs.namenode.rpc-address.b5.nn1</name>
<value>b5m1.hdp.dc:8020</value>
</property>
<property>
<name>dfs.namenode.rpc-address.b5.nn2</name>
<value>b5m2.hdp.dc:8020</value>
</property>
<property>
<name>dfs.client.failover.proxy.provider.a4</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<property>
<name>dfs.ha.namenodes.a4</name>
<value>nn1,nn2</value>
</property>
<property>
<name>dfs.namenode.http-address.a4.nn1</name>
<value>a4m1.hdp.dc:50070</value>
</property>
<property>
<name>dfs.namenode.http-address.a4.nn2</name>
<value>a4m2.hdp.dc:50070</value>
</property>
<property>
<name>dfs.namenode.https-address.a4.nn1</name>
<value>a4m1.hdp.dc:50470</value>
</property>
<property>
<name>dfs.namenode.https-address.a4.nn2</name>
<value>a4m2.hdp.dc:50470</value>
</property>
<property>
<name>dfs.namenode.rpc-address.a4.nn1</name>
<value>a4m1.hdp.dc:8020</value>
</property>
<property>
<name>dfs.namenode.rpc-address.a4.nn2</name>
<value>a4m2.hdp.dc:8020</value>
</property>
<property>
<name>dfs.client.failover.proxy.provider.a3</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<property>
<name>dfs.ha.namenodes.a3</name>
<value>nn1,nn2</value>
</property>
<property>
<name>dfs.namenode.http-address.a3.nn1</name>
<value>a3m2.hdp.dc:50070</value>
</property>
<property>
<name>dfs.namenode.http-address.a3.nn2</name>
<value>a3m3.hdp.dc:50070</value>
</property>
<property>
<name>dfs.namenode.https-address.a3.nn1</name>
<value>a3m2.hdp.dc:50470</value>
</property>
<property>
<name>dfs.namenode.https-address.a3.nn2</name>
<value>a3m3.hdp.dc:50470</value>
</property>
<property>
<name>dfs.namenode.rpc-address.a3.nn1</name>
<value>a3m2.hdp.dc:8020</value>
</property>
<property>
<name>dfs.namenode.rpc-address.a3.nn2</name>
<value>a3m3.hdp.dc:8020</value>
</property>
<property>
<name>dfs.client.failover.proxy.provider.b3</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<property>
<name>dfs.ha.namenodes.b3</name>
<value>nn1,nn2</value>
</property>
<property>
<name>dfs.namenode.http-address.b3.nn1</name>
<value>b3m2.hdp.dc:50070</value>
</property>
<property>
<name>dfs.namenode.http-address.b3.nn2</name>
<value>b3m3.hdp.dc:50070</value>
</property>
<property>
<name>dfs.namenode.https-address.b3.nn1</name>
<value>b3m2.hdp.dc:50470</value>
</property>
<property>
<name>dfs.namenode.https-address.b3.nn2</name>
<value>b3m3.hdp.dc:50470</value>
</property>
<property>
<name>dfs.namenode.rpc-address.b3.nn1</name>
<value>b3m2.hdp.dc:8020</value>
</property>
<property>
<name>dfs.namenode.rpc-address.b3.nn2</name>
<value>b3m3.hdp.dc:8020</value>
</property>
<property>
<name>dfs.client.failover.proxy.provider.b7</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<property>
<name>dfs.ha.namenodes.b7</name>
<value>nn1,nn2</value>
</property>
<property>
<name>dfs.namenode.http-address.b7.nn1</name>
<value>b7m2.hdp.dc:50070</value>
</property>
<property>
<name>dfs.namenode.http-address.b7.nn2</name>
<value>b7m3.hdp.dc:50070</value>
</property>
<property>
<name>dfs.namenode.https-address.b7.nn1</name>
<value>b7m2.hdp.dc:50470</value>
</property>
<property>
<name>dfs.namenode.https-address.b7.nn2</name>
<value>b7m3.hdp.dc:50470</value>
</property>
<property>
<name>dfs.namenode.rpc-address.b7.nn1</name>
<value>b7m2.hdp.dc:8020</value>
</property>
<property>
<name>dfs.namenode.rpc-address.b7.nn2</name>
<value>b7m3.hdp.dc:8020</value>
</property>
<property>
<name>dfs.permissions</name>
<value>true</value>
</property>
<property>
<name>dfs.namenode.inode.attributes.provider.class</name>
<value>org.apache.ranger.authorization.hadoop.RangerHdfsAuthorizer</value>
</property>
</configuration>

View File

@@ -1,46 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<configuration>
<property>
<name>fs.viewfs.mounttable.datalake.link./app-logs</name>
<value>hdfs://b9/app-logs</value>
</property>
<property>
<name>fs.viewfs.mounttable.datalake.link./apps</name>
<value>hdfs://b9/apps</value>
</property>
<property>
<name>fs.viewfs.mounttable.datalake.link./ats</name>
<value>hdfs://b9/ats</value>
</property>
<property>
<name>fs.viewfs.mounttable.datalake.link./atsv2</name>
<value>hdfs://b9/atsv2</value>
</property>
<property>
<name>fs.viewfs.mounttable.datalake.link./mr-history</name>
<value>hdfs://b9/mr-history</value>
</property>
<property>
<name>fs.viewfs.mounttable.datalake.link./spark2-history</name>
<value>hdfs://b9/spark2-history</value>
</property>
<property>
<name>fs.viewfs.mounttable.datalake.link./tmp</name>
<value>hdfs://b9/tmp</value>
</property>
<property>
<name>fs.viewfs.mounttable.datalake.link./user</name>
<value>hdfs://b9/user</value>
</property>
<property>
<name>fs.viewfs.mounttable.datalake.link./warehouse</name>
<value>hdfs://b9/warehouse</value>
</property>
<property>
<name>fs.viewfs.mounttable.datalake.link./b1tmp</name>
<value>hdfs://b1/tmp</value>
</property>
<property>
<name>fs.viewfs.mounttable.datalake.link./b2tmp</name>
<value>hdfs://b2/tmp</value>
</property>
</configuration>

File diff suppressed because it is too large Load Diff