[HUDI-2785] Add Trino setup in Docker Demo (#4300)
* [HUDI-2785] Add Trino setup in Docker Demo * Update docker account and remove unnecessary configs * Adjust sparkadhoc Dockerfile
This commit is contained in:
107
docker/hoodie/hadoop/base_java11/entrypoint.sh
Normal file
107
docker/hoodie/hadoop/base_java11/entrypoint.sh
Normal file
@@ -0,0 +1,107 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
#######################################################################################
|
||||
## COPIED FROM ##
|
||||
## https://github.com/big-data-europe/docker-hadoop/blob/master/base/entrypoint.sh ##
|
||||
# ##
|
||||
#######################################################################################
|
||||
|
||||
# Default the HDFS filesystem URI to this host's FQDN on port 8020 unless the
# caller already supplied CORE_CONF_fs_defaultFS in the environment.
export CORE_CONF_fs_defaultFS="${CORE_CONF_fs_defaultFS:-hdfs://$(hostname -f):8020}"
|
||||
|
||||
function addProperty() {
  # Append a <property> entry to a Hadoop-style XML config file, inserting it
  # immediately before the closing </configuration> tag (edits the file in place).
  #
  # Arguments:
  #   $1 - path to the *-site.xml file to modify
  #   $2 - property name
  #   $3 - property value
  local path=$1
  local name=$2
  local value=$3

  local entry="<property><name>$name</name><value>${value}</value></property>"
  # Escape '/' so the entry survives being used as sed replacement text.
  # Fix: pipe the entry through printf '%s' "$entry" instead of an unquoted
  # `echo $entry` — the unquoted form collapsed runs of whitespace in the
  # value and subjected it to glob expansion.
  # NOTE(review): values containing '&' or '\' would still confuse the sed
  # replacement; none of the visible callers pass such values.
  local escapedEntry
  escapedEntry=$(printf '%s' "$entry" | sed 's/\//\\\//g')
  sed -i "/<\/configuration>/ s/.*/${escapedEntry}\n&/" "$path"
}
|
||||
|
||||
function configure() {
  # Populate a Hadoop *-site.xml file from environment variables.
  #
  # Every env var named <envPrefix>_<encoded-name>=<value> becomes a
  # <property> entry in the target file. Property names are decoded as:
  #   '___' -> '-',  '__' -> '_',  '_' -> '.'
  #
  # Arguments:
  #   $1 - path to the *-site.xml file (e.g. /etc/hadoop/core-site.xml)
  #   $2 - module name, used for logging
  #   $3 - env-var prefix to scan for (e.g. CORE_CONF)
  local path=$1
  local module=$2
  local envPrefix=$3

  local var
  local value
  local name
  local c

  echo "Configuring $module"
  for c in $(printenv | perl -sne 'print "$1 " if m/^${envPrefix}_(.+?)=.*/' -- -envPrefix="$envPrefix"); do
    # Decode the property name; the intermediate '@' keeps the '__' -> '_'
    # replacement from being re-matched by the '_' -> '.' rule.
    name=$(echo "${c}" | perl -pe 's/___/-/g; s/__/@/g; s/_/./g; s/@/_/g;')
    var="${envPrefix}_${c}"
    value=${!var}
    echo " - Setting $name=$value"
    # Fix: write into "$path" (the parameter every caller already passes)
    # instead of re-deriving /etc/hadoop/$module-site.xml; the two were
    # identical for all call sites, but the parameter was dead code.
    addProperty "$path" "$name" "$value"
  done
}
|
||||
|
||||
# Render each Hadoop config file from its corresponding *_CONF_* env vars.
for cfg in core:CORE_CONF hdfs:HDFS_CONF yarn:YARN_CONF httpfs:HTTPFS_CONF kms:KMS_CONF; do
  configure "/etc/hadoop/${cfg%%:*}-site.xml" "${cfg%%:*}" "${cfg##*:}"
done
|
||||
|
||||
# When MULTIHOMED_NETWORK=1, bind Hadoop daemons to all interfaces and make
# clients/datanodes address each other by hostname rather than by IP, so the
# cluster works across multiple Docker networks.
if [ "$MULTIHOMED_NETWORK" = "1" ]; then
  echo "Configuring for multihomed network"

  # HDFS
  addProperty /etc/hadoop/hdfs-site.xml dfs.namenode.rpc-bind-host 0.0.0.0
  addProperty /etc/hadoop/hdfs-site.xml dfs.namenode.servicerpc-bind-host 0.0.0.0
  addProperty /etc/hadoop/hdfs-site.xml dfs.namenode.http-bind-host 0.0.0.0
  addProperty /etc/hadoop/hdfs-site.xml dfs.namenode.https-bind-host 0.0.0.0
  addProperty /etc/hadoop/hdfs-site.xml dfs.client.use.datanode.hostname true
  addProperty /etc/hadoop/hdfs-site.xml dfs.datanode.use.datanode.hostname true

  # YARN
  # Fix: the original added yarn.nodemanager.bind-host twice, writing a
  # duplicate <property> entry into yarn-site.xml.
  addProperty /etc/hadoop/yarn-site.xml yarn.resourcemanager.bind-host 0.0.0.0
  addProperty /etc/hadoop/yarn-site.xml yarn.nodemanager.bind-host 0.0.0.0
  addProperty /etc/hadoop/yarn-site.xml yarn.timeline-service.bind-host 0.0.0.0

  # MAPRED
  # NOTE(review): a YARN-namespaced key is written into mapred-site.xml here;
  # kept as-is to match the upstream big-data-europe script this was copied
  # from — confirm before changing.
  addProperty /etc/hadoop/mapred-site.xml yarn.nodemanager.bind-host 0.0.0.0
fi
|
||||
|
||||
# When GANGLIA_HOST is set, redirect Hadoop metrics to that Ganglia collector:
# preserve the stock configs as *.orig and regenerate both metrics files.
if [ -n "$GANGLIA_HOST" ]; then
  mv /etc/hadoop/hadoop-metrics.properties /etc/hadoop/hadoop-metrics.properties.orig
  mv /etc/hadoop/hadoop-metrics2.properties /etc/hadoop/hadoop-metrics2.properties.orig

  # metrics v1 contexts -> hadoop-metrics.properties
  for ctx in mapred jvm rpc ugi; do
    printf '%s\n' \
      "$ctx.class=org.apache.hadoop.metrics.ganglia.GangliaContext31" \
      "$ctx.period=10" \
      "$ctx.servers=$GANGLIA_HOST:8649"
  done > /etc/hadoop/hadoop-metrics.properties

  # metrics v2 sinks -> hadoop-metrics2.properties
  for daemon in namenode datanode resourcemanager nodemanager mrappmaster jobhistoryserver; do
    printf '%s\n' \
      "$daemon.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31" \
      "$daemon.sink.ganglia.period=10" \
      "$daemon.sink.ganglia.supportsparse=true" \
      "$daemon.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both" \
      "$daemon.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40" \
      "$daemon.sink.ganglia.servers=$GANGLIA_HOST:8649"
  done > /etc/hadoop/hadoop-metrics2.properties
fi
|
||||
|
||||
# Save Container IP in ENV variable
# NOTE(review): a child process cannot modify this shell's environment, so the
# helper presumably records the IP in a file or profile that later stages
# source — confirm against the image's export_container_ip.sh.
/usr/bin/export_container_ip.sh

# Standard Docker entrypoint handoff: exec replaces this shell with the
# container's CMD/arguments so it runs as PID 1 and receives signals directly.
exec "$@"
|
||||
Reference in New Issue
Block a user