aboutsummaryrefslogtreecommitdiff
path: root/aarch64/sbin/start-dfs.sh
diff options
context:
space:
mode:
Diffstat (limited to 'aarch64/sbin/start-dfs.sh')
-rwxr-xr-xaarch64/sbin/start-dfs.sh117
1 file changed, 117 insertions, 0 deletions
diff --git a/aarch64/sbin/start-dfs.sh b/aarch64/sbin/start-dfs.sh
new file mode 100755
index 0000000..8cbea16
--- /dev/null
+++ b/aarch64/sbin/start-dfs.sh
@@ -0,0 +1,117 @@
#!/usr/bin/env bash

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


# Start hadoop dfs daemons.
# Optionally upgrade or rollback dfs state.
# Run this on master node.

usage="Usage: start-dfs.sh [-upgrade|-rollback] [other options such as -clusterId]"

# Resolve the directory this script lives in, following symlink-safe
# "${BASH_SOURCE-$0}" idiom used throughout the Hadoop launcher scripts.
bin=$(dirname "${BASH_SOURCE-$0}")
bin=$(cd "$bin"; pwd)

# Source hdfs-config.sh, which sets HADOOP_PREFIX, HADOOP_CONF_DIR,
# HADOOP_SECURE_DN_USER, etc. HADOOP_LIBEXEC_DIR may be pre-set by the caller.
DEFAULT_LIBEXEC_DIR="$bin"/../libexec
HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
. "$HADOOP_LIBEXEC_DIR/hdfs-config.sh"

# Get arguments: an optional leading -upgrade or -rollback, then pass-through
# options (e.g. -clusterId). -rollback is also forwarded to the datanodes.
if [ $# -ge 1 ]; then
  nameStartOpt="$1"
  shift
  case "$nameStartOpt" in
    (-upgrade)
      ;;
    (-rollback)
      dataStartOpt="$nameStartOpt"
      ;;
    (*)
      echo "$usage"
      exit 1
      ;;
  esac
fi

# Append any remaining options as a single space-joined string ($*, not $@:
# mixing "$@" into a scalar assignment is SC2145). The value is intentionally
# expanded unquoted below so it word-splits back into separate arguments.
nameStartOpt="$nameStartOpt $*"

#---------------------------------------------------------
# namenodes

NAMENODES=$("$HADOOP_PREFIX/bin/hdfs" getconf -namenodes)

echo "Starting namenodes on [$NAMENODES]"

"$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
  --config "$HADOOP_CONF_DIR" \
  --hostnames "$NAMENODES" \
  --script "$bin/hdfs" start namenode $nameStartOpt

#---------------------------------------------------------
# datanodes (using default slaves file)

# In a secure (Kerberized) cluster, datanodes must be started as root via
# start-secure-dns.sh, so we only print a hint here.
if [ -n "$HADOOP_SECURE_DN_USER" ]; then
  echo \
    "Attempting to start secure cluster, skipping datanodes. " \
    "Run start-secure-dns.sh as root to complete startup."
else
  "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
    --config "$HADOOP_CONF_DIR" \
    --script "$bin/hdfs" start datanode $dataStartOpt
fi

#---------------------------------------------------------
# secondary namenodes (if any)

# getconf exits non-zero / prints to stderr when none are configured;
# suppress that noise and just test the output.
SECONDARY_NAMENODES=$("$HADOOP_PREFIX/bin/hdfs" getconf -secondarynamenodes 2>/dev/null)

if [ -n "$SECONDARY_NAMENODES" ]; then
  echo "Starting secondary namenodes [$SECONDARY_NAMENODES]"

  "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
    --config "$HADOOP_CONF_DIR" \
    --hostnames "$SECONDARY_NAMENODES" \
    --script "$bin/hdfs" start secondarynamenode
fi

#---------------------------------------------------------
# quorumjournal nodes (if any)

# Redirect (not close: 2>&- can make the child command itself fail on write)
# stderr, consistent with the secondary-namenode lookup above.
SHARED_EDITS_DIR=$("$HADOOP_PREFIX/bin/hdfs" getconf -confKey dfs.namenode.shared.edits.dir 2>/dev/null)

case "$SHARED_EDITS_DIR" in
  qjournal://*)
    # qjournal://host1:port;host2:port/journalId -> "host1 host2" (strip
    # scheme and journal id, split on ';', drop ports).
    JOURNAL_NODES=$(echo "$SHARED_EDITS_DIR" | sed 's,qjournal://\([^/]*\)/.*,\1,g; s/;/ /g; s/:[0-9]*//g')
    echo "Starting journal nodes [$JOURNAL_NODES]"
    "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
      --config "$HADOOP_CONF_DIR" \
      --hostnames "$JOURNAL_NODES" \
      --script "$bin/hdfs" start journalnode
    ;;
esac

#---------------------------------------------------------
# ZK Failover controllers, if auto-HA is enabled
AUTOHA_ENABLED=$("$HADOOP_PREFIX/bin/hdfs" getconf -confKey dfs.ha.automatic-failover.enabled)
# Case-insensitive compare: the config value may be "true"/"TRUE"/etc.
if [ "$(echo "$AUTOHA_ENABLED" | tr 'A-Z' 'a-z')" = "true" ]; then
  echo "Starting ZK Failover Controllers on NN hosts [$NAMENODES]"
  "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
    --config "$HADOOP_CONF_DIR" \
    --hostnames "$NAMENODES" \
    --script "$bin/hdfs" start zkfc
fi

# eof