zookeeper-hadoop-hbase-start-script
yum install -y gcc* uuid-devel libuuid-devel libtool
yum install -y git
wget http://download.zeromq.org/zeromq-2.1.7.tar.gz
tar -zxvf zeromq-2.1.7.tar.gz
cd zeromq-2.1.7
./configure
make
make install
git clone https://github.com/nathanmarz/jzmq.git
cd jzmq
./autogen.sh
./configure
make
make install
wget http://mirrors.cnnic.cn/apache/storm/apache-storm-0.9.4/apache-storm-0.9.4.tar.gz
tar -zxvf apache-storm-0.9.4.tar.gz
ln -sf apache-storm-0.9.4 storm
------------------------------------------------------------------------
STORM_HOME=$BASE_HOME/storm
PATH=$PATH:$STORM_HOME/bin
export STORM_HOME
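To keep STORM_HOME on the PATH in new shells, the variables can be persisted in a profile script. A minimal sketch, assuming BASE_HOME is /myhome/usr (adjust to your layout):
#persist Storm environment variables (assumption: BASE_HOME=/myhome/usr, profile.d is used)
cat >> /etc/profile.d/storm.sh <<'EOF'
export BASE_HOME=/myhome/usr
export STORM_HOME=$BASE_HOME/storm
export PATH=$PATH:$STORM_HOME/bin
EOF
source /etc/profile.d/storm.sh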
......
Step1:
ssh zjenterprise01
cd /myhome/
./runzk.sh "/myhome/usr/zookeeper/bin/zkServer.sh stop"
./runzk.sh "/myhome/usr/zookeeper/bin/zkServer.sh start"
./runzk.sh "/myhome/usr/zookeeper/bin/zkServer.sh status"
./runzk.sh "/myhome/usr/hadoop/sbin/hadoop-daemon.sh stop journalnode"
./runzk.sh "/myhome/usr/hadoop/sbin/hadoop-daemon.sh start journalnode"
exit
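The runzk.sh wrapper used above (and the runyarn.sh / runresourcemanager.sh / runnamenode.sh wrappers in the later steps, which follow the same pattern) is not listed here; a minimal hypothetical sketch, assuming the ZooKeeper/JournalNode hosts are zjenterprise05-07 as in the startup script at the bottom:
#!/bin/sh
#runzk.sh (hypothetical sketch): run the given command on every ZooKeeper/JournalNode host
#host list is an assumption taken from the startup script below
CMD="$1"
for host in zjenterprise05 zjenterprise06 zjenterprise07; do
    ssh "$host" "$CMD"
done
#usage: ./runzk.sh "/myhome/usr/zookeeper/bin/zkServer.sh status"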
Step2:
hdfs namenode -format
hdfs zkfc -formatZK
hadoop-daemon.sh start namenode
start-dfs.sh
ssh zjenterprise02
hdfs namenode -bootstrapStandby
hadoop-daemon.sh start namenode
ssh zjenterprise03
cd /myhome/
./runyarn.sh "/myhome/usr/hadoop/sbin/stop-yarn.sh"
./runyarn.sh "/myhome/usr/hadoop/sbin/start-yarn.sh"
./runresourcemanager.sh "/myhome/usr/hadoop/sbin/yarn-daemon.sh stop resourcemanager"
./runresourcemanager.sh "/myhome/usr/hadoop/sbin/yarn-daemon.sh start resourcemanager"
exit
./runnamenode.sh "/myhome/usr/hadoop/sbin/hadoop-daemon.sh start namenode"
Step1:
On each JournalNode node, run the following command to start the journalnode service:
sbin/hadoop-daemon.sh start journalnode
Step2:
On [nn1], format HDFS and start the namenode:
bin/hdfs namenode -format
sbin/hadoop-daemon.sh start namenode
Step3: ssh zjenterprise02
On [nn2], sync the metadata from nn1:
bin/hdfs namenode -bootstrapStandby
Step4:
Start [nn2]:
sbin/hadoop-daemon.sh start namenode
After the four steps above, both nn1 and nn2 are in standby state.
Step5:
Switch [nn1] to Active:
bin/hdfs haadmin -transitionToActive nn1
Step6:
On [nn1], start all datanodes:
sbin/hadoop-daemons.sh start datanode
To shut down the Hadoop cluster:
On [nn1], run the following command:
sbin/stop-dfs.sh
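To check the result of the failover steps, haadmin can report each NameNode's state (nn1/nn2 are the NameNode IDs used above; expected: nn1 active, nn2 standby):
bin/hdfs haadmin -getServiceState nn1
bin/hdfs haadmin -getServiceState nn2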
-------------------------------------------------------------------------------------------------
#!/bin/sh
#base_root=/myhome/usr
#sync time on all nodes
ssh zjenterprise01 'ntpdate time.nist.gov'
ssh zjenterprise02 'ntpdate time.nist.gov'
ssh zjenterprise03 'ntpdate time.nist.gov'
ssh zjenterprise04 'ntpdate time.nist.gov'
ssh zjenterprise05 'ntpdate time.nist.gov'
ssh zjenterprise06 'ntpdate time.nist.gov'
ssh zjenterprise07 'ntpdate time.nist.gov'
#start zookeeper daemons
ssh zjenterprise05 '/myhome/usr/zookeeper/bin/zkServer.sh start'
ssh zjenterprise06 '/myhome/usr/zookeeper/bin/zkServer.sh start'
ssh zjenterprise07 '/myhome/usr/zookeeper/bin/zkServer.sh start'
ssh zjenterprise05 '/myhome/usr/zookeeper/bin/zkServer.sh status'
ssh zjenterprise06 '/myhome/usr/zookeeper/bin/zkServer.sh status'
ssh zjenterprise07 '/myhome/usr/zookeeper/bin/zkServer.sh status'
#start all journalnode
ssh zjenterprise05 '/myhome/usr/hadoop/sbin/hadoop-daemon.sh start journalnode'
ssh zjenterprise06 '/myhome/usr/hadoop/sbin/hadoop-daemon.sh start journalnode'
ssh zjenterprise07 '/myhome/usr/hadoop/sbin/hadoop-daemon.sh start journalnode'
#start namenode daemons
ssh zjenterprise01 '/myhome/usr/hadoop/sbin/hadoop-daemon.sh start namenode'
ssh zjenterprise02 '/myhome/usr/hadoop/sbin/hadoop-daemon.sh start namenode'
ssh zjenterprise01 '/myhome/usr/hadoop/sbin/hadoop-daemon.sh start zkfc'
ssh zjenterprise02 '/myhome/usr/hadoop/sbin/hadoop-daemon.sh start zkfc'
#start yarn MapReduce daemons
ssh zjenterprise03 '/myhome/usr/hadoop/sbin/start-yarn.sh'
#start resourcemanager daemons
ssh zjenterprise04 '/myhome/usr/hadoop/sbin/yarn-daemon.sh start resourcemanager'
#start datanodes
ssh zjenterprise05 '/myhome/usr/hadoop/sbin/hadoop-daemon.sh start datanode'
ssh zjenterprise06 '/myhome/usr/hadoop/sbin/hadoop-daemon.sh start datanode'
ssh zjenterprise07 '/myhome/usr/hadoop/sbin/hadoop-daemon.sh start datanode'
#bootstrap the standby namenode (one-time step)
#ssh zjenterprise02 '/myhome/usr/hadoop/bin/hdfs namenode -bootstrapStandby'
#trigger zjenterprise01 active
ssh zjenterprise01 '/myhome/usr/hadoop/bin/hdfs haadmin -transitionToActive --forcemanual zjenterprise01'
ssh zjenterprise02 '/myhome/usr/hadoop/bin/hdfs haadmin -transitionToActive --forcemanual zjenterprise01'
ssh zjenterprise02 '/myhome/usr/hadoop/bin/hdfs haadmin -getServiceState zjenterprise01'
ssh zjenterprise02 '/myhome/usr/hadoop/bin/hdfs haadmin -getServiceState zjenterprise02'
#start base daemons
ssh zjenterprise01 '/myhome/usr/hbase/bin/start-hbase.sh'
#ssh zjenterprise02 '/myhome/usr/hbase/bin/start-hbase.sh'
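A matching stop script is not shown above; a minimal sketch that reverses the start order, assuming the same host roles and install paths as the start script:
#!/bin/sh
#sketch of a matching stop script (assumes the same hosts and paths as the start script above)
#stop hbase first, then yarn, then hdfs (with HA configured, stop-dfs.sh also stops the namenode, datanode, journalnode and zkfc daemons), then zookeeper
ssh zjenterprise01 '/myhome/usr/hbase/bin/stop-hbase.sh'
ssh zjenterprise04 '/myhome/usr/hadoop/sbin/yarn-daemon.sh stop resourcemanager'
ssh zjenterprise03 '/myhome/usr/hadoop/sbin/stop-yarn.sh'
ssh zjenterprise01 '/myhome/usr/hadoop/sbin/stop-dfs.sh'
ssh zjenterprise05 '/myhome/usr/zookeeper/bin/zkServer.sh stop'
ssh zjenterprise06 '/myhome/usr/zookeeper/bin/zkServer.sh stop'
ssh zjenterprise07 '/myhome/usr/zookeeper/bin/zkServer.sh stop'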