Hadoop + ZooKeeper + Kafka installation and usage tutorial
Hadoop installation
[root@hadoop1 ~]# yum install -y java-1.8.0-openjdk java-1.8.0-openjdk-devel
[root@hadoop1 ~]# tar -zxf hadoop-2.7.7.tar.gz
[root@hadoop1 ~]# mv hadoop-2.7.7 /usr/local/hadoop
[root@hadoop1 ~]# chown -R root:root /usr/local/hadoop
Configure the Java runtime environment (with the yum-installed OpenJDK, JAVA_HOME can simply point at /usr)
[root@hadoop1 ~]# vim /etc/hosts
192.168.1.50 hadoop1
[root@hadoop1 ~]# vim /usr/local/hadoop/etc/hadoop/hadoop-env.sh
25: export JAVA_HOME="/usr"
33: export HADOOP_CONF_DIR="/usr/local/hadoop/etc/hadoop"
[root@hadoop1 ~]# /usr/local/hadoop/bin/hadoop version

HDFS deployment
Every node needs the JDK and the host entries below:
[root@hadoop1 ~]# yum install -y java-1.8.0-openjdk java-1.8.0-openjdk-devel
[root@hadoop1 ~]# vim /etc/hosts
192.168.1.50 hadoop1
192.168.1.51 node-0001
192.168.1.52 node-0002
192.168.1.53 node-0003
The following operations run only on hadoop1
[root@hadoop1 ~]# vim /etc/ssh/ssh_config
# add at line 60
StrictHostKeyChecking no
[root@hadoop1 ~]# ssh-keygen -t rsa -b 2048 -N '' -f /root/.ssh/id_rsa
[root@hadoop1 ~]# for i in hadoop1 node-{0001..0003};do
ssh-copy-id -i /root/.ssh/id_rsa.pub ${i}
done
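Before running any cluster-wide loops, it is worth confirming that passwordless login actually works; a quick check (a sketch over the same host list):
[root@hadoop1 ~]# for i in hadoop1 node-{0001..0003};do
ssh ${i} hostname
done
Each iteration should print the remote hostname without prompting for a password.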
Hadoop configuration file syntax:
<property>
<name></name>
<value></value>
</property>
1. Configure hadoop-env.sh (see the "Configure the Java runtime environment" example above)
2. Configure slaves (replace the default localhost entry with the DataNode hostnames):
[root@hadoop1 ~]# vim /usr/local/hadoop/etc/hadoop/slaves
node-0001
node-0002
node-0003
3. Configure core-site.xml
[root@hadoop1 ~]# vim /usr/local/hadoop/etc/hadoop/core-site.xml
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://hadoop1:9000</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>/var/hadoop</value>
</property>
</configuration>
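To confirm that Hadoop actually picked up a value, the stock hdfs CLI can echo any configuration key back; for example:
[root@hadoop1 ~]# /usr/local/hadoop/bin/hdfs getconf -confKey fs.defaultFS
hdfs://hadoop1:9000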
4. Configure hdfs-site.xml
[root@hadoop1 ~]# vim /usr/local/hadoop/etc/hadoop/hdfs-site.xml
<configuration>
<property>
<name>dfs.namenode.http-address</name>
<value>hadoop1:50070</value>
</property>
<property>
<name>dfs.namenode.secondary.http-address</name>
<value>hadoop1:50090</value>
</property>
<property>
<name>dfs.replication</name>
<value>2</value>
</property>
</configuration>
5. Sync Hadoop to the worker nodes, then format the NameNode and start HDFS
[root@hadoop1 ~]# for i in node-{0001..0003};do
rsync -aXSH --delete /usr/local/hadoop ${i}:/usr/local/
done
[root@hadoop1 ~]# mkdir /var/hadoop
[root@hadoop1 ~]# /usr/local/hadoop/bin/hdfs namenode -format
[root@hadoop1 ~]# /usr/local/hadoop/sbin/start-dfs.sh
6. Verify the cluster configuration
[root@hadoop1 ~]# /usr/local/hadoop/bin/hdfs dfsadmin -report
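Besides dfsadmin -report, a per-node jps sweep (a sketch, using the passwordless ssh set up earlier) shows which daemon landed where; expect NameNode and SecondaryNameNode on hadoop1 and a DataNode on each worker:
[root@hadoop1 ~]# for i in hadoop1 node-{0001..0003};do
echo ${i}; ssh ${i} jps
done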
MapReduce deployment
[root@hadoop1 ~]# cd /usr/local/hadoop/etc/hadoop/
[root@hadoop1 hadoop]# cp mapred-site.xml.template mapred-site.xml
[root@hadoop1 hadoop]# vim mapred-site.xml
<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
</configuration>

YARN deployment
[root@hadoop1 ~]# vim /usr/local/hadoop/etc/hadoop/yarn-site.xml
<configuration>
<property>
<name>yarn.resourcemanager.hostname</name>
<value>hadoop1</value>
</property>
<!-- Site specific YARN configuration properties -->
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
</configuration>
Start the cluster [the following runs only on hadoop1]
[root@hadoop1 ~]# for i in node-{0001..0003};do
rsync -avXSH --delete /usr/local/hadoop/etc ${i}:/usr/local/hadoop/
done
[root@hadoop1 ~]# /usr/local/hadoop/sbin/start-yarn.sh
Verify the cluster
[root@hadoop1 ~]# /usr/local/hadoop/bin/yarn node -list
Web UI access
namenode: http://hadoop1:50070
secondarynamenode: http://hadoop1:50090
resourcemanager: http://hadoop1:8088/cluster
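If no browser is handy, the UIs can be probed from the shell (a quick sketch; expect 200 once the daemons are up):
[root@hadoop1 ~]# curl -s -o /dev/null -w '%{http_code}\n' http://hadoop1:50070/
200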
Re-initializing the cluster
Warning: this procedure destroys all data
1. Stop the cluster: /usr/local/hadoop/sbin/stop-all.sh
2. Delete /var/hadoop/* on every node
3. Re-format on hadoop1: /usr/local/hadoop/bin/hdfs namenode -format
4. Start the cluster: /usr/local/hadoop/sbin/start-all.sh
[root@hadoop1 ~]# /usr/local/hadoop/sbin/stop-all.sh
[root@hadoop1 ~]# for i in hadoop1 node-{0001..0003};do
ssh ${i} 'rm -rf /var/hadoop/*'
done
[root@hadoop1 ~]# /usr/local/hadoop/bin/hdfs namenode -format
[root@hadoop1 ~]# /usr/local/hadoop/sbin/start-all.sh
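A quick smoke test after re-initialization (a sketch; the examples jar ships with the Hadoop 2.7.7 tarball) runs the pi estimator as a MapReduce job on YARN:
[root@hadoop1 ~]# /usr/local/hadoop/bin/hadoop jar /usr/local/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.7.jar pi 2 10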
Adding a new node (run on hadoop1)
[root@hadoop1 ~]# ssh-copy-id -i /root/.ssh/id_rsa.pub 192.168.1.54
[root@hadoop1 ~]# vim /etc/hosts
192.168.1.50 hadoop1
192.168.1.51 node-0001
192.168.1.52 node-0002
192.168.1.53 node-0003
192.168.1.54 newnode
[root@hadoop1 ~]# for i in node-{0001..0003} newnode;do
rsync -av /etc/hosts ${i}:/etc/
done
[root@hadoop1 ~]# rsync -aXSH /usr/local/hadoop newnode:/usr/local/
Run on the new node
[root@newnode ~]# yum install -y java-1.8.0-openjdk-devel
[root@newnode ~]# /usr/local/hadoop/sbin/hadoop-daemon.sh start datanode
# cap the balancer bandwidth (bytes per second)
[root@newnode ~]# /usr/local/hadoop/bin/hdfs dfsadmin -setBalancerBandwidth 500000000
# rebalance existing blocks onto the new node
[root@newnode ~]# /usr/local/hadoop/sbin/start-balancer.sh
[root@newnode ~]# /usr/local/hadoop/sbin/yarn-daemon.sh start nodemanager
[root@newnode ~]# jps
1186 DataNode
1431 NodeManager
1535 Jps
Verify the cluster (run on hadoop1)
[root@hadoop1 ~]# /usr/local/hadoop/bin/hdfs dfsadmin -report
... ...
-------------------------------------------------
Live datanodes (4):
[root@hadoop1 ~]# /usr/local/hadoop/bin/yarn node -list
Removing a node
Configure data migration in hdfs-site.xml (on hadoop1; no need to sync it to the other nodes)
[root@hadoop1 ~]# vim /usr/local/hadoop/etc/hadoop/hdfs-site.xml
<property>
<name>dfs.hosts.exclude</name>
<value>/usr/local/hadoop/etc/hadoop/exclude</value>
</property>
Populate the exclude list and migrate the data off the node (run on hadoop1)
# add newnode to the exclude file
[root@hadoop1 ~]# echo newnode >/usr/local/hadoop/etc/hadoop/exclude
# migrate the data
[root@hadoop1 ~]# /usr/local/hadoop/bin/hdfs dfsadmin -refreshNodes
# check the status; the node may only be taken offline once it reports Decommissioned
[root@hadoop1 ~]# /usr/local/hadoop/bin/hdfs dfsadmin -report
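Decommissioning can take a while on a loaded cluster; a convenience sketch that polls the per-node status every 10 seconds:
[root@hadoop1 ~]# watch -n 10 '/usr/local/hadoop/bin/hdfs dfsadmin -report | grep "Decommission Status"'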
Take the node offline (run on newnode)
[root@newnode ~]# /usr/local/hadoop/sbin/hadoop-daemon.sh stop datanode
[root@newnode ~]# /usr/local/hadoop/sbin/yarn-daemon.sh stop nodemanager

HDFS user authorization
Add the proxy user on both hadoop1 and nfsgw, with identical name, uid, and gid on both hosts:
[root@hadoop1 ~]# groupadd -g 800 nfsuser
[root@hadoop1 ~]# useradd -g 800 -u 800 -r -d /var/hadoop nfsuser
#----------------------------------------------------------------------------------------
[root@nfsgw ~]# groupadd -g 800 nfsuser
[root@nfsgw ~]# useradd -g 800 -u 800 -r -d /var/hadoop nfsuser
HDFS cluster authorization (the proxyuser entries allow nfsuser to impersonate users from any group and any host)
[root@hadoop1 ~]# vim /usr/local/hadoop/etc/hadoop/core-site.xml
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://hadoop1:9000</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>/var/hadoop</value>
</property>
<property>
<name>hadoop.proxyuser.nfsuser.groups</name>
<value>*</value>
</property>
<property>
<name>hadoop.proxyuser.nfsuser.hosts</name>
<value>*</value>
</property>
</configuration>
[root@hadoop1 ~]# /usr/local/hadoop/sbin/stop-all.sh
[root@hadoop1 ~]# for i in node-{0001..0003};do
rsync -avXSH /usr/local/hadoop/etc ${i}:/usr/local/hadoop/
done
[root@hadoop1 ~]# /usr/local/hadoop/sbin/start-dfs.sh
[root@hadoop1 ~]# jps
5925 NameNode
6122 SecondaryNameNode
6237 Jps
[root@hadoop1 ~]# /usr/local/hadoop/bin/hdfs dfsadmin -report
... ...
-------------------------------------------------
Live datanodes (3):
NFS gateway host (nfsgw) preparation
# the gateway provides its own portmap and nfs3 services, so remove the system ones to avoid port conflicts
[root@nfsgw ~]# yum remove -y rpcbind nfs-utils
[root@nfsgw ~]# vim /etc/hosts
192.168.1.50 hadoop1
192.168.1.51 node-0001
192.168.1.52 node-0002
192.168.1.53 node-0003
192.168.1.55 nfsgw
[root@nfsgw ~]# yum install -y java-1.8.0-openjdk-devel
Configure the HDFS client
[root@nfsgw ~]# rsync -aXSH --delete hadoop1:/usr/local/hadoop /usr/local/
[root@nfsgw ~]# /usr/local/hadoop/bin/hadoop fs -ls /
...
Configure the gateway service
[root@nfsgw ~]# vim /usr/local/hadoop/etc/hadoop/hdfs-site.xml
<configuration>
<property>
<name>dfs.namenode.http-address</name>
<value>hadoop1:50070</value>
</property>
<property>
<name>dfs.namenode.secondary.http-address</name>
<value>hadoop1:50090</value>
</property>
<property>
<name>dfs.replication</name>
<value>2</value>
</property>
<property>
<name>dfs.hosts.exclude</name>
<value>/usr/local/hadoop/etc/hadoop/exclude</value>
</property>
<property>
<name>nfs.exports.allowed.hosts</name>
<value>* rw</value>
</property>
<property>
<name>nfs.dump.dir</name>
<value>/var/nfstmp</value>
</property>
</configuration>
Start the gateway service (portmap first, as root; then nfs3 as nfsuser)
[root@nfsgw ~]# mkdir /var/nfstmp
[root@nfsgw ~]# chown nfsuser:nfsuser /var/nfstmp
[root@nfsgw ~]# rm -rf /usr/local/hadoop/logs/*
[root@nfsgw ~]# setfacl -m user:nfsuser:rwx /usr/local/hadoop/logs
[root@nfsgw ~]# getfacl /usr/local/hadoop/logs
[root@nfsgw ~]# cd /usr/local/hadoop/
[root@nfsgw hadoop]# ./sbin/hadoop-daemon.sh --script ./bin/hdfs start portmap
[root@nfsgw hadoop]# jps
1376 Portmap
1416 Jps
# clear the default dump directory left over from any previous run
[root@nfsgw hadoop]# rm -rf /tmp/.hdfs-nfs
[root@nfsgw hadoop]# sudo -u nfsuser ./sbin/hadoop-daemon.sh --script ./bin/hdfs start nfs3
[root@nfsgw hadoop]# sudo -u nfsuser jps
1452 Nfs3
1502 Jps
Mount verification (the gateway only speaks NFSv3 over TCP, hence the vers=3,proto=tcp,nolock options)
[root@newnode ~]# yum install -y nfs-utils
[root@newnode ~]# showmount -e 192.168.1.55
Export list for 192.168.1.55:
/ *
[root@newnode ~]# mount -t nfs -o vers=3,proto=tcp,nolock,noacl,noatime,sync 192.168.1.55:/ /mnt/
[root@newnode ~]# df -h
Filesystem Size Used Avail Use% Mounted on
192.168.1.55:/ 118G 15G 104G 13% /mnt
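Files written through the mount land in HDFS (the gateway supports sequential writes only, not random writes); a minimal read/write check:
[root@newnode ~]# cp /etc/hosts /mnt/
[root@newnode ~]# ls -l /mnt/hosts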

ZooKeeper installation
[root@hadoop1 ~]# yum install -y java-1.8.0-openjdk-devel
[root@hadoop1 ~]# tar zxf zookeeper-3.4.13.tar.gz
[root@hadoop1 ~]# mv zookeeper-3.4.13 /usr/local/zookeeper
[root@hadoop1 ~]# cd /usr/local/zookeeper/conf/
[root@hadoop1 conf]# cp zoo_sample.cfg zoo.cfg
[root@hadoop1 conf]# vim zoo.cfg
# append at the end of the file
server.1=node-0001:2888:3888
server.2=node-0002:2888:3888
server.3=node-0003:2888:3888
server.4=hadoop1:2888:3888:observer
# hadoop1 joins as an observer: it serves clients but does not vote in leader elections
[root@hadoop1 ~]# for i in node-{0001..0003};do
rsync -aXSH --delete /usr/local/zookeeper ${i}:/usr/local/
done
Start the service manually on every node (repeat the following on hadoop1 and node-0001 to node-0003)
[root@hadoop1 ~]# mkdir /tmp/zookeeper
# extract this host's server id from zoo.cfg and write it to myid
[root@hadoop1 ~]# grep -Po "\d+(?==${HOSTNAME})" /usr/local/zookeeper/conf/zoo.cfg >/tmp/zookeeper/myid
[root@hadoop1 ~]# /usr/local/zookeeper/bin/zkServer.sh start
[root@hadoop1 ~]# jps
1001 QuorumPeerMain
Once every node is up, verify with:
/usr/local/zookeeper/bin/zkServer.sh status
ZooKeeper cluster management
[root@hadoop1 ~]# yum install -y socat
# send the "ruok" four-letter command; a healthy server answers "imok"
[root@hadoop1 ~]# socat - TCP:node-0001:2181
ruok
imok
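The zkstats helper used below is not part of ZooKeeper; a minimal sketch of such a script, built on the same socat trick with the "stat" four-letter command (the script name and location are this guide's own convention):
[root@hadoop1 ~]# cd /usr/local/zookeeper/bin
[root@hadoop1 bin]# vim zkstats
#!/bin/bash
# For every host given on the command line, send the "stat" four-letter
# command to port 2181 and print the Mode line (leader/follower/observer).
for host in "$@"; do
echo -ne "${host}\t"
echo stat | socat - TCP:${host}:2181 2>/dev/null | grep '^Mode: '
done
[root@hadoop1 bin]# chmod 755 zkstats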
[root@hadoop1 bin]# ./zkstats hadoop1 node-{0001..0003}
hadoop1 Mode: observer
node-0001 Mode: follower
node-0002 Mode: leader
node-0003 Mode: follower
Kafka installation
1. Install and distribute Kafka (on hadoop1)
[root@hadoop1 ~]# yum install -y java-1.8.0-openjdk-devel
[root@hadoop1 ~]# tar zxf kafka_2.12-2.1.0.tgz
[root@hadoop1 ~]# mv kafka_2.12-2.1.0 /usr/local/kafka
[root@hadoop1 ~]# for i in node-{0001..0003};do
rsync -aXSH --delete /usr/local/kafka ${i}:/usr/local/
done
2. Edit the configuration on node-0001, node-0002, node-0003 and start the service (broker.id must be unique per broker: 1, 2, 3)
[root@node-0001 ~]# vim /usr/local/kafka/config/server.properties
21 broker.id=1
123 zookeeper.connect=node-0001:2181,node-0002:2181,node-0003:2181
[root@node-0001 ~]# /usr/local/kafka/bin/kafka-server-start.sh -daemon /usr/local/kafka/config/server.properties
[root@node-0001 ~]# jps
1400 Kafka
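To confirm each broker registered with ZooKeeper, list the ids under /brokers/ids (a sketch using the zkCli shipped with ZooKeeper; expect [1, 2, 3] once all three brokers are up):
[root@node-0001 ~]# /usr/local/zookeeper/bin/zkCli.sh -server localhost:2181 ls /brokers/ids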
3. Verify (run the commands on different machines)
[root@node-0001 ~]# /usr/local/kafka/bin/kafka-topics.sh --create --partitions 1 --replication-factor 1 --zookeeper localhost:2181 --topic mymsg
#----------------------------------------------------------------------------------------
[root@node-0002 ~]# /usr/local/kafka/bin/kafka-console-producer.sh --broker-list localhost:9092 --topic mymsg
#----------------------------------------------------------------------------------------
[root@node-0003 ~]# /usr/local/kafka/bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic mymsg
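kafka-topics.sh can also report partition and replica placement for the topic just created:
[root@node-0001 ~]# /usr/local/kafka/bin/kafka-topics.sh --describe --zookeeper localhost:2181 --topic mymsg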


Hadoop high-availability (HA) deployment
Environment initialization
Run on hadoop1
[root@hadoop1 ~]# vim /etc/hosts
192.168.1.50 hadoop1
192.168.1.56 hadoop2
192.168.1.51 node-0001
192.168.1.52 node-0002
192.168.1.53 node-0003
[root@hadoop1 ~]# rsync -aXSH --delete /root/.ssh hadoop2:/root/
[root@hadoop1 ~]# for i in hadoop2 node-{0001..0003};do
rsync -av /etc/hosts ${i}:/etc/
done
Run on hadoop2
[root@hadoop2 ~]# yum install -y java-1.8.0-openjdk-devel
[root@hadoop2 ~]# vim /etc/ssh/ssh_config
# add at line 60
StrictHostKeyChecking no
Cluster configuration files
Configure the following files on hadoop1
1. Configure hadoop-env.sh
[root@hadoop1 ~]# vim /usr/local/hadoop/etc/hadoop/hadoop-env.sh
25: export JAVA_HOME="<java-1.8.0-openjdk install path>"   # "/usr" for the yum-installed JDK, as above
33: export HADOOP_CONF_DIR="/usr/local/hadoop/etc/hadoop"
2. Configure slaves
[root@hadoop1 ~]# vim /usr/local/hadoop/etc/hadoop/slaves
node-0001
node-0002
node-0003
3. Configure core-site.xml
[root@hadoop1 ~]# vim /usr/local/hadoop/etc/hadoop/core-site.xml
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://mycluster</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>/var/hadoop</value>
</property>
<property>
<name>ha.zookeeper.quorum</name>
<value>node-0001:2181,node-0002:2181,node-0003:2181</value>
</property>
<property>
<name>hadoop.proxyuser.nfsuser.groups</name>
<value>*</value>
</property>
<property>
<name>hadoop.proxyuser.nfsuser.hosts</name>
<value>*</value>
</property>
</configuration>
4. Configure hdfs-site.xml
[root@hadoop1 ~]# vim /usr/local/hadoop/etc/hadoop/hdfs-site.xml
<configuration>
<property>
<name>dfs.nameservices</name>
<value>mycluster</value>
</property>
<property>
<name>dfs.ha.namenodes.mycluster</name>
<value>nn1,nn2</value>
</property>
<property>
<name>dfs.namenode.rpc-address.mycluster.nn1</name>
<value>hadoop1:8020</value>
</property>
<property>
<name>dfs.namenode.rpc-address.mycluster.nn2</name>
<value>hadoop2:8020</value>
</property>
<property>
<name>dfs.namenode.http-address.mycluster.nn1</name>
<value>hadoop1:50070</value>
</property>
<property>
<name>dfs.namenode.http-address.mycluster.nn2</name>
<value>hadoop2:50070</value>
</property>
<property>
<name>dfs.namenode.shared.edits.dir</name>
<value>qjournal://node-0001:8485;node-0002:8485;node-0003:8485/mycluster</value>
</property>
<property>
<name>dfs.journalnode.edits.dir</name>
<value>/var/hadoop/journal</value>
</property>
<property>
<name>dfs.client.failover.proxy.provider.mycluster</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<property>
<name>dfs.ha.fencing.methods</name>
<value>sshfence</value>
</property>
<property>
<name>dfs.ha.fencing.ssh.private-key-files</name>
<value>/root/.ssh/id_rsa</value>
</property>
<property>
<name>dfs.ha.automatic-failover.enabled</name>
<value>true</value>
</property>
<property>
<name>dfs.replication</name>
<value>2</value>
</property>
<property>
<name>dfs.hosts.exclude</name>
<value>/usr/local/hadoop/etc/hadoop/exclude</value>
</property>
</configuration>
5. Configure mapred-site.xml
[root@hadoop1 ~]# vim /usr/local/hadoop/etc/hadoop/mapred-site.xml
<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
</configuration>
6. Configure yarn-site.xml
[root@hadoop1 ~]# vim /usr/local/hadoop/etc/hadoop/yarn-site.xml
<configuration>
<property>
<name>yarn.resourcemanager.ha.enabled</name>
<value>true</value>
</property>
<property>
<name>yarn.resourcemanager.recovery.enabled</name>
<value>true</value>
</property>
<property>
<name>yarn.resourcemanager.store.class</name>
<value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
</property>
<property>
<name>yarn.resourcemanager.zk-address</name>
<value>node-0001:2181,node-0002:2181,node-0003:2181</value>
</property>
<property>
<name>yarn.resourcemanager.cluster-id</name>
<value>yarn-ha</value>
</property>
<property>
<name>yarn.resourcemanager.ha.rm-ids</name>
<value>rm1,rm2</value>
</property>
<property>
<name>yarn.resourcemanager.hostname.rm1</name>
<value>hadoop1</value>
</property>
<property>
<name>yarn.resourcemanager.hostname.rm2</name>
<value>hadoop2</value>
</property>
<!-- Site specific YARN configuration properties -->
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
</configuration>
Initialize and start the cluster
1. Reboot the machines, then start zookeeper on node-0001, node-0002, node-0003
[root@node-0001 ~]# /usr/local/zookeeper/bin/zkServer.sh start
#----------------------------------------------------------------------------------------
[root@node-0002 ~]# /usr/local/zookeeper/bin/zkServer.sh start
#----------------------------------------------------------------------------------------
[root@node-0003 ~]# /usr/local/zookeeper/bin/zkServer.sh start
#----------------------------------------------------------------------------------------
[root@hadoop1 ~]# zkstats node-{0001..0003}
node-0001 Mode: follower
node-0002 Mode: leader
node-0003 Mode: follower
2. Wipe the experiment data and sync the configuration files (run on hadoop1)
[root@hadoop1 ~]# rm -rf /var/hadoop/* /usr/local/hadoop/logs
[root@hadoop1 ~]# for i in hadoop2 node-{0001..0003};do
rsync -av /etc/hosts ${i}:/etc/
rsync -aXSH --delete /var/hadoop ${i}:/var/
rsync -aXSH --delete /usr/local/hadoop ${i}:/usr/local/
done
3. Start the journalnode service on node-0001, node-0002, node-0003
[root@node-0001 ~]# /usr/local/hadoop/sbin/hadoop-daemon.sh start journalnode
[root@node-0001 ~]# jps
1037 JournalNode
#----------------------------------------------------------------------------------------
[root@node-0002 ~]# /usr/local/hadoop/sbin/hadoop-daemon.sh start journalnode
#----------------------------------------------------------------------------------------
[root@node-0003 ~]# /usr/local/hadoop/sbin/hadoop-daemon.sh start journalnode
4. Initialize (run on hadoop1)
[root@hadoop1 ~]# /usr/local/hadoop/bin/hdfs zkfc -formatZK
[root@hadoop1 ~]# /usr/local/hadoop/bin/hdfs namenode -format
[root@hadoop1 ~]# /usr/local/hadoop/bin/hdfs namenode -initializeSharedEdits
# copy the formatted NameNode metadata to the standby NameNode (hadoop2)
[root@hadoop1 ~]# rsync -aXSH --delete /var/hadoop/dfs hadoop2:/var/hadoop/
5. Stop the journalnode service on node-0001, node-0002, node-0003
[root@node-0001 ~]# /usr/local/hadoop/sbin/hadoop-daemon.sh stop journalnode
#----------------------------------------------------------------------------------------
[root@node-0002 ~]# /usr/local/hadoop/sbin/hadoop-daemon.sh stop journalnode
#----------------------------------------------------------------------------------------
[root@node-0003 ~]# /usr/local/hadoop/sbin/hadoop-daemon.sh stop journalnode
6. Start the cluster
#-------------------- run the next command on hadoop1 ----------------------------------------
[root@hadoop1 ~]# /usr/local/hadoop/sbin/start-all.sh
#-------------------- run the next command on hadoop2 ----------------------------------------
[root@hadoop2 ~]# /usr/local/hadoop/sbin/yarn-daemon.sh start resourcemanager
Verify the cluster
[root@hadoop1 ~]# /usr/local/hadoop/bin/hdfs haadmin -getServiceState nn1
[root@hadoop1 ~]# /usr/local/hadoop/bin/hdfs haadmin -getServiceState nn2
[root@hadoop1 ~]# /usr/local/hadoop/bin/yarn rmadmin -getServiceState rm1
[root@hadoop1 ~]# /usr/local/hadoop/bin/yarn rmadmin -getServiceState rm2
[root@hadoop1 ~]# /usr/local/hadoop/bin/hdfs dfsadmin -report
[root@hadoop1 ~]# /usr/local/hadoop/bin/yarn node -list
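A failover drill rounds out the verification (a sketch; it assumes nn1 is currently active): stop the active NameNode, confirm the standby was promoted, then restart the stopped one.
[root@hadoop1 ~]# /usr/local/hadoop/sbin/hadoop-daemon.sh stop namenode
[root@hadoop1 ~]# /usr/local/hadoop/bin/hdfs haadmin -getServiceState nn2
active
[root@hadoop1 ~]# /usr/local/hadoop/sbin/hadoop-daemon.sh start namenode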