1. Write the script
#!/bin/bash
NN1_HOSTNAME=""
NN2_HOSTNAME=""
NN1_SERVICEID=""
NN2_SERVICEID=""
NN1_SERVICESTATE=""
NN2_SERVICESTATE=""
EMAIL=503757851@qq.com
#CDH_BIN_HOME=/opt/cloudera/parcels/CDH/bin
CDH_BIN_HOME=/home/hadoop/app/hadoop/bin
# Read the nameservice name
ha_name=$(${CDH_BIN_HOME}/hdfs getconf -confKey dfs.nameservices)
# Read the namenode service IDs of that nameservice
namenode_serviceids=$(${CDH_BIN_HOME}/hdfs getconf -confKey dfs.ha.namenodes.${ha_name})
# Iterate over the comma-separated namenode service IDs
for node in ${namenode_serviceids//,/ }; do
    state=$(${CDH_BIN_HOME}/hdfs haadmin -getServiceState $node)
    if [ "$state" == "active" ]; then
        NN1_SERVICEID="${node}"
        NN1_SERVICESTATE="${state}"
        NN1_HOSTNAME=$(${CDH_BIN_HOME}/hdfs getconf -confKey dfs.namenode.rpc-address.${ha_name}.${node} | awk -F ':' '{print $1}')
        #echo "${NN1_HOSTNAME} : ${NN1_SERVICEID} : ${NN1_SERVICESTATE}"
    elif [ "$state" == "standby" ]; then
        NN2_SERVICEID="${node}"
        NN2_SERVICESTATE="${state}"
        NN2_HOSTNAME=$(${CDH_BIN_HOME}/hdfs getconf -confKey dfs.namenode.rpc-address.${ha_name}.${node} | awk -F ':' '{print $1}')
        #echo "${NN2_HOSTNAME} : ${NN2_SERVICEID} : ${NN2_SERVICESTATE}"
    else
        echo "hdfs haadmin -getServiceState $node: unknown"
    fi
done
echo " "
echo "Hostname Namenode_Serviceid Namenode_State"
echo "${NN1_HOSTNAME} ${NN1_SERVICEID} ${NN1_SERVICESTATE}"
echo "${NN2_HOSTNAME} ${NN2_SERVICEID} ${NN2_SERVICESTATE}"
# Save the current NN1/NN2 hostname and state for comparison on the next run
echo "${NN1_HOSTNAME} ${NN1_SERVICEID} ${NN1_SERVICESTATE}" > HDFS_HA.log
echo "${NN2_HOSTNAME} ${NN2_SERVICEID} ${NN2_SERVICESTATE}" >> HDFS_HA.log
# Send an alert email if the active namenode has changed
# Check whether the state file from the previous run exists
if [ -f HDFS_HA_LAST.log ]; then
    # Take the hostname that was active on the previous run (first column of the first line)
    HISTORYHOSTNAME=$(head -n 1 HDFS_HA_LAST.log | awk '{print $1}')
    # If the active namenode changed, a failover has happened
    if [ "$HISTORYHOSTNAME" != "${NN1_HOSTNAME}" ]; then
        # Failover detected: send an alert email
        echo "send a mail"
        echo "$(date "+%Y-%m-%d %H:%M:%S") : Please check the namenode logs." | mail \
            -r "From: alertAdmin <503757851@qq.com>" \
            -s "Warn: CDH HDFS HA Failover!" ${EMAIL}
    fi
fi
cat HDFS_HA.log > HDFS_HA_LAST.log
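The alert path relies on the mail command (mailx) and a working MTA on the host that runs the script. A one-off delivery test with the same sender and recipient used in the script can confirm that part independently; this is only a sanity-check sketch, not part of the script itself:
echo "HDFS HA monitor mail test" | mail \
    -r "From: alertAdmin <503757851@qq.com>" \
    -s "Test: HDFS HA monitor mail" 503757851@qq.com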
1.1. Run the script
No HDFS HA failover has happened yet, so the script only prints the current HDFS HA state of the cluster:
[hadoop@ruozedata001 hadoop]$ ./get_hdfs_ha_state.sh
Hostname Namenode_Serviceid Namenode_State
ruozedata002 nn2 active
ruozedata001 nn1 standby
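For comparison, the same state can be read directly with hdfs haadmin; a quick manual check, assuming the serviceids nn1 and nn2 shown above:
hdfs haadmin -getServiceState nn1
hdfs haadmin -getServiceState nn2
Each command prints active or standby for the corresponding NameNode.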
2. Simulate a failover
[hadoop@ruozedata001 ~]$ hdfs haadmin -failover nn2 nn1
Failover to NameNode at ruozedata001/172.19.6.116:8020 successful
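For reference, the first serviceid passed to hdfs haadmin -failover is the NameNode to transition to standby and the second is the NameNode to make active, so the command above demotes nn2 and promotes nn1:
# general form: hdfs haadmin -failover [--forcefence] [--forceactive] <from-serviceid> <to-serviceid>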
Now run the get_hdfs_ha_state.sh script again:
[hadoop@ruozedata001 hadoop]$ ./get_hdfs_ha_state.sh
Hostname Namenode_Serviceid Namenode_State
ruozedata001 nn1 active
ruozedata002 nn2 standby
send a mail # the alert email was sent
At this point the mailbox has received the alert email as well.
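To run the check continuously instead of by hand, the script can be scheduled with cron. A minimal sketch; the /home/hadoop/script path and the 5-minute interval are assumptions, adjust them to your environment. Because HDFS_HA.log and HDFS_HA_LAST.log are written relative to the working directory, the entry changes into the script's directory first:
# hypothetical crontab entry for the hadoop user (install with: crontab -e)
*/5 * * * * cd /home/hadoop/script && ./get_hdfs_ha_state.sh >> get_hdfs_ha_state.out 2>&1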