Deploying Storm and Its Configuration Files Explained
######### Install Storm ########
storm_install.sh (contents shown below) is distributed and run on every node with Ansible:
sudo ansible -i ansible_hosts elk -m copy -a "src=/letv/apache-storm-1.1.0.tar.gz dest=/letv/apache-storm-1.1.0.tar.gz "
ansible -i ansible_hosts elk -m script -a '/root/script/storm_install.sh'
ansible -i ansible_hosts elk -m shell -a  'source /etc/profile && storm supervisor &'



ansible -i ansible_hosts elk -m shell -a 'ss -lnpt'
ansible -i ansible_hosts elk -m shell -a 'ps -ef | grep storm'
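The commands above assume an Ansible inventory file named ansible_hosts that defines the elk group. A minimal sketch of such an inventory, using the three hostnames from the storm.yaml below (the real elk group may contain more nodes):

######### ansible_hosts (example) #########
cat > ansible_hosts << 'EOF'
[elk]
bops-10-183-93-129
bops-10-183-93-131
bops-10-183-93-132
EOF
#########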
#!/bin/bash
cd /letv/
tar zvxf apache-storm-1.1.0.tar.gz

ln -s /letv/apache-storm-1.1.0 /usr/local/storm


# Edit the configuration file
cat >> /letv/apache-storm-1.1.0/conf/storm.yaml << EOF

# Hosts of the ZooKeeper ensemble used by this Storm cluster
storm.zookeeper.servers:
     - "bops-10-183-93-129"
     - "bops-10-183-93-131"
     - "bops-10-183-93-132"
# Client port of the ZooKeeper ensemble (ZooKeeper's default is 2181; this ensemble listens on 21818)
storm.zookeeper.port: 21818
# Local filesystem directory used by Storm (must exist and be readable/writable by the Storm processes)
storm.local.dir: /data/hadoop/data5/storm-workdir
# Storm run mode; a cluster must be set to distributed
storm.cluster.mode: distributed
#storm.local.mode.zmq    true
# Root directory under which Storm stores its metadata in ZooKeeper
storm.zookeeper.root: /storm
storm.zookeeper.session.timeout: 60000
# Nimbus node of the whole Storm cluster (Storm 1.x deprecates nimbus.host in favor of nimbus.seeds, but it is kept here as in the original deployment)
nimbus.host: bops-10-183-93-129
storm.log.dir: /data/hadoop/data4/storm-logs
# Storm worker slots. Best set to a multiple of the number of OS cores; since Storm is in-memory real-time computation, the slot count should not exceed what each physical machine can run: (physical memory - virtual memory) / maximum memory a single Java process can occupy
supervisor.slots.ports:
        - 6700
        - 6701
        - 6702
        - 6703
        - 6704
        - 6705
        - 6706
        - 6707
        - 6708
        - 6709
        - 6710
        - 6711
        - 6712
        - 6713
        - 6714
        - 6715
        - 6716
        - 6717
        - 6718
        - 6719


EOF

# Create the Storm work and log directories referenced by storm.local.dir and storm.log.dir above
mkdir -p /data/hadoop/data5/storm-workdir /data/hadoop/data4/storm-logs
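Before starting any daemon it is worth confirming that the ZooKeeper ensemble from storm.yaml is actually reachable on port 21818. A quick sanity check using ZooKeeper's built-in ruok four-letter command (nc is assumed to be installed):

# Each node should answer "imok" if ZooKeeper is healthy
for zk in bops-10-183-93-129 bops-10-183-93-131 bops-10-183-93-132; do
  echo -n "$zk: "
  echo ruok | nc $zk 21818
  echo
done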
# Start nimbus
storm nimbus &
###########################start-nimbus.sh
#! /bin/bash
source /etc/profile
cd `dirname $0`
pwd
# Roll the existing nimbus log, tagging it with today's date
DAY=`date +%F`
if [ -f "/data/hadoop/data4/storm-logs/nimbus.log" ]; then
        mv /data/hadoop/data4/storm-logs/nimbus.log /data/hadoop/data4/storm-logs/nimbus.log_${DAY}
fi
# Start nimbus only if jps shows no running nimbus process
COUNT=`jps | grep nimbus  | wc -l`
if [ $COUNT -eq 0 ]; then
  nohup /usr/local/storm/bin/storm nimbus 1>/data/hadoop/data4/storm-logs/nimbus.log 2>&1 &
  sleep 5
fi
################

# Start the supervisor
storm supervisor &

##############start-storm.sh
#! /bin/bash
source /etc/profile
cd `dirname $0`
pwd

DAY=`date +%F`
OLD=`date -d "2 days ago" +%F`
TIME=`date +"%F %H:%M:%S"`

LOG="log_monitor_storm_supervisor_$DAY"
rm -f log_monitor_storm_supervisor_$OLD.log
echo starting supervisors,time: `date`  1>> $LOG.log 2>&1
for stormserver in $(cat ../conf/slaves)
    do
    echo stormserver: $stormserver
    echo sh monitor-java-process.sh ${stormserver} 15801342789 supervisor "sh /usr/local/storm/bin/start-supervisor.sh"
    sh monitor-java-process.sh ${stormserver} 15801342789 supervisor "sh /usr/local/storm/bin/start-supervisor.sh"
done 1>> $LOG.log 2>&1

NIMBUS_HOST=`tail -n 1 ../conf/nimbus`
sh monitor-java-process.sh ${NIMBUS_HOST} 15801342789 nimbus "sh /usr/local/storm/bin/start-nimbus.sh" 1>> $LOG.log 2>&1
echo ending time: `date`  1>> $LOG.log 2>&1
###########
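start-storm.sh reads the supervisor hostnames from ../conf/slaves (one per line) and the Nimbus hostname from ../conf/nimbus, and delegates the actual check-and-restart work to monitor-java-process.sh, which is not shown above. A minimal sketch of what such a helper might look like, assuming its four arguments are the target host, an alert phone number, the jps process name to check, and the restart command (the alert call at the end is only a placeholder for whatever notification tool is really in place):

##############monitor-java-process.sh (sketch, not the original)
#!/bin/bash
# Usage: monitor-java-process.sh <host> <alert-phone> <jps-name> <restart-command>
HOST=$1
PHONE=$2
PROC=$3
RESTART_CMD=$4

# Count matching Java processes on the remote host
COUNT=`ssh $HOST "source /etc/profile; jps | grep $PROC | wc -l"`
if [ $COUNT -eq 0 ]; then
  echo "`date '+%F %T'` $PROC not running on $HOST, restarting"
  ssh $HOST "source /etc/profile; $RESTART_CMD"
  # Placeholder: hook the real SMS/alert tool in here
  # send_alert $PHONE "$PROC restarted on $HOST"
fi
##############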



# Start the UI
storm ui  &
####################ui############
#!/bin/bash
source /etc/profile
cd `dirname $0`
pwd

DAY=`date +%F`
if [ -f "/data/hadoop/data4/storm-logs/storm-ui.log" ]; then
    mv /data/hadoop/data4/storm-logs/storm-ui.log /data/hadoop/data4/storm-logs/storm-ui.log_${DAY}
fi
# The Storm UI daemon shows up in jps as "core"
COUNT=`jps | grep core  | wc -l`
if [ $COUNT -eq 0 ]; then
  nohup /usr/local/storm/bin/storm ui > /data/hadoop/data4/storm-logs/storm-ui.log 2>&1 &
  sleep 5
fi
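Once the UI daemon is running it can be checked from the shell as well as from the browser; a quick probe of the UI REST API on the default ui.port 8080 (hostname taken from nimbus.host above):

# Should return a JSON cluster summary when the UI is up
curl -s http://bops-10-183-93-129:8080/api/v1/cluster/summary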


#################start-supervisor.sh
#!/bin/bash
source /etc/profile
cd `dirname $0`
pwd

DAY=`date +%F`

if [ -f "/data/hadoop/data4/storm-logs/supervisor.log" ]; then
        mv /data/hadoop/data4/storm-logs/supervisor.log /data/hadoop/data4/storm-logs/supervisor.log_${DAY}
fi

COUNT=`jps | grep supervisor  | wc -l`
if [ $COUNT -eq 0 ]; then
  echo starting supervisor
  echo `date "+%Y%m%d %H:%M:%S"` COUNT=0, starting supervisor >> /data/hadoop/data4/storm-logs/supervisor-restart.log
  nohup /usr/local/storm/bin/storm supervisor 1> /data/hadoop/data4/storm-logs/supervisor.log  2>&1 &
  sleep 5
  #bin/storm all-supervisor >/dev/null 2>&1 &
fi

#########
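The start-*.sh scripts above can be distributed and triggered the same way as the install script. A sketch reusing the same inventory and group (the source path is an assumption):

ansible -i ansible_hosts elk -m copy -a 'src=/root/script/start-supervisor.sh dest=/usr/local/storm/bin/start-supervisor.sh mode=0755'
ansible -i ansible_hosts elk -m shell -a 'source /etc/profile && sh /usr/local/storm/bin/start-supervisor.sh'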







  1. Nimbus: on the master machine, run bin/storm nimbus under supervision (see the cron example after this list).
  2. Supervisor: on each worker node, run bin/storm supervisor under supervision. The supervisor daemon is responsible for starting and stopping the worker processes on that machine.
  3. UI: on the master machine, run bin/storm ui under supervision to start the Storm UI daemon (a web site that makes it easy to monitor the cluster and its topologies from a browser). The UI can then be reached at http://{nimbus.host}:8080.
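Here "under supervision" is exactly what the start-*.sh scripts above provide: they only launch a daemon when jps shows it is not running, so they are safe to run repeatedly. A typical way to wire this up is a cron entry on the host that runs start-storm.sh (the interval and log path below are assumptions):

# crontab entry (example): re-check nimbus and all supervisors every 5 minutes
*/5 * * * * sh /usr/local/storm/bin/start-storm.sh >> /data/hadoop/data4/storm-logs/cron-start-storm.log 2>&1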

Finally, add the environment variables:
#!/bin/bash

# Quote EOF so the variables below are written literally and expanded when /etc/profile is sourced, not now
cat >> /etc/profile << 'EOF'
export JAVA_HOME=/usr/local/java
export SCALA_HOME=/usr/local/scala
export KAFKA_HOME=/usr/local/kafka
export PATH=$JAVA_HOME/bin:$SCALA_HOME/bin:$KAFKA_HOME/bin:$PATH
export STORM_HOME=/usr/local/storm
export PATH=${PATH}:${STORM_HOME}/bin
export ZK_CONNECT=10.183.93.129:21818,10.183.93.131:21818,10.183.93.132:21818/kafka
EOF
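Reload the profile and confirm the storm command resolves:

source /etc/profile
which storm
storm version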
