#!/bin/bash
# Start the mock-log.jar log generator on each log-producing host.
# Usage: run with no arguments.
# NOTE(review): host list is hard-coded; confirm hadoop01/hadoop03 are the
# intended generator nodes.
for host in hadoop01 hadoop03; do
  echo "============${host}=========="
  # '&&' ensures java only runs if the cd succeeded (with ';' a failed cd
  # would launch java in $HOME and fail to find the jar).
  # 'nohup' keeps the remote job alive after the ssh session closes.
  ssh "$host" "cd /root/project/offlineDW/applog/applog/ && nohup java -jar mock-log.jar >/dev/null 2>&1 &"
done
注:具体请参考以下链接!
简单启动jar包脚本_一个人的牛牛的博客-CSDN博客_jar包启动脚本
#!/bin/bash
# Start/stop the file->kafka Flume collection agents on the collector hosts.
# Usage: $0 {start|stop}

# Single source of truth for the Flume install location.
FLUME_HOME=/training/apache-flume-1.9.0-bin

case "$1" in
"start")
  for host in hadoop01 hadoop03; do
    echo "==========启动${host}采集flume=========="
    # nohup + redirection keeps the agent alive after the ssh session ends.
    ssh "$host" "nohup ${FLUME_HOME}/bin/flume-ng agent --conf-file ${FLUME_HOME}/conf/file-flume-kafka.conf -n a1 -Dflume.root.logger=INFO,LOGFILE >${FLUME_HOME}/flume-log.txt 2>&1 &"
  done
  ;;
"stop")
  for host in hadoop01 hadoop03; do
    echo "==========停止${host}=========="
    # Match the agent by its full config-file name with the dot escaped —
    # the original /.conf/ pattern treated '.' as "any character" and could
    # match (and kill) unrelated processes. 'xargs -r' skips kill when no
    # PID matched; plain kill (SIGTERM) lets Flume run its shutdown hooks
    # instead of being SIGKILLed mid-flight.
    ssh "$host" "ps -ef | awk '/file-flume-kafka\.conf/ && !/awk/{print \$2}' | xargs -r kill"
  done
  ;;
*)
  echo "Usage: $0 {start|stop}" >&2
  exit 1
  ;;
esac
注:具体请参考以下链接!
flume的conf文件启停脚本(生产者)_一个人的牛牛的博客-CSDN博客_flume启动停止脚本
#!/bin/bash
# Distribute a file or directory to the same absolute path on every
# cluster node via rsync.
# Usage: xsync <file-or-dir>

# 1. Require an argument. Exit non-zero on misuse (the original exited 0,
#    which hid the error from callers), and send the diagnostic to stderr.
pcount=$#
if (( pcount == 0 )); then
  echo "no args" >&2
  exit 1
fi

# 2. File name component of the argument.
p1=$1
fname=$(basename "$p1")
echo "fname=$fname"

# 3. Absolute path of the parent directory; -P resolves symlinks.
#    '&&' prevents pwd from silently reporting the current directory
#    when the cd fails.
pdir=$(cd -P "$(dirname "$p1")" && pwd) || { echo "bad path: $p1" >&2; exit 1; }
echo "pdir=$pdir"

# 4. Current user, reused as the remote login name.
user=$(whoami)

# 5. Push to every host (quoted, so paths with spaces survive).
for host in hadoop01 hadoop02 hadoop03; do
  echo "=============== $host ================"
  rsync -rvl "$pdir/$fname" "$user@$host:$pdir"
done
注:具体请参考以下链接!
jps、kafka、zookeeper群起脚本和rsync文件分发脚本(超详细)_一个人的牛牛的博客-CSDN博客_rsync脚本
#!/bin/bash
# Show the running JVM processes on every cluster node.
# The absolute jps path is used because a non-interactive ssh session
# does not load the login PATH.
for host in hadoop01 hadoop02 hadoop03; do
  echo "============= $host =============="
  ssh "$host" "/training/jdk1.8.0_171/bin/jps"
done
注:具体请参考以下链接!
jps、kafka、zookeeper群起脚本和rsync文件分发脚本(超详细)_一个人的牛牛的博客-CSDN博客_rsync脚本
#!/bin/bash
# Start / stop / query ZooKeeper on every cluster node.
# Usage: $0 {start|stop|status}

# Single source of truth for the zkServer.sh location.
ZK_BIN=/training/zookeeper-3.4.5/bin/zkServer.sh

case "$1" in
"start")
  for host in hadoop01 hadoop02 hadoop03; do
    echo "=============== $host =============="
    # Only report success when the remote command actually succeeded
    # (the original printed "success" unconditionally).
    ssh "$host" "$ZK_BIN start" && echo "success to start $host zookeeper"
    echo -e "\n\n"
  done
  ;;
"stop")
  for host in hadoop01 hadoop02 hadoop03; do
    echo "============== $host =============="
    ssh "$host" "$ZK_BIN stop" && echo "success to stop $host zookeeper"
    echo -e "\n\n"
  done
  ;;
"status")
  for host in hadoop01 hadoop02 hadoop03; do
    echo "============== $host =============="
    ssh "$host" "$ZK_BIN status"
    echo -e "\n\n"
  done
  ;;
*)
  echo "Usage: $0 {start|stop|status}" >&2
  exit 1
  ;;
esac
注:具体请参考以下链接!
jps、kafka、zookeeper群起脚本和rsync文件分发脚本(超详细)_一个人的牛牛的博客-CSDN博客_rsync脚本
#!/bin/bash
# Start / stop Kafka on every cluster node.
# Usage: $0 {start|stop}

# Single source of truth for the Kafka install location.
KAFKA_HOME=/training/kafka_2.11-2.3.1

case "$1" in
"start")
  for host in hadoop01 hadoop02 hadoop03; do
    echo "============ $host ==========="
    # Only report success when the remote command actually succeeded
    # (the original printed "success" unconditionally).
    ssh "$host" "${KAFKA_HOME}/bin/kafka-server-start.sh -daemon ${KAFKA_HOME}/config/server.properties" && echo "success to start $host kafka"
    echo -e "\n\n"
  done
  ;;
"stop")
  for host in hadoop01 hadoop02 hadoop03; do
    echo "============= $host ==============="
    ssh "$host" "${KAFKA_HOME}/bin/kafka-server-stop.sh" && echo "success to stop $host kafka"
    echo -e "\n\n"
  done
  ;;
*)
  echo "Usage: $0 {start|stop}" >&2
  exit 1
  ;;
esac
注:具体请参考以下链接!
jps、kafka、zookeeper群起脚本和rsync文件分发脚本(超详细)_一个人的牛牛的博客-CSDN博客_rsync脚本