Big Data: Cluster Start/Stop Scripts
Note: before running any of the cluster scripts below, grant the script execute permission first: chmod +x <script file name>
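For example, if the Zookeeper script below is saved as zk.sh (all script file names used in the examples here are illustrative, pick your own):
chmod +x zk.sh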
1. Zookeeper cluster script
#!/bin/bash
case $1 in
"start") {
for i in hadoop102 hadoop103 hadoop104; do
echo ============== zookeeper $i start ===============
ssh $i "/opt/module/zookeeper/bin/zkServer.sh start"
done
};;
"stop") {
for i in hadoop102 hadoop103 hadoop104; do
echo ============== zookeeper $i stop ===============
ssh $i "/opt/module/zookeeper/bin/zkServer.sh stop"
done
};;
"status") {
for i in hadoop102 hadoop103 hadoop104; do
echo ============== zookeeper $i status ===============
ssh $i "/opt/module/zookeeper/bin/zkServer.sh status"
done
};;
esac
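A minimal usage sketch, assuming the script was saved as zk.sh (illustrative name) and is on the PATH:
zk.sh start
zk.sh status
zk.sh stop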
2. Kafka cluster script
#!/bin/bash
case $1 in
"start") {
for i in hadoop102 hadoop103 hadoop104; do
echo ============== kafka $i start ===============
ssh $i "/opt/module/kafka/bin/kafka-server-start.sh -daemon /opt/module/kafka/config/server.properties"
done
};;
"stop") {
for i in hadoop102 hadoop103 hadoop104; do
echo ============== kafka $i stop ===============
ssh $i "/opt/module/kafka/bin/kafka-server-stop.sh"
done
};;
esac
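Usage sketch, again with an illustrative file name kf.sh:
kf.sh start
kf.sh stop
When shutting down the whole cluster, stop Kafka first and wait for the broker processes to disappear from jps before stopping Zookeeper, since the brokers still depend on Zookeeper while they shut down.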
3. Flume script
#!/bin/bash
case $1 in
start )
for i in hadoop102 hadoop103; do
echo ============== flume $i start ===============
ssh $i "/opt/module/flume-1.9.0/bin/flume-ng agent -n a1 -c /opt/module/flume-1.9.0/conf/ -f /opt/module/flume-1.9.0/datas/taildir_kafka.conf >/dev/null 2>&1 &"
done
;;
stop )
for i in hadoop102 hadoop103; do
echo ============== flume $i stop ===============
ssh $i "ps -ef | awk '/taildir_kafka.conf/ && !/awk/ {print \$2}' | xargs kill -9"
done
;;
esac
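Usage sketch, with f1.sh as an illustrative name:
f1.sh start
f1.sh stop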
4. Batch jps script
#!/bin/bash
params="$@"
# source /etc/profile first: ssh runs a non-login shell, which otherwise may not have jps on its PATH
for i in hadoop102 hadoop103 hadoop104; do
echo ============== $i $params ===============
ssh $i "source /etc/profile; $params"
done
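Usage sketch, assuming the illustrative name xcall.sh; any command works, jps being the common case:
xcall.sh jps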
5. Log generation script
#!/bin/bash
for i in hadoop102 hadoop103; do
echo "=============$i=============="
ssh $i "cd /opt/module/applog ; java -jar gmall2020-mock-log-2020-05-10.jar >/dev/null 2>&1 &"
done
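Usage sketch, assuming the illustrative name lg.sh; after running it, the batch jps script above should show a new java process on hadoop102 and hadoop103:
lg.sh
xcall.sh jps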
6. Sqoop data import script (incomplete; to be improved later)
#!/bin/bash
APP=gmall
sqoop=/opt/module/sqoop/bin/sqoop
if [ -n "$2" ]; then
do_date=$2
else
do_date=`date -d '-1 day' +%F`
fi
import_data(){
$sqoop import \
--connect jdbc:mysql://hadoop102:3306/$APP \
--username root \
--password 123456 \
--query "$2 and \$CONDITIONS" \
--target-dir /origin_data/$APP/db/$1/$do_date \
--delete-target-dir \
--num-mappers 2 \
--fields-terminated-by '\t' \
--compress \
--compression-codec lzop \
--null-string '\\N' \
--null-non-string '\\N'
# build an LZO index so the compressed output is splittable
hadoop jar /opt/module/hadoop-3.1.4/share/hadoop/common/hadoop-lzo-0.4.21.jar \
com.hadoop.compression.lzo.DistributedLzoIndexer /origin_data/$APP/db/$1/$do_date
}
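As the heading says, the script is unfinished: import_data is defined but never called. A minimal sketch of the missing dispatcher, assuming a hypothetical user_info table and columns (note the query passed as the second argument must end in a where clause, because import_data appends " and \$CONDITIONS" to it):
case $1 in
"user_info")
import_data user_info "select id, login_name from user_info where 1=1"
;;
esac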