Automatically deploying a Kafka cluster with ACLs

2018-05-24  朝阳_412d

This script automatically deploys a Kafka cluster with ACLs enabled.

For example, to install and deploy a Kafka cluster on the machines cent1.steven, cent2.steven and cent3.steven,
only two steps are needed:
1: Download kafka_2.11-0.11.0.2.tgz and zookeepercli
2: Run this script on every broker

./deploy.sh 1=cent1.steven 2=cent2.steven 3=cent3.steven ${brokerId}

The brokerId argument is 1 when the script runs on cent1.steven, 2 on cent2.steven, and 3 on cent3.steven.
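
With the three hosts above, the per-host invocations would look like this (a sketch based on the usage line in the script; adjust the hostnames to your own environment):

# on cent1.steven
./deploy.sh 1=cent1.steven 2=cent2.steven 3=cent3.steven 1
# on cent2.steven
./deploy.sh 1=cent1.steven 2=cent2.steven 3=cent3.steven 2
# on cent3.steven
./deploy.sh 1=cent1.steven 2=cent2.steven 3=cent3.steven 3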

The script:

#!/bin/bash
#use e.g: cd $baseDir; ./deploy.sh 2=cent2.steven 3=cent3.steven 4=cent4.steven 2
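# the no-op loop below iterates over all positional arguments, leaving brokerId set to the last one (the broker id)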
for brokerId; do true; done
zoo=""
host=""
brokers=""
for ((i=1;i<$#;i++))
    do
         inode=${!i}
         id=${inode%=*}
         brokers=${brokers}"
server.${inode}:2888:3888"
         zoo=${zoo}${inode#*=}":2181,"
         if [ $brokerId = $id ]; then
                host=${inode#*=}
         fi
    done
zoo=${zoo%*,}
if [ "$host" = "" ] || [ $brokerId -lt 1 ]; then
   echo "not find brokerId and host"
   exit -1
fi
baseDir=$(cd `dirname $0`; pwd)
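# clean up any previous installation (symlink, extracted directory, old myid) before unpacking a fresh copy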
unlink kafka
rm -rf kafka_2.11-0.11.0.2 data/zookeeper/myid

#wget http://cent0.steven:8080/kafka_2.11-0.11.0.2.tgz
tar -zxvf kafka_2.11-0.11.0.2.tgz
ln -s kafka_2.11-0.11.0.2 kafka
cp zookeepercli kafka/
cd kafka
mkdir -p data/zookeeper

#kafka path
kafkaPath="/kafka"

#create zookeeper users and password
zkAdmin="zkSuper"
zkAdminPwd="zkSuper-secret"
zkServer="zkQuorum"
zkServerPwd="zkQuorum-secret"
zkLearner="zkQuorum"
zkLearnerPwd="zkQuorum-secret"
#create kafka user and password
kafkaAdmin="kafkaSuper"
kafkaAdminPwd="kafkaSuper-secret"
kafkaUser="alice"
kafkaUserPwd="alice-secret"

echo "
listeners=SASL_PLAINTEXT://0.0.0.0:9092
advertised.listeners=SASL_PLAINTEXT://${host}:9092
auto.create.topics.enable=false
delete.topic.enable=true
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
sasl.enabled.mechanisms=SCRAM-SHA-256,PLAIN
security.inter.broker.protocol=SASL_PLAINTEXT
sasl.mechanism.inter.broker.protocol=SCRAM-SHA-256
authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer
super.users=User:$kafkaAdmin" >> config/server.properties

echo "$brokers
maxClientCnxns=100
tickTime=2000
initLimit=10
syncLimit=5
authProvider.1=org.apache.zookeeper.server.auth.DigestAuthenticationProvider
authProvider.2=org.apache.zookeeper.server.auth.SASLAuthenticationProvider
authProvider.3=org.apache.zookeeper.server.auth.IPAuthenticationProvider
requireClientAuthScheme=sasl
quorum.auth.enableSasl=true
quorum.auth.learnerRequireSasl=true
quorum.auth.serverRequireSasl=true
quorum.auth.learner.loginContext=QuorumLearner
quorum.auth.server.loginContext=QuorumServer" >> config/zookeeper.properties

echo "KafkaServer {
org.apache.kafka.common.security.scram.ScramLoginModule required
username=$kafkaAdmin
password=$kafkaAdminPwd;

org.apache.kafka.common.security.plain.PlainLoginModule required
username=$kafkaAdmin
password=$kafkaAdminPwd
user_$kafkaAdmin=$kafkaAdminPwd
user_$kafkaUser=$kafkaUserPwd;
};
Client {
org.apache.zookeeper.server.auth.DigestLoginModule required
username=$zkAdmin
password=$zkAdminPwd;
};" > config/kafka_server_jaas.conf


echo "Server {
org.apache.zookeeper.server.auth.DigestLoginModule required
user_$zkAdmin=$zkAdminPwd
user_$kafkaAdmin=$kafkaAdminPwd;
};
QuorumServer {
org.apache.zookeeper.server.auth.DigestLoginModule required
username=$zkServer
password=$zkServerPwd
user_$zkServer=$zkServerPwd;
};
QuorumLearner {
org.apache.zookeeper.server.auth.DigestLoginModule required
username=$zkLearner
password=$zkLearnerPwd
user_$zkLearner=$zkLearnerPwd;
};" > config/kafka_zoo_jaas.conf

echo "KafkaClient {
org.apache.kafka.common.security.plain.PlainLoginModule required
username=$kafkaUser
password=$kafkaUserPwd;
};" > plain_kafka_jaas.conf

echo "KafkaClient {
org.apache.kafka.common.security.scram.ScramLoginModule required
username=$kafkaAdmin
password=$kafkaAdminPwd;
};" > scram_kafka_jaas.conf

echo "Client {
org.apache.zookeeper.server.auth.DigestLoginModule required
username=$zkAdmin
password=$zkAdminPwd;
};" > zk_admin_jaas.conf

echo ${brokerId} > data/zookeeper/myid
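# the myid value must match the corresponding server.<id> entry written to zookeeper.properties above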

sed -i "s?broker.id=0?broker.id=${brokerId}?;s?localhost:2181?$zoo$kafkaPath?" config/server.properties

sed -i "s?log.dirs=/tmp/kafka-logs?log.dirs=`pwd`/data/kafka-logs?" config/server.properties

sed -i "s?dataDir=/tmp/zookeeper?dataDir=`pwd`/data/zookeeper?" config/zookeeper.properties

sed -i "s?exec ?export JMX_PORT=9999 \nexport KAFKA_OPTS=\" -Djava.security.auth.login.config=`pwd`/config/kafka_server_jaas.conf\"\nexec ?" bin/kafka-server-start.sh
zkAdminId=`echo -n "$zkAdmin:$zkAdminPwd" | openssl sha1 -binary | base64`
sed -i "s?exec ?export KAFKA_OPTS=\" -Djava.security.auth.login.config=`pwd`/config/kafka_zoo_jaas.conf -Dzookeeper.DigestAuthenticationProvider.superDigest=$zkAdmin:$zkAdminId\"\nexec ?" bin/zookeeper-server-start.sh

#start up zookeeper server
bin/zookeeper-server-start.sh -daemon config/zookeeper.properties 
sleep 2
echo "started zookeeper server myid:$brokerId"

#start up kafka server
bin/kafka-server-start.sh -daemon config/server.properties
sleep 2
echo "started kafka server brokerId:$brokerId"

export KAFKA_OPTS=" -Djava.security.auth.login.config=`pwd`/zk_admin_jaas.conf"
# add kafkaAdmin user for SCRAM
bin/kafka-configs.sh --zookeeper localhost:2181$kafkaPath --alter --add-config "SCRAM-SHA-256=[password=$kafkaAdminPwd]" --entity-type users --entity-name $kafkaAdmin
echo "add user $kafkaAdmin to zookeeper success."
#set acl on brokers/topics
./zookeepercli -servers localhost:2181 -auth_usr=$zkAdmin -auth_pwd=$zkAdminPwd -c setacl $kafkaPath/brokers/topics sasl:$zkAdmin:cdrwa
echo "set acl for path: $kafkaPath/brokers/topics"
./zookeepercli -servers localhost:2181 -auth_usr=$zkAdmin -auth_pwd=$zkAdminPwd -c setacl $kafkaPath/kafka-acl sasl:$zkAdmin:cdrwa
echo "set acl for path: $kafkaPath/kafka-acl"

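# the block below only prints (does not execute) the follow-up commands: adding a SCRAM user, creating a test topic, granting ACLs, and running console producer/consumer clients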
echo '
# set kafka_opts
kafkaPath="/kafka"
cd kafka
export KAFKA_OPTS=" -Djava.security.auth.login.config=`pwd`/zk_admin_jaas.conf"

# add scram user 
username="alice"
password="alice-secret"
bin/kafka-configs.sh --zookeeper localhost:2181$kafkaPath --alter --add-config "SCRAM-SHA-256=[password=$password]" --entity-type users --entity-name $username

#topic 
bin/kafka-topics.sh --create --zookeeper localhost:2181$kafkaPath --partitions 3 --replication-factor 1 --topic test
bin/kafka-topics.sh --list --zookeeper localhost:2181$kafkaPath

#authorizer
bin/kafka-acls.sh --authorizer-properties zookeeper.connect=localhost:2181$kafkaPath --add --allow-principal User:* --allow-host=* --operation All --topic test --group=*
bin/kafka-acls.sh --authorizer-properties zookeeper.connect=localhost:2181$kafkaPath --add --operation IdempotentWrite --allow-principal User:* --allow-host=* --cluster
bin/kafka-acls.sh --authorizer-properties zookeeper.connect=localhost:2181$kafkaPath --add --operation All --allow-principal User:* --allow-host=* --transactional-id=*
bin/kafka-acls.sh --authorizer-properties zookeeper.connect=localhost:2181$kafkaPath --list --topic test

#producer
export KAFKA_OPTS=" -Djava.security.auth.login.config=`pwd`/scram_kafka_jaas.conf"
bin/kafka-console-producer.sh --broker-list localhost:9092 --producer-property security.protocol=SASL_PLAINTEXT --producer-property sasl.mechanism=SCRAM-SHA-256 --topic test

#consumer
export KAFKA_OPTS=" -Djava.security.auth.login.config=`pwd`/plain_kafka_jaas.conf"
bin/kafka-console-consumer.sh --new-consumer --bootstrap-server localhost:9092 --consumer-property security.protocol=SASL_PLAINTEXT --consumer-property sasl.mechanism=PLAIN --topic test'
