大数据

CDH安装Kerberos (包含主从配置)

2018-06-07  本文已影响0人  阿甘骑士
很多企业CDH是没有集成kerberos,原因是kerberos部署后,服务使用起来变复杂,大部分只配置了sentry做权限管理;但真正的CDH多租户应该是 身份验证 + 权限管理。也就是(kerberos + sentry)
接下来,我会图文介绍怎么安装这两个服务;
在实施方案前,假设
注意点
policy.png
安装KDC和配置服务
##执行命令
yum -y install krb5-server krb5-libs krb5-auth-dialog krb5-workstation
vi /etc/krb5.conf

[logging]
 default = FILE:/var/log/krb5libs.log
 kdc = FILE:/var/log/krb5kdc.log
 admin_server = FILE:/var/log/kadmind.log

[libdefaults]
 default_realm = W.COM
 dns_lookup_realm = false
 dns_lookup_kdc = false
 ticket_lifetime = 24h
 renew_lifetime = 7d
 forwardable = true

#注意的是node1是你kdc服务的主机host
#作用域一般一个集群配一个足矣
[realms]
 W.COM = {
  kdc = node1
  admin_server = node1
 }

[domain_realm]
 .node1 = W.COM
 node1 = W.COM
vi /var/kerberos/krb5kdc/kadm5.acl
#/admin结尾的都是带有管理权限的principal
*/admin@W.COM     *
#修改kdc.conf
vi /var/kerberos/krb5kdc/kdc.conf
[kdcdefaults]
 kdc_ports = 88
 kdc_tcp_ports = 88

[realms]
 W.COM = {
  #master_key_type = aes256-cts
  max_renewable_life = 7d 0h 0m 0s
  acl_file = /var/kerberos/krb5kdc/kadm5.acl
  dict_file = /usr/share/dict/words
  admin_keytab = /var/kerberos/krb5kdc/kadm5.keytab
  supported_enctypes = aes256-cts:normal aes128-cts:normal des3-hmac-sha1:normal arcfour-hmac:normal des-hmac-sha1:normal des-cbc-md5:normal des-cbc-crc:normal
 }
kdb5_util create -r W.COM -s
# 密码默认为KERBEROS
kadmin.local
#然后输入 
addprinc admin/admin@W.COM
#管理员密码为:kerberos

#提示 created关键字之后,输入 exit退出
exit
chkconfig krb5kdc on
chkconfig kadmin on
service krb5kdc start
service kadmin start
kinit admin/admin@W.COM 
#然后输入上两步设置的密码
klist

#显示类似下面东西表示正常
[root@node1 krb5kdc]# klist
Ticket cache: FILE:/tmp/krb5cc_0
Default principal: admin/admin@W.COM

Valid starting     Expires            Service principal
05/31/18 17:14:34  06/01/18 17:14:34  krbtgt/W.COM@W.COM
        renew until 06/07/18 17:14:34
yum -y install krb5-libs krb5-workstation
#在kdc服务所在的机器
yum -y install openldap-clients
scp -r /etc/krb5.conf root@slave1:/etc/
scp -r /etc/krb5.conf root@slave2:/etc/
scp -r /etc/krb5.conf root@slave3:/etc/
CDH启用Kerberos
#在KDC中给Cloudera Manager添加管理员账号
#在KDC服务所在的机器操作
kadmin.local
addprinc cloudera-scm/admin@W.COM
#密码:kerberos

创建完成后输入  exit 退出
exit
(SASL(-4): no mechanism available: No worthy mechs found)

在报错节点执行以下命令

yum install cyrus-sasl-plain  cyrus-sasl-devel  cyrus-sasl-gssapi
验证
#执行
kadmin.local
#输入后回车
listprincs

#这时候会看到
kadmin.local:  listprincs
HTTP/node1@W.COM
HTTP/slave1@W.COM
HTTP/slave2@W.COM
HTTP/slave3@W.COM
K/M@W.COM
admin/admin@W.COM
cloudera-scm/admin@W.COM
deng_yb@W.COM
hbase/node1@W.COM
hbase/slave1@W.COM
hbase/slave2@W.COM
hbase/slave3@W.COM
hdfs/node1@W.COM
hdfs/slave1@W.COM
hdfs/slave2@W.COM
hdfs/slave3@W.COM
hive/node1@W.COM
hue/node1@W.COM
impala/node1@W.COM
impala/slave1@W.COM
impala/slave2@W.COM
impala/slave3@W.COM
kadmin/admin@W.COM
kadmin/node1@W.COM
kadmin/changepw@W.COM
kafka/slave1@W.COM
kafka/slave2@W.COM
kafka/slave3@W.COM
kafka_mirror_maker/node1@W.COM
krbtgt/W.COM@W.COM
kudu/node1@W.COM
kudu/slave1@W.COM
kudu/slave2@W.COM
kudu/slave3@W.COM
liu.zx@W.COM
mapred/node1@W.COM
oozie/node1@W.COM
sentry/bi-mast

#上述principal是CDH集成kerberos生成的
#通过切换hdfs用户达到具备访问hdfs目录权限
[root@bi-bdap-ue-001 ~]# su hdfs
[hdfs@bi-bdap-ue-001 root]$ hadoop fs -ls /user/hive/warehouse
Found 3 items
drwxrwx--x+  - hive hive          0 2018-04-20 14:09 /user/hive/warehouse/dm_wms.db
drwxrwx--x+  - hive hive          0 2018-03-15 17:13 /user/hive/warehouse/dw
drwxrwx--x+  - hive hive          0 2018-03-14 15:42 /user/hive/warehouse/wms.db
[hdfs@bi-bdap-ue-001 root]$ hadoop fs -ls /user/hive/warehouse/dw
Found 2 items
drwxrwx--x+  - hive hive          0 2018-05-25 17:37 /user/hive/warehouse/dw/dw_wms.db
drwxrwx--x+  - hive hive          0 2018-04-09 13:57 /user/hive/warehouse/dw/dw_wms_tmp.db

现在集成kerberos后,通过切换用户访问服务方式走不通

#就算切换成hdfs用户后依然没有权限访问hdfs
[root@node1 ~]# su hdfs
bash-4.1$ klist
klist: No credentials cache found (ticket cache FILE:/tmp/krb5cc_496)
bash-4.1$ hadoop fs -ls /user
18/06/07 00:55:31 WARN security.UserGroupInformation: PriviledgedActionException as:hdfs (auth:KERBEROS) cause:javax.security.sasl.SaslException: GSS initiate failed [Caused by GSSException: No valid credentials provided (Mechanism level: Failed to find any Kerberos tgt)]
18/06/07 00:55:31 WARN ipc.Client: Exception encountered while connecting to the server : javax.security.sasl.SaslException: GSS initiate failed [Caused by GSSException: No valid credentials provided (Mechanism level: Failed to find any Kerberos tgt)]
18/06/07 00:55:31 WARN security.UserGroupInformation: PriviledgedActionException as:hdfs (auth:KERBEROS) cause:java.io.IOException: javax.security.sasl.SaslException: GSS initiate failed [Caused by GSSException: No valid credentials provided (Mechanism level: Failed to find any Kerberos tgt)]
ls: Failed on local exception: java.io.IOException: javax.security.sasl.SaslException: GSS initiate failed [Caused by GSSException: No valid credentials provided (Mechanism level: Failed to find any Kerberos tgt)]; Host Details : local host is: "node1/172.17.194.20"; destination host is: "node1":8020; 

正确的方式是kinit 命令获得或更新 Kerberos 票据授权票据后访问服务

#现在服务只认票据
#而且相应的服务有相应的票据
[root@node1 1003-hdfs-NAMENODE]# kinit -kt hdfs.keytab hdfs/node1@W.COM
[root@node1 1003-hdfs-NAMENODE]# klist
Ticket cache: FILE:/tmp/krb5cc_0
Default principal: hdfs/node1@W.COM

Valid starting     Expires            Service principal
06/07/18 01:00:55  06/08/18 01:00:55  krbtgt/W.COM@W.COM
        renew until 06/12/18 01:00:55
[root@node1 1003-hdfs-NAMENODE]# hadoop fs -ls /user/hive/warehouse
Found 12 items
drwxrwx--x+  - hive hive          0 2018-05-24 19:01 /user/hive/warehouse/apache_sqoop_test
drwxrwx--x+  - hive hive          0 2018-06-06 15:13 /user/hive/warehouse/bi.db
drwxrwx--x+  - hive hive          0 2018-05-17 16:41 /user/hive/warehouse/gms.db
drwxrwx--x+  - hive hive          0 2018-06-04 14:08 /user/hive/warehouse/gtp.db
drwxrwx--x+  - hive hive          0 2018-05-25 20:43 /user/hive/warehouse/gtp_data.db
drwxrwx--x+  - hive hive          0 2018-06-01 17:15 /user/hive/warehouse/gtp_dc.db
drwxrwx--x+  - hive hive          0 2018-06-04 08:32 /user/hive/warehouse/gtp_test.db
drwxrwx--x+  - hive hive          0 2018-05-31 15:33 /user/hive/warehouse/gtp_txt.db
drwxrwx--x+  - hive hive          0 2018-04-23 17:46 /user/hive/warehouse/kudu_raw.db
drwxrwx--x+  - hive hive          0 2018-04-23 17:58 /user/hive/warehouse/kudu_test.db
drwxrwx--x+  - hive hive          0 2018-04-26 17:37 /user/hive/warehouse/kudu_vip.db
drwxrwx--x+  - hive hive          0 2018-04-20 10:51 /user/hive/warehouse/user_info

接下来,我会图文介绍怎么安装配置Kerberos高可用

yum -y install krb5-server krb5-libs krb5-auth-dialog krb5-workstation
注意:此处只安装服务,暂不做相应配置及启动服务。
vi /etc/krb5.conf
kdc高可用.png
scp /etc/krb5.conf root@slave1:/etc/
scp /etc/krb5.conf root@slave2:/etc/
scp /etc/krb5.conf root@slave3:/etc/
service krb5kdc restart
service kadmin restart
kadmin.local
kadmin.local:  addprinc -randkey host/node1
kadmin.local:  addprinc -randkey host/slave1
kadmin.local:  
kadmin.local:  ktadd host/node1
kadmin.local:  ktadd host/slave1
随机生成秘钥创建同步账号,并使用ktadd命令生成账号的keytab文件,文件默认生成在/etc/krb5.keytab下,多个账号则在krb5.keytab基础上追加
scp /etc/krb5.conf root@slave1:/etc/
scp /etc/krb5.keytab root@slave1:/etc/
scp /var/kerberos/krb5kdc/.k5.W.COM root@slave1:/var/kerberos/krb5kdc/
scp /var/kerberos/krb5kdc/kadm5.acl root@slave1:/var/kerberos/krb5kdc/
scp /var/kerberos/krb5kdc/kdc.conf root@slave1:/var/kerberos/krb5kdc/
[root@slave1 krb5kdc]# vi /var/kerberos/krb5kdc/kpropd.acl 
host/node1@W.COM
host/slave1@W.COM
:wq
kpropd -S

备节点上已经准备好数据传输

kdb5_util dump /var/kerberos/krb5kdc/master.dump
导出成功后生成master.dump和master.dump.dump_ok两个文件。
kprop -f /var/kerberos/krb5kdc/master.dump -d -P 754 slave1
32768 bytes sent.
44358 bytes sent.
Database propagation to slave1: SUCCEEDED
[root@slave1 krb5kdc]# cd /var/kerberos/krb5kdc/
[root@slave1 krb5kdc]# ll
total 104
-rw------- 1 root root 44358 Jun 15 16:19 from_master
-rw------- 1 root root    22 Jun 14 12:10 kadm5.acl
-rw------- 1 root root   439 Jun 14 12:11 kdc.conf
-rw-r--r-- 1 root root    54 Jun 14 12:15 kpropd.acl
-rw------- 1 root root 40960 Jun 15 16:19 principal
-rw------- 1 root root  8192 Jun 15 16:19 principal.kadm5
-rw------- 1 root root     0 Jun 15 09:36 principal.kadm5.lock
-rw------- 1 root root     0 Jun 15 16:19 principal.ok
service krb5kdc start
#不用启动kadmin
service krb5kdc stop
#可用不停kadmin,不影响验证
impala节点.png

impala-shell服务正常

impala-jdbc正常.png

impala-jdbc正常,注意krb5.conf文件要及时更新

vi /var/kerberos/krb5kdc/kprop_sync.sh
#!/bin/bash
# Dump the master KDC principal database and propagate it to the slave KDC.
# Intended to run from cron every minute; stdout is redirected to
# /var/kerberos/krb5kdc/lastupdate by the crontab entry.
set -euo pipefail
source /etc/profile
echo "开始dump数据库文件"
readonly DUMP=/var/kerberos/krb5kdc/master.dump
readonly PORT=754
readonly SLAVE="slave1"
TIMESTAMP=$(date)
echo "Start at $TIMESTAMP"
# Export the full KDC database, then push it to the slave's kpropd (port 754).
kdb5_util dump "$DUMP"
kprop -f "$DUMP" -d -P "$PORT" "$SLAVE"
:wq
crontab -e
#每分钟同步一次
* * * * * sh /var/kerberos/krb5kdc/kprop_sync.sh > /var/kerberos/krb5kdc/lastupdate
:wq
上一篇 下一篇

猜你喜欢

热点阅读