This article belongs to the series Big Data Security in Practice (大数据安全实战): https://www.jianshu.com/p/76627fd8399c
Steps
- Deploy ZooKeeper with Kerberos integration
- Create the Kerberos principals
- Modify hbase-site.xml
- Distribute the files
- Start HBase
ZooKeeper with Kerberos integration
ZooKeeper was already integrated with Kerberos before this article; see the earlier post in this series, "zookeeper集成Kerberos".
Create the Kerberos principals
kadmin.local -q "addprinc -randkey hbase/v-hadoop-kbds.sz.kingdee.net"
kadmin.local -q "addprinc -randkey hbase/v-hadoop2-kbds.sz.kingdee.net "
kadmin.local -q "addprinc -randkey hbase/v-hadoop3-kbds.sz.kingdee.net "
kadmin.local -q "addprinc -randkey hbase/v-hadoop4-kbds.sz.kingdee.net "
kadmin.local -q "addprinc -randkey hbase/v-hadoop5-kbds.sz.kingdee.net "
kadmin.local -q "ktadd -k /etc/hadoop/conf/hbase-service.keytab hbase/v-hadoop-kbds.sz.kingdee.net"
kadmin.local -q "ktadd -k /etc/hadoop/conf/hbase-service.keytab hbase/v-hadoop2-kbds.sz.kingdee.net "
kadmin.local -q "ktadd -k /etc/hadoop/conf/hbase-service.keytab hbase/v-hadoop3-kbds.sz.kingdee.net "
kadmin.local -q "ktadd -k /etc/hadoop/conf/hbase-service.keytab hbase/v-hadoop4-kbds.sz.kingdee.net "
kadmin.local -q "ktadd -k /etc/hadoop/conf/hbase-service.keytab hbase/v-hadoop5-kbds.sz.kingdee.net "
Modify hbase-site.xml
<property>
  <name>hbase.security.authorization</name>
  <value>true</value>
</property>
<property>
  <name>hbase.security.authentication</name>
  <value>kerberos</value>
</property>
<property>
  <name>hbase.rpc.engine</name>
  <value>org.apache.hadoop.hbase.ipc.SecureRpcEngine</value>
</property>
<property>
  <name>hbase.regionserver.kerberos.principal</name>
  <value>hbase/_HOST@TT.COM</value>
</property>
<property>
  <name>hbase.regionserver.keytab.file</name>
  <value>/etc/hadoop/conf/hbase-service.keytab</value>
</property>
<property>
  <name>hbase.master.kerberos.principal</name>
  <value>hbase/_HOST@TT.COM</value>
</property>
<property>
  <name>hbase.master.keytab.file</name>
  <value>/etc/hadoop/conf/hbase-service.keytab</value>
</property>
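These properties sit inside the existing <configuration> root of hbase-site.xml. _HOST is expanded by HBase at startup into each node's own FQDN, so the same file can be shipped unchanged to every host, and the TT.COM realm must match the realm the principals were created in. If xmllint happens to be installed, a quick well-formedness check after editing (path as used later in this article) is:
xmllint --noout /mnt/kbdsproject/hbase/conf/hbase-site.xml
# prints nothing when the file parses cleanly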
Distribute the files
- Distribute the keytab file
ansible hadoop --become -m copy -a "src=/etc/hadoop/conf/hbase-service.keytab dest=/etc/hadoop/conf/hbase-service.keytab"
- Copy Hadoop's core-site.xml and hdfs-site.xml into HBase's conf directory
If HDFS is configured with HA, hbase.rootdir in hbase-site.xml must point at the nameservice:
<property>
  <name>hbase.rootdir</name>
  <value>hdfs://cluster:8020/hbase</value>
</property>
HBase then needs Hadoop's configuration files in order to know what cluster stands for (see the sketch after this list).
ansible hadoop -m copy -a "src=/mnt/kbdsproject/hadoop/etc/hadoop/core-site.xml dest=/mnt/kbdsproject/hbase/conf/core-site.xml"
ansible hadoop -m copy -a "src=/mnt/kbdsproject/hadoop/etc/hadoop/hdfs-site.xml dest=/mnt/kbdsproject/hbase/conf/hdfs-site.xml"
- Modify hbase-env.sh
export HBASE_PID_DIR=/var/hbase/pids
ansible hadoop -m shell --become -a "mkdir -p /var/hbase/pids"
ansible hadoop -m shell --become -a "chown hadoop:hadoop /var/hbase/pids"
ansible hadoop -m copy -a "src=/mnt/kbdsproject/hbase/conf/hbase-env.sh dest=/mnt/kbdsproject/hbase/conf/hbase-env.sh"
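For reference, here is a minimal sketch of what HBase has to read from the copied hdfs-site.xml when HDFS runs in HA mode, assuming the nameservice really is called cluster and using made-up NameNode IDs nn1/nn2 and hosts (the real values come from your own hdfs-site.xml). Also note that on many Hadoop 2.x releases the port must be dropped from a logical URI, i.e. hdfs://cluster/hbase, because a nameservice carries no port information.
<!-- illustrative excerpt from hdfs-site.xml; the values below are assumptions -->
<property>
  <name>dfs.nameservices</name>
  <value>cluster</value>
</property>
<property>
  <name>dfs.ha.namenodes.cluster</name>
  <value>nn1,nn2</value>
</property>
<property>
  <name>dfs.namenode.rpc-address.cluster.nn1</name>
  <value>v-hadoop-kbds.sz.kingdee.net:8020</value>
</property>
<property>
  <name>dfs.namenode.rpc-address.cluster.nn2</name>
  <value>v-hadoop2-kbds.sz.kingdee.net:8020</value>
</property>
<property>
  <name>dfs.client.failover.proxy.provider.cluster</name>
  <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
Without these entries on HBase's classpath, the rootdir authority cluster cannot be resolved, which is why core-site.xml and hdfs-site.xml are copied above.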
Start HBase
# start / stop the whole cluster from the master node
/mnt/kbdsproject/hbase/bin/start-hbase.sh
/mnt/kbdsproject/hbase/bin/stop-hbase.sh
# start all regionservers
/mnt/kbdsproject/hbase/bin/hbase-daemons.sh start regionserver
# start a single regionserver on the local node
/mnt/kbdsproject/hbase/bin/hbase-daemon.sh start regionserver
# start the HMaster on the local node
/mnt/kbdsproject/hbase/bin/hbase-daemon.sh start master
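To confirm that Kerberos authentication is really in effect, one option is to obtain a ticket as the service principal and open the shell (a sketch reusing the keytab and install path from above):
kinit -kt /etc/hadoop/conf/hbase-service.keytab hbase/$(hostname -f)
/mnt/kbdsproject/hbase/bin/hbase shell
# inside the shell, status and list should succeed;
# without a valid Kerberos ticket the same calls are rejected by the secured cluster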