spark--环境搭建--5.kafka_2.9.2-0.8.1集群搭建

1. scala安装

$ cd /usr/local

$ tar -zxvf scala-2.11.4.tgz

$ mv scala-2.11.4 scala

$ vi ~/.bashrc

export SCALA_HOME=/usr/local/scala/
export PATH=$PATH:$SCALA_HOME/bin/

$ source ~/.bashrc

$ scala -version

$ scp -r scala root@spark2:/usr/local/

$ scp -r scala root@spark3:/usr/local/

$ scp ~/.bashrc root@spark2:~/.bashrc

$ scp ~/.bashrc root@spark3:~/.bashrc

# 2和3分别执行 source ~/.bashrc

2. kafka安装

$ tar -zxvf kafka_2.9.2-0.8.1.tgz

$ mv kafka_2.9.2-0.8.1 kafka

$ vi kafka/config/server.properties

# 修改
broker.id=0  # (2和3机器分别修改为1和2)
zookeeper.connect=192.168.2.100:2181,192.168.2.101:2181,192.168.2.102:2181

$ yum install unzip

$ unzip slf4j-1.7.6.zip

$ cp slf4j-1.7.6/slf4j-nop-1.7.6.jar kafka/libs/

$ rm -rf slf4j-1.7.6

$ scp -r kafka root@spark2:/usr/local/

$ scp -r kafka root@spark3:/usr/local/

# 注意修改2和3的server.properties的配置

3. 启动kafka集群

$ cd kafka

$ nohup bin/kafka-server-start.sh config/server.properties &

$ jps

# 若没启动查看

$ cat nohup.out

# 解决kafka Unrecognized VM option 'UseCompressedOops' 问题(三台机器分别执行)

$ vi bin/kafka-run-class.sh

# 找到下面这段,删除其中的 -XX:+UseCompressedOops 选项(原文以红色标注的部分):
# JVM performance options
if [ -z "$KAFKA_JVM_PERFORMANCE_OPTS" ]; then
  KAFKA_JVM_PERFORMANCE_OPTS="-server -XX:+UseCompressedOops -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:+CMSClassUnloadingEnabled -XX:+CMSScavengeBeforeRemark -XX:+DisableExplicitGC -Djava.awt.headless=true"
fi

$ nohup bin/kafka-server-start.sh config/server.properties &

4. 测试kafka集群

$ bin/kafka-topics.sh --zookeeper 192.168.2.100:2181,192.168.2.101:2181,192.168.2.102:2181 --topic TestTopic --replication-factor 1 --partitions 1 --create

$ bin/kafka-console-producer.sh --broker-list 192.168.2.100:9092,192.168.2.101:9092,192.168.2.102:9092 --topic TestTopic

$ bin/kafka-console-consumer.sh --zookeeper 192.168.2.100:2181,192.168.2.101:2181,192.168.2.102:2181 --topic TestTopic --from-beginning

 

    原文作者:spark
    原文地址: https://www.cnblogs.com/p---k/p/8600412.html
    本文转自网络文章,转载此文章仅为分享知识,如有侵权,请联系博主进行删除。
点赞