# /etc/hosts entries for the cluster nodes.
# NOTE(review): the first entry has no hostname — confirm whether 192.168.0.2
# should map to a name or be removed.
192.168.0.2
192.168.0.3 bsw-ubuntu
192.168.0.11 bsw-ubuntu-1
192.168.0.12 bsw-ubuntu-2
192.168.0.13 bsw-ubuntu-3
192.168.0.14 bsw-ubuntu-4
192.168.0.15 bsw-ubuntu-5
192.168.0.16 bsw-ubuntu-6
# Write the Docker daemon config: CN registry mirrors + container log rotation.
# Fixes vs. original: rm -f (no error when the file is absent), mkdir -p (no
# error when the dir exists), and sudo tee instead of a plain redirection —
# `echo ... > /etc/docker/daemon.json` fails for non-root users because the
# redirection is done by the unprivileged shell, not by sudo.
sudo rm -f /etc/docker/daemon.json
sudo mkdir -p /etc/docker
echo '{
"registry-mirrors" : [
"https://docker.mirrors.ustc.edu.cn",
"https://registry.docker-cn.com",
"http://hub-mirror.c.163.com"
],
"log-opts": {"max-size":"5m", "max-file":"3"}
}' | sudo tee /etc/docker/daemon.json > /dev/null
# Remove any pre-existing/older Docker packages that conflict with docker-ce
# (yum simply reports "no match" if none are installed).
sudo yum remove docker \
docker-client \
docker-client-latest \
docker-common \
docker-latest \
docker-latest-logrotate \
docker-logrotate \
docker-engine
# Install yum-config-manager, then register Docker's official CentOS repo.
sudo yum install -y yum-utils
sudo yum-config-manager \
--add-repo \
https://download.docker.com/linux/centos/docker-ce.repo
# Install Docker Engine, CLI and containerd; start now and enable at boot.
sudo yum install -y docker-ce docker-ce-cli containerd.io
sudo systemctl start docker
sudo systemctl enable docker
# Install the docker-compose standalone binary.
# NOTE(review): this gitee.com URL is an unofficial mirror of the GitHub
# release binary (presumably used because github.com is slow/unreachable from
# CN networks) — verify the binary's checksum against the official release.
sudo curl -L "https://gitee.com/snycloudpub/docker-compose/attach_files/620730/download/docker-compose-Linux-x86_64" -o /usr/local/bin/docker-compose
sudo chmod +x /usr/local/bin/docker-compose
# -f: overwrite an existing symlink so re-running this script does not fail
# with "File exists" (plain `ln -s` did).
sudo ln -sf /usr/local/bin/docker-compose /usr/bin/docker-compose
Web UI: http://<host-ip>:8500
主机:
bsw-ubuntu-1
bsw-ubuntu-2
bsw-ubuntu-3
bsw-ubuntu-4
bsw-ubuntu-5
bsw-ubuntu-6
位置:/var/local/consul/
docker-compose.yaml
# docker-compose.yaml — Consul agent on each host (host networking).
# Fix: the pasted snippet had lost all indentation, which is invalid YAML;
# restored the conventional 2-space block structure.
version: '3.6'
services:
  consul:
    image: consul:1.10.3
    container_name: consul
    # Host networking: the agent binds directly to the host's interfaces.
    network_mode: host
    restart: always
    volumes:
      # Host config dir → the directory read by --config-dir below.
      - /etc/consul/:/consul/conf.d/
      - ./data/:/data/
    command: agent --config-dir=/consul/conf.d/
安装:
docker-compose up -d
bsw-ubuntu-4
bsw-ubuntu-5
bsw-ubuntu-6
位置:/var/local/zookeeper/
docker-compose.yaml
version: '3.7'
# ZooKeeper ensemble member — one container per host; each host's compose file
# sets its own ZOO_MY_ID (14/15/16 for bsw-ubuntu-4/5/6).
# Fix: the pasted snippet had lost all indentation (invalid YAML); restored
# 2-space block structure and quoted number-looking scalars.
services:
  zk:
    # Docker image used for this container.
    image: zookeeper
    container_name: zookeeper
    environment:
      # Id of this zk instance — must match one server.<id> entry below.
      # Quoted so YAML passes it through as a string, not an int.
      ZOO_MY_ID: "14"
      # Full ensemble list: server.ZOO_MY_ID=host:2888:3888;2181
      ZOO_SERVERS: "server.14=192.168.0.14:2888:3888;2181 server.15=192.168.0.15:2888:3888;2181 server.16=192.168.0.16:2888:3888;2181"
    # Mount container data dirs on the host so state survives re-creation.
    volumes:
      - ./data:/data
      - ./datalog:/datalog
    network_mode: host
    restart: always
安装:
docker-compose up -d
bsw-ubuntu-4
bsw-ubuntu-5
bsw-ubuntu-6
位置:/var/local/kafka/
docker-compose.yaml
# docker-compose.yaml — Kafka broker (one per host; each host sets its own
# KAFKA_BROKER_ID and advertised listener IP — 14/15/16 shown here for .14).
# Fix: the pasted snippet had lost all indentation (invalid YAML); restored
# the conventional 2-space block structure.
version: "3.6"
services:
  kafka:
    user: root
    container_name: kafka
    image: docker.io/bitnami/kafka:2
    network_mode: "host"
    restart: always
    volumes:
      # Persist Kafka data on the host next to this compose file.
      - "./:/bitnami/kafka"
    environment:
      - KAFKA_BROKER_ID=14
      - KAFKA_CFG_ZOOKEEPER_CONNECT=192.168.0.14:2181,192.168.0.15:2181,192.168.0.16:2181
      - KAFKA_CFG_DELETE_TOPIC_ENABLE=TRUE
      # Dev/internal setting: allow listeners without TLS/SASL.
      - ALLOW_PLAINTEXT_LISTENER=yes
      - KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://192.168.0.14:9092
修改Topic备份
increase-replication-factor.json
{
  "version": 1,
  "partitions": [
    {
      "topic": "logstash",
      "partition": 0,
      "replicas": [14, 15, 16]
    }
  ]
}
# Apply the replica reassignment defined in increase-replication-factor.json
# (--execute starts it; run the same command with --verify to check progress).
bin/kafka-reassign-partitions.sh --zookeeper 192.168.0.14:2181,192.168.0.15:2181,192.168.0.16:2181 --reassignment-json-file increase-replication-factor.json --execute
web-ui: http://centos7-3:50070/
主机:
centos7-0
centos7-2
centos7-3 (master)
启动:
登录 centos7-3 hadoop 用户
hadoop-2.7.7/sbin/start-all.sh
主机:
centos7-0
centos7-2
centos7-3 (master)
启动:
登录 centos7-3 hadoop 用户
hbase-1.4.13/bin/start-hbase.sh
开启thrift2协议
hbase-1.4.13/bin/hbase-daemon.sh start thrift2