

Deploying a ZooKeeper cluster on k8s (3 nodes: 1 leader, 2 followers)

Preface

Environment:

CentOS 7.9, Kubernetes (k8s) cluster

To install a ZooKeeper cluster on k8s, we will follow the official Kubernetes documentation; there are all kinds of installation methods floating around online, but the official tutorial is the safer choice. The document used here is:

https://kubernetes.io/docs/tutorials/stateful-application/zookeeper/

Installing the ZooKeeper cluster with the official Kubernetes tutorial

#Download the ZooKeeper yaml manifest from the Kubernetes website; you can also view it yourself at https://kubernetes.io/docs/tutorials/stateful-application/zookeeper/
wget https://raw.githubusercontent.com/kubernetes/website/main/content/en/examples/application/zookeeper/zookeeper.yaml

#Inspect zookeeper.yaml
cat zookeeper.yaml
#Reading the file you will find a PodDisruptionBudget resource; it constrains how many ZooKeeper pods must stay up in the k8s cluster by defining, through the PDB, the maximum number that may be unavailable.
Also, the image referenced in the official manifest cannot be pulled directly; you can pull the mirror image below and then re-tag it:
docker pull mirrorgooglecontainers/kubernetes-zookeeper:1.0-3.4.10
docker tag mirrorgooglecontainers/kubernetes-zookeeper:1.0-3.4.10 registry.k8s.io/kubernetes-zookeeper:1.0-3.4.10
docker rmi mirrorgooglecontainers/kubernetes-zookeeper:1.0-3.4.10
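With the pull policy set to IfNotPresent (see the StatefulSet below), the pull/tag step above has to be repeated on every worker node that may run a zk pod, or the image has to be pushed to a registry the nodes can reach. A quick local sanity check, assuming Docker is the node's container runtime:
#Confirm the re-tagged image exists on this node (assumes a Docker runtime)
docker images registry.k8s.io/kubernetes-zookeeper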
#Edit zookeeper.yaml; we need to make a few changes for our actual environment
vim zookeeper.yaml
apiVersion: v1
kind: Service                #Creates a headless Service named zk-hs; the target ports are 2888 and 3888
metadata:
  name: zk-hs
  labels:
    app: zk
spec:
  ports:
  - port: 2888
    name: server
  - port: 3888
    name: leader-election
  clusterIP: None
  selector:
    app: zk
---
apiVersion: v1
kind: Service            #A client-facing Service is also created
metadata:
  name: zk-cs
  labels:
    app: zk
spec:
  ports:
  - port: 2181
    name: client
  selector:
    app: zk
---
apiVersion: policy/v1
kind: PodDisruptionBudget    #Defines the PodDisruptionBudget resource
metadata:
  name: zk-pdb
spec:
  selector:
    matchLabels:
      app: zk
  maxUnavailable: 1            #At most 1 pod may be unavailable at a time
---
apiVersion: apps/v1            #The zk cluster is deployed with a StatefulSet (sts)
kind: StatefulSet    
metadata:
  name: zk
spec:
  selector:
    matchLabels:
      app: zk
  serviceName: zk-hs
  replicas: 3                  #3 replicas
  updateStrategy:
    type: RollingUpdate
  podManagementPolicy: OrderedReady
  template:
    metadata:
      labels:
        app: zk
    spec:
#      affinity:                 #I commented out the pod anti-affinity here because I only have 2 worker nodes
#        podAntiAffinity:
#          requiredDuringSchedulingIgnoredDuringExecution:
#            - labelSelector:
#                matchExpressions:
#                  - key: "app"
#                    operator: In
#                    values:
#                    - zk
#              topologyKey: "kubernetes.io/hostname"
      containers:
      - name: kubernetes-zookeeper
        imagePullPolicy: IfNotPresent        #The pull policy was originally Always; changed to IfNotPresent
        image: "registry.k8s.io/kubernetes-zookeeper:1.0-3.4.10"    #This image cannot be pulled directly; use the mirror image we re-tagged above
        resources:
          requests:
            memory: "300M"        #This was originally 1Gi; I changed it to 300M
            cpu: "0.5"
        ports:
        - containerPort: 2181
          name: client
        - containerPort: 2888
          name: server
        - containerPort: 3888
          name: leader-election
        command:
        - sh
        - -c
        - "start-zookeeper \
          --servers=3 \
          --data_dir=/var/lib/zookeeper/data \
          --data_log_dir=/var/lib/zookeeper/data/log \
          --conf_dir=/opt/zookeeper/conf \
          --client_port=2181 \
          --election_port=3888 \
          --server_port=2888 \
          --tick_time=2000 \
          --init_limit=10 \
          --sync_limit=5 \
          --heap=512M \
          --max_client_cnxns=60 \
          --snap_retain_count=3 \
          --purge_interval=12 \
          --max_session_timeout=40000 \
          --min_session_timeout=4000 \
          --log_level=INFO"
        readinessProbe:
          exec:
            command:
            - sh
            - -c
            - "zookeeper-ready 2181"
          initialDelaySeconds: 10
          timeoutSeconds: 5
        livenessProbe:
          exec:
            command:
            - sh
            - -c
            - "zookeeper-ready 2181"
          initialDelaySeconds: 10
          timeoutSeconds: 5
        volumeMounts:
        - name: datadir
          mountPath: /var/lib/zookeeper
      securityContext:
        runAsUser: 1000
        fsGroup: 1000
  volumeClaimTemplates:
  - metadata:
      name: datadir
    spec:
      accessModes: ["ReadWriteOnce"]
      resources:
        requests:
          storage: 500M                            #This was originally 10Gi; I changed it to 500M
      storageClassName: "nfs-storageclass"         #This line is new; it specifies the StorageClass to use
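One thing worth doing before applying the manifest: the storageClassName above points to "nfs-storageclass", which is specific to my environment; if a StorageClass with that name does not exist, the PVCs created by volumeClaimTemplates will stay Pending and the pods will never start. A quick check (substitute your own StorageClass name):
#Confirm the StorageClass referenced by volumeClaimTemplates exists
kubectl get storageclass nfs-storageclass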
#Deploy the zookeeper cluster
kubectl  apply  -f zookeeper.yaml
#Check the status; all pods are Running
kubectl  get pods -l app=zk
NAME   READY   STATUS    RESTARTS   AGE
zk-0   1/1     Running   0          6m54s
zk-1   1/1     Running   0          6m31s
zk-2   1/1     Running   0          6m19s
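The PodDisruptionBudget zk-pdb defined in the manifest is created as well; with maxUnavailable: 1 it lets at most one of the three pods be evicted voluntarily (for example during a node drain) at any time. A quick check that it has picked up all three pods:
#Check the PDB; ALLOWED DISRUPTIONS should show 1 once all 3 pods are Ready
kubectl get pdb zk-pdb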

Verifying that ZooKeeper is working

#First, look at the full hostnames of the 3 zookeeper pods
[root@matser /]# for i in 0 1 2;do kubectl exec zk-$i -n default -- hostname -f;done
zk-0.zk-hs.default.svc.cluster.local
zk-1.zk-hs.default.svc.cluster.local
zk-2.zk-hs.default.svc.cluster.local
[root@matser /]#
#We find that other pods can ping the zk-0 pod directly by its hostname
[root@matser /]# kubectl  exec -it deployment-busybox-567674bd67-lklvf -- ping zk-0.zk-hs.default.svc.cluster.local
PING zk-0.zk-hs.default.svc.cluster.local (10.244.166.134): 56 data bytes
64 bytes from 10.244.166.134: seq=0 ttl=63 time=0.071 ms
[root@matser /]# kubectl  get pods -o wide
NAME         READY   STATUS    RESTARTS      AGE     IP               NODE    NOMINATED NODE   READINESS GATES
zk-0         1/1     Running   0             36m     10.244.166.134   node1   <none>           <none>
[root@matser /]#
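These per-pod hostnames resolve because zk-hs is a headless Service (clusterIP: None); looking up the Service name itself should return the IPs of all three pods. A quick sketch, reusing the busybox pod from the ping test above (the pod name is specific to my cluster):
#Resolve the headless Service; it should list the IPs of zk-0, zk-1 and zk-2
kubectl exec -it deployment-busybox-567674bd67-lklvf -- nslookup zk-hs.default.svc.cluster.local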
#Check the role of each of the 3 zookeeper nodes
[root@matser /]# for i in 0 1 2;do kubectl exec zk-$i -n default -- zkServer.sh status;done
ZooKeeper JMX enabled by default
Using config: /usr/bin/../etc/zookeeper/zoo.cfg
Mode: follower
ZooKeeper JMX enabled by default
Using config: /usr/bin/../etc/zookeeper/zoo.cfg
Mode: follower
ZooKeeper JMX enabled by default
Using config: /usr/bin/../etc/zookeeper/zoo.cfg
Mode: leader
[root@matser /]#
#Check the myid of each node
[root@matser /]# for i in 0 1 2;do echo -n "zk-$i " ;kubectl exec zk-$i -n default -- cat /var/lib/zookeeper/data/myid;done
zk-0 1
zk-1 2
zk-2 3
#Check the zookeeper configuration file
[root@matser /]# kubectl exec -it -n default  zk-0 -- cat /opt/zookeeper/conf/zoo.cfg
#This file was autogenerated DO NOT EDIT
clientPort=2181
dataDir=/var/lib/zookeeper/data
dataLogDir=/var/lib/zookeeper/data/log
tickTime=2000
initLimit=10
syncLimit=5
maxClientCnxns=60
minSessionTimeout=4000
maxSessionTimeout=40000
autopurge.snapRetainCount=3
autopurge.purgeInteval=12
server.1=zk-0.zk-hs.default.svc.cluster.local:2888:3888
server.2=zk-1.zk-hs.default.svc.cluster.local:2888:3888
server.3=zk-2.zk-hs.default.svc.cluster.local:2888:3888
[root@matser /]# 

Verifying ZooKeeper cluster availability

#Enter the container
kubectl exec -it -n default  zk-0 -- bash
zookeeper@zk-0:/$ zkCli.sh                                 #Log in; just press Enter
[zk: localhost:2181(CONNECTED) 11] create /zk-test hdfdf   #Create a znode and write some data
[zk: localhost:2181(CONNECTED) 11] get  /zk-test           #Read the znode
hdfdf                #This is the data
cZxid = 0x100000003
ctime = Wed Nov 09 10:55:15 UTC 2022
mZxid = 0x100000003
mtime = Wed Nov 09 10:55:15 UTC 2022
pZxid = 0x100000003
cversion = 0
dataVersion = 0
aclVersion = 0
ephemeralOwner = 0x0
dataLength = 5
numChildren = 0
#Log in to another zk node; if the znode data we created above is visible there, the zk cluster is working correctly
kubectl exec -it -n default  zk-1 -- bash    
zkCli.sh
[zk: localhost:2181(CONNECTED) 0] get /zk-test
hdfdf
cZxid = 0x100000003
ctime = Wed Nov 09 10:55:15 UTC 2022
mZxid = 0x100000003
mtime = Wed Nov 09 10:55:15 UTC 2022
pZxid = 0x100000003
cversion = 0
dataVersion = 0
aclVersion = 0
ephemeralOwner = 0x0
dataLength = 5
numChildren = 0
[zk: localhost:2181(CONNECTED) 1]
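Applications inside the cluster would normally connect through the zk-cs client Service created earlier rather than to an individual pod. Since zkCli.sh accepts a -server host:port argument, a quick sketch of reading the same znode through that Service:
#Read the znode through the zk-cs client Service instead of the local server
kubectl exec -it -n default zk-0 -- zkCli.sh -server zk-cs.default.svc.cluster.local:2181 get /zk-test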

Delete pods to simulate pod failures and verify the cluster stays healthy

[root@matser /]# kubectl  delete  pods zk-2 zk-0
pod "zk-2" deleted
pod "zk-0" deleted
[root@matser /]# kubectl  get pods -l app=zk
NAME   READY   STATUS    RESTARTS   AGE
zk-0   1/1     Running   0          25s
zk-1   1/1     Running   0          63m
zk-2   0/1     Running   0          4s
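The pods come back with their old data because of the volumeClaimTemplates: each replica owns its own PVC (datadir-zk-0, datadir-zk-1, datadir-zk-2), and a recreated pod with the same name re-attaches the same claim. A quick check that the claims stayed bound while the pods were being recreated:
#The PVCs created by volumeClaimTemplates remain Bound across pod deletion
kubectl get pvc -n default | grep datadir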

#Verify the cluster status (it is healthy)
[root@matser /]# for i in 0 1 2;do kubectl exec zk-$i -n default -- zkServer.sh status;done
ZooKeeper JMX enabled by default
Using config: /usr/bin/../etc/zookeeper/zoo.cfg
Mode: follower
ZooKeeper JMX enabled by default
Using config: /usr/bin/../etc/zookeeper/zoo.cfg
Mode: leader
ZooKeeper JMX enabled by default
Using config: /usr/bin/../etc/zookeeper/zoo.cfg
Mode: follower
[root@matser /]#
#Verify that the znode we created above still exists and that its data is still there
kubectl exec -it -n default  zk-2 -- bash                  #Enter the pod's container
zookeeper@zk-2:/$ zkCli.sh                                 #Log in to the zk cluster
[zk: localhost:2181(CONNECTED) 0] ls /                     #The znode is still there
[zk-test, zookeeper]
[zk: localhost:2181(CONNECTED) 1] get /zk-test             #The data is still there too
hdfdf
cZxid = 0x100000003
ctime = Wed Nov 09 10:55:15 UTC 2022
mZxid = 0x100000003
mtime = Wed Nov 09 10:55:15 UTC 2022
pZxid = 0x100000003
cversion = 0
dataVersion = 0
aclVersion = 0
ephemeralOwner = 0x0
dataLength = 5
numChildren = 0
[zk: localhost:2181(CONNECTED) 2]
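If you want to see the PodDisruptionBudget in action, drain a node that hosts one of the zk pods: the eviction is allowed only while at least two members stay healthy, so a second simultaneous drain would be blocked. A rough sketch, using node1 from the output above (on older kubectl the last flag is --delete-local-data):
#Drain a node hosting a zk pod; evictions are throttled by the zk-pdb PodDisruptionBudget
kubectl drain node1 --ignore-daemonsets --delete-emptydir-data
#Bring the node back into service afterwards
kubectl uncordon node1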

Reposted from: https://blog.csdn.net/MssGuo/article/details/127773132
Copyright belongs to the original author, MssGuo. If there is any infringement, please contact us for removal.
