

Fixing K8s flannel network plugin installation issues (cni plugin not initialized)


Steps to uninstall the flannel network:

# Step 1: delete flannel on the master node

kubectl delete -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
kubectl delete -f kube-flannel.yml
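
To double-check that flannel was actually removed, you can verify that its DaemonSet and pods are gone (depending on the manifest version, the resources live in the kube-flannel or kube-system namespace):

kubectl get daemonset -A | grep flannel
kubectl get pods -A -o wide | grep flannel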

# Step 2: on each node, clean up the files left behind by the flannel network

ifconfig cni0 down
ip link delete cni0
ifconfig flannel.1 down
ip link delete flannel.1
rm -rf /var/lib/cni/
rm -f /etc/cni/net.d/*

Note: after running the commands above, restart kubelet.
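
For example (restarting containerd as well is optional, but it forces the runtime to reload its CNI configuration):

systemctl restart containerd   # optional: restart the container runtime too
systemctl restart kubelet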

# On the master node, also remove cni0 and any leftover flannel veth interfaces

ifconfig cni0 down
ip link delete cni0

[root@master ~]# ifconfig vethb22xxxxx down   # use only the part of the interface name before the @
[root@master ~]# ip link delete vethb22xxxxx
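
If you are not sure which veth interfaces are stale, list them first; only the part of the name before the @ is passed to ip link delete:

# List the veth interfaces currently present on the node
ip -o link show type veth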


[root@master ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
    link/ether 52:54:00:a5:74:7f brd ff:ff:ff:ff:ff:ff
    altname enp0s5
    altname ens5
    inet 123.12.0.10/24 brd 123.12.0.255 scope global noprefixroute eth0
       valid_lft forever preferred_lft forever
3: docker0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
    link/ether 02:42:21:50:e2:be brd ff:ff:ff:ff:ff:ff
    inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0
       valid_lft forever preferred_lft forever
19: vethc12d83a@if18: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue master docker0 state UP group default
    link/ether b6:0c:87:e3:4c:fc brd ff:ff:ff:ff:ff:ff link-netnsid 0
86: vethb22xxxxx@if2: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue state UP group default
    link/ether d6:4b:f8:fb:8f:38 brd ff:ff:ff:ff:ff:ff link-netns cni-c4dc8844-6897-7862-5367-f7ef4b6acc90

# Issue: cni0 does not show up in the ip a output

cni0 is missing simply because no pod is running on this node; as soon as a pod runs on the node, the bridge appears.
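
A quick way to trigger this is to run a throwaway pod and then check the bridge again (the pod name here is arbitrary, and the pod may land on another node unless you pin it):

kubectl run cni-test --image=nginx
ifconfig cni0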

Check the kubelet logs:

journalctl -xefu kubelet

# Error: cni plugin not initialized

Nov 07 16:12:56 VM-0-5-centos kubelet[2278204]: E1107 16:12:56.747955 2278204 kubelet.go:2855] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"
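
While the CNI plugin is uninitialized, the affected node normally reports NotReady; you can confirm this first:

kubectl get nodes -o wide

Then re-apply the flannel manifest: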
kubectl apply -f kube-flannel.yml
# The 10-flannel.conflist file is sometimes not created during initialization and has to be added manually

Reference: "k0s error: cni plugin not initialized"

cat <<EOL > /etc/cni/net.d/10-flannel.conflist 
{
  "name": "cbr0",
  "cniVersion": "0.3.1",
  "plugins": [
    {
      "type": "flannel",
      "delegate": {
        "hairpinMode": true,
        "isDefaultGateway": true
      }
    },
    {
      "type": "portmap",
      "capabilities": {
        "portMappings": true
      }
    }
  ]
}
EOL

# Check the conflist

cat /etc/cni/net.d/10-flannel.conflist
ifconfig cni0

Modify containerd's image endpoint

Edit the /etc/crictl.yaml file; the main change is the image-endpoint field added in newer crictl versions:

vim /etc/crictl.yaml

runtime-endpoint: "unix:///run/containerd/containerd.sock"
image-endpoint: "unix:///run/containerd/containerd.sock" # keep it the same as runtime-endpoint above
timeout: 10
debug: false
pull-image-on-create: false
disable-pull-on-run: false

Reload and restart kubelet:
systemctl daemon-reload
systemctl restart kubelet

crictl images

Configure the containerd cgroup driver to systemd (all nodes)

Original source: Kubernetes 1.24 High-Availability Cluster Deployment

Since v1.24.0, Kubernetes no longer uses dockershim and instead uses containerd as the container runtime endpoint. containerd therefore needs to be installed; it was installed automatically when Docker was installed earlier, and Docker here only acts as a client. The container engine is still containerd.
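
To confirm that kubelet is indeed pointed at containerd rather than Docker, you can check that containerd is running and look at the flags kubeadm generated (file path as written by kubeadm):

systemctl status containerd
cat /var/lib/kubelet/kubeadm-flags.env   # should reference the containerd socket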

cat /etc/containerd/config.toml | grep -n "SystemdCgroup"
sed -i 's#SystemdCgroup = false#SystemdCgroup = true#g' /etc/containerd/config.toml
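
A quick check that the edit took effect (it should now print SystemdCgroup = true):

grep -n "SystemdCgroup" /etc/containerd/config.toml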

After applying all the changes, restart containerd:

systemctl restart containerd

If this file does not exist, generate it; the sandbox image also needs to be changed.

Generate containerd's default configuration file:

containerd config default > /etc/containerd/config.toml

Check which line of the file sets the default sandbox image:

cat /etc/containerd/config.toml | grep -n "sandbox_image"

Using vim, locate sandbox_image and change the registry address to registry.aliyuncs.com/google_containers/pause:3.6:

vim /etc/containerd/config.toml
sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.6"
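
If you prefer a non-interactive edit, a sed one-liner in the same spirit as the SystemdCgroup change above should also work (the pattern is a sketch; verify with grep afterwards):

sed -i 's#sandbox_image = ".*"#sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.6"#' /etc/containerd/config.toml
grep -n "sandbox_image" /etc/containerd/config.toml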

Restart the containerd service:

systemctl daemon-reload
systemctl restart containerd.service

# Join the node to the cluster

kubeadm join 123.12.0.23:6443 --token nacoen.xxxxxxxxxxx \
    --discovery-token-ca-cert-hash sha256:xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
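
If the original token has expired, a new join command can be printed on the master:

# Run on the master node; it outputs a ready-to-use kubeadm join command
kubeadm token create --print-join-command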

# If the node has joined the cluster before, an error occurs

error execution phase preflight: [preflight] Some fatal errors occurred:
[ERROR FileAvailable--etc-kubernetes-kubelet.conf]: /etc/kubernetes/kubelet.conf already exists
[ERROR FileAvailable--etc-kubernetes-pki-ca.crt]: /etc/kubernetes/pki/ca.crt already exists

Delete the files directly:

rm -f /etc/kubernetes/kubelet.conf
rm -f /etc/kubernetes/pki/ca.crt
Copy the .kube directory over from the master node (see the scp command below), then join again.

# Join times out

[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
[kubelet-check] Initial timeout of 40s passed.

Reference: https://blog.csdn.net/gs80140/article/details/92798027
swapoff -a # will turn off the swap
kubeadm reset
systemctl daemon-reload
systemctl restart kubelet
iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X # will reset iptables
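
If kube-proxy runs in IPVS mode, kubeadm reset also recommends clearing the IPVS tables (requires ipvsadm; skip this if you only use iptables mode):

ipvsadm --clear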

After the reset, join again. The .kube directory can be copied from the master with scp:

scp -rP <your-sshd-port (default 22)> root@123.12.0.23:/root/.kube /root/.kube

Worker node images

# kube-flannel.yml also pulls images; adjust the configuration according to your network conditions

cat kube-flannel.yml |grep image
image: docker.io/flannel/flannel-cni-plugin:v1.2.0
image: docker.io/flannel/flannel:v0.22.3
image: docker.io/flannel/flannel:v0.22.3
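
On nodes with slow access to docker.io, these images can be pre-pulled with crictl before applying the manifest (tags taken from the manifest above):

crictl pull docker.io/flannel/flannel-cni-plugin:v1.2.0
crictl pull docker.io/flannel/flannel:v0.22.3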

# Images on the worker node

[root@node02 ~]# crictl images
IMAGE                                                TAG                 IMAGE ID            SIZE
docker.io/flannel/flannel-cni-plugin                 v1.2.0            xxxxxxxxxxxxxxx      3.88MB
docker.io/flannel/flannel                            v0.22.3           xxxxxxxxxxxxxxx      27MB
registry.aliyuncs.com/google_containers/kube-proxy   v1.28.2           xxxxxxxxxxxxxxx      24.6MB
registry.aliyuncs.com/google_containers/pause        3.6               xxxxxxxxxxxxxxx      302kB

Restart the service and check its status:

systemctl restart kubelet
systemctl status kubelet.service

# After the master node starts, the install-cni-plugin and install-cni containers run briefly and then exit; they are init containers that put the CNI plugin binary and config in place so that kube-flannel and the other containers can come up


[root@VM-0-5-centos ~]# crictl ps -a
CONTAINER           IMAGE               CREATED              STATE               NAME                 ATTEMPT        POD ID              POD
xxxxxxxxxxxx      xxxxxxxxxx           35 seconds ago       Running             kube-flannel              0             xxxxxxxxxxx      kube-flannel-ds-swtg5
xxxxxxxxxxxx      xxxxxxxxxx           36 seconds ago       Exited              install-cni               0             xxxxxxxxxxx      kube-flannel-ds-swtg5
xxxxxxxxxxxx      xxxxxxxxxx           36 seconds ago       Exited              install-cni-plugin        0             xxxxxxxxxxx      kube-flannel-ds-swtg5
xxxxxxxxxxxx      xxxxxxxxxx           54 seconds ago       Running             kube-proxy                0             xxxxxxxxxxx      kube-proxy-9rt8f
xxxxxxxxxxxx      xxxxxxxxxx           About a minute ago   Running             kube-controller-manager   1             xxxxxxxxxxx      kube-controller-manager-master01
xxxxxxxxxxxx      xxxxxxxxxx           About a minute ago   Running             kube-scheduler            1             xxxxxxxxxxx      kube-scheduler-master01
xxxxxxxxxxxx      xxxxxxxxxx           About a minute ago   Running             etcd                      1             xxxxxxxxxxx      etcd-master01
xxxxxxxxxxxx      xxxxxxxxxx           About a minute ago   Running             kube-apiserver            1             xxxxxxxxxxx  
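
The same can be checked from the API side; depending on the manifest version, the flannel DaemonSet lives in the kube-flannel or kube-system namespace:

kubectl get pods -n kube-flannel -o wide
kubectl get daemonset -n kube-flannel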

Test by creating a temporary pod

kubectl create deployment testapp --image=nginx -n kube-public
kubectl describe deployment.apps -n kube-public
kubectl describe replicaset.apps -n kube-public
kubectl describe pod/testapp-xxxxxxxxxx-2qh9n -n kube-public
kubectl exec -it <pod-name> -c <container-name> -- bash   # or sh; unlike docker exec, this is not limited to containers on the local host
# If the pod has only one container, -c can be omitted
kubectl exec -it <pod-name> -c <container-name> -- <command>   # runs the command after -- without opening a shell in the container
kubectl exec -it testapp-xxxxxxxxxx-2qh9n -- /bin/bash
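
To verify that pod networking actually works over flannel, curl the nginx pod's IP from a node or from another pod (the pod name and IP below are placeholders):

# The IP column shows the address allocated from the flannel subnet
kubectl get pods -n kube-public -o wide
# The nginx welcome page should be reachable from any node
curl http://<pod-ip>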


Reposted from: https://blog.csdn.net/alksjdfp32r/article/details/138306146
Copyright belongs to the original author, crabdave123. In case of infringement, please contact us for removal.
