0


基于kubeadm安装1.30版本k8s

实验系统:CentOS7.9

实验环境:单节点虚拟机

配置国内软件源

  1. [root@k8s-master1 ~]# mkdir -p /data && mv /etc/yum.repos.d/* /data
  2. [root@k8s-master1 ~]# curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
  3. [root@k8s-master1 ~]# yum makecache

添加host

  1. [root@k8s-master1 ~]# vi /etc/hosts
  2. 127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
  3. ::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
  4. 172.29.1.101 k8s-master1

关闭防火墙、selinux

  1. [root@k8s-master1 ~]# systemctl stop firewalld && systemctl disable firewalld
  2. [root@k8s-master1 ~]# setenforce 0
  3. [root@k8s-master1 ~]# vi /etc/selinux/config # 将 SELINUX=enforcing 修改为 disabled,重启后永久生效
  4. # This file controls the state of SELinux on the system.
  5. # SELINUX= can take one of these three values:
  6. # enforcing - SELinux security policy is enforced.
  7. # permissive - SELinux prints warnings instead of enforcing.
  8. # disabled - No SELinux policy is loaded.
  9. SELINUX=disabled
  10. # SELINUXTYPE= can take one of three values:
  11. # targeted - Targeted processes are protected,
  12. # minimum - Modification of targeted policy. Only selected processes are protected.
  13. # mls - Multi Level Security protection.
  14. SELINUXTYPE=targeted

关闭swap分区

  1. [root@k8s-master1 ~]# swapoff -a # 临时关闭
  2. [root@k8s-master1 ~]# vim /etc/fstab # 注释掉swap那一行 永久关闭
  3. # /etc/fstab: static file system information.
  4. #
  5. # Use 'blkid' to print the universally unique identifier for a
  6. # device; this may be used with UUID= as a more robust way to name devices
  7. # that works even if disks are added and removed. See fstab(5).
  8. #
  9. # <file system> <mount point> <type> <options> <dump> <pass>
  10. # / was on /dev/sda3 during installation
  11. UUID=af1f3f13-f592-42af-a4c1-fa38c19e4fda / ext4 errors=remount-ro 0 1
  12. # /boot/efi was on /dev/sda2 during installation
  13. UUID=0FF3-84A3 /boot/efi vfat umask=0077 0 1
  14. # /swapfile none swap sw 0 0

配置内核模块与sysctl参数(kubeadm预检必需,否则 kubeadm init 会报 ip_forward 错误)

  1. [root@k8s-master1 ~]# cat <<EOF > /etc/modules-load.d/k8s.conf
  2. overlay
  3. br_netfilter
  4. EOF
  5. [root@k8s-master1 ~]# modprobe overlay && modprobe br_netfilter
  6. [root@k8s-master1 ~]# cat <<EOF > /etc/sysctl.d/k8s.conf
  7. net.bridge.bridge-nf-call-iptables = 1
  8. net.bridge.bridge-nf-call-ip6tables = 1
  9. net.ipv4.ip_forward = 1
  10. EOF
  11. [root@k8s-master1 ~]# sysctl --system

安装docker

  1. [root@k8s-master1 ~]# cat /etc/yum.repos.d/docker-ce.repo
  2. [docker-ce-stable]
  3. name=Docker CE Stable - $basearch
  4. baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/$releasever/$basearch/stable
  5. enabled=1
  6. gpgcheck=1
  7. gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
  8. [docker-ce-stable-debuginfo]
  9. name=Docker CE Stable - Debuginfo $basearch
  10. baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/$releasever/debug-$basearch/stable
  11. enabled=0
  12. gpgcheck=1
  13. gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
  14. [docker-ce-stable-source]
  15. name=Docker CE Stable - Sources
  16. baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/$releasever/source/stable
  17. enabled=0
  18. gpgcheck=1
  19. gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
  20. [docker-ce-test]
  21. name=Docker CE Test - $basearch
  22. baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/$releasever/$basearch/test
  23. enabled=0
  24. gpgcheck=1
  25. gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
  26. [docker-ce-test-debuginfo]
  27. name=Docker CE Test - Debuginfo $basearch
  28. baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/$releasever/debug-$basearch/test
  29. enabled=0
  30. gpgcheck=1
  31. gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
  32. [docker-ce-test-source]
  33. name=Docker CE Test - Sources
  34. baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/$releasever/source/test
  35. enabled=0
  36. gpgcheck=1
  37. gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
  38. [docker-ce-nightly]
  39. name=Docker CE Nightly - $basearch
  40. baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/$releasever/$basearch/nightly
  41. enabled=0
  42. gpgcheck=1
  43. gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
  44. [docker-ce-nightly-debuginfo]
  45. name=Docker CE Nightly - Debuginfo $basearch
  46. baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/$releasever/debug-$basearch/nightly
  47. enabled=0
  48. gpgcheck=1
  49. gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
  50. [docker-ce-nightly-source]
  51. name=Docker CE Nightly - Sources
  52. baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/$releasever/source/nightly
  53. enabled=0
  54. gpgcheck=1
  55. gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
  56. [root@k8s-master1 ~]# yum install -y docker-ce docker-ce-cli containerd.io # 注意:安装 docker 会使用 CentOS extras 中的旧版本,不会使用上面配置的 docker-ce 源
  57. [root@k8s-master1 ~]# systemctl start docker && systemctl enable docker

配置镜像加速

  1. [root@k8s-master1 ~]# vi /etc/docker/daemon.json
  2. # 注意:JSON 文件不支持注释,实际文件中只保留下面三行;加速地址请替换为自己的阿里云镜像加速地址
  3. {
  4. "registry-mirrors": ["https://8740sp47.mirror.aliyuncs.com"]
  5. }
  6. [root@k8s-master1 ~]# systemctl daemon-reload
  7. [root@k8s-master1 ~]# systemctl restart docker

安装containerd

  1. [root@k8s-master1 ~]# yum install -y containerd.io # CentOS 7 没有名为 containerd 的包,docker-ce 源中的包名为 containerd.io
  2. [root@k8s-master1 ~]# mkdir -p /etc/containerd
  3. [root@k8s-master1 ~]# containerd config default | sudo tee /etc/containerd/config.toml
  4. [root@k8s-master1 ~]# vi /etc/containerd/config.toml
  5. # 修改或者添加这个选项
  6. [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
  7. SystemdCgroup = true
  8. # 修改此处替换成阿里云的源
  9. sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.9" # k8s 1.30 对应 pause:3.9,与 kubeadm 默认版本保持一致
  10. [root@k8s-master1 ~]# systemctl restart containerd
  11. [root@k8s-master1 ~]# systemctl enable containerd

配置Kubernetes仓库 安装kubelet kubeadm kubectl 并锁定版本

  1. [root@k8s-master1 ~]# vi /etc/yum.repos.d/kubernetes.repo
  2. [kubernetes]
  3. name=Kubernetes
  4. baseurl=https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.30/rpm/
  5. enabled=1
  6. gpgcheck=1
  7. gpgkey=https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.30/rpm/repodata/repomd.xml.key
  8. exclude=kubelet kubeadm kubectl cri-tools kubernetes-cni
  9. [root@k8s-master1 ~]# yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes # repo 中的 exclude 锁定版本,安装/升级时需显式解除
  10. [root@k8s-master1 ~]# systemctl enable kubelet # 设置开机自启;kubeadm init 时会自动拉起 kubelet

配置主节点

  1. # 替换成当前节点的ip
  2. [root@k8s-master1 ~]# kubeadm init --pod-network-cidr=10.244.0.0/16 --apiserver-advertise-address 172.29.1.101 --image-repository registry.aliyuncs.com/google_containers
  3. [root@k8s-master1 ~]# sudo mkdir -p $HOME/.kube
  4. [root@k8s-master1 ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  5. [root@k8s-master1 ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
  6. [root@k8s-master1 ~]# export KUBECONFIG=/etc/kubernetes/admin.conf

安装网络插件

  1. [root@k8s-master1 ~]# vi kube-flannel.yaml

注意:YAML 对缩进敏感,原文排版丢失了缩进,以下为还原缩进后的完整清单(flannel v0.24.2),请按原样保存:

    ---
    kind: Namespace
    apiVersion: v1
    metadata:
      name: kube-flannel
      labels:
        k8s-app: flannel
        pod-security.kubernetes.io/enforce: privileged
    ---
    kind: ClusterRole
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      labels:
        k8s-app: flannel
      name: flannel
    rules:
    - apiGroups:
      - ""
      resources:
      - pods
      verbs:
      - get
    - apiGroups:
      - ""
      resources:
      - nodes
      verbs:
      - get
      - list
      - watch
    - apiGroups:
      - ""
      resources:
      - nodes/status
      verbs:
      - patch
    - apiGroups:
      - networking.k8s.io
      resources:
      - clustercidrs
      verbs:
      - list
      - watch
    ---
    kind: ClusterRoleBinding
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      labels:
        k8s-app: flannel
      name: flannel
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: ClusterRole
      name: flannel
    subjects:
    - kind: ServiceAccount
      name: flannel
      namespace: kube-flannel
    ---
    apiVersion: v1
    kind: ServiceAccount
    metadata:
      labels:
        k8s-app: flannel
      name: flannel
      namespace: kube-flannel
    ---
    kind: ConfigMap
    apiVersion: v1
    metadata:
      name: kube-flannel-cfg
      namespace: kube-flannel
      labels:
        tier: node
        k8s-app: flannel
        app: flannel
    data:
      cni-conf.json: |
        {
          "name": "cbr0",
          "cniVersion": "0.3.1",
          "plugins": [
            {
              "type": "flannel",
              "delegate": {
                "hairpinMode": true,
                "isDefaultGateway": true
              }
            },
            {
              "type": "portmap",
              "capabilities": {
                "portMappings": true
              }
            }
          ]
        }
      net-conf.json: |
        {
          "Network": "10.244.0.0/16",
          "Backend": {
            "Type": "vxlan"
          }
        }
    ---
    apiVersion: apps/v1
    kind: DaemonSet
    metadata:
      name: kube-flannel-ds
      namespace: kube-flannel
      labels:
        tier: node
        app: flannel
        k8s-app: flannel
    spec:
      selector:
        matchLabels:
          app: flannel
      template:
        metadata:
          labels:
            tier: node
            app: flannel
        spec:
          affinity:
            nodeAffinity:
              requiredDuringSchedulingIgnoredDuringExecution:
                nodeSelectorTerms:
                - matchExpressions:
                  - key: kubernetes.io/os
                    operator: In
                    values:
                    - linux
          hostNetwork: true
          priorityClassName: system-node-critical
          tolerations:
          - operator: Exists
            effect: NoSchedule
          serviceAccountName: flannel
          initContainers:
          - name: install-cni-plugin
            image: docker.io/flannel/flannel-cni-plugin:v1.4.0-flannel1
            command:
            - cp
            args:
            - -f
            - /flannel
            - /opt/cni/bin/flannel
            volumeMounts:
            - name: cni-plugin
              mountPath: /opt/cni/bin
          - name: install-cni
            image: docker.io/flannel/flannel:v0.24.2
            command:
            - cp
            args:
            - -f
            - /etc/kube-flannel/cni-conf.json
            - /etc/cni/net.d/10-flannel.conflist
            volumeMounts:
            - name: cni
              mountPath: /etc/cni/net.d
            - name: flannel-cfg
              mountPath: /etc/kube-flannel/
          containers:
          - name: kube-flannel
            image: docker.io/flannel/flannel:v0.24.2
            command:
            - /opt/bin/flanneld
            args:
            - --ip-masq
            - --kube-subnet-mgr
            resources:
              requests:
                cpu: "100m"
                memory: "50Mi"
            securityContext:
              privileged: false
              capabilities:
                add: ["NET_ADMIN", "NET_RAW"]
            env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: EVENT_QUEUE_DEPTH
              value: "5000"
            volumeMounts:
            - name: run
              mountPath: /run/flannel
            - name: flannel-cfg
              mountPath: /etc/kube-flannel/
            - name: xtables-lock
              mountPath: /run/xtables.lock
          volumes:
          - name: run
            hostPath:
              path: /run/flannel
          - name: cni-plugin
            hostPath:
              path: /opt/cni/bin
          - name: cni
            hostPath:
              path: /etc/cni/net.d
          - name: flannel-cfg
            configMap:
              name: kube-flannel-cfg
          - name: xtables-lock
            hostPath:
              path: /run/xtables.lock
              type: FileOrCreate
  1. [root@k8s-master1 ~]# kubectl apply -f kube-flannel.yaml

查看节点状态并加入集群

  1. [root@k8s-master1 ~]# kubectl get nodes
  2. NAME STATUS ROLES AGE VERSION
  3. k8s-master1 Ready control-plane 22m v1.30.0
  4. [root@k8s-master1 ~]# kubeadm token create --print-join-command # 生成加入集群的命令

移除master节点上的污点

  1. [root@k8s-master1 ~]# kubectl taint nodes k8s-master1 node-role.kubernetes.io/control-plane:NoSchedule-
  2. node/k8s-master1 untainted

本文转载自: https://blog.csdn.net/m0_67019144/article/details/138046111
版权归原作者 GoSimplicity 所有, 如有侵权,请联系我们删除。

“基于kubeadm安装1.30版本k8s”的评论:

还没有评论