

K8s Dashboard



一. Deploying the Dashboard

1. Introduction

  • All day-to-day operations in Kubernetes can be carried out with the kubectl command-line tool.
  • To offer a richer user experience, Kubernetes also provides a web-based user interface: the Dashboard.
  • With the Dashboard you can deploy containerized applications, monitor their status, troubleshoot problems, and manage the various resources in the cluster.

2. Download the YAML and run the Dashboard

# Download the YAML
[root@k8s-master ~]# wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.7.0/aio/deploy/recommended.yaml

# Change the kubernetes-dashboard Service to type NodePort
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  type: NodePort  # added
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30001  # added
  selector:
    k8s-app: kubernetes-dashboard
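  • Alternatively, if the stock recommended.yaml has already been applied unchanged, the Service can be switched to NodePort afterwards. A minimal sketch (the patch body mirrors the edit above; verify with a get afterwards):

# Switch the existing Service to NodePort and pin the node port to 30001
kubectl -n kubernetes-dashboard patch svc kubernetes-dashboard --type=merge \
  -p '{"spec":{"type":"NodePort","ports":[{"port":443,"targetPort":8443,"nodePort":30001}]}}'

# Confirm the change
kubectl -n kubernetes-dashboard get svc kubernetes-dashboard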
  • The complete file is as follows
[root@k8s-master ~]# cat dashboard.yml 
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

apiVersion: v1
kind: Namespace
metadata:
  name: kubernetes-dashboard

---

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  type: NodePort
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30001
  selector:
    k8s-app: kubernetes-dashboard

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kubernetes-dashboard
type: Opaque

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-csrf
  namespace: kubernetes-dashboard
type: Opaque
data:
  csrf:""---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-key-holder
  namespace: kubernetes-dashboard
type: Opaque

---

kind: ConfigMap
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-settings
  namespace: kubernetes-dashboard

---

kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
rules:
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
  - apiGroups: [""]
    resources: ["secrets"]
    resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
    verbs: ["get", "update", "delete"]
    # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
  - apiGroups: [""]
    resources: ["configmaps"]
    resourceNames: ["kubernetes-dashboard-settings"]
    verbs: ["get", "update"]
    # Allow Dashboard to get metrics.
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["heapster", "dashboard-metrics-scraper"]
    verbs: ["proxy"]
  - apiGroups: [""]
    resources: ["services/proxy"]
    resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
    verbs: ["get"]

---

kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
rules:
  # Allow Metrics Scraper to get metrics from the Metrics server
  - apiGroups: ["metrics.k8s.io"]
    resources: ["pods","nodes"]
    verbs: ["get","list","watch"]

---

apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      securityContext:
        seccompProfile:
          type: RuntimeDefault
      containers:
        - name: kubernetes-dashboard
          image: kubernetesui/dashboard:v2.7.0
          imagePullPolicy: Always
          ports:
            - containerPort: 8443
              protocol: TCP
          args:
            - --auto-generate-certificates
            - --namespace=kubernetes-dashboard
            # Uncomment the following line to manually specify Kubernetes API server Host
            # If not specified, Dashboard will attempt to auto discover the API server and connect
            # to it. Uncomment only if the default does not work.
            # - --apiserver-host=http://my-address:port
          volumeMounts:
            - name: kubernetes-dashboard-certs
              mountPath: /certs
              # Create on-disk volume to store exec logs
            - mountPath: /tmp
              name: tmp-volume
          livenessProbe:
            httpGet:
              scheme: HTTPS
              path: /
              port: 8443
            initialDelaySeconds: 30
            timeoutSeconds: 30
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      volumes:
        - name: kubernetes-dashboard-certs
          secret:
            secretName: kubernetes-dashboard-certs
        - name: tmp-volume
          emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 8000
      targetPort: 8000
  selector:
    k8s-app: dashboard-metrics-scraper

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: dashboard-metrics-scraper
  template:
    metadata:
      labels:
        k8s-app: dashboard-metrics-scraper
    spec:
      securityContext:
        seccompProfile:
          type: RuntimeDefault
      containers:
        - name: dashboard-metrics-scraper
          image: kubernetesui/metrics-scraper:v1.0.8
          ports:
            - containerPort: 8000
              protocol: TCP
          livenessProbe:
            httpGet:
              scheme: HTTP
              path: /
              port: 8000
            initialDelaySeconds: 30
            timeoutSeconds: 30
          volumeMounts:
            - mountPath: /tmp
              name: tmp-volume
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      volumes:
        - name: tmp-volume
          emptyDir: {}
[root@k8s-master ~]# 
  • Deploy
[root@k8s-master ~]# kubectl apply -f dashboard.yml 
namespace/kubernetes-dashboard created
serviceaccount/kubernetes-dashboard created
service/kubernetes-dashboard created
secret/kubernetes-dashboard-certs created
secret/kubernetes-dashboard-csrf created
secret/kubernetes-dashboard-key-holder created
configmap/kubernetes-dashboard-settings created
role.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrole.rbac.authorization.k8s.io/kubernetes-dashboard created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
deployment.apps/kubernetes-dashboard created
service/dashboard-metrics-scraper created
deployment.apps/dashboard-metrics-scraper created
[root@k8s-master ~]# 
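  • Optionally, wait for both Deployments to finish rolling out before checking the Pods (a minimal sketch):

kubectl -n kubernetes-dashboard rollout status deployment/kubernetes-dashboard --timeout=120s
kubectl -n kubernetes-dashboard rollout status deployment/dashboard-metrics-scraper --timeout=120s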

# Check the resources in the kubernetes-dashboard namespace
[root@k8s-master ~]# kubectl get pod,svc -n kubernetes-dashboard
NAME                                             READY   STATUS    RESTARTS   AGE
pod/dashboard-metrics-scraper-64bcc67c9c-7kg5r   1/1     Running   0          115s
pod/kubernetes-dashboard-5c8bd6b59-4phrf         1/1     Running   0          115s

NAME                                TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)         AGE
service/dashboard-metrics-scraper   ClusterIP   10.110.249.184   <none>        8000/TCP        115s
service/kubernetes-dashboard        NodePort    10.97.68.225     <none>        443:30001/TCP   115s
[root@k8s-master ~]# 
  
  
[root@k8s-master ~]# kubectl get pods -n kubernetes-dashboard -o wide
NAME                                         READY   STATUS    RESTARTS   AGE     IP             NODE        NOMINATED NODE   READINESS GATES
dashboard-metrics-scraper-64bcc67c9c-7kg5r   1/1     Running   0          5m31s   10.244.1.133   k8s-node1   <none>           <none>
kubernetes-dashboard-5c8bd6b59-4phrf         1/1     Running   0          5m31s   10.244.2.153   k8s-node2   <none>           <none>
[root@k8s-master ~]# 
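  • A quick reachability check of the NodePort (a sketch; <node-ip> is a placeholder for the IP of any cluster node, and -k is needed because the Dashboard serves a self-signed certificate):

# Expect an HTML/JS response from the Dashboard front end
curl -k https://<node-ip>:30001/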

3. Create an access account and obtain a token

Create a ServiceAccount
[root@k8s-master dashboard]# cat user.yml 
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin
  namespace: kubernetes-dashboard

Create a ClusterRoleBinding
[root@k8s-master dashboard]# cat ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: admin
    namespace: kubernetes-dashboard
[root@k8s-master dashboard]# 
[root@k8s-master dashboard]# kubectl apply -f user.yml 
serviceaccount/admin created
[root@k8s-master dashboard]# kubectl apply -f ClusterRoleBinding 
clusterrolebinding.rbac.authorization.k8s.io/admin created
[root@k8s-master dashboard]# 
[root@k8s-master dashboard]# kubectl get -f user.yml 
NAME    SECRETS   AGE
admin   0         26s
[root@k8s-master dashboard]# kubectl get -f ClusterRoleBinding 
NAME    ROLE                        AGE
admin   ClusterRole/cluster-admin   28s
[root@k8s-master dashboard]# 
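  • The same account and binding can also be created imperatively instead of from YAML files (a minimal sketch, equivalent to the two manifests above):

kubectl -n kubernetes-dashboard create serviceaccount admin
kubectl create clusterrolebinding admin \
  --clusterrole=cluster-admin \
  --serviceaccount=kubernetes-dashboard:admin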

# Create a token for the account
[root@k8s-master dashboard]# kubectl -n kubernetes-dashboard create token admin
eyJhbGciOiJSUzI1NiIsImtpZCI6IkNCaENTR3BMRlhEckRlWmduYXNZaXktUVdqbWNOYWNTbnRVSTU2Q3RxWXcifQ.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiXSwiZXhwIjoxNjYzNTk3MDAyLCJpYXQiOjE2NjM1OTM0MDIsImlzcyI6Imh0dHBzOi8va3ViZXJuZXRlcy5kZWZhdWx0LnN2Yy5jbHVzdGVyLmxvY2FsIiwia3ViZXJuZXRlcy5pbyI6eyJuYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsInNlcnZpY2VhY2NvdW50Ijp7Im5hbWUiOiJhZG1pbiIsInVpZCI6IjI4YzZiNDhiLWY4MzEtNDYxNS1hNWIzLTc1MmVjYjZlYzIyZiJ9fSwibmJmIjoxNjYzNTkzNDAyLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZXJuZXRlcy1kYXNoYm9hcmQ6YWRtaW4ifQ.QKJ4k1mWtFMCvrb3pg85Rzor3IAK9Y968sVu_Wg9cJ8DW6cKIDi3thFdvFWC3OmDHQMtxPrBwMRoKvCWlKpQ_kn4EU1eQGTNQJjStz9_J2N-thdpd1tROG-SSkUENWt173ob4bR_PPpKsEjske-kgtQEkuFbaXsq9wVXOMwIaFLixfRu5MqcaVSjbjMZDp2TYSCiG2PCA0Wqh8iD3fto9k9xinL-wMSxc-QU-lT_LU3oYOXxWUOd-klYPEm6jhimlxbfRy4UWDeZh9U4P_Lg9kKjgD7gvFkm0ReUZBKZshN50bBC0jWFCbbuZ1tbOvHZps57gK7tEQIqpHcDkiJsfw
[root@k8s-master dashboard]# 
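  • Tokens produced by kubectl create token are short-lived by default (roughly one hour). On recent kubectl versions a longer validity can be requested with --duration (a sketch; the upper limit depends on the API server configuration):

# Request a token valid for about 24 hours
kubectl -n kubernetes-dashboard create token admin --duration=24h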

[root@k8s-master ~]# kubectl get serviceaccount -n kubernetes-dashboard
NAME                   SECRETS   AGE
dashboard-admin        0         5m20s
default                0         24m
kubernetes-dashboard   0         24m
[root@k8s-master ~]# 

Delete a user (ServiceAccount)
[root@k8s-master ~]# kubectl delete serviceaccount dashboard-admin -n kubernetes-dashboard
serviceaccount "dashboard-admin" deleted
[root@k8s-master ~]# 
  
  
Delete the authorization (ClusterRoleBinding)
[root@k8s-master ~]# kubectl delete clusterrolebinding dashboard-admin-rb 
clusterrolebinding.rbac.authorization.k8s.io "dashboard-admin-rb" deleted
[root@k8s-master ~]#   
  • Delete the admin ServiceAccount and ClusterRoleBinding:
kubectl -n kubernetes-dashboard delete serviceaccount admin
kubectl -n kubernetes-dashboard delete clusterrolebinding admin

4. Access the Dashboard UI through a browser

  • Enter the token obtained above on the login page
  • Kubernetes Dashboard
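  • Besides the NodePort, the Dashboard can also be reached through kubectl proxy (a sketch; the URL below is the standard service-proxy path for the kubernetes-dashboard Service):

# Start a local proxy to the API server (listens on 127.0.0.1:8001 by default)
kubectl proxy
# Then open in a browser on the same machine:
# http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/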


  • If the Dashboard overview page appears after logging in, the deployment works


二. Using the Dashboard

1. First create a Pod and inspect it

[root@k8s-master manifest]# cat httpd.yml 
apiVersion: v1
kind: Pod
metadata:
  name: pod-pullimage
  namespace: dev
  labels:
    app: httpdlab
spec:
  nodeName: k8s-node2
  containers:
  - name: httpd
    image: httpd:latest
    imagePullPolicy: IfNotPresent
[root@k8s-master manifest]# 
[root@k8s-master manifest]# kubectl apply -f httpd.yml 
pod/pod-pullimage created
[root@k8s-master manifest]# kubectl get -f httpd.yml 
NAME            READY   STATUS    RESTARTS   AGE
pod-pullimage   1/1     Running   0          2s
[root@k8s-master manifest]# kubectl get pods
No resources found in default namespace.
[root@k8s-master manifest]# kubectl get pods -n dev
NAME            READY   STATUS    RESTARTS   AGE
pod-pullimage   1/1     Running   0          15s


  • Bottom-right corner


  • Delete

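  • The same deletion can be done from the command line instead of the UI (a minimal sketch for the Pod created above):

kubectl -n dev delete pod pod-pullimage
kubectl -n dev get pods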

2. Create a Deployment


  • Click Advanced options


  • Deploy


[root@k8s-master manifest]# curl 10.104.120.92:8888
<html><body><h1>It works!</h1></body></html>
[root@k8s-master manifest]# 
  • Modify


  • Access
[root@k8s-master manifest]# curl 10.104.120.92:8888
<html><body><h1>It works!</h1></body></html>
[root@k8s-master manifest]# kubectl get svc -n dev
NAME   TYPE       CLUSTER-IP      EXTERNAL-IP   PORT(S)          AGE
web1   NodePort   10.104.120.92   <none>        8888:30514/TCP   11m
[root@k8s-master manifest]# 
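  • Because web1 is a NodePort Service, it is also reachable from outside the cluster on port 30514 of any node (a sketch; <node-ip> is a placeholder for a node's IP address):

curl http://<node-ip>:30514/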



Reposted from: https://blog.csdn.net/mushuangpanny/article/details/126944780
Copyright belongs to the original author 慕霜ヾ. If there is any infringement, please contact us for removal.
