1. Check cluster status

[root@node-3 ~]# kubectl get nodes
NAME     STATUS   ROLES                  AGE   VERSION
node-1   Ready    <none>                 14h   v1.21.3
node-3   Ready    control-plane,master   14h   v1.21.3

Check the status of the control-plane (master) components:


[root@node-3 ~]# kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok
controller-manager   Healthy   ok
etcd-0               Healthy   {"health":"true"}
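
The warning above notes that the v1 ComponentStatus API is deprecated, so here are two alternative health checks; both are standard kubectl usage on this version (v1.21):

# Query the API server's aggregated readiness endpoint
kubectl get --raw='/readyz?verbose'

# Or check the control-plane pods directly
kubectl get pods -n kube-system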

2. List running pods; restrict the output to a namespace with -n <namespace>

# Watch all running pods across namespaces, refreshing the output continuously
watch kubectl get pods -A -o wide 

[root@node-3 ~]# kubectl get pods -A
NAMESPACE              NAME                                         READY   STATUS              RESTARTS   AGE
kube-flannel           kube-flannel-ds-jtgdx                        0/1     CrashLoopBackOff    177        14h
kube-flannel           kube-flannel-ds-ml786                        0/1     CrashLoopBackOff    179        14h
kube-system            coredns-59d64cd4d4-dgh95                     0/1     ContainerCreating   0          15h
kube-system            coredns-59d64cd4d4-qr5b6                     0/1     ContainerCreating   0          15h
kube-system            etcd-node-3                                  1/1     Running             0          15h
kube-system            kube-apiserver-node-3                        1/1     Running             0          15h
kube-system            kube-controller-manager-node-3               1/1     Running             1          14h
kube-system            kube-proxy-pcdjx                             1/1     Running             0          15h
kube-system            kube-proxy-zsbjd                             1/1     Running             0          14h
kube-system            kube-scheduler-node-3                        1/1     Running             2          14h
kubernetes-dashboard   dashboard-metrics-scraper-7c857855d9-748df   1/1     Running             0          12h
kubernetes-dashboard   kubernetes-dashboard-bcf9d8968-vw7d4         0/1     CrashLoopBackOff    6          12m
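
A few useful variations of the listing command; all flags below are standard kubectl options:

# Pods in a single namespace
kubectl get pods -n kube-system

# Include the node and pod IP columns
kubectl get pods -A -o wide

# Only pods whose phase is not Running (note: CrashLoopBackOff pods may still report phase Running)
kubectl get pods -A --field-selector=status.phase!=Running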

3. Inspect a single pod. The Events section at the end lists the pod's recent events; when a pod is abnormal, start troubleshooting from those events.

[root@node-3 ~]#  kubectl describe pod -n kubernetes-dashboard   kubernetes-dashboard-bcf9d8968-vw7d4
Name:         kubernetes-dashboard-bcf9d8968-vw7d4
Namespace:    kubernetes-dashboard
Priority:     0
Node:         node-1/192.168.254.130
Start Time:   Wed, 20 Mar 2024 12:36:25 -0400
Labels:       k8s-app=kubernetes-dashboard
              pod-template-hash=bcf9d8968
Annotations:  seccomp.security.alpha.kubernetes.io/pod: runtime/default
Status:       Running
IP:           10.244.0.7
IPs:
  IP:           10.244.0.7
Controlled By:  ReplicaSet/kubernetes-dashboard-bcf9d8968
Containers:
  kubernetes-dashboard:
    Container ID:  docker://e4c192a494ee62d00f7b9a02467781e1b50cdff6fac1ef0b2cf36c56842c4699
    Image:         kubernetesui/dashboard:v2.6.0
    Image ID:      docker-pullable://kubernetesui/dashboard@sha256:4af9580485920635d888efe1eddbd67e12f9d5d84dba87100e93feb4e46636b3
    Port:          8443/TCP
    Host Port:     0/TCP
    Args:
      --auto-generate-certificates
      --namespace=kubernetes-dashboard
    State:          Running
      Started:      Wed, 20 Mar 2024 12:51:33 -0400
    Last State:     Terminated
      Reason:       Error
      Exit Code:    2
      Started:      Wed, 20 Mar 2024 12:45:53 -0400
      Finished:     Wed, 20 Mar 2024 12:46:23 -0400
    Ready:          True
    Restart Count:  7
    Liveness:       http-get https://:8443/ delay=30s timeout=30s period=10s #success=1 #failure=3
    Environment:    <none>
    Mounts:
      /certs from kubernetes-dashboard-certs (rw)
      /tmp from tmp-volume (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-kzpfd (ro)
Conditions:
  Type              Status
  Initialized       True
  Ready             True
  ContainersReady   True
  PodScheduled      True
Volumes:
  kubernetes-dashboard-certs:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  kubernetes-dashboard-certs
    Optional:    false
  tmp-volume:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:
    SizeLimit:  <unset>
  kube-api-access-kzpfd:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
QoS Class:                   BestEffort
Node-Selectors:              kubernetes.io/os=linux
Tolerations:                 node-role.kubernetes.io/master:NoSchedule
                             node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type     Reason     Age                   From               Message
  ----     ------     ----                  ----               -------
  Normal   Scheduled  15m                   default-scheduler  Successfully assigned kubernetes-dashboard/kubernetes-dashboard-bcf9d8968-vw7d4 to node-1
  Normal   Pulled     15m                   kubelet            Successfully pulled image "kubernetesui/dashboard:v2.6.0" in 4.554979473s
  Normal   Pulled     14m                   kubelet            Successfully pulled image "kubernetesui/dashboard:v2.6.0" in 4.754875076s
  Warning  Unhealthy  14m                   kubelet            Liveness probe failed: Get "https://10.244.0.7:8443/": dial tcp 10.244.0.7:8443: connect: connection refused
  Normal   Pulled     14m                   kubelet            Successfully pulled image "kubernetesui/dashboard:v2.6.0" in 4.36192404s
  Normal   Pulling    13m (x4 over 15m)     kubelet            Pulling image "kubernetesui/dashboard:v2.6.0"
  Normal   Pulled     13m                   kubelet            Successfully pulled image "kubernetesui/dashboard:v2.6.0" in 4.331921605s
  Normal   Created    13m (x4 over 15m)     kubelet            Created container kubernetes-dashboard
  Normal   Started    13m (x4 over 15m)     kubelet            Started container kubernetes-dashboard
  Warning  BackOff    5m34s (x31 over 14m)  kubelet            Back-off restarting failed container
  Normal   Pulled     36s                   kubelet            Successfully pulled image "kubernetesui/dashboard:v2.6.0" in 4.65477593s
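
When the describe output is long, the events can also be listed on their own; a minimal example for the namespace used above:

# All events in the namespace, oldest first
kubectl get events -n kubernetes-dashboard --sort-by='.metadata.creationTimestamp'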

4. View pod logs


[root@node-3 ~]# kubectl logs -n  kubernetes-dashboard  kubernetes-dashboard-bcf9d8968-vw7d4
2024/03/20 16:52:09 Starting overwatch
2024/03/20 16:52:09 Using namespace: kubernetes-dashboard
2024/03/20 16:52:09 Using in-cluster config to connect to apiserver
2024/03/20 16:52:09 Using secret token for csrf signing
2024/03/20 16:52:09 Initializing csrf token from kubernetes-dashboard-csrf secret
panic: Get "https://10.96.0.1:443/api/v1/namespaces/kubernetes-dashboard/secrets/kubernetes-dashboard-csrf": dial tcp 10.96.0.1:443: i/o timeout

goroutine 1 [running]:
github.com/kubernetes/dashboard/src/app/backend/client/csrf.(*csrfTokenManager).init(0xc00061faf0)
        /home/runner/work/dashboard/dashboard/src/app/backend/client/csrf/manager.go:41 +0x30e
github.com/kubernetes/dashboard/src/app/backend/client/csrf.NewCsrfTokenManager(...)
        /home/runner/work/dashboard/dashboard/src/app/backend/client/csrf/manager.go:66
github.com/kubernetes/dashboard/src/app/backend/client.(*clientManager).initCSRFKey(0xc0004cf300)
        /home/runner/work/dashboard/dashboard/src/app/backend/client/manager.go:527 +0x94
github.com/kubernetes/dashboard/src/app/backend/client.(*clientManager).init(0x19f098c)
        /home/runner/work/dashboard/dashboard/src/app/backend/client/manager.go:495 +0x32
github.com/kubernetes/dashboard/src/app/backend/client.NewClientManager(...)
        /home/runner/work/dashboard/dashboard/src/app/backend/client/manager.go:594
main.main()
        /home/runner/work/dashboard/dashboard/src/app/backend/dashboard.go:95 +0x1cf
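
Commonly used variations of kubectl logs, shown against the same pod as an illustration (all flags are standard):

# Follow the log stream, starting from the last 100 lines
kubectl logs -n kubernetes-dashboard kubernetes-dashboard-bcf9d8968-vw7d4 -f --tail=100

# Logs of the previous (crashed) container instance, useful for CrashLoopBackOff pods
kubectl logs -n kubernetes-dashboard kubernetes-dashboard-bcf9d8968-vw7d4 --previous

# For multi-container pods, select the container with -c
kubectl logs -n <namespace> <pod> -c <container>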

5. Delete a pod


[root@node-3 ~]# kubectl delete pod -n kubernetes-dashboard  kubernetes-dashboard-bcf9d8968-g68wk
pod "kubernetes-dashboard-bcf9d8968-g68wk" deleted

6. Exec into a pod to inspect application logs and other files inside the container

kubectl exec -it <PodName> --namespace=<NAMESPACE> -- /bin/bash

# Enter a specific container of the pod
kubectl exec -it <PodName> -c <ContainerName> --namespace=<NAMESPACE> -- /bin/bash
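
A concrete example against the dashboard pod above; the dashboard image is minimal, so /bin/sh is assumed here rather than bash:

kubectl exec -it -n kubernetes-dashboard kubernetes-dashboard-bcf9d8968-vw7d4 -- /bin/sh

# Run a single command without opening an interactive shell
kubectl exec -n <namespace> <pod> -- cat /etc/resolv.conf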

7. kubectl commands for CRUD-style operations on pods and other resources (one example per category follows the list)

1 Create
1)create:[Create a resource by filename or stdin]

2)run:[ Run a particular image on the cluster]

3)apply:[Apply a configuration to a resource by filename or stdin]

4)proxy:[Run a proxy to the Kubernetes API server ]

2 Delete
1)delete:[Delete resources ]

3 Update
1)scale:[Set a new size for a deployment, replica set, or replication controller]

2)exec:[Execute a command in a container]

3)attach:[Attach to a running container]

4)patch:[Update field(s) of a resource using a strategic merge patch]

5)edit:[Edit a resource on the server]

6) label:[Update the labels on a resource]

7)annotate:[Update the annotations on a resource]

8)replace:[Replace a resource by filename or stdin]

9)config:[config modifies kubeconfig files]

4 Read
1)get:[Display one or many resources]

2)describe:[Show details of a specific resource or group of resources]

3)logs:[Print the logs for a container in a pod]

4)cluster-info:[Display cluster info]

5) version:[Print the client and server version information]

6)api-versions:[Print the supported API versions]
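
One illustrative command per category, using a hypothetical deployment named my-app in namespace demo:

# Create
kubectl apply -f my-app.yaml

# Update
kubectl scale deployment my-app -n demo --replicas=3
kubectl label pod <PodName> -n demo env=test

# Read
kubectl get deployment my-app -n demo -o yaml
kubectl describe deployment my-app -n demo

# Delete
kubectl delete -f my-app.yaml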


Other operations, such as cordoning and uncordoning nodes (isolation and recovery), adjusting resources, upgrading images, and scaling replica counts, will be added in a later update.