What we want is for a single container to run only a single process.
However, sometimes multiple processes need to work together, so we need a higher-level construct that groups containers together --- the pod.
Let's look at the most basic pod.
The image for this pod is one I built from the centos:7 image. It is very simple; the Dockerfile is as follows:
```dockerfile
FROM 192.168.80.84:5000/centos:7
ENTRYPOINT ["sleep"]
CMD ["999"]
# A container must have a long-running foreground process to stay up.
# In other words, if you remove the sleep command from this Dockerfile, a bare centos container cannot keep running.
```
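The Dockerfile can be built and pushed to the private registry in the usual way. The commands below are only a sketch: the tag simply mirrors the image reference used later in this post, and they assume the build is run from the directory containing the Dockerfile and that the registry at 192.168.80.84:5000 is reachable from the nodes (configured as an insecure registry if it has no TLS).

```bash
# Build the image locally from the Dockerfile above
docker build -t 192.168.80.84:5000/centos_cmd:v1 .
# Push it to the private registry so the cluster nodes can pull it
docker push 192.168.80.84:5000/centos_cmd:v1
```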
Now let's run this image as a pod:
```bash
kubectl run my-cmd --image=192.168.80.84:5000/centos_cmd:v1
```
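To confirm the pod came up, we can check its status first; a quick sketch (no output shown here):

```bash
# List the pod and check that STATUS is Running
kubectl get pod my-cmd
# More detail, including events, if the pod fails to start
kubectl describe pod my-cmd
```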
Use `-o yaml` to look at the pod's full YAML:
```yaml
[root@k8s-master01 centos]# kubectl get pod my-cmd -o yaml
apiVersion: v1                # the API version
kind: Pod                     # the resource type, here a Pod
metadata:                     # metadata of this instance
  creationTimestamp: "2021-01-13T02:36:02Z"
  labels:                     # labels added to the instance automatically
    run: my-cmd
  managedFields:              # a set of fields kept for internal bookkeeping
  - apiVersion: v1
    fieldsType: FieldsV1
    fieldsV1:
      f:metadata:
        f:labels:
          .: {}
          f:run: {}
      f:spec:
        f:containers:
          k:{"name":"my-cmd"}:
            .: {}
            f:image: {}
            f:imagePullPolicy: {}
            f:name: {}
            f:resources: {}
            f:terminationMessagePath: {}
            f:terminationMessagePolicy: {}
        f:dnsPolicy: {}
        f:enableServiceLinks: {}
        f:restartPolicy: {}
        f:schedulerName: {}
        f:securityContext: {}
        f:terminationGracePeriodSeconds: {}
    manager: kubectl-run      # records how the pod was started
    operation: Update
    time: "2021-01-13T02:36:02Z"
  - apiVersion: v1
    fieldsType: FieldsV1
    fieldsV1:
      f:status:
        f:conditions:
          k:{"type":"ContainersReady"}:
            .: {}
            f:lastProbeTime: {}
            f:lastTransitionTime: {}
            f:status: {}
            f:type: {}
          k:{"type":"Initialized"}:
            .: {}
            f:lastProbeTime: {}
            f:lastTransitionTime: {}
            f:status: {}
            f:type: {}
          k:{"type":"Ready"}:
            .: {}
            f:lastProbeTime: {}
            f:lastTransitionTime: {}
            f:status: {}
            f:type: {}
        f:containerStatuses: {}
        f:hostIP: {}
        f:phase: {}
        f:podIP: {}
        f:podIPs:
          .: {}
          k:{"ip":"10.40.0.4"}:
            .: {}
            f:ip: {}
        f:startTime: {}
    manager: kubelet
    operation: Update
    time: "2021-01-13T02:36:11Z"
  name: my-cmd                # pod name
  namespace: default          # the namespace the pod lives in
  resourceVersion: "418695"   # version number used for optimistic concurrency control; see the later notes on k8s core principles for details
  uid: 12e3b858-f79f-4378-8ea0-1103ea120c34   # uid of this pod instance
spec:                         # the actual specification of the pod
  containers:                 # the containers in the pod; here there is only one
  - image: 192.168.80.84:5000/centos_cmd:v1   # image address
    imagePullPolicy: IfNotPresent   # pull policy, i.e. whether to pull the image when the pod is created; IfNotPresent only pulls when the image is missing locally
    name: my-cmd              # container name, i.e. the name given to the container created from the image
    resources: {}
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: File
    volumeMounts:             # mounted volumes
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount   # mount path
      name: default-token-s9dfj   # volume name; this is the secret volume every pod mounts, used for authentication
      readOnly: true          # read-only
  dnsPolicy: ClusterFirst
  enableServiceLinks: true
  nodeName: k8s-node02        # the node the pod was assigned to, chosen by the scheduler
  preemptionPolicy: PreemptLowerPriority
  priority: 0
  restartPolicy: Always       # whether the container starts again when the pod restarts, i.e. the container starts together with the pod
  schedulerName: default-scheduler   # which scheduler to use; k8s can run multiple scheduler instances, and the default scheduler is used if none is specified
  securityContext: {}
  serviceAccount: default     # service account
  serviceAccountName: default
  terminationGracePeriodSeconds: 30
  tolerations:
  - effect: NoExecute
    key: node.kubernetes.io/not-ready
    operator: Exists
    tolerationSeconds: 300
  - effect: NoExecute
    key: node.kubernetes.io/unreachable
    operator: Exists
    tolerationSeconds: 300
  volumes:                    # volumes
  - name: default-token-s9dfj
    secret:
      defaultMode: 420
      secretName: default-token-s9dfj
status:                       # runtime status of the pod
  conditions:
  - lastProbeTime: null
    lastTransitionTime: "2021-01-13T02:36:02Z"
    status: "True"
    type: Initialized
  - lastProbeTime: null
    lastTransitionTime: "2021-01-13T02:36:10Z"
    status: "True"
    type: Ready
  - lastProbeTime: null
    lastTransitionTime: "2021-01-13T02:36:10Z"
    status: "True"
    type: ContainersReady
  - lastProbeTime: null
    lastTransitionTime: "2021-01-13T02:36:02Z"
    status: "True"
    type: PodScheduled
  containerStatuses:
  - containerID: docker://965a9b86cc334705d3fbaac15d28ef6b0a20de8f00915c1ffdf4c025b1c29206
    image: 192.168.80.84:5000/centos_cmd:v1
    imageID: docker-pullable://192.168.80.84:5000/centos_cmd@sha256:948479967390e7a98979d4b98beec6dfa3fc92c6ce832ece882e8b1843e0779f
    lastState: {}
    name: my-cmd
    ready: true
    restartCount: 0
    started: true
    state:
      running:
        startedAt: "2021-01-13T02:36:09Z"
  hostIP: 192.168.80.83
  phase: Running
  podIP: 10.40.0.4
  podIPs:
  - ip: 10.40.0.4
  qosClass: BestEffort
  startTime: "2021-01-13T02:36:02Z"
```
There is quite a lot in there, yet when we create a pod from a YAML file we do not need to write nearly this much, because the API server fills in the remaining default values for us.
Create a pod manually from a YAML file:
```yaml
apiVersion: v1
kind: Pod
metadata:
  name: my-cmd
spec:
  containers:
  - image: 192.168.80.84:5000/centos_cmd:v1
    name: centos-cmd
# Note the spec.containers.name field: it follows the same naming rules as the pod name, so "my_cmd" would be rejected.
# Also note that the "P" in "Pod" must be capitalized.
```
Let's look at the YAML of a pod created this way: after running `kubectl create -f my-cmd.yaml`, we can inspect the pod with `kubectl get pod my-cmd -o yaml`:
```yaml
[root@k8s-master01 centos]# kubectl get pod my-cmd -o yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: "2021-01-13T03:32:42Z"
  managedFields:
  - apiVersion: v1
    fieldsType: FieldsV1
    fieldsV1:
      f:spec:
        f:containers:
          k:{"name":"my-cmd"}:
            .: {}
            f:image: {}
            f:imagePullPolicy: {}
            f:name: {}
            f:resources: {}
            f:terminationMessagePath: {}
            f:terminationMessagePolicy: {}
        f:dnsPolicy: {}
        f:enableServiceLinks: {}
        f:restartPolicy: {}
        f:schedulerName: {}
        f:securityContext: {}
        f:terminationGracePeriodSeconds: {}
    manager: kubectl-create   # the manager is different this time, because we created the pod with kubectl create
    operation: Update
    time: "2021-01-13T03:32:42Z"
  - apiVersion: v1
    fieldsType: FieldsV1
    fieldsV1:
      f:status:
        f:conditions:
          k:{"type":"ContainersReady"}:
            .: {}
            f:lastProbeTime: {}
            f:lastTransitionTime: {}
            f:status: {}
            f:type: {}
          k:{"type":"Initialized"}:
            .: {}
            f:lastProbeTime: {}
            f:lastTransitionTime: {}
            f:status: {}
            f:type: {}
          k:{"type":"Ready"}:
            .: {}
            f:lastProbeTime: {}
            f:lastTransitionTime: {}
            f:status: {}
            f:type: {}
        f:containerStatuses: {}
        f:hostIP: {}
        f:phase: {}
        f:podIP: {}
        f:podIPs:
          .: {}
          k:{"ip":"10.40.0.4"}:
            .: {}
            f:ip: {}
        f:startTime: {}
    manager: kubelet
    operation: Update
    time: "2021-01-13T04:39:23Z"
  name: my-cmd
  namespace: default
  resourceVersion: "429073"
  uid: 15d9f4f2-1fc8-4595-a00e-f96f52038ef9
spec:
  containers:
  - image: 192.168.80.84:5000/centos_cmd:v1
    imagePullPolicy: IfNotPresent
    name: my-cmd
    resources: {}
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: File
    volumeMounts:
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: default-token-s9dfj
      readOnly: true
  dnsPolicy: ClusterFirst
  enableServiceLinks: true
  nodeName: k8s-node02
  preemptionPolicy: PreemptLowerPriority
  priority: 0
  restartPolicy: Always
  schedulerName: default-scheduler
  securityContext: {}
  serviceAccount: default
  serviceAccountName: default
  terminationGracePeriodSeconds: 30
  tolerations:
  - effect: NoExecute
    key: node.kubernetes.io/not-ready
    operator: Exists
    tolerationSeconds: 300
  - effect: NoExecute
    key: node.kubernetes.io/unreachable
    operator: Exists
    tolerationSeconds: 300
  volumes:
  - name: default-token-s9dfj
    secret:
      defaultMode: 420
      secretName: default-token-s9dfj
status:
  conditions:
  - lastProbeTime: null
    lastTransitionTime: "2021-01-13T03:32:42Z"
    status: "True"
    type: Initialized
  - lastProbeTime: null
    lastTransitionTime: "2021-01-13T04:39:23Z"
    status: "True"
    type: Ready
  - lastProbeTime: null
    lastTransitionTime: "2021-01-13T04:39:23Z"
    status: "True"
    type: ContainersReady
  - lastProbeTime: null
    lastTransitionTime: "2021-01-13T03:32:42Z"
    status: "True"
    type: PodScheduled
  containerStatuses:
  - containerID: docker://d7fee9118b0d5d2ccaa346d4cd97130a9f744e9bf6ee1b1ae32dfa0e583c2b41
    image: 192.168.80.84:5000/centos_cmd:v1
    imageID: docker-pullable://192.168.80.84:5000/centos_cmd@sha256:948479967390e7a98979d4b98beec6dfa3fc92c6ce832ece882e8b1843e0779f
    lastState:
      terminated:
        containerID: docker://0e6a82fe9e50924b7254fe06f131e43f3f66d8007de5524e31af38c6abd05d51
        exitCode: 0
        finishedAt: "2021-01-13T04:39:21Z"
        reason: Completed
        startedAt: "2021-01-13T04:22:42Z"
    name: my-cmd
    ready: true
    restartCount: 4
    started: true
    state:
      running:
        startedAt: "2021-01-13T04:39:22Z"
  hostIP: 192.168.80.83
  phase: Running
  podIP: 10.40.0.4
  podIPs:
  - ip: 10.40.0.4
  qosClass: BestEffort
  startTime: "2021-01-13T03:32:42Z"
# If you are not sure what a field means, use "kubectl explain" to look it up.
```
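As that last comment suggests, `kubectl explain` documents any field in a resource's schema. For example:

```bash
# Top-level documentation of the Pod resource
kubectl explain pod
# Drill down into a specific field, e.g. the image pull policy
kubectl explain pod.spec.containers.imagePullPolicy
```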
Forwarding a local network port to a port inside the pod
First, let's use an nginx image:
```bash
# I have already pushed the nginx:alpine image to the local registry.
```

About the alpine variant: earlier alpine-based images carried the description below, but it has since been removed from most of them, so I am recording it here:

```
postgres:<version>-alpine

This image is based on the popular Alpine Linux project, available in the alpine official image. Alpine Linux is much smaller than most distribution base images (~5MB), and thus leads to much slimmer images in general.

This variant is highly recommended when final image size being as small as possible is desired. The main caveat to note is that it does use musl libc instead of glibc and friends, so certain software might run into issues depending on the depth of their libc requirements. However, most software doesn't have an issue with this, so this variant is usually a very safe choice. See this Hacker News comment thread for more discussion of the issues that might arise and some pro/con comparisons of using Alpine-based images.

To minimize image size, it's uncommon for additional related tools (such as git or bash) to be included in Alpine-based images. Using this image as a base, add the things you need in your own Dockerfile (see the alpine image description for examples of how to install packages if you are unfamiliar).
```
```bash
kubectl port-forward mynginx 8000:8080
```
This sets up port forwarding, which lets us talk to one specific pod directly without going through a service.
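A quick way to try the forward, sketched under a couple of assumptions: that a pod named mynginx is created from the nginx:alpine image pushed earlier (the registry path below is illustrative), and that the forwarded target port matches the port nginx actually listens on inside the pod (80 for the stock image, rather than 8080).

```bash
# Create the pod if it does not exist yet (illustrative image path in the local registry)
kubectl run mynginx --image=192.168.80.84:5000/nginx:alpine

# Forward local port 8000 to the container port, then hit it from the same shell
kubectl port-forward mynginx 8000:80 &
curl http://localhost:8000
```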
3. Stopping and removing a Pod

```bash
kubectl delete po <podName>
```

***

### Organizing pods with labels

> Labels are likewise one of the most important concepts among k8s resources; many features are implemented on top of label selectors.

1. Specifying labels in a YAML file

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: mynginx
  labels:        # a resource can carry multiple labels
    app: nginx
    rel: alpine
spec:
......
```
2. Showing labels when listing resources
Labels are not shown by default when listing resources. With `-o wide` we can see the node a pod runs on and its IP, and with the `--show-labels` flag we can see the resource's labels.
```bash
[root@k8s-master01 centos]# kubectl get po --show-labels
NAME                              READY   STATUS    RESTARTS   AGE     LABELS
getname-deploy-68bd4cc6b4-j7gxz   1/1     Running   4          6d21h   app=getname,pod-template-hash=68bd4cc6b4
getname-deploy-68bd4cc6b4-pt2cb   1/1     Running   4          6d21h   app=getname,pod-template-hash=68bd4cc6b4
getname-deploy-68bd4cc6b4-srqfn   1/1     Running   4          6d21h   app=getname,pod-template-hash=68bd4cc6b4
my-cmd-labels                     1/1     Running   0          11s     app=nginx,rel=alpine   # the pod I just labeled
# You may notice three other labeled pods above; they were not created this way.
# They were in fact created by a ReplicaSet that I created earlier,
# which shows how much k8s relies on labels when managing resources.
```
3. Showing only specific labels
We may only care about certain labels; with `-L <labelKey>` we can display just the specified label as its own column.
```bash
[root@k8s-master01 centos]# kubectl get po -L app
NAME                              READY   STATUS    RESTARTS   AGE     APP
getname-deploy-68bd4cc6b4-j7gxz   1/1     Running   4          6d21h   getname
getname-deploy-68bd4cc6b4-pt2cb   1/1     Running   4          6d21h   getname
getname-deploy-68bd4cc6b4-srqfn   1/1     Running   4          6d21h   getname
my-cmd-labels                     1/1     Running   0          6m46s   nginx
```
4. Modifying existing labels
```bash
# Use kubectl label <resourceType> <instanceName> <labelKey>=<labelValue>,<labelKey>=<labelValue> to add new labels
[root@k8s-master01 centos]# kubectl label po my-cmd-labels node=node1
pod/my-cmd-labels labeled
[root@k8s-master01 centos]# kubectl get po --show-labels
NAME            READY   STATUS    RESTARTS   AGE   LABELS
my-cmd-labels   1/1     Running   0          11m   app=nginx,node=node1,rel=alpine
# The new label has been added.
# To modify an existing label, add the "--overwrite" flag
[root@k8s-master01 centos]# kubectl label po my-cmd-labels rel=stable --overwrite
pod/my-cmd-labels labeled
[root@k8s-master01 centos]# kubectl get po --show-labels
NAME            READY   STATUS    RESTARTS   AGE    LABELS
fortune-env     2/2     Running   8          7d4h   <none>
my-cmd-labels   1/1     Running   0          13m    app=nginx,node=node1,rel=stable
# The rel label has been overwritten.
```
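For completeness, `kubectl label` can also remove a label by appending a minus sign to the key. This is only a sketch and is not actually run against the example pod here, since the node label is used again in the selector examples below.

```bash
# Remove the node label from the pod (note the trailing "-")
kubectl label po my-cmd-labels node-
# Verify that it is gone
kubectl get po my-cmd-labels --show-labels
```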
5. Listing the desired pods with label selectors
Can we display only the pods that carry a specific label?
```bash
# Use the "-l" flag to apply a label selector
[root@k8s-master01 centos]# kubectl get po -l rel=stable --show-labels
NAME            READY   STATUS    RESTARTS   AGE   LABELS
my-cmd-labels   1/1     Running   1          20m   app=nginx,node=node1,rel=stable
```

Label selectors can of course do more than match one exact key=value pair.

```bash
# We can give only a label key, which lists every resource that carries that label
[root@k8s-master01 centos]# kubectl get po -l app --show-labels
NAME                              READY   STATUS    RESTARTS   AGE     LABELS
getname-deploy-68bd4cc6b4-j7gxz   1/1     Running   4          6d21h   app=getname,pod-template-hash=68bd4cc6b4
getname-deploy-68bd4cc6b4-pt2cb   1/1     Running   4          6d21h   app=getname,pod-template-hash=68bd4cc6b4
getname-deploy-68bd4cc6b4-srqfn   1/1     Running   4          6d21h   app=getname,pod-template-hash=68bd4cc6b4
my-cmd-labels                     1/1     Running   1          24m     app=nginx,node=node1,rel=stable

# We can use != or ! to filter out resources carrying a given label or key=value pair
# Note: when the selector contains such symbols, quote it, otherwise the shell cannot make sense of what you want
[root@k8s-master01 centos]# kubectl get po -l '!node' --show-labels
NAME                              READY   STATUS    RESTARTS   AGE     LABELS
fortune-env                       2/2     Running   8          7d4h    <none>
getname-deploy-68bd4cc6b4-j7gxz   1/1     Running   4          6d21h   app=getname,pod-template-hash=68bd4cc6b4
getname-deploy-68bd4cc6b4-pt2cb   1/1     Running   4          6d21h   app=getname,pod-template-hash=68bd4cc6b4
getname-deploy-68bd4cc6b4-srqfn   1/1     Running   4          6d21h   app=getname,pod-template-hash=68bd4cc6b4
[root@k8s-master01 centos]# kubectl get po -l "app!=getname" --show-labels
NAME            READY   STATUS    RESTARTS   AGE   LABELS
my-cmd-labels   1/1     Running   1          27m   app=nginx,node=node1,rel=stable

# We can also use in () and notin () for more complex matching on label values
[root@k8s-master01 centos]# kubectl get po -l "app in (nginx)" --show-labels
NAME            READY   STATUS    RESTARTS   AGE   LABELS
my-cmd-labels   1/1     Running   1          30m   app=nginx,node=node1,rel=stable
[root@k8s-master01 centos]# kubectl get po -l "app notin (getname)" --show-labels
NAME            READY   STATUS    RESTARTS   AGE   LABELS
my-cmd-labels   1/1     Running   1          31m   app=nginx,node=node1,rel=stable

# To combine multiple conditions, separate them with ","
[root@k8s-master01 centos]# kubectl get po -l app=nginx,node=node1 --show-labels
NAME            READY   STATUS    RESTARTS   AGE   LABELS
my-cmd-labels   1/1     Running   1          32m   app=nginx,node=node1,rel=stable
```
The previous section showed that resources can be labeled. Nodes are resources in k8s too, so by labeling nodes we can make pods run on specific nodes.
```bash
# First label the nodes
[root@k8s-master01 centos]# kubectl label node k8s-node01 node=node1
node/k8s-node01 labeled
[root@k8s-master01 centos]# kubectl label node k8s-node02 node=node2
node/k8s-node02 labeled
# Check the result
[root@k8s-master01 centos]# kubectl get node -L node
NAME           STATUS   ROLES                  AGE   VERSION   NODE
k8s-master01   Ready    control-plane,master   18d   v1.20.1
k8s-node01     Ready    <none>                 18d   v1.20.1   node1
k8s-node02     Ready    <none>                 18d   v1.20.1   node2
# Both worker nodes are now labeled.
```
Next we edit the YAML file to assign the pods to specific nodes:
```yaml
apiVersion: v1
kind: Pod
metadata:
  name: my-cmd-node1
spec:
  nodeSelector:      # set a node selector here
    node: "node1"    # the pod will only be scheduled onto nodes labeled node=node1
  containers:
  - name: my-cmd-node1
    image: 192.168.80.84:5000/centos_cmd:v1
---
# "---" lets a single YAML file create several resources at once
apiVersion: v1
kind: Pod
metadata:
  name: my-cmd-node2
spec:
  nodeSelector:
    node: "node2"
  containers:
  - name: my-cmd-node2
    image: 192.168.80.84:5000/centos_cmd:v1
```
Let's look at the result:
```bash
[root@k8s-master01 centos]# kubectl get po -o wide
NAME           READY   STATUS    RESTARTS   AGE   IP          NODE         NOMINATED NODE   READINESS GATES
my-cmd-node1   1/1     Running   0          12s   10.32.0.8   k8s-node01   <none>           <none>
my-cmd-node2   1/1     Running   0          12s   10.40.0.6   k8s-node02   <none>           <none>
# Each pod was indeed assigned to the expected node.
```
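Custom labels are not the only option: every node already carries a set of well-known labels such as kubernetes.io/hostname, so a pod can be pinned to a node without labeling anything yourself. A sketch (the pod name below is made up for this example):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: my-cmd-hostname      # hypothetical name for this example
spec:
  nodeSelector:
    kubernetes.io/hostname: k8s-node01   # built-in label holding the node's hostname
  containers:
  - name: my-cmd-hostname
    image: 192.168.80.84:5000/centos_cmd:v1
```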
A namespace is a scope that sits at a higher level, above individual resources.
This lets us reuse the same resource name multiple times, and also keeps system-level resources isolated from user-level ones.
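For instance, two pods with the same name can coexist as long as they live in different namespaces; a quick sketch (the namespace names here are hypothetical):

```bash
# The same pod name in two different namespaces does not conflict
kubectl create namespace team-a
kubectl create namespace team-b
kubectl run my-cmd --image=192.168.80.84:5000/centos_cmd:v1 -n team-a
kubectl run my-cmd --image=192.168.80.84:5000/centos_cmd:v1 -n team-b
```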
Viewing namespaces
A namespace is itself a resource, so we can view them with get as well.
```bash
# "ns" is the short form of namespace
[root@k8s-master01 centos]# kubectl get ns
NAME              STATUS   AGE
default           Active   18d
kube-node-lease   Active   18d
kube-public       Active   18d
kube-system       Active   18d
# Use "-n <namespaceName>" to target a specific namespace
[root@k8s-master01 centos]# kubectl get po -n kube-system
NAME                                    READY   STATUS    RESTARTS   AGE
coredns-7f89b7bc75-9z9g8                1/1     Running   13         18d
coredns-7f89b7bc75-dmhjl                1/1     Running   13         18d
etcd-k8s-master01                       1/1     Running   26         18d
kube-apiserver-k8s-master01             1/1     Running   26         18d
kube-controller-manager-k8s-master01    1/1     Running   30         18d
kube-proxy-s2rmh                        1/1     Running   13         18d
kube-proxy-wq2kz                        1/1     Running   13         18d
kube-proxy-wvcgk                        1/1     Running   24         18d
kube-scheduler-k8s-master01             1/1     Running   26         18d
weave-net-9lhgf                         2/2     Running   37         18d
weave-net-dhv26                         2/2     Running   36         18d
weave-net-q95gm                         2/2     Running   65         18d
# This also illustrates one of the k8s design principles:
# only the kubelet on each node runs as an actual process; everything else runs as pods.
# Here you can see etcd, the apiserver, the proxy, the scheduler, the controller-manager, and so on.
```
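To look across every namespace at once rather than one at a time, kubectl also accepts `--all-namespaces` (shortened to `-A` in recent versions):

```bash
# List pods in all namespaces
kubectl get po --all-namespaces
# Equivalent short form
kubectl get po -A
```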
Creating a namespace
You can create a namespace either with the command `kubectl create namespace <namespaceName>`, or by writing a YAML file:
```yaml
apiVersion: v1
kind: Namespace
metadata:
  name: custom-namespace
# then create it with kubectl create -f
```
Creating objects in a specific namespace
By default, resources are created in the default namespace; by adding `-n <namespaceName>` we can create the object in the namespace of our choice.
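A short sketch, reusing the my-cmd.yaml manifest and the custom-namespace created above:

```bash
# Create the pod defined in my-cmd.yaml inside custom-namespace instead of default
kubectl create -f my-cmd.yaml -n custom-namespace
# The pod is only visible when that namespace is queried
kubectl get po -n custom-namespace
```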
Deleting pods with a label selector
```bash
# Again use "-l" to supply a label selector
kubectl delete pod -l "app=nginx"
```
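Because a selector can match more pods than intended, it can be worth listing the matches before deleting anything; a small sketch:

```bash
# Preview which pods the selector matches
kubectl get po -l "app=nginx"
# Then delete them
kubectl delete pod -l "app=nginx"
```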
Deleting an entire namespace
```bash
kubectl delete ns <namespaceName>
```
Deleting a namespace also deletes every resource inside it.
Deleting all pods while keeping the namespace
```bash
kubectl delete po --all -n <namespaceName>
```
Deleting all resources in a namespace while keeping the namespace itself
```bash
kubectl delete all --all -n <namespaceName>
```
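One caveat worth noting: the pseudo-resource `all` only covers a common subset of resource types (pods, services, deployments, replica sets, and so on), so objects such as Secrets or ConfigMaps survive this command and must be removed separately if needed. A sketch of cleaning up, assuming the custom-namespace used earlier:

```bash
# Remove everything covered by "all" in custom-namespace, but keep the namespace
kubectl delete all --all -n custom-namespace
# ConfigMaps and Secrets are not part of "all"; delete them explicitly if required
kubectl delete configmap,secret --all -n custom-namespace
```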