k8s ansible deployment guide

Part 1: Base system preparation
ubuntu 18.04 ----> root password: 123456
Main steps:
 
1. Rename the network interface to eth0:
# vim /etc/default/grub
GRUB_CMDLINE_LINUX="net.ifnames=0 biosdevname=0"
root@ubuntu:~# update-grub
root@ubuntu:~# reboot
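
After the reboot, confirm that the interface is now named eth0:
# ip link show eth0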
 
2. Change the system IP address:
# vim /etc/netplan/01-netcfg.yaml
network:
  version: 2
  renderer: networkd
  ethernets:
    eth0:
      dhcp4: no
      addresses: [172.16.99.121/24]
      gateway4: 172.16.99.254
      nameservers:
        addresses: [172.16.99.254]
 
3. Apply the IP configuration and test:
root@ubuntu:~# netplan apply
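
Verify the new address and that the gateway is reachable (quick check):
root@ubuntu:~# ip addr show eth0
root@ubuntu:~# ping -c 3 172.16.99.254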
 
4. Change the hostname:
root@k8s-m1:~#echo 'k8s-m1' >/etc/hostname
root@k8s-m1:~# cat /etc/rc.local
#!/bin/sh -e
#
# rc.local
#
# This script is executed at the end of each multiuser runlevel.
# Make sure that the script will "exit 0" on success or any other
# value on error.
#
# In order to enable or disable this script just change the execution
# bits.
#
# By default this script does nothing.
echo 'k8s-m1' >/etc/hostname
exit 0
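
On Ubuntu 18.04 the hostname can also be set persistently with systemd instead of the rc.local approach above (equivalent alternative):
root@ubuntu:~# hostnamectl set-hostname k8s-m1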
 
 
5. Install commonly used packages
apt-get update
apt-get purge ufw lxd lxd-client lxcfs lxc-common   # remove unneeded packages
apt-get install iproute2 ntpdate tcpdump telnet traceroute nfs-kernel-server nfs-common lrzsz tree openssl libssl-dev libpcre3 libpcre3-dev zlib1g-dev gcc openssh-server iotop unzip zip
 
 
6. Install Docker:
# apt-get update
# apt-get -y install apt-transport-https ca-certificates curl software-properties-common
# curl -fsSL http://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg | sudo apt-key add -
# add-apt-repository "deb [arch=amd64] http://mirrors.aliyun.com/docker-ce/linux/ubuntu $(lsb_release -cs) stable"
# apt-get -y update && apt-get -y install docker-ce
# docker info
 
7. Take a snapshot of the VM
 
 
 
8. Other configuration:
# grep "^[a-Z]" /etc/sysctl.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
vm.swappiness=0
net.ipv4.ip_forward = 1
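
These parameters take effect only after being applied; on most kernels the bridge-nf-call keys also require the br_netfilter module to be loaded (assumed here):
# modprobe br_netfilter
# sysctl -p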
 
Part 2: Server initialization and certificate creation:
 
Configure hostnames and the hosts file, and synchronize time across all servers (a distribution sketch follows the list below):
 
172.16.99.145 ansible-vm2 ansible2.dexter.com
172.16.99.144 ansible-vm1  ansible1.dexter.com
172.16.99.143 etcd-vm3    etcd3.dexter.com
172.16.99.142 etcd-vm2     etcd2.dexter.com
172.16.99.141 etcd-vm1     etcd1.dexter.com
172.16.99.128 harbor-vm2  harbor2.dexter.com
172.16.99.127 harbor-vm1  harbor1.dexter.com
172.16.99.126 haproxy-vm2 haproxy2.dexter.com
172.16.99.125 haproxy-vm1  haproxy1.dexter.com
172.16.99.124 k8s-n2      k8sn2.dexter.com
172.16.99.123 k8s-n1       k8sn1.dexter.com
172.16.99.122 k8s-m2       k8sm2.dexter.com
172.16.99.121 k8s-m1       k8sm1.dexter.com
VIP 172.16.99.148
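
A minimal sketch for appending these entries to /etc/hosts on each server (addresses and names taken from the list above):
cat >> /etc/hosts <<'EOF'
172.16.99.121 k8s-m1      k8sm1.dexter.com
172.16.99.122 k8s-m2      k8sm2.dexter.com
172.16.99.123 k8s-n1      k8sn1.dexter.com
172.16.99.124 k8s-n2      k8sn2.dexter.com
172.16.99.125 haproxy-vm1 haproxy1.dexter.com
172.16.99.126 haproxy-vm2 haproxy2.dexter.com
172.16.99.127 harbor-vm1  harbor1.dexter.com
172.16.99.128 harbor-vm2  harbor2.dexter.com
172.16.99.141 etcd-vm1    etcd1.dexter.com
172.16.99.142 etcd-vm2    etcd2.dexter.com
172.16.99.143 etcd-vm3    etcd3.dexter.com
172.16.99.144 ansible-vm1 ansible1.dexter.com
172.16.99.145 ansible-vm2 ansible2.dexter.com
EOF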
 
echo '*/10 * * * * root timedatectl set-timezone Asia/Shanghai && ntpdate time1.aliyun.com && hwclock -w >/dev/null 2>&1' >>/etc/crontab
 
 
Part 3: Install the keepalived and haproxy servers
Install keepalived and haproxy
root@haproxy-vm1:~# apt-get install keepalived haproxy -y
root@haproxy-vm1:~# find / -name "keepalived.*"
Configure keepalived
root@haproxy-vm1:~# cp /usr/share/doc/keepalived/samples/keepalived.conf.sample /etc/keepalived/keepalived.conf
root@haproxy-vm1:~# cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived
 
global_defs {
   notification_email {
     acassen
   }
   notification_email_from Alexandre.Cassen@firewall.loc
   smtp_server 192.168.200.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
}
 
vrrp_instance VI_1 {
    interface eth0
    virtual_router_id 50
    nopreempt
    priority 100
    advert_int 1
    virtual_ipaddress {
      172.16.99.148 dev eth0
    }
}
root@haproxy-vm1:~# systemctl restart keepalived
root@haproxy-vm1:~# systemctl enable keepalived
Synchronizing state of keepalived.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable keepalived
Verify that keepalived took effect
root@haproxy-vm1:~# ip addr show
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
    link/ether fa:16:3e:f3:bc:58 brd ff:ff:ff:ff:ff:ff
    inet 172.16.99.125/24 brd 172.16.99.255 scope global dynamic eth0
       valid_lft 84622sec preferred_lft 84622sec
    inet 172.16.99.148/32 scope global eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::f816:3eff:fef3:bc58/64 scope link
       valid_lft forever preferred_lft forever
 
Configure haproxy
root@haproxy-vm1:~# cat /etc/haproxy/haproxy.cfg
global
    log /dev/log    local0
    log /dev/log    local1 notice
    chroot /var/lib/haproxy
    stats socket /run/haproxy/admin.sock mode 660 level admin expose-fd listeners
    stats timeout 30s
    user haproxy
    group haproxy
    daemon
    ca-base /etc/ssl/certs
    crt-base /etc/ssl/private
    ssl-default-bind-ciphers ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:RSA+AESGCM:RSA+AES:!aNULL:!MD5:!DSS
    ssl-default-bind-options no-sslv3
 
defaults
    log    global
    mode    http
    option    httplog
    option    dontlognull
        timeout connect 5000
        timeout client  50000
        timeout server  50000
    errorfile 400 /etc/haproxy/errors/400.http
    errorfile 403 /etc/haproxy/errors/403.http
    errorfile 408 /etc/haproxy/errors/408.http
    errorfile 500 /etc/haproxy/errors/500.http
    errorfile 502 /etc/haproxy/errors/502.http
    errorfile 503 /etc/haproxy/errors/503.http
    errorfile 504 /etc/haproxy/errors/504.http
 
listen k8s-api-server
    bind 0.0.0.0:6443
    mode tcp
    balance source
    server k8s-m1 172.16.99.121:6443 check inter 2000 fall 3 rise 5
    server k8s-m2 172.16.99.122:6443 check inter 2000 fall 3 rise 5
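
Before (re)starting haproxy, the configuration can optionally be syntax-checked:
root@haproxy-vm1:~# haproxy -c -f /etc/haproxy/haproxy.cfg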
 
 
root@haproxy-vm1:~# systemctl start haproxy
root@haproxy-vm1:~# systemctl enable haproxy
 
Copy the configuration to haproxy-vm2
root@haproxy-vm1:~# scp /etc/keepalived/keepalived.conf root@172.16.99.126:/etc/keepalived/
root@haproxy-vm1:~# scp /etc/haproxy/haproxy.cfg  root@172.16.99.126:/etc/haproxy/
 
Perform the same steps (restart/enable keepalived and haproxy) on haproxy-vm2
 
 
Note: since the machines here are OpenStack VMs, after the VIP is brought up, other VMs can only ping the VIP once it is associated with the instances' ports (allowed address pairs):
[root@node1 ~]# openstack port list | grep "125\|126"
| 509886e0-cafe-4c87-b6ce-c3df3c5b9e19 |      | fa:16:3e:f3:bc:58 | ip_address='172.16.99.125', subnet_id='bbd536c6-a975-4841-8082-35b28de16ef0' | ACTIVE |
| f19c5f92-c101-49a3-a950-43d27578e805 |      | fa:16:3e:7a:44:4a | ip_address='172.16.99.126', subnet_id='bbd536c6-a975-4841-8082-35b28de16ef0' | ACTIVE |
[root@node1 ~]# neutron  port-update 509886e0-cafe-4c87-b6ce-c3df3c5b9e19  --allowed_address_pairs list=true type=dict ip_address=172.16.99.148
Updated port: 509886e0-cafe-4c87-b6ce-c3df3c5b9e19
[root@node1 ~]#  neutron  port-update f19c5f92-c101-49a3-a950-43d27578e805  --allowed_address_pairs list=true type=dict ip_address=172.16.99.148
Updated port: f19c5f92-c101-49a3-a950-43d27578e805
After the association, the VIP can be pinged.
Part 4: Install the Harbor servers:
Install Harbor
root@harbor-vm1:/usr/local/src# ls
harbor-offline-installer-v1.7.5.tgz
root@harbor-vm1:/usr/local/src# tar -xvf harbor-offline-installer-v1.7.5.tgz
root@harbor-vm1:/usr/local/src# cd harbor/
root@harbor-vm1:/usr/local/src/harbor# mkdir cert
root@harbor-vm1:/usr/local/src/harbor# vim harbor.cfg
hostname =  harbor1.dexter.com
ui_url_protocol = https
ssl_cert = /usr/local/src/harbor/cert/server.crt
ssl_cert_key = /usr/local/src/harbor/cert/server.key
harbor_admin_password = 123456
 
Generate the certificates
root@harbor-vm1:~# mkdir -p /usr/local/src/harbor/cert
root@harbor-vm1:~# cd /usr/local/src/harbor/cert
root@harbor-vm1:/usr/local/src/harbor/cert# openssl genrsa -out server.key 2048  # generate the private key
root@harbor-vm1:/usr/local/src/harbor/cert# openssl req -x509 -new -nodes -key server.key -subj "/CN=harbor1.dexter.com" -days 7120 -out server.crt   # create a self-signed certificate with the given validity period
root@harbor-vm2:/usr/local/src/harbor/cert# openssl req -x509 -new -nodes -key server.key -subj "/CN=harbor2.dexter.com" -days 7120 -out server.crt   # create a self-signed certificate with the given validity period
Note: if server.crt cannot be generated on Ubuntu, try generating it on CentOS and then copying it over to Ubuntu.
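
A quick way to confirm the subject and validity period of the generated certificate:
# openssl x509 -in server.crt -noout -subject -dates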
 
 
Install Docker
Automated install using the official script (only suitable for hosts with Internet access)
curl -fsSL https://get.docker.com | bash -s docker --mirror Aliyun

Install docker-compose (required by the Harbor installer)
apt-get install docker-compose -y
 
# ./install.sh
 
Configure the clients to use Harbor:
mkdir /etc/docker/certs.d/harbor1.dexter.com -pv
mkdir /etc/docker/certs.d/harbor2.dexter.com -pv
Note: the clients here are mainly the master and node hosts.
 
[root@k8s-harbor1 harbor]# scp cert/server.crt  172.16.99.121:/etc/docker/certs.d/harbor1.dexter.com/
[root@k8s-harbor2 harbor]# scp cert/server.crt  172.16.99.121:/etc/docker/certs.d/harbor2.dexter.com/
 
# test login
[root@k8s-m1 ~]# docker login harbor1.dexter.com
Username: admin
Password:
Login Succeeded
[root@k8s-m1 ~]# docker login harbor2.dexter.com
Username: admin
Password:
Login Succeeded
 
 
Edit the local C:\Windows\System32\drivers\etc\hosts file and add the following two lines:
172.16.99.128   harbor2.dexter.com
172.16.99.127   harbor1.dexter.com
 
Try opening Harbor in a browser; account: admin, password: 123456.
While there, create a project for base images (e.g. baseimages).
 
Test pushing an image to Harbor:
root@k8s-m1:~# docker pull alpine
root@k8s-m1:~# docker tag alpine:latest harbor1.dexter.com/baseimages/alpine:latest
root@k8s-m1:~# docker push harbor1.dexter.com/baseimages/alpine:latest
 
Note: Docker must be installed on the masters in advance.
Install Docker
Automated install using the official script (only suitable for hosts with Internet access)
# curl -fsSL https://get.docker.com | bash -s docker --mirror Aliyun
 
 
Part 5: Ansible deployment:
Base environment preparation (all nodes):
# apt-get install python2.7 -y
# ln -s /usr/bin/python2.7 /usr/bin/python
root@ansible-vm1:~# apt-get install ansible -y  # on the ansible node
root@ansible-vm1:~# apt-get install git -y      # on the ansible node
 
Distribute the SSH public key (so that ansible can deploy the k8s cluster without passwords)
root@ansible-vm1:~# ssh-keygen  # generate a key pair
root@ansible-vm1:~# apt-get install sshpass -y  # used to push the public key to each k8s server
# public key distribution script:
root@ansible-vm1:~# cat scp.sh
#!/bin/bash
# target host list
IP="
172.16.99.121
172.16.99.122
172.16.99.127
172.16.99.128
172.16.99.141
172.16.99.142
172.16.99.143
172.16.99.144
172.16.99.145
172.16.99.123
172.16.99.124
172.16.99.125
172.16.99.126"
 
for node in ${IP};do
    sshpass -p 123456 ssh-copy-id  -p22 ${node}  -o StrictHostKeyChecking=no
    if [ $? -eq 0 ];then
        echo "${node} key copy succeeded"
    else
        echo "${node} key copy failed"
    fi
done
# run the distribution script:
root@ansible-vm1:~# bash scp.sh
root@s2:~# vim ~/.vimrc  # disable vim auto-indent (paste mode)
set paste
 
1.6.2: Clone the project:
 
root@ansible-vm1:~# git clone  -b  0.6.1 https://github.com/easzlab/kubeasz.git
root@ansible-vm1:~# mv /etc/ansible/* /opt/
root@ansible-vm1:~# mv kubeasz/* /etc/ansible/
root@ansible-vm1:~# cd  /etc/ansible/
root@ansible-vm1:/etc/ansible# cp example/hosts.m-masters.example ./hosts  # copy the hosts template file
root@ansible-vm1:/etc/ansible# cat hosts
 
1.6.3: Prepare the hosts file:
 
root@ansible-vm1:/etc/ansible# pwd
/etc/ansible
root@ansible-vm1:/etc/ansible#cp example/hosts.m-masters.example ./hosts
root@ansible-vm1:/etc/ansible# cat hosts
# cluster deploy node: usually the node that runs the ansible playbooks
# the variable NTP_ENABLED (=yes/no) controls whether chrony time sync is installed in the cluster
[deploy]
172.16.99.144 NTP_ENABLED=no
 
# provide a NODE_NAME for each etcd member; note that the etcd cluster must have an odd number of nodes (1, 3, 5, 7, ...)
[etcd]
172.16.99.141 NODE_NAME=etcd1
172.16.99.142 NODE_NAME=etcd2
172.16.99.143 NODE_NAME=etcd3
 
[new-etcd] # reserved group, used later for adding etcd nodes
#192.168.1.x NODE_NAME=etcdx
 
[kube-master]
172.16.99.121
172.16.99.122
 
[new-master] # reserved group, used later for adding master nodes
#192.168.1.5
 
[kube-node]
172.16.99.123
172.16.99.124
 
[new-node] # reserved group, used later for adding worker nodes
#192.168.1.xx
 
# parameter NEW_INSTALL: yes means install a new harbor, no means use an existing harbor server
# if no domain name is used, HARBOR_DOMAIN can be set to ""
[harbor]
#172.16.99.127 HARBOR_DOMAIN="harbor1.dexter.com" NEW_INSTALL=no
 
# load balancers (more than 2 nodes are now supported, but 2 are usually enough); installs haproxy+keepalived
[lb]
#192.168.1.1 LB_ROLE=backup
#192.168.1.2 LB_ROLE=master
 
# [optional] external load balancer, for forwarding traffic to services exposed via NodePort in self-hosted environments, etc.
[ex-lb]
#192.168.1.6 LB_ROLE=backup EX_VIP=192.168.1.250
#192.168.1.7 LB_ROLE=master EX_VIP=192.168.1.250
 
[all:vars]
# --------- main cluster parameters ---------------
# cluster deployment mode: allinone, single-master, multi-master
DEPLOY_MODE=multi-master
 
# main cluster version; currently supported: v1.8, v1.9, v1.10, v1.11, v1.12, v1.13
K8S_VER="v1.13"
 
# cluster MASTER IP, i.e. the VIP on the LB nodes; the template suggests a VIP service port of 8443 to distinguish it from the default apiserver port (6443 is used here)
# on public clouds, use the internal address and listening port of the cloud load balancer
MASTER_IP="172.16.99.148"
KUBE_APISERVER="https://{{ MASTER_IP }}:6443"
 
# cluster network plugin; currently supports calico, flannel, kube-router, cilium
CLUSTER_NETWORK="calico"
 
# Service CIDR; make sure it does not conflict with existing internal networks
SERVICE_CIDR="10.20.0.0/16"
 
# Pod CIDR (Cluster CIDR); make sure it does not conflict with existing internal networks
CLUSTER_CIDR="172.31.0.0/16"
 
# NodePort range
NODE_PORT_RANGE="30000-60000"
 
# kubernetes service IP (pre-allocated, usually the first IP of SERVICE_CIDR)
CLUSTER_KUBERNETES_SVC_IP="10.20.0.1"
 
# cluster DNS service IP (pre-allocated from SERVICE_CIDR)
CLUSTER_DNS_SVC_IP="10.20.254.254"
 
# cluster DNS domain
CLUSTER_DNS_DOMAIN="cluster.local."
 
# username and password for the cluster's basic auth
BASIC_AUTH_USER="admin"
BASIC_AUTH_PASS="123456"
 
# --------- additional parameters --------------------
# default binary directory
bin_dir="/usr/bin"
 
# certificate directory
ca_dir="/etc/kubernetes/ssl"
 
# deployment directory, i.e. the ansible working directory; changing it is not recommended
base_dir="/etc/ansible"
 
1.6.4: Prepare the binaries:
Upload k8s.1-13-5.tar.gz to /etc/ansible/bin on the ansible server
root@ansible-vm1:~# cd /etc/ansible/bin
root@ansible-vm1:/etc/ansible/bin# pwd
/etc/ansible/bin
root@ansible-vm1:/etc/ansible/bin# tar xvf k8s.1-13-5.tar.gz
root@ansible-vm1:/etc/ansible/bin# mv bin/* .
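
A quick sanity check that the unpacked binaries are the expected version (should report v1.13.5):
root@ansible-vm1:/etc/ansible/bin# ./kube-apiserver --version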
 
 
1.6.5: Deploy step by step:
Initialize the environment and deploy the highly available k8s cluster with the ansible playbooks
1.6.5.1: Environment initialization
root@ansible-vm1:/etc/ansible/bin# cd /etc/ansible/
root@ansible-vm1:/etc/ansible# ansible-playbook 01.prepare.yml
 
1.6.5.2: Deploy the etcd cluster:
Optionally change the startup script path
 
root@ansible-vm1:/etc/ansible# ansible-playbook 02.etcd.yml
 
Verify the etcd service on each etcd server:
root@etcd-vm1:~# export NODE_IPS="172.16.99.141 172.16.99.142 172.16.99.143"
root@etcd-vm1:~# for ip in ${NODE_IPS}; do ETCDCTL_API=3 /usr/bin/etcdctl --endpoints=https://${ip}:2379 --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem endpoint health; done
https://172.16.99.141:2379 is healthy: successfully committed proposal: took = 7.789938ms
https://172.16.99.142:2379 is healthy: successfully committed proposal: took = 6.976676ms
https://172.16.99.143:2379 is healthy: successfully committed proposal: took = 7.911517ms
 
1.6.5.3: Deploy Docker:
Optionally change the startup script path; Docker was already installed earlier, so this step does not need to be re-run
root@ansible-vm1:/etc/ansible# ansible-playbook 03.docker.yml
 
1.6.5.4: Deploy the masters:
Optionally change the startup script path
 
root@ansible-vm1:/etc/ansible# ansible-playbook 04.kube-master.yml
 
1.6.5.5: Deploy the nodes:
Docker must be installed on the node hosts
root@ansible-vm1:/etc/ansible# vim roles/kube-node/defaults/main.yml
# base (pause) container image
SANDBOX_IMAGE: "harbor1.dexter.com/baseimages/pause-amd64:3.1"
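
The pause image referenced above must exist in the local registry before the node playbook runs; a sketch of one way to get it there (the source repository name is an assumption, any accessible copy of pause-amd64:3.1 works):
# docker pull mirrorgooglecontainers/pause-amd64:3.1
# docker tag mirrorgooglecontainers/pause-amd64:3.1 harbor1.dexter.com/baseimages/pause-amd64:3.1
# docker push harbor1.dexter.com/baseimages/pause-amd64:3.1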
root@ansible-vm1:/etc/ansible# ansible-playbook 05.kube-node.yml
 
Verify:
root@k8s-m1:~# kubectl get nodes
NAME            STATUS                     ROLES    AGE   VERSION
172.16.99.121   Ready,SchedulingDisabled   master   75m   v1.13.5
172.16.99.122   Ready,SchedulingDisabled   master   75m   v1.13.5
172.16.99.123   Ready                      node     71m   v1.13.5
172.16.99.124   Ready                      node     71m   v1.13.5
1.6.5.6: Deploy the network service (calico):
Optionally change the calico startup script path and the CSR certificate information; load and push the calico images to the local registry:
 
# docker  load -i calico-cni.tar
# docker tag calico/cni:v3.3.6 harbor1.dexter.com/baseimages/cni:v3.3.6
# docker push harbor1.dexter.com/baseimages/cni:v3.3.6
 
# docker load -i calico-node.tar
# docker tag calico/node:v3.3.6  harbor1.dexter.com/baseimages/node:v3.3.6
# docker push harbor1.dexter.com/baseimages/node:v3.3.6
 
# docker load -i calico-kube-controllers.tar
# docker tag calico/kube-controllers:v3.3.6   harbor1.dexter.com/baseimages/kube-controllers:v3.3.6
# docker push harbor1.dexter.com/baseimages/kube-controllers:v3.3.6
 
root@ansible-vm1:/etc/ansible# vim roles/calico/defaults/main.yml
calico_ver: "v3.3.6"
root@ansible-vm1:/etc/ansible# mv /bin/calicoctl{,.bak}
Upload the calicoctl binary from the 3.3.6 release to the /etc/ansible/bin directory
root@ansible-vm1:/etc/ansible/bin# chmod +x calicoctl
 
Modify the following sections to use the images from the local registry
root@ansible-vm1:/etc/ansible# vim roles/calico/templates/calico-v3.3.yaml.j2
        - name: calico-node
          image: harbor1.dexter.com/baseimages/node:v3.3.6
 
        - name: install-cni
          image: harbor1.dexter.com/baseimages/cni:v3.3.6
 
        - name: calico-kube-controllers
          image: harbor1.dexter.com/baseimages/kube-controllers:v3.3.6
Deploy the network:
root@ansible-vm1:/etc/ansible# ansible-playbook 06.network.yml
 
Verify calico:
root@k8s-n1:~# calicoctl version
Client Version:    v3.3.6
Build date:        2019-03-28T00:10:36+0000
Git commit:        00031ac8
Cluster Version:   v3.3.6
Cluster Type:      k8s,bgp
 
root@k8s-m1:~# calicoctl node status
Calico process is running.
 
IPv4 BGP status
+---------------+-------------------+-------+----------+-------------+
| PEER ADDRESS  |     PEER TYPE     | STATE |  SINCE   |    INFO     |
+---------------+-------------------+-------+----------+-------------+
| 172.16.99.122 | node-to-node mesh | up    | 05:09:29 | Established |
+---------------+-------------------+-------+----------+-------------+
 
IPv6 BGP status
No IPv6 peers found.
 
kubectl run net-test1 --image=alpine --replicas=4 sleep 360000  # create test pods to verify cross-host pod networking
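
To confirm cross-host connectivity, list the test pods and ping one pod from another (the pod name and IP below are placeholders taken from the output of the first command):
root@k8s-m1:~# kubectl get pods -o wide
root@k8s-m1:~# kubectl exec -it <net-test1-pod-on-node1> -- ping -c 3 <ip-of-net-test1-pod-on-node2>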
 
1.6.5.7: Add a node:
[kube-node]
192.168.7.110
 
[new-node] # reserved group, used later for adding worker nodes
192.168.7.111
root@ansible-vm1:/etc/ansible# ansible-playbook 20.addnode.yml
 
1.6.5.8: Add a master node:
Comment out the [lb] group, otherwise the next step cannot proceed
 
[kube-master]
192.168.7.101
 
[new-master] # reserved group, used later for adding master nodes
192.168.7.102
 
root@k8s-m1:/etc/ansible# ansible-playbook 21.addmaster.yml
 
1.6.5.9: Verify the current state:
root@k8s-m1:~# calicoctl node status
Calico process is running.
 
IPv4 BGP status
+---------------+-------------------+-------+----------+-------------+
| PEER ADDRESS  |     PEER TYPE     | STATE |  SINCE   |    INFO     |
+---------------+-------------------+-------+----------+-------------+
| 172.16.99.122 | node-to-node mesh | up    | 06:18:57 | Established |
| 172.16.99.123 | node-to-node mesh | up    | 06:19:33 | Established |
| 172.16.99.124 | node-to-node mesh | up    | 06:19:14 | Established |
+---------------+-------------------+-------+----------+-------------+
 
IPv6 BGP status
No IPv6 peers found.
 
root@k8s-m1:~# kubectl  get nodes
NAME            STATUS                     ROLES    AGE    VERSION
172.16.99.121   Ready,SchedulingDisabled   master   123m   v1.13.5
172.16.99.122   Ready,SchedulingDisabled   master   123m   v1.13.5
172.16.99.123   Ready                      node     119m   v1.13.5
172.16.99.124   Ready                      node     119m   v1.13.5