Master节点安装脚本:
#!/bin/sh
# Kubernetes master bootstrap script (CentOS 7).
# Installs Docker CE 18.06.1 and kubeadm/kubelet/kubectl from Aliyun
# mirrors, pulls the v1.12.2 control-plane images via a public mirror,
# runs `kubeadm init`, and installs the flannel pod network.
# Must be run as root on the designated master host.

# Stop firewalld and disable it on boot; otherwise kubeadm preflight
# checks and inter-node traffic will be blocked.
systemctl stop firewalld.service
systemctl disable firewalld.service

# Disable SELinux permanently (config file) and for the current boot.
sed -i "s/^SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
setenforce 0

# Disable swap now and in fstab: kubelet refuses to run with swap on.
swapoff -a
sed -i 's/.*swap.*/#&/' /etc/fstab

# Kernel settings so bridged container traffic traverses iptables.
yum install bridge-utils -y
modprobe br_netfilter
cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
# Apply the sysctl settings above.
sysctl --system

# Raise process and file-descriptor limits for container workloads.
cat <<EOF > /etc/security/limits.d/20-nproc.conf
* soft nproc 20480
* hard nproc 20480
* soft nofile 102400
* hard nofile 102400
root soft nproc unlimited
EOF

# Static host-name resolution for every cluster member.
cat >> /etc/hosts << EOF
192.168.20.210 docker-master1
192.168.20.211 docker-master2
192.168.20.212 docker-master3
192.168.20.213 docker-slave1
192.168.20.214 docker-slave2
EOF

# Install docker-ce 18.06.1 from the Aliyun mirror.
sudo yum install -y yum-utils device-mapper-persistent-data lvm2
sudo yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum install -y docker-ce-18.06.1.ce-3.el7
systemctl enable docker && systemctl start docker

# Pin the docker-ce version so a later `yum update` cannot replace it.
yum -y install yum-plugin-versionlock
yum versionlock docker-ce

# Kubernetes 1.12.2 yum repository (Aliyun mirror).
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
yum clean all && yum makecache
yum -y install kubelet kubeadm kubectl kubernetes-cni
systemctl enable kubelet.service && systemctl start kubelet.service

# Pull the k8s control-plane images from a public mirror
# (k8s.gcr.io is not reachable from this network)...
docker pull mirrorgooglecontainers/kube-apiserver:v1.12.2
docker pull mirrorgooglecontainers/kube-controller-manager:v1.12.2
docker pull mirrorgooglecontainers/kube-scheduler:v1.12.2
docker pull mirrorgooglecontainers/kube-proxy:v1.12.2
docker pull mirrorgooglecontainers/pause:3.1
docker pull mirrorgooglecontainers/etcd:3.2.24
docker pull coredns/coredns:1.2.2

# ...retag them with the names kubeadm expects...
docker tag docker.io/mirrorgooglecontainers/kube-proxy:v1.12.2 k8s.gcr.io/kube-proxy:v1.12.2
docker tag docker.io/mirrorgooglecontainers/kube-scheduler:v1.12.2 k8s.gcr.io/kube-scheduler:v1.12.2
docker tag docker.io/mirrorgooglecontainers/kube-apiserver:v1.12.2 k8s.gcr.io/kube-apiserver:v1.12.2
docker tag docker.io/mirrorgooglecontainers/kube-controller-manager:v1.12.2 k8s.gcr.io/kube-controller-manager:v1.12.2
docker tag docker.io/mirrorgooglecontainers/etcd:3.2.24 k8s.gcr.io/etcd:3.2.24
docker tag docker.io/mirrorgooglecontainers/pause:3.1 k8s.gcr.io/pause:3.1
docker tag docker.io/coredns/coredns:1.2.2 k8s.gcr.io/coredns:1.2.2

# ...and drop the mirror tags to avoid duplicate image listings.
docker rmi docker.io/mirrorgooglecontainers/kube-proxy:v1.12.2
docker rmi docker.io/mirrorgooglecontainers/kube-scheduler:v1.12.2
docker rmi docker.io/mirrorgooglecontainers/kube-apiserver:v1.12.2
docker rmi docker.io/mirrorgooglecontainers/kube-controller-manager:v1.12.2
docker rmi docker.io/mirrorgooglecontainers/etcd:3.2.24
docker rmi docker.io/mirrorgooglecontainers/pause:3.1
docker rmi docker.io/coredns/coredns:1.2.2

# Initialise the control plane. 10.244.0.0/16 is the pod CIDR that the
# flannel manifest applied below expects.
kubeadm init --kubernetes-version=v1.12.2 --pod-network-cidr=10.244.0.0/16 --apiserver-advertise-address=0.0.0.0

# Make kubectl usable for the invoking user.
mkdir -p "$HOME/.kube"
sudo cp -i /etc/kubernetes/admin.conf "$HOME/.kube/config"
sudo chown "$(id -u):$(id -g)" "$HOME/.kube/config"
export KUBECONFIG=/etc/kubernetes/admin.conf

# Install the flannel pod network (pinned manifest revision).
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/bc79dd1505b0c8681ece4de4c0d86c5cd2643275/Documentation/kube-flannel.yml
当脚本执行完 kubeadm init 后,会输出一个 kubeadm join 命令,例如:
kubeadm join 192.168.20.210:6443 --token n962df.2bjqc7w81n2ufz3z --discovery-token-ca-cert-hash sha256:88d46eb99b43be7afa66d91a138393e1614a090772530d250dc6d75d59d863b5
此命令需要在 node 节点上执行,用于将该 node 加入 kubernetes 集群。
kubectl get pod -n kube-system 可以查看该命名空间下程序的运行情况,状态都为 Running 即为正常。
除了安装 flannel 网络以外,还可以安装其他网络插件,可以参考 kubernetes 集群 pod 网络的相关文档。
Node节点安装脚本:
#!/bin/sh
# Kubernetes worker-node bootstrap script (CentOS 7).
# Mirrors the master script's OS preparation and package installs, but
# only pulls the images a worker needs (kube-proxy and pause).
# Must be run as root; afterwards join the cluster with `kubeadm join`.

# Stop firewalld and disable it on boot.
systemctl stop firewalld.service
systemctl disable firewalld.service

# Disable SELinux permanently (config file) and for the current boot.
sed -i "s/^SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
setenforce 0

# Disable swap now and in fstab: kubelet refuses to run with swap on.
swapoff -a
sed -i 's/.*swap.*/#&/' /etc/fstab

# Kernel settings so bridged container traffic traverses iptables.
yum install bridge-utils -y
modprobe br_netfilter
cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
# Apply the sysctl settings above.
sysctl --system

# Raise process and file-descriptor limits for container workloads.
cat <<EOF > /etc/security/limits.d/20-nproc.conf
* soft nproc 20480
* hard nproc 20480
* soft nofile 102400
* hard nofile 102400
root soft nproc unlimited
EOF

# Static host-name resolution for every cluster member.
cat >> /etc/hosts << EOF
192.168.20.210 docker-master1
192.168.20.211 docker-master2
192.168.20.212 docker-master3
192.168.20.213 docker-slave1
192.168.20.214 docker-slave2
EOF

# Install docker-ce 18.06.1 from the Aliyun mirror.
sudo yum install -y yum-utils device-mapper-persistent-data lvm2
sudo yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum install -y docker-ce-18.06.1.ce-3.el7
systemctl enable docker && systemctl start docker

# Pin the docker-ce version so a later `yum update` cannot replace it.
yum -y install yum-plugin-versionlock
yum versionlock docker-ce

# Kubernetes 1.12.2 yum repository (Aliyun mirror).
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
yum clean all && yum makecache
yum -y install kubelet kubeadm kubectl kubernetes-cni
# Enable kubelet on boot (kubeadm join starts it; this keeps it across
# reboots, matching the master script).
systemctl enable kubelet.service

# Pull the images a worker node needs from a public mirror
# (k8s.gcr.io is not reachable from this network)...
docker pull mirrorgooglecontainers/kube-proxy:v1.12.2
docker pull mirrorgooglecontainers/pause:3.1

# ...retag them with the names kubeadm expects...
docker tag docker.io/mirrorgooglecontainers/kube-proxy:v1.12.2 k8s.gcr.io/kube-proxy:v1.12.2
docker tag docker.io/mirrorgooglecontainers/pause:3.1 k8s.gcr.io/pause:3.1

# ...and drop the mirror tags.
docker rmi docker.io/mirrorgooglecontainers/kube-proxy:v1.12.2
docker rmi docker.io/mirrorgooglecontainers/pause:3.1
完成以上操作后,在 master 上执行 kubectl get nodes 即可以查看所有节点状态,Ready 为正常。
1、master隔离
默认情况下,出于安全原因,您的集群不会在主节点上调度容器。如果您希望能够在主节点上调度 pod(例如,用于开发的单机 Kubernetes 集群),请运行:
kubectl taint nodes --all node-role.kubernetes.io/master-
输出看起来像:
node "test-01" untainted taint "node-role.kubernetes.io/master:" not found taint "node-role.kubernetes.io/master:" not found
这将从包括主节点在内的所有节点上删除 node-role.kubernetes.io/master 污点,这意味着调度程序将能够在任何节点上调度 pod。
2、后续加入node节点
节点是运行工作负载(Pod 和容器等)的地方。要向集群添加新节点,请在每台机器上执行以下操作:
sudo su -
然后执行 kubeadm init 在 master 上输出的 kubeadm join 命令。例如:
kubeadm join --token <token> <master-ip>:<master-port> --discovery-token-ca-cert-hash sha256:<hash>
如果您没有令牌,可以通过在主节点上运行以下命令来获取它:
kubeadm token list
输出类似于:
TOKEN TTL EXPIRES USAGES DESCRIPTION EXTRA GROUPS 8ewj1p.9r9hcjoqgajrj4gi 23h 2018-06-12T02:51:28Z authentication, The default bootstrap system: signing token generated by bootstrappers: 'kubeadm init'. kubeadm: default-node-token
默认情况下,令牌在 24 小时后过期。如果在当前令牌过期后将节点加入集群,则可以通过在主节点上运行以下命令来创建新令牌:
kubeadm token create
输出类似于:
5didvk.d09sbcov8ph2amjw
如果没有 --discovery-token-ca-cert-hash 的值,可以通过在主节点上运行以下命令链来获取它:
openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | \ openssl dgst -sha256 -hex | sed 's/^.* //'
输出类似于:
8cb2de97839780a412b93877f8507ad6c94f73add17d5d7058e91741c9d5ec78
注意:要指定 IPv6 形式的 <master-ip>:<master-port>,必须将 IPv6 地址括在方括号中,例如:[fd00::101]:2073。
几秒钟后,您应该可以在主节点上运行 kubectl get nodes 的输出中看到该节点。
也可以直接执行以下命令,获取加入集群节点的命令:
kubeadm token create --print-join-command