Installing a single-master, two-node Kubernetes cluster with kubeadm - v1.13.2

hostname     IP
k8s-master   172.16.40.97
k8s-node1    172.16.40.98
k8s-node2    172.16.40.99
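The steps below assume each machine has its hostname set and can resolve the other two by name; the original guide does not show this, so here is a minimal sketch (adjust the hostname on each machine):

[root@k8s-master ~]# hostnamectl set-hostname k8s-master
[root@k8s-master ~]# cat <<EOF >> /etc/hosts
172.16.40.97 k8s-master
172.16.40.98 k8s-node1
172.16.40.99 k8s-node2
EOF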

1. Initialize the environment for Kubernetes (run on all three hosts)

a. Disable the firewall and SELinux
[root@k8s-master ~]# systemctl stop firewalld && systemctl disable firewalld
[root@k8s-master ~]# sed -ri '/^[^#]*SELINUX=/s#=.+$#=disabled#' /etc/selinux/config
[root@k8s-master ~]# setenforce 0
b. Set up the time-synchronization client
[root@k8s-master ~]# yum install chrony -y
[root@k8s-master ~]# cat <<EOF > /etc/chrony.conf
server ntp.aliyun.com iburst
stratumweight 0
driftfile /var/lib/chrony/drift
rtcsync
makestep 10 3
bindcmdaddress 127.0.0.1
bindcmdaddress ::1
keyfile /etc/chrony.keys
commandkey 1
generatecommandkey
logchange 0.5
logdir /var/log/chrony
EOF
[root@k8s-master ~]# systemctl restart chronyd && systemctl enable chronyd
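To confirm chrony is actually syncing against ntp.aliyun.com, check the source list (the line marked ^* is the currently selected server):
[root@k8s-master ~]# chronyc sources -v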
c. Upgrade the kernel
[root@k8s-master ~]# wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
[root@k8s-master ~]# yum install wget git jq psmisc -y
[root@k8s-master ~]# yum install -y https://mirrors.aliyun.com/saltstack/yum/redhat/salt-repo-latest-2.el7.noarch.rpm
[root@k8s-master ~]# sed -i "s/repo.saltstack.com/mirrors.aliyun.com\/saltstack/g" /etc/yum.repos.d/salt-latest.repo
[root@k8s-master ~]# yum update -y
[root@k8s-master ~]# wget https://github.com/sky-daiji/salt-k8s-ha-v2/raw/master/apps/kernel-ml-4.18.16-1.el7.elrepo.x86_64.rpm
[root@k8s-master ~]# yum localinstall -y kernel-ml*

Check that this kernel ships the nf_conntrack_ipv4 module:
[root@k8s-master ~]# find /lib/modules -name '*nf_conntrack_ipv4*' -type f

Change the kernel boot order. The old default entry is index 1; the upgraded kernel is inserted in front of it at index 0. (If you don't mind selecting the kernel manually at every boot, this step can be skipped.)
[root@k8s-master ~]# grub2-set-default 0 && grub2-mkconfig -o /etc/grub2.cfg

Use the commands below to confirm that the default boot kernel points at the kernel installed above:
[root@k8s-master ~]# grubby --default-kernel
[root@k8s-master ~]# grubby --args="user_namespace.enable=1" --update-kernel="$(grubby --default-kernel)"

Reboot to load the new kernel:
[root@k8s-master ~]# reboot
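After the reboot, verify that the running kernel is the 4.18.16 build installed above (the release string should match the kernel-ml package name):
[root@k8s-master ~]# uname -r
4.18.16-1.el7.elrepo.x86_64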

Set the required system parameters in /etc/sysctl.d/k8s.conf:
[root@k8s-master ~]# cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720
EOF
[root@k8s-master ~]# sysctl --system
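Note: the net.bridge.bridge-nf-call-* keys only exist while the br_netfilter module is loaded. If sysctl --system reports "No such file or directory" for them, load the module and make it persistent first:
[root@k8s-master ~]# modprobe br_netfilter
[root@k8s-master ~]# echo br_netfilter > /etc/modules-load.d/br_netfilter.conf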
Check whether the system kernel and modules are suitable for running Docker (Linux only):

[root@k8s-master ~]# curl https://raw.githubusercontent.com/docker/docker/master/contrib/check-config.sh > check-config.sh
[root@k8s-master ~]# bash ./check-config.sh

Install docker-ce:
[root@k8s-master ~]# yum install -y yum-utils device-mapper-persistent-data lvm2
[root@k8s-master ~]# yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
[root@k8s-master ~]# yum makecache fast
[root@k8s-master ~]# yum install docker-ce-18.09.2 -y
[root@k8s-master ~]# systemctl daemon-reload && systemctl enable docker && systemctl start docker
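Optionally verify the installation. The kubelet is started below with --cgroup-driver=cgroupfs, so Docker should report the same cgroup driver:
[root@k8s-master ~]# docker info | grep -i 'cgroup driver'
Cgroup Driver: cgroupfs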

2. Install and initialize the Kubernetes cluster

a. Install kubelet, kubeadm, and kubectl (run on all three hosts)
[root@k8s-master ~]# cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
[root@k8s-master ~]# yum install -y kubelet-1.13.2 kubeadm-1.13.2 kubectl-1.13.2   # pin to the release this guide targets
[root@k8s-master ~]# systemctl enable kubelet && systemctl start kubelet
b. Have the kubelet ignore the swap-not-disabled warning on the master (set the same file on the nodes too if they have swap enabled):
[root@k8s-master ~]# cat <<EOF > /etc/sysconfig/kubelet
KUBELET_EXTRA_ARGS="--fail-swap-on=false --cgroup-driver=cgroupfs"
EOF
[root@k8s-master ~]# systemctl daemon-reload
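Alternatively, instead of ignoring the warning you can disable swap outright (not part of the original steps; the sed pattern assumes standard fstab swap entries):
[root@k8s-master ~]# swapoff -a
[root@k8s-master ~]# sed -ri 's/^([^#].*\bswap\b)/#\1/' /etc/fstab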
c. Run kubeadm init on the master
[root@k8s-master ~]# kubeadm init --kubernetes-version=1.13.2 \
--apiserver-advertise-address=172.16.40.97 \
--image-repository registry.cn-hangzhou.aliyuncs.com/google_containers \
--service-cidr=10.96.0.0/12 \
--pod-network-cidr=10.244.0.0/16 \
--ignore-preflight-errors=Swap \
--ignore-preflight-errors=NumCPU

[init] Using Kubernetes version: v1.13.2
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Activating the kubelet service
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [master kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 172.16.40.97]
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [master localhost] and IPs [172.16.40.97 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [master localhost] and IPs [172.16.40.97 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 20.003620 seconds
[uploadconfig] storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.13" in namespace kube-system with the configuration for the kubelets in the cluster
[patchnode] Uploading the CRI Socket information "/var/run/dockershim.sock" to the Node API object "master" as an annotation
[mark-control-plane] Marking the node master as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node master as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: 2s9xxt.8lgyw6yzt21qq8xf
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstraptoken] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstraptoken] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstraptoken] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstraptoken] creating the "cluster-info" ConfigMap in the "kube-public" namespace
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes master has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of machines by running the following on each node
as root:

kubeadm join 172.16.40.97:6443 --token 2s9xxt.8lgyw6yzt21qq8xf --discovery-token-ca-cert-hash sha256:c141fb0608b4b83136272598d2623589d73546762abc987391479e8e049b0d76
d. Configure kubeconfig on the master to access the cluster
[root@k8s-master ~]# mkdir -p $HOME/.kube
[root@k8s-master ~]# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@k8s-master ~]# chown $(id -u):$(id -g) $HOME/.kube/config
e. Check the cluster status from the master
[root@master ~]# kubectl get cs
NAME                 STATUS    MESSAGE              ERROR
controller-manager   Healthy   ok
scheduler            Healthy   ok
etcd-0               Healthy   {"health": "true"}
f. Install the flannel network plugin
[root@k8s-master ~]# kubectl apply -f https://raw.githubusercontent.com/sky-daiji/k8s-install/master/kube-flannel/kube-flannel.yml
[root@k8s-master ~]# kubectl get pod -n kube-system |grep kube-flannel
kube-flannel-ds-amd64-mj89k          1/1     Running   0          1m
kube-flannel-ds-amd64-rt9fj          1/1     Running   0          2m
kube-flannel-ds-amd64-zs6lb          1/1     Running   0          2m
g. Join the nodes to the cluster (run on both k8s-node1 and k8s-node2)
[root@k8s-node1 ~]# kubeadm join 172.16.40.97:6443 --token 2s9xxt.8lgyw6yzt21qq8xf --discovery-token-ca-cert-hash sha256:c141fb0608b4b83136272598d2623589d73546762abc987391479e8e049b0d76
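The bootstrap token is only valid for 24 hours by default; if it has expired by the time a node joins, generate a fresh join command on the master:
[root@k8s-master ~]# kubeadm token create --print-join-command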
h. Check that all nodes have been added to the cluster
[root@master ~]# kubectl get node
NAME     STATUS   ROLES    AGE   VERSION
master   Ready    master   15m   v1.13.2
node1    Ready    <none>   13m   v1.13.2
node2    Ready    <none>   13m   v1.13.2
i. Check how the Kubernetes components are running
[root@k8s-master ~]# kubectl get pod --all-namespaces
NAMESPACE     NAME                                 READY   STATUS    RESTARTS   AGE
kube-system   coredns-d5947d4b-2p6tv               1/1     Running   0          20h
kube-system   coredns-d5947d4b-lzqwg               1/1     Running   0          20h
kube-system   etcd-k8s-master                      1/1     Running   0          20h
kube-system   kube-apiserver-k8s-master            1/1     Running   0          20h
kube-system   kube-controller-manager-k8s-master   1/1     Running   0          20h
kube-system   kube-flannel-ds-amd64-mj89k          1/1     Running   0          174m
kube-system   kube-flannel-ds-amd64-rt9fj          1/1     Running   0          174m
kube-system   kube-flannel-ds-amd64-zs6lb          1/1     Running   0          174m
kube-system   kube-proxy-8zbl9                     1/1     Running   0          144m
kube-system   kube-proxy-v7vkb                     1/1     Running   0          144m
kube-system   kube-proxy-wdqgv                     1/1     Running   0          144m
kube-system   kube-scheduler-k8s-master            1/1     Running   0          20h
j. Enable the IPVS modules on all nodes
[root@k8s-master ~]# yum install -y ipvsadm
[root@k8s-master ~]# vim /etc/sysconfig/modules/ipvs.modules
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4

[root@k8s-master ~]# chmod +x /etc/sysconfig/modules/ipvs.modules

[root@k8s-master ~]# source /etc/sysconfig/modules/ipvs.modules

[root@k8s-master ~]# lsmod | grep -e ip_vs -e nf_conntrack_ipv4

Edit the kube-proxy ConfigMap and set mode to "ipvs":
[root@k8s-master ~]# kubectl edit cm kube-proxy -n kube-system
...
kind: KubeProxyConfiguration
metricsBindAddress: 127.0.0.1:10249
mode: "ipvs"
nodePortAddresses: null
oomScoreAdj: -999
...

Delete the kube-proxy pods in bulk so they are recreated with the new configuration:
[root@k8s-master ~]# kubectl get pod -n kube-system | grep kube-proxy | awk '{system("kubectl delete pod "$1" -n kube-system")}'
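Once the new kube-proxy pods are Running, you can confirm the switch to IPVS: the service virtual servers should now appear in ipvsadm, and the pod logs should mention the ipvs proxier:
[root@k8s-master ~]# ipvsadm -Ln
[root@k8s-master ~]# kubectl logs -n kube-system $(kubectl get pod -n kube-system | awk '/kube-proxy/{print $1; exit}') | grep -i ipvs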
k. Install the kubernetes-dashboard addon
[root@k8s-master ~]# kubectl apply -f https://raw.githubusercontent.com/sky-daiji/salt-k8s-ha-v2/master/addons/dashboard/kubernetes-dashboard.yaml
[root@k8s-master ~]# kubectl apply -f https://raw.githubusercontent.com/sky-daiji/salt-k8s-ha-v2/master/addons/dashboard/admin-user.yaml

Check whether the kubernetes-dashboard addon was installed successfully:
[root@k8s-master ~]# kubectl get pod -n kube-system  |grep kubernetes-dashboard

Use Firefox to visit the Dashboard web UI:
https://172.16.40.97:30091
Choose Token login, and retrieve the token with:
[root@k8s-master ~]# kubectl describe secret/$(kubectl get secret -n kube-system |grep admin|awk '{print $1}') -n kube-system
