#!/bin/sh
export localip=10.11.151.97
export name=bjcnctest0
export port1=2380
export port2=2379
export port3=4002
sudo ./etcd \
  -name $name \
  -initial-advertise-peer-urls http://$localip:$port1 \
  -listen-peer-urls http://0.0.0.0:$port1 \
  -listen-client-urls http://0.0.0.0:$port2,http://0.0.0.0:$port3 \
  -advertise-client-urls http://$localip:$port2,http://$localip:$port3 \
  -initial-cluster-token etcd-cluster \
  -initial-cluster bjcnctest0=http://$localip:$port1,bjcnctest1=http://10.11.151.100:$port1,bjcnctest2=http://10.11.151.101:$port1 \
  -initial-cluster-state new &
Run etcd on each of the three hosts to form the cluster. Once the cluster is assembled correctly, every host should see the published name in its output.
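To confirm the cluster actually formed, it can be queried with etcdctl; a minimal check, assuming the etcdctl binary that ships with this etcd release:
# list the members the cluster has agreed on
./etcdctl --endpoints=http://10.11.151.97:2379 member list
# report whether every member is reachable and healthy
./etcdctl --endpoints=http://10.11.151.97:2379 cluster-health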
./kube-apiserver --logtostderr=true --v=0 --etcd_servers=http://127.0.0.1:2379 --kubelet_port=10250 --allow_privileged=false --service-cluster-ip-range=172.16.0.0/12 --insecure-bind-address=0.0.0.0 --insecure-port=8080 >apiserver.out 2>&1 &
kube-controller-manager:
./kube-controller-manager --logtostderr=true --v=0 --master=http://tc-151-97:8080 --cloud-provider="" >controller.out 2>&1 &
kube-scheduler:
./kube-scheduler --logtostderr=true --v=0 --master=http://tc-151-97:8080 >scheduler.out 2>&1 &
Run the Calico service on the Master:
./calicoctl node --ip=10.11.151.97
With docker ps you should be able to see the Calico node container.
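A quick way to double-check that the service is actually up (the grep filter is an assumption for this setup; the old calicoctl provided a status subcommand):
docker ps | grep calico      # the calico/node container should be running
./calicoctl status           # should report the Felix and BIRD components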
vi /etc/sysconfig/network-scripts/ifcfg-cbr0
-----------------------
DEVICE=cbr0
TYPE=Bridge
IPADDR=172.1.0.0
NETMASK=255.255.255.0
ONBOOT=yes
BOOTPROTO=static
This assigns the address range 172.1.0.0/24, giving each host up to 256 IPs.
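Before pointing docker at this bridge, the interface has to exist; a sketch assuming the standard CentOS network-scripts (with bridge-utils installed):
ifup cbr0              # bring the bridge up from the ifcfg file above
ip addr show cbr0      # should now carry the 172.1.0.0/24 address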
vi /usr/lib/systemd/system/docker.service
Add "--bridge=cbr0 --iptables=false --ip-masq=false" to the startup arguments of /usr/bin/docker daemon.
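A sketch of what the modified unit might look like; the exact ExecStart line depends on the docker version installed, so any flags other than the three above are placeholders:
[Service]
ExecStart=/usr/bin/docker daemon --bridge=cbr0 --iptables=false --ip-masq=false
After editing the unit, reload systemd and restart docker:
systemctl daemon-reload
systemctl restart docker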
./calicoctl node --ip=10.11.151.100
mv calico-kubernetes /usr/libexec/kubernetes/kubelet-plugins/net/exec/calico/calico
chmod +x /usr/libexec/kubernetes/kubelet-plugins/net/exec/calico/calico
KUBE_API_ROOT=http://10.11.151.97:8080/api/v1/ ./kube-proxy --logtostderr=true --v=0 --master=http://tc-151-97:8080 --proxy-mode=iptables &
The KUBE_API_ROOT variable here is passed through to Calico, so Calico can query the master's API for the current pod and node information.
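Before starting kubelet it is worth confirming the master really answers on that URL; these are standard v1 API paths:
curl http://10.11.151.97:8080/api/v1/nodes                       # registered nodes
curl http://10.11.151.97:8080/api/v1/namespaces/default/pods     # pods in the default namespace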
CALICO_IPAM=false KUBE_API_ROOT=http://10.11.151.97:8080/api/v1/ ./kubelet --logtostderr=true --v=0 --api_servers=http://tc-151-97:8080 --address=0.0.0.0 --network-plugin=calico --allow_privileged=false --pod-infra-container-image=10.11.150.76:5000/kubernetes/pause:latest > a.txt &
CALICO_IPAM=false tells Calico to allocate container IPs from the docker bridge's address range, i.e. the cbr0 subnet we created above.
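A quick way to confirm docker is really allocating from cbr0 (the container name iptest is just an illustration):
ip addr show cbr0
docker run -d --name=iptest 10.11.150.76:5000/openxxs/iperf:1.2
docker inspect -f '{{ .NetworkSettings.IPAddress }}' iptest      # should fall inside 172.1.0.0/24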
./calicoctl pool show
./calicoctl pool add 172.0.0.0/8 --nat-outgoing
apiVersion: v1
kind: ReplicationController
metadata:
  name: test-5
spec:
  replicas: 1
  template:
    metadata:
      labels:
        app: test-5
    spec:
      containers:
      - name: iperf
        image: 10.11.150.76:5000/openxxs/iperf:1.2
      nodeSelector:
        kubernetes.io/hostname: tc-151-100
On the master:
./kubectl create -f test.yaml
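Once created, the pod and the address Calico assigned to it can be checked from the master with the same kubectl binary:
./kubectl get rc test-5
./kubectl get pods -o wide      # the pod should be scheduled on tc-151-100 with an IP from the cbr0 range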
apiVersion: v1
kind: ReplicationController
metadata:
  name: test-5
spec:
  replicas: 70
  template:
    metadata:
      labels:
        app: test-5
    spec:
      containers:
      - name: iperf
        image: 10.11.150.76:5000/openxxs/iperf:1.2
      nodeSelector:
        kubernetes.io/hostname: tc-151-100
This creates 70 iperf pods on host 100.
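A simple way to confirm all 70 replicas come up (a sketch; adjust the grep pattern if other pods share the prefix):
./kubectl get rc test-5                                 # the replica count should read 70
./kubectl get pods | grep test-5 | grep -c Running      # count of running test-5 pods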