References:
GitHub project: https://github.com/heketi/heketi
MANAGING VOLUMES USING HEKETI: https://access.redhat.com/documentation/en-us/red_hat_gluster_storage/3.3/html/administration_guide/ch05s02
StorageClass: https://kubernetes.io/docs/concepts/storage/storage-classes/
StorageClass (Chinese): https://k8smeetup.github.io/docs/concepts/storage/storage-classes/
Dynamic Volume Provisioning: https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/
nfs-provisioner: https://github.com/kubernetes-incubator/external-storage/tree/master/nfs
Heketi is a framework that provides a RESTful API for managing GlusterFS volumes, making it easy for administrators to operate on GlusterFS.
Within a cluster, failure domains are distinguished by zone.
The Kubernetes and GlusterFS clusters have been deployed in advance; see:
GlusterFS: http://www.javashuo.com/article/p-hauqdlzn-w.html
Note: GlusterFS only needs to be installed and started; there is no need to form trusted storage pools.
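For completeness, a minimal sketch of preparing one GlusterFS node (assuming CentOS with the SIG gluster repository; package names may differ between releases):

# On each of glusterfs01/02/03: install and start glusterd only;
# do not run "gluster peer probe", heketi will form the pool itself
[root@glusterfs01 ~]# yum install -y centos-release-gluster
[root@glusterfs01 ~]# yum install -y glusterfs-server
[root@glusterfs01 ~]# systemctl enable glusterd
[root@glusterfs01 ~]# systemctl start glusterd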
Hostname    | IP            | Remark
kubenode1   | 172.30.200.21 |
kubenode2   | 172.30.200.22 |
kubenode3   | 172.30.200.23 |
heketi      | 172.30.200.80 | selinux disabled
glusterfs01 | 172.30.200.81 |
glusterfs02 | 172.30.200.82 |
glusterfs03 | 172.30.200.83 |
# Configure iptables; heketi serves its RESTful API on tcp/8080 by default
[root@heketi ~]# vim /etc/sysconfig/iptables
-A INPUT -p tcp -m state --state NEW -m tcp --dport 8080 -j ACCEPT
[root@heketi ~]# service iptables restart
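On hosts that run firewalld instead of the iptables service, an equivalent sketch would be:

[root@heketi ~]# firewall-cmd --permanent --add-port=8080/tcp
[root@heketi ~]# firewall-cmd --reload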
# Add the gluster yum repository; the stock repositories carry none of the related packages;
# heketi: the heketi service;
# heketi-client: the heketi client / command-line tool
[root@heketi ~]# yum install -y centos-release-gluster
[root@heketi ~]# yum install -y heketi heketi-client
# The inline # annotations below mark the settings that were modified (highlighted in red in the original post)
[root@heketi ~]# vim /etc/heketi/heketi.json
{
  # Default port: tcp/8080
  "_port_comment": "Heketi Server Port Number",
  "port": "8080",

  # Defaults to false (no authentication); enabled here
  "_use_auth": "Enable JWT authorization. Please enable for deployment",
  "use_auth": true,

  "_jwt": "Private keys for access",
  "jwt": {
    "_admin": "Admin has access to all APIs",
    "admin": {
      "key": "admin@123"
    },
    "_user": "User only has access to /volumes endpoint",
    "user": {
      "key": "user@123"
    }
  },

  "_glusterfs_comment": "GlusterFS Configuration",
  "glusterfs": {
    "_executor_comment": [
      "Execute plugin. Possible choices: mock, ssh",
      "mock: This setting is used for testing and development.",
      "      It will not send commands to any node.",
      "ssh:  This setting will notify Heketi to ssh to the nodes.",
      "      It will need the values in sshexec to be configured.",
      "kubernetes: Communicate with GlusterFS containers over",
      "            Kubernetes exec api."
    ],
    # mock: volumes created under this executor cannot be mounted (testing only);
    # kubernetes: use when GlusterFS itself is deployed by Kubernetes
    "executor": "ssh",

    "_sshexec_comment": "SSH username and private key file information",
    "sshexec": {
      "keyfile": "/etc/heketi/heketi_key",
      "user": "root",
      "port": "22",
      "fstab": "/etc/fstab"
    },

    "_kubeexec_comment": "Kubernetes configuration",
    "kubeexec": {
      "host": "https://kubernetes.host:8443",
      "cert": "/path/to/crt.file",
      "insecure": false,
      "user": "kubernetes username",
      "password": "password for kubernetes user",
      "namespace": "OpenShift project or Kubernetes namespace",
      "fstab": "Optional: Specify fstab file on node. Default is /etc/fstab"
    },

    "_db_comment": "Database file name",
    "db": "/var/lib/heketi/heketi.db",

    "_loglevel_comment": [
      "Set log level. Choices are:",
      "  none, critical, error, warning, info, debug",
      "Default is warning"
    ],
    # Ships set to debug; when unset the default is warning;
    # log output goes to /var/log/messages
    "loglevel": "warning"
  }
}
# With the ssh executor, the heketi server needs passwordless login to every node of the GlusterFS cluster;
# -t: key type;
# -q: quiet mode;
# -f: directory and name of the generated key; must match the "keyfile" value in the ssh executor section of heketi.json;
# -N: key passphrase, "" means empty
[root@heketi ~]# ssh-keygen -t rsa -q -f /etc/heketi/heketi_key -N ""
# The heketi service runs as the heketi user, which needs read access to the newly generated key, otherwise the service will not start
[root@heketi ~]# chown heketi:heketi /etc/heketi/heketi_key
# Distribute the public key;
# -i: specify the public key
[root@heketi ~]# ssh-copy-id -i /etc/heketi/heketi_key.pub root@172.30.200.81
[root@heketi ~]# ssh-copy-id -i /etc/heketi/heketi_key.pub root@172.30.200.82
[root@heketi ~]# ssh-copy-id -i /etc/heketi/heketi_key.pub root@172.30.200.83
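Before starting the service, it is worth confirming that passwordless login actually works with the generated key; a quick check (the gluster --version call is just an example command):

[root@heketi ~]# ssh -i /etc/heketi/heketi_key root@172.30.200.81 "gluster --version"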
# The systemd unit file installed by yum has one error:
# in /usr/lib/systemd/system/heketi.service, "-config=/etc/heketi/heketi.json" should be "--config=/etc/heketi/heketi.json";
# otherwise startup fails with "Error: unknown shorthand flag: 'c' in -config=/etc/heketi/heketi.json" and the service cannot start
[root@heketi ~]# systemctl enable heketi
[root@heketi ~]# systemctl restart heketi
[root@heketi ~]# systemctl status heketi
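The unit-file fix can also be applied non-interactively; a sketch using sed (reload systemd afterwards so the change takes effect):

[root@heketi ~]# sed -i 's| -config=| --config=|' /usr/lib/systemd/system/heketi.service
[root@heketi ~]# systemctl daemon-reload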
# Verify
[root@heketi ~]# curl http://localhost:8080/hello
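Since use_auth is enabled, the authenticated endpoints can be checked too. /hello itself needs no token, but a bare request to an authenticated endpoint such as /clusters should be rejected, while heketi-cli carrying the admin credentials should succeed (sketch):

[root@heketi ~]# curl -i http://localhost:8080/clusters        # expect 401 Unauthorized
[root@heketi ~]# heketi-cli --user admin --secret admin@123 cluster list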
# The GlusterFS cluster is assembled from the definitions in the topology.json file;
# the topology describes the hierarchy: clusters --> nodes --> node/devices --> hostnames/zone;
# node/hostnames "manage" takes the host IP and is the management channel; a hostname can only be used if the heketi server can reach the GlusterFS nodes by hostname;
# node/hostnames "storage" takes the host IP and is the data channel; it may differ from "manage";
# node/zone marks the failure domain of the node; heketi places replicas across failure domains to improve data availability, e.g. zone values can be assigned per rack to create rack-spanning failure domains;
# "devices" lists the block devices of each GlusterFS node (multiple disks are allowed); they must be raw devices without a filesystem
[root@heketi ~]# vim /etc/heketi/topology.json
{
  "clusters": [
    {
      "nodes": [
        {
          "node": {
            "hostnames": {
              "manage": [
                "172.30.200.81"
              ],
              "storage": [
                "172.30.200.81"
              ]
            },
            "zone": 1
          },
          "devices": [
            "/dev/sdb"
          ]
        },
        {
          "node": {
            "hostnames": {
              "manage": [
                "172.30.200.82"
              ],
              "storage": [
                "172.30.200.82"
              ]
            },
            "zone": 2
          },
          "devices": [
            "/dev/sdb"
          ]
        },
        {
          "node": {
            "hostnames": {
              "manage": [
                "172.30.200.83"
              ],
              "storage": [
                "172.30.200.83"
              ]
            },
            "zone": 3
          },
          "devices": [
            "/dev/sdb"
          ]
        }
      ]
    }
  ]
}
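Because the devices must be raw block devices with no filesystem, any leftover signatures on /dev/sdb should be cleared first, otherwise heketi will refuse to add the device. A destructive sketch (this wipes all data on /dev/sdb):

# Run on each GlusterFS node; verify with "lsblk -f" that no filesystem remains
[root@glusterfs01 ~]# wipefs -a /dev/sdb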
# The glusterd service must already be running on every GlusterFS node, but no trusted storage pool needs to be formed;
# heketi-cli can also add clusters, nodes, devices, volumes, etc. manually, layer by layer;
# "--server http://localhost:8080" can be omitted when heketi-cli runs on localhost;
# "--user admin --secret admin@123": authentication is enabled in heketi.json, so heketi-cli must carry the credentials, otherwise it fails with "Error: Invalid JWT token: Unknown user"
[root@heketi ~]# heketi-cli --server http://localhost:8080 --user admin --secret admin@123 topology load --json=/etc/heketi/topology.json
# Inspect the heketi topology; no volumes or bricks exist yet;
# "heketi-cli cluster info" shows cluster details;
# "heketi-cli node info" shows node details;
# "heketi-cli device info" shows device details
[root@heketi ~]# heketi-cli --user admin --secret admin@123 topology info
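At this point an end-to-end test can be run directly from heketi-cli, independent of Kubernetes; a sketch that creates and removes a 1G two-replica volume (the volume id passed to delete comes from the create output):

[root@heketi ~]# heketi-cli --user admin --secret admin@123 volume create --size=1 --replica=2
[root@heketi ~]# heketi-cli --user admin --secret admin@123 volume delete <VOLUME_ID>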
Kubernetes shared-storage provisioning modes:
Static: the cluster administrator creates PVs by hand, setting the properties of the backing storage in each PV definition;
Dynamic: the cluster administrator does not create PVs by hand; instead, a StorageClass describes the backing storage and labels it as a "Class". A PVC then names the class it wants, and the system automatically creates a matching PV and binds it to the PVC. A PVC may declare the class as "", which forbids dynamic provisioning for that claim (see the sketch after this list).
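To make the 'Class ""' case concrete, a minimal PVC sketch that explicitly opts out of dynamic provisioning (the names are illustrative):

[root@kubenode1 ~]# cat static-only-pvc.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: static-only-pvc
spec:
  # empty class: only pre-created (static) PVs can satisfy this claim
  storageClassName: ""
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi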
The overall flow of StorageClass-based dynamic provisioning is as follows:
The cluster administrator creates the storage class (StorageClass) in advance;
the user creates a persistent storage claim (PVC: PersistentVolumeClaim) that uses the storage class;
the claim tells the system that it needs a persistent volume (PV: PersistentVolume);
the system reads the storage class information;
based on that information, the system automatically creates the PV the PVC needs in the background;
the user creates a Pod that uses the PVC;
the application in the Pod persists data through the PVC;
and the PVC in turn uses the PV for the final persistence of the data.
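A convenient way to observe the automatic PV creation and binding live is to watch the PVC and PV objects while the claim is submitted (sketch):

[root@kubenode1 ~]# kubectl get pvc,pv --watch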
# provisioner: the storage provisioner, which varies with the backing storage;
# reclaimPolicy: defaults to "Delete", meaning that when the PVC is deleted, the corresponding PV and the backing volume, bricks (LVM), etc. are deleted with it; "Retain" keeps the data for manual handling;
# resturl: URL of the heketi API service;
# restauthenabled: optional, defaults to "false"; must be "true" when the heketi service has authentication enabled;
# restuser: optional; the user name to authenticate with when authentication is enabled;
# secretNamespace: optional; when authentication is enabled, may be set to the namespace that uses the persistent storage;
# secretName: optional; when authentication is enabled, the heketi password must be stored in a secret resource;
# clusterid: optional; the cluster id, or a list of cluster ids in the form "id1,id2";
# volumetype: optional; the volume type and its parameters; if no type is given, the provisioner decides; e.g. "volumetype: replicate:3" is a 3-replica replicate volume, "volumetype: disperse:4:2" is a disperse volume with 4 data and 2 redundancy bricks, and "volumetype: none" is a distribute volume
[root@kubenode1 ~]# mkdir -p heketi
[root@kubenode1 ~]# cd heketi/
[root@kubenode1 heketi]# vim gluster-heketi-storageclass.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: gluster-heketi-storageclass
provisioner: kubernetes.io/glusterfs
reclaimPolicy: Delete
parameters:
  resturl: "http://172.30.200.80:8080"
  restauthenabled: "true"
  restuser: "admin"
  secretNamespace: "default"
  secretName: "heketi-secret"
  volumetype: "replicate:2"

# Generate the secret resource; the "key" value must be converted to base64
[root@kubenode1 heketi]# echo -n "admin@123" | base64
# name/namespace must match what the storageclass resource references;
# the password secret must have the "kubernetes.io/glusterfs" type
[root@kubenode1 heketi]# cat heketi-secret.yaml
apiVersion: v1
kind: Secret
metadata:
  name: heketi-secret
  namespace: default
data:
  # base64 encoded password. E.g.: echo -n "mypassword" | base64
  key: YWRtaW5AMTIz
type: kubernetes.io/glusterfs
# Create the secret resource
[root@kubenode1 heketi]# kubectl create -f heketi-secret.yaml
# Create the storageclass resource;
# note: a storageclass resource is immutable once created; to change it, delete and recreate it
[root@kubenode1 heketi]# kubectl create -f gluster-heketi-storageclass.yaml
# Inspect the storageclass resource
[root@kubenode1 heketi]# kubectl describe storageclass gluster-heketi-storageclass
# Note the "storageClassName" correspondence
[root@kubenode1 heketi]# vim gluster-heketi-pvc.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: gluster-heketi-pvc
spec:
  storageClassName: gluster-heketi-storageclass
  # ReadWriteOnce (RWO): read-write, mountable by a single node only;
  # ReadOnlyMany (ROX): read-only, mountable by multiple nodes;
  # ReadWriteMany (RWX): read-write, mountable by multiple nodes
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      # mind the format; "GB" is not accepted here
      storage: 1Gi

# Create the pvc resource
[root@kubenode1 heketi]# kubectl create -f gluster-heketi-pvc.yaml
# Inspect the PVC; the status is "Bound";
# "Capacity" shows 2G because metadata is created along with the volume
[root@kubenode1 heketi]# kubectl describe pvc gluster-heketi-pvc
# Inspect the PV details; besides the capacity, the referenced storageclass, the status and the reclaim policy, it also shows the GlusterFS Endpoint and path
[root@kubenode1 heketi]# kubectl get pv
[root@kubenode1 heketi]# kubectl describe pv pvc-532cb8c3-cfc6-11e8-8fde-005056bfa8ba
# Inspect the endpoints resource; its name can be taken from the PV info and follows the fixed format glusterfs-dynamic-PVC_NAME;
# the endpoints resource carries the concrete addresses used when mounting the storage
[root@kubenode1 heketi]# kubectl describe endpoints glusterfs-dynamic-gluster-heketi-pvc
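The endpoints plus the path are exactly what a glusterfs mount needs, so the same volume can be mounted manually from any host with the glusterfs client installed; a sketch using the volume name from this walkthrough:

[root@kubenode1 ~]# mount -t glusterfs 172.30.200.81:vol_308342f1ffff3aea7ec6cc72f6d13cd7 /mnt
[root@kubenode1 ~]# umount /mnt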
# The volume and bricks have been created;
# the primary mount point (for communication) is on node glusterfs01, with the other two nodes as fallbacks;
# with two replicas, no brick is created on node glusterfs03
[root@heketi ~]# heketi-cli --user admin --secret admin@123 topology info
# Taking node glusterfs01 as the example
[root@glusterfs01 ~]# lsblk
[root@glusterfs01 ~]# df -Th
# Inspect the volume details: a 2-replica replicate volume;
# "vgscan" and "vgdisplay" can also be used to inspect the LVM volume group information
[root@glusterfs01 ~]# gluster volume list
[root@glusterfs01 ~]# gluster volume info vol_308342f1ffff3aea7ec6cc72f6d13cd7
# Have a Pod reference the volume; the volume type is "persistentVolumeClaim"
[root@kubenode1 heketi]# vim gluster-heketi-pod.yaml
kind: Pod
apiVersion: v1
metadata:
  name: gluster-heketi-pod
spec:
  containers:
  - name: gluster-heketi-container
    image: busybox
    command:
    - sleep
    - "3600"
    volumeMounts:
    - name: gluster-heketi-volume
      mountPath: "/pv-data"
      readOnly: false
  volumes:
  - name: gluster-heketi-volume
    persistentVolumeClaim:
      claimName: gluster-heketi-pvc

# Create the pod
[root@kubenode1 heketi]# kubectl create -f gluster-heketi-pod.yaml
# Create files in the mounted directory inside the container
[root@kubenode1 heketi]# kubectl exec -it gluster-heketi-pod /bin/sh
/ # cd /pv-data
/pv-data # echo "This is a file!" >> a.txt
/pv-data # echo "This is b file!" >> b.txt
/pv-data # ls
# On the GlusterFS node, look at the created files in the corresponding brick directory;
# the mount directory can be obtained with "df -Th" or "lsblk"
[root@glusterfs01 ~]# df -Th
[root@glusterfs01 ~]# cd /var/lib/heketi/mounts/vg_af339b60319a63a77b05ddbec1b21bbe/brick_d712f1543476c4198d3869c682cdaa9a/brick/
[root@glusterfs01 brick]# ls
[root@glusterfs01 brick]# cat a.txt
[root@glusterfs01 brick]# cat b.txt
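Since the volume is replicate:2, the same files should also exist in the brick directory of the second replica node; a quick check (the brick path differs per node and is again taken from df -Th, so the wildcards here are a sketch):

[root@glusterfs02 ~]# df -Th | grep brick
[root@glusterfs02 ~]# ls /var/lib/heketi/mounts/vg_*/brick_*/brick/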
# Delete the Pod application first, then the pvc
[root@kubenode1 heketi]# kubectl delete -f gluster-heketi-pod.yaml
[root@kubenode1 heketi]# kubectl delete -f gluster-heketi-pvc.yaml
# k8s resources
[root@kubenode1 heketi]# kubectl get pvc
[root@kubenode1 heketi]# kubectl get pv
[root@kubenode1 heketi]# kubectl get endpoints
# heketi
[root@heketi ~]# heketi-cli --user admin --secret admin@123 topology info
# GlusterFS node
[root@glusterfs01 ~]# lsblk
[root@glusterfs01 ~]# df -Th
[root@glusterfs01 ~]# gluster volume list