Deployment environment

Host        | IP           | k8s version | GlusterFS version | heketi version | heketi-client version
k8s-master1 | 192.168.10.1 | 1.20.0      | 9.5-1.el7         | heketi-8.0.0-1 | heketi-client-8.0.0-1
k8s-master2 | 192.168.10.2 | 1.20.0      | 9.5-1.el7         | heketi-8.0.0-1 | heketi-client-8.0.0-1
k8s-master3 | 192.168.10.3 | 1.20.0      | 9.5-1.el7         | heketi-8.0.0-1 | heketi-client-8.0.0-1

Prerequisite: every device used for GFS data storage must be a block device, not a directory, e.g. /dev/sdb or /dev/sdb1. Plan these devices in advance.
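
A quick pre-flight check (a minimal sketch; /dev/sdb is the device used throughout this walkthrough) to confirm the target is a raw block device with no leftover filesystem signatures:

# Run on each node; TYPE should be "disk" (or "part" for a partition like /dev/sdb1)
$ lsblk /dev/sdb
# No output (non-zero exit) means no filesystem signature, which is what heketi expects
$ blkid /dev/sdb
# Optional: wipe stale signatures so heketi can claim the device
$ wipefs -a /dev/sdb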

Deploy the GlusterFS cluster

hosts configuration

# /etc/hosts
192.168.10.1 k8s-master1
192.168.10.2 k8s-master2
192.168.10.3 k8s-master3

$ ntpdate time.windows.com # all three nodes must be time-synchronized

# Configure the yum repo (k8s-master1 through k8s-master3)
$ yum -y install centos-release-gluster9.noarch # repo for GlusterFS major release 9; v6 is incompatible with this version of k8s

# Install GlusterFS (k8s-master1 through k8s-master3)
$ yum -y install glusterfs glusterfs-fuse glusterfs-server
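
As a quick sanity check, confirm the installed version matches the environment table above (9.5-1.el7):

$ glusterfs --version | head -1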

Configure the service and the cluster

# Start the glusterd service (k8s-master1 through k8s-master3)
$ systemctl enable glusterd
$ systemctl start glusterd

# Form the cluster (join k8s-master1 through k8s-master3)
$ gluster peer probe k8s-master1
peer probe: success. Probe on localhost not needed
$ gluster peer probe k8s-master2
peer probe: success.
$ gluster peer probe k8s-master3
peer probe: success.

$ gluster peer status
Number of Peers: 2

Hostname: k8s-master2
Uuid: c08a504c-4a92-403b-ae86-48e51b1efd05
State: Peer in Cluster (Connected)

Hostname: k8s-master3
Uuid: 9bbc907a-3f60-49fd-9911-9cfed9bb0844
State: Peer in Cluster (Connected)

Deploy the GlusterFS management client heketi

Node initialization

# Create a heketi user on every node; the service will not start as root
adduser heketi
passwd heketi

# Add heketi to sudoers
# Make /etc/sudoers writable
chmod u+w /etc/sudoers
# Edit the sudoers file
# vim /etc/sudoers
# Below the line "root ALL=(ALL) ALL", add:
heketi ALL=(ALL) NOPASSWD: ALL
# Restore /etc/sudoers permissions
chmod u-w /etc/sudoers

# Passwordless SSH from the service node to all nodes
# Switch to the heketi user
# The following is run only on k8s-master1
su - heketi
ssh-keygen -t rsa -b 4096
ssh-copy-id -i heketi@k8s-master1
ssh-copy-id -i heketi@k8s-master2
ssh-copy-id -i heketi@k8s-master3
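
Before pointing heketi at these nodes, it is worth confirming that both the SSH keys and the NOPASSWD sudo rule work; a minimal check run from k8s-master1 as the heketi user:

# Each iteration should print "root" without prompting for a password
$ for h in k8s-master1 k8s-master2 k8s-master3; do ssh heketi@$h sudo whoami; done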

Install heketi

# Install heketi-client on all nodes
yum install -y heketi-client

# Install heketi on the service node (k8s-master1)
yum install -y heketi

# Configure the service
cd /etc/heketi/
cp heketi.json heketi.json_bak

heketi.json:

{
  "_port_comment": "Heketi Server Port Number",
  "port": "18080",

  "_use_auth": "Enable JWT authorization. Please enable for deployment",
  "use_auth": true,

  "_jwt": "Private keys for access",
  "jwt": {
    "_admin": "Admin has access to all APIs",
    "admin": {
      "key": "admin"   ## must match restuser/restuserkey in the StorageClass below
    },
    "_user": "User only has access to /volumes endpoint",
    "user": {
      "key": "admin"
    }
  },

  "_glusterfs_comment": "GlusterFS Configuration",
  "glusterfs": {
    "_executor_comment": [
      "Execute plugin. Possible choices: mock, ssh",
      "mock: This setting is used for testing and development.",
      "      It will not send commands to any node.",
      "ssh:  This setting will notify Heketi to ssh to the nodes.",
      "      It will need the values in sshexec to be configured.",
      "kubernetes: Communicate with GlusterFS containers over",
      "            Kubernetes exec api."
    ],
    "executor": "ssh",

    "_sshexec_comment": "SSH username and private key file information",
    "sshexec": {
      "keyfile": "/home/heketi/.ssh/id_rsa",
      "user": "heketi",
      "port": "22",
      "sudo": true,
      "fstab": "/etc/fstab"
    },

    "_kubeexec_comment": "Kubernetes configuration",
    "kubeexec": {
      "host": "https://kubernetes.host:8443",
      "cert": "/path/to/crt.file",
      "insecure": false,
      "user": "kubernetes username",
      "password": "password for kubernetes user",
      "namespace": "OpenShift project or Kubernetes namespace",
      "fstab": "Optional: Specify fstab file on node. Default is /etc/fstab"
    },

    "_db_comment": "Database file name",
    "db": "/var/lib/heketi/heketi.db",

    "_loglevel_comment": [
      "Set log level. Choices are:",
      "  none, critical, error, warning, info, debug",
      "Default is warning"
    ],
    "loglevel": "debug"
  }
}

# Enable at boot
systemctl enable heketi
# Start the service
systemctl start heketi
# Check status
systemctl status heketi
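
heketi exposes an unauthenticated /hello health endpoint; together with an authenticated heketi-cli call, it confirms both the service and the JWT key configured above:

# Should return a short hello message from heketi
$ curl http://k8s-master1:18080/hello
# Authenticated check using the admin key from heketi.json
$ heketi-cli --user admin --secret admin --server http://k8s-master1:18080 cluster list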

Initialize the GFS replicated-volume cluster

Replicated volume: every server that backs the volume stores an identical copy of the data, similar to RAID 1.

# Create the cluster
$ heketi-cli --user admin --secret admin --server http://k8s-master1:18080 --json cluster create
{"id":"60d0c41c0b232906f90b528fbb58400a","nodes":[],"volumes":[],"block":true,"file":true,"blockvolumes":[]}

# Add the nodes
$ heketi-cli --user admin --secret admin --server http://k8s-master1:18080 --json node add --cluster "60d0c41c0b232906f90b528fbb58400a" --management-host-name 192.168.10.1 --storage-host-name 192.168.10.1 --zone 1
{"zone":1,"hostnames":{"manage":["192.168.10.1"],"storage":["192.168.10.1"]},"cluster":"60d0c41c0b232906f90b528fbb58400a","id":"918f73f5e17662abbcd0552445a53766","state":"online","devices":[]}
$ heketi-cli --user admin --secret admin --server http://k8s-master1:18080 --json node add --cluster "60d0c41c0b232906f90b528fbb58400a" --management-host-name 192.168.10.2 --storage-host-name 192.168.10.2 --zone 1
{"zone":1,"hostnames":{"manage":["192.168.10.2"],"storage":["192.168.10.2"]},"cluster":"60d0c41c0b232906f90b528fbb58400a","id":"46e7b325ce20528bf8160d8c65a3c652","state":"online","devices":[]}
$ heketi-cli --user admin --secret admin --server http://k8s-master1:18080 --json node add --cluster "60d0c41c0b232906f90b528fbb58400a" --management-host-name 192.168.10.3 --storage-host-name 192.168.10.3 --zone 1
{"zone":1,"hostnames":{"manage":["192.168.10.3"],"storage":["192.168.10.3"]},"cluster":"60d0c41c0b232906f90b528fbb58400a","id":"524481c119e6295117979bb80046f9c8","state":"online","devices":[]}

# Attach the storage devices
$ heketi-cli --user admin --secret admin --server http://k8s-master1:18080 --json node list
Id:46e7b325ce20528bf8160d8c65a3c652 Cluster:60d0c41c0b232906f90b528fbb58400a
Id:524481c119e6295117979bb80046f9c8 Cluster:60d0c41c0b232906f90b528fbb58400a
Id:918f73f5e17662abbcd0552445a53766 Cluster:60d0c41c0b232906f90b528fbb58400a

## sdb is a block device, not a directory
$ heketi-cli --user admin --secret admin --server http://k8s-master1:18080 device add --name "/dev/sdb" --node 46e7b325ce20528bf8160d8c65a3c652
$ heketi-cli --user admin --secret admin --server http://k8s-master1:18080 device add --name "/dev/sdb" --node 524481c119e6295117979bb80046f9c8
$ heketi-cli --user admin --secret admin --server http://k8s-master1:18080 device add --name "/dev/sdb" --node 918f73f5e17662abbcd0552445a53766
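
Since every node contributes the same /dev/sdb, the three device add calls can equivalently be written as a loop over the node list (a sketch of the same operation):

$ heketi-cli --user admin --secret admin --server http://k8s-master1:18080 node list \
    | awk '{print $1}' | cut -d: -f2 \
    | while read node_id; do
        heketi-cli --user admin --secret admin --server http://k8s-master1:18080 \
          device add --name "/dev/sdb" --node "$node_id"
      done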

# View the cluster topology
$ heketi-cli --user admin --secret admin --server http://k8s-master1:18080 topology info

Cluster Id: 60d0c41c0b232906f90b528fbb58400a

    File:  true
    Block: true

    Volumes:

        Name: vol_af2533d49e30b106c7c1181807c93ad1
        Size: 10
        Id: af2533d49e30b106c7c1181807c93ad1
        Cluster Id: 60d0c41c0b232906f90b528fbb58400a
        Mount: 192.168.10.2:vol_af2533d49e30b106c7c1181807c93ad1
        Mount Options: backup-volfile-servers=192.168.10.3,192.168.10.1
        Durability Type: replicate
        Replica: 3
        Snapshot: Enabled
        Snapshot Factor: 1.00

        Bricks:
            Id: 3dbf6dd1f57f119054c583489a29242d
            Path: /var/lib/heketi/mounts/vg_a161942c4ea242e92cb753a18abc0483/brick_3dbf6dd1f57f119054c583489a29242d/brick
            Size (GiB): 10
            Node: 524481c119e6295117979bb80046f9c8
            Device: a161942c4ea242e92cb753a18abc0483

            Id: 90dc64eea016285370a4df707f235371
            Path: /var/lib/heketi/mounts/vg_04041ffb900f9e8a2d73cdd9a32ed62a/brick_90dc64eea016285370a4df707f235371/brick
            Size (GiB): 10
            Node: 46e7b325ce20528bf8160d8c65a3c652
            Device: 04041ffb900f9e8a2d73cdd9a32ed62a

            Id: bf602a9741293e6f1cd5102a385f87d5
            Path: /var/lib/heketi/mounts/vg_c0307d49fd4e8b3a5bd2b4ed48c4dcd9/brick_bf602a9741293e6f1cd5102a385f87d5/brick
            Size (GiB): 10
            Node: 918f73f5e17662abbcd0552445a53766
            Device: c0307d49fd4e8b3a5bd2b4ed48c4dcd9

    Nodes:

        Node Id: 46e7b325ce20528bf8160d8c65a3c652
        State: online
        Cluster Id: 60d0c41c0b232906f90b528fbb58400a
        Zone: 1
        Management Hostnames: 192.168.10.2
        Storage Hostnames: 192.168.10.2
        Devices:
            Id:04041ffb900f9e8a2d73cdd9a32ed62a   Name:/dev/sdb   State:online   Size (GiB):99   Used (GiB):10   Free (GiB):89
                Bricks:
                    Id:90dc64eea016285370a4df707f235371   Size (GiB):10   Path: /var/lib/heketi/mounts/vg_04041ffb900f9e8a2d73cdd9a32ed62a/brick_90dc64eea016285370a4df707f235371/brick

        Node Id: 524481c119e6295117979bb80046f9c8
        State: online
        Cluster Id: 60d0c41c0b232906f90b528fbb58400a
        Zone: 1
        Management Hostnames: 192.168.10.3
        Storage Hostnames: 192.168.10.3
        Devices:
            Id:a161942c4ea242e92cb753a18abc0483   Name:/dev/sdb   State:online   Size (GiB):99   Used (GiB):10   Free (GiB):89
                Bricks:
                    Id:3dbf6dd1f57f119054c583489a29242d   Size (GiB):10   Path: /var/lib/heketi/mounts/vg_a161942c4ea242e92cb753a18abc0483/brick_3dbf6dd1f57f119054c583489a29242d/brick

        Node Id: 918f73f5e17662abbcd0552445a53766
        State: online
        Cluster Id: 60d0c41c0b232906f90b528fbb58400a
        Zone: 1
        Management Hostnames: 192.168.10.1
        Storage Hostnames: 192.168.10.1
        Devices:
            Id:c0307d49fd4e8b3a5bd2b4ed48c4dcd9   Name:/dev/sdb   State:online   Size (GiB):99   Used (GiB):10   Free (GiB):89
                Bricks:
                    Id:bf602a9741293e6f1cd5102a385f87d5   Size (GiB):10   Path: /var/lib/heketi/mounts/vg_c0307d49fd4e8b3a5bd2b4ed48c4dcd9/brick_bf602a9741293e6f1cd5102a385f87d5/brick
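
The same layout can be cross-checked from the GlusterFS side on any node; the heketi-managed volume should show up as a replica-3 volume:

$ gluster volume list
$ gluster volume info vol_af2533d49e30b106c7c1181807c93ad1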

Using GlusterFS in the Kubernetes cluster

StorageClass

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: gluster-heketi-storageclass # StorageClass name
provisioner: kubernetes.io/glusterfs
reclaimPolicy: Retain
parameters:
  resturl: "http://192.168.10.1:18080" # heketi server address
  restauthenabled: "true"
  restuser: "admin" # must match heketi.json above
  restuserkey: "admin" # must match heketi.json above
  gidMin: "2000"
  #secretName: "heketi-secret"
  #secretNamespace: "default"
  #volumetype: "none"
  volumetype: "replicate:3" # three replicas here
  clusterid: "60d0c41c0b232906f90b528fbb58400a" # shown by: heketi-cli --user admin --secret admin --server http://k8s-master1:18080 --json node list
# Allow volume expansion
allowVolumeExpansion: true
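
The commented-out secretName/secretNamespace fields point at the more secure alternative to a plaintext restuserkey: store the admin key in a Secret of type kubernetes.io/glusterfs (whose data key must be named key) and reference it from the StorageClass. A hedged sketch:

# Create the secret that secretName/secretNamespace would reference
$ kubectl create secret generic heketi-secret \
    --type="kubernetes.io/glusterfs" \
    --from-literal=key=admin \
    --namespace=default

With the secret in place, drop restuserkey and uncomment secretName and secretNamespace in the parameters above.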

Test

kind: Deployment
apiVersion: apps/v1
metadata:
  name: demo-mode3-nginx
  labels:
    name: demo-mode3-nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      name: demo-mode3-nginx
  template:
    metadata:
      labels:
        name: demo-mode3-nginx
    spec:
      containers:
        - name: nginx
          image: nginx
          ports:
            - containerPort: 80
          volumeMounts:
            - name: demo-mode3-nginx-vol
              mountPath: "/usr/share/nginx/html"
      volumes:
        - name: demo-mode3-nginx-vol
          persistentVolumeClaim:
            claimName: glusterfs-vol-pvc02
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: glusterfs-vol-pvc02
  namespace: default
spec:
  storageClassName: gluster-heketi-storageclass # reference the StorageClass by name
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 10Gi
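
Save the two manifests above to a file (demo-mode3-nginx.yaml is an assumed name) and apply them; the PVC should trigger heketi to provision a 10Gi replica-3 volume:

$ kubectl apply -f demo-mode3-nginx.yaml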

$ kubectl get pvc,pv | grep glusterfs-vol-pvc02
persistentvolumeclaim/glusterfs-vol-pvc02   Bound   pvc-7623c393-0f56-40cc-ae86-ab71c9900895   10Gi   RWX   gluster-heketi-storageclass   11m
persistentvolume/pvc-7623c393-0f56-40cc-ae86-ab71c9900895   10Gi   RWX   Retain   Bound   default/glusterfs-vol-pvc02   gluster-heketi-storageclass   9m50s

$ kubectl get po | grep demo-mode3-nginx
demo-mode3-nginx-68df46746d-qhh4g   1/1   Running   0   12m

If the PVC is Bound and the Pod is Running, i.e. neither is stuck in Pending, everything is working.
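
As a final end-to-end check (a sketch reusing the Pod name from the output above), write a file through the GlusterFS-backed mount and read it back:

$ kubectl exec demo-mode3-nginx-68df46746d-qhh4g -- sh -c 'echo hello-glusterfs > /usr/share/nginx/html/index.html'
$ kubectl exec demo-mode3-nginx-68df46746d-qhh4g -- cat /usr/share/nginx/html/index.html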
