Ceph 文件系统的名称是 CephFS ,它是一个 POSIX 兼容的分布式文件系统,并使用 Ceph RADOS 存储数据 。要实现 Ceph 文件系统,需要一个正常运行的 Ceph 存储集群,并且至少包含一个 Ceph 元数据服务器( Ceph Metadata Server, MDS) 。
客户端可以通过两种方式使用 Ceph 文件系统:使用本地内核驱动程序挂载 CephFS ,或者使用 Ceph FUSE。
[root@node140 mds]# ceph -s
cluster:
id: 58a12719-a5ed-4f95-b312-6efd6e34e558
health: HEALTH_OK
services:
mon: 2 daemons, quorum node140,node142 (age 22h)
mgr: admin(active, since 6d), standbys: node140
osd: 16 osds: 16 up (since 17h), 16 in (since 3d)
data:
pools: 5 pools, 768 pgs
objects: 2.61k objects, 9.8 GiB
usage: 47 GiB used, 8.7 TiB / 8.7 TiB avail
pgs: 768 active+clean
[root@node140 ceph]# mkdir /var/lib/ceph/mds/ceph-node140 -pv
[root@node140 ceph]# ceph-authtool --create-keyring /var/lib/ceph/bootstrap-mds/ceph.keyring --gen-key -n client.bootstrap-mds
[root@node140 ceph]# ceph auth add client.bootstrap-mds mon 'allow profile bootstrap-mds' -i /var/lib/ceph/bootstrap-mds/ceph.keyring
[root@node140 ceph]# touch /root/ceph.bootstrap-mds.keyring
[root@node140 ceph]# ceph-authtool --import-keyring /var/lib/ceph/bootstrap-mds/ceph.keyring ceph.bootstrap-mds.keyring
[root@node140 ceph]# ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/ceph.keyring auth get-or-create mds.node140 osd 'allow rwx' mds 'allow' mon 'allow profile mds' -o /var/lib/ceph/mds/ceph-node140/keyring
[root@node140 ceph]# chown -R ceph.ceph /var/lib/ceph/mds/ceph-node140
vim /etc/ceph/ceph.conf
[mds.node140]
host = node140
[root@node140 mds]# systemctl enable ceph-mds@node140
[root@node140 mds]# systemctl start ceph-mds@node140
[root@node140 mds]# systemctl status ceph-mds@node140
[root@node141 ceph]# vim /etc/ceph/ceph.conf
[mds.node140]
host = node140
[mds.node141]
host = node141
[root@node140 ceph]# scp /var/lib/ceph/bootstrap-mds/ceph.keyring node141:/etc/ceph/
[root@node140 ceph]# scp /root/ceph.bootstrap-mds.keyring node141:/etc/ceph/
[root@node141 ceph]# mkdir /var/lib/ceph/mds/ceph-node141/ -pv
[root@node141 ceph]# cp /etc/ceph/ceph.keyring /var/lib/ceph/bootstrap-mds/
[root@node141 ceph]# ceph auth add client.bootstrap-mds mon 'allow profile bootstrap-mds' -i /var/lib/ceph/bootstrap-mds/ceph.keyring
[root@node141 ceph]# ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/ceph.keyring auth get-or-create mds.node141 osd 'allow rwx' mds 'allow' mon 'allow profile mds' -o /var/lib/ceph/mds/ceph-node141/keyring
[root@node141 ceph]# systemctl start ceph-mds@node141
[root@node141 ceph]# systemctl status ceph-mds@node141
[root@node141 ceph]# systemctl enable ceph-mds@node141
[root@node140 mds]# ceph osd pool create cephfs_data 128
pool 'cephfs_data' created
[root@node140 mds]# ceph osd pool create cephfs_metadata 128
pool 'cephfs_metadata' created
[root@node140 mds]# ceph fs new cephfs cephfs_metadata cephfs_data
new fs with metadata pool 7 and data pool 6
[root@node140 mds]# ceph fs ls
name: cephfs, metadata pool: cephfs_metadata, data pools: [cephfs_data ]
[root@node140 mds]# ceph mds stat
cephfs:1 {0=node140=up:active}
[root@node140 ceph-node140]# cat /etc/ceph/ceph.client.admin.keyring
[client.admin]
key = AQB9w2BdnggFIBAA7SR+7cO/PtZl9PTlriBL1A==
caps mds = "allow "
caps mgr = "allow "
caps mon = "allow "
caps osd = "allow "
[root@docker38 ceph]# vim admin.key #复制 ceph.client.admin.keyring 中的 key 内容到客户端
[root@docker38 ceph]# chmod 600 admin.key
[root@docker38 ceph]# mount -t ceph node140:6789:/ /mnt -o name=admin,secret=AQB9w2BdnggFIBAA7SR+7cO/PtZl9PTlriBL1A==
[root@docker38 ~]# vim /etc/rc.local
mount -t ceph node140:6789:/ /mnt -o name=admin,secret=AQB9w2BdnggFIBAA7SR+7cO/PtZl9PTlriBL1A==
如果开机不自动执行 /etc/rc.local
[root@docker38 ~]# chmod +x /etc/rc.d/rc.local
[root@docker38 ceph]# yum -y install ceph-fuse
[root@docker38 ~]# mkdir /ceph/cephfs -pv
[root@node140 ceph]# scp ceph.client.admin.keyring root@10.10.204.38:/etc/ceph/
[root@docker38 ~]# ceph-fuse -m node140:6789 /ceph/cephfs/
[root@docker38 ~]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/centos-root 46G 24G 22G 54% /
devtmpfs 4.8G 0 4.8G 0% /dev
tmpfs 4.9G 0 4.9G 0% /dev/shm
tmpfs 4.9G 8.9M 4.8G 1% /run
tmpfs 4.9G 0 4.9G 0% /sys/fs/cgroup
/dev/sda1 1014M 255M 760M 26% /boot
10.10.202.140:6789:/ 2.8T 0 2.8T 0% /mnt
tmpfs 984M 0 984M 0% /run/user/0
ceph-fuse 2.8T 0 2.8T 0% /ceph/cephfs
[root@docker38 ~]# vim /etc/rc.local
ceph-fuse -m node140:6789 /ceph/cephfs/
免责声明:本站发布的内容(图片、视频和文字)以原创、转载和分享为主,文章观点不代表本网站立场,如果涉及侵权请联系站长邮箱:is@yisu.com进行举报,并提供相关证据,一经查实,将立刻删除涉嫌侵权内容。