
Reference:
https://docs.ceph.com/en/squid/rbd/rados-rbd-cmds/
- 1. Create a storage pool
[root@ceph141 ~]# ceph osd pool create violet 8 8 --autoscale_mode off --size 3
pool 'violet' created
[root@ceph141 ~]#
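Optionally, sanity-check the pool parameters set above before moving on (both are standard ceph CLI queries):
# verify replication factor and PG count of the new pool
ceph osd pool get violet size
ceph osd pool get violet pg_num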
- 2. Declare the pool as an rbd application type
[root@ceph141 ~]# ceph -s
  cluster:
    id:     11e66474-0e02-11f0-82d6-4dcae3d59070
    health: HEALTH_WARN
            1 pool(s) do not have an application enabled

  services:
    mon: 3 daemons, quorum ceph141,ceph142,ceph143 (age 17h)
    mgr: ceph141.mbakds(active, since 17h), standbys: ceph142.qgifwo
    osd: 9 osds: 9 up (since 17h), 9 in (since 17h)

  data:
    pools:   2 pools, 9 pgs
    objects: 2 objects, 449 KiB
    usage:   652 MiB used, 5.3 TiB / 5.3 TiB avail
    pgs:     9 active+clean
[root@ceph141 ~]#
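The HEALTH_WARN above is expected at this point: the new pool carries no application tag yet. If it is unclear which pool triggers the warning, the detail view names it:
# identify the pool behind the warning
ceph health detail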
[root@ceph141 ~]# ceph osd pool ls detail
pool 1 '.mgr' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 1 pgp_num 1 autoscale_mode on last_change 21 flags hashpspool stripe_width 0 pg_num_max 32 pg_num_min 1 application mgr read_balance_score 9.09
pool 8 'violet' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 8 pgp_num 8 autoscale_mode off last_change 346 flags hashpspool stripe_width 0 read_balance_score 3.38
[root@ceph141 ~]#
[root@ceph141 ~]# rbd pool init violet
[root@ceph141 ~]#
[root@ceph141 ~]# ceph osd pool ls detail | grep application
pool 1 '.mgr' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 1 pgp_num 1 autoscale_mode on last_change 21 flags hashpspool stripe_width 0 pg_num_max 32 pg_num_min 1 application mgr read_balance_score 9.09
pool 8 'violet' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 8 pgp_num 8 autoscale_mode off last_change 349 flags hashpspool,selfmanaged_snaps stripe_width 0 application rbd read_balance_score 3.38
[root@ceph141 ~]#
[root@ceph141 ~]# ceph -s
  cluster:
    id:     11e66474-0e02-11f0-82d6-4dcae3d59070
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum ceph141,ceph142,ceph143 (age 17h)
    mgr: ceph141.mbakds(active, since 17h), standbys: ceph142.qgifwo
    osd: 9 osds: 9 up (since 17h), 9 in (since 17h)

  data:
    pools:   2 pools, 9 pgs
    objects: 3 objects, 449 KiB
    usage:   652 MiB used, 5.3 TiB / 5.3 TiB avail
    pgs:     9 active+clean
[root@ceph141 ~]#
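Note: rbd pool init both tags the pool with the rbd application (visible in the ls detail output above) and performs RBD-specific initialization. The tagging half alone could also be done manually:
# manual alternative to the tagging half of 'rbd pool init'
ceph osd pool application enable violet rbd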
- 3. Create a block device
[root@ceph141 ~]# rbd create -s 2G violet/linux
[root@ceph141 ~]#
- 4. List block devices
[root@ceph141 ~]# rbd ls violet
linux
[root@ceph141 ~]#
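For a long listing that includes each image's size and format, rbd ls takes -l:
# long listing: name, size, format per image
rbd ls -l violet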
- 5. Show block device details
[root@ceph141 ~]# rbd info violet/linux
rbd image 'linux':
size 2 GiB in 512 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: d68f96768e8f
block_name_prefix: rbd_data.d68f96768e8f
format: 2
features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
op_features:
flags:
create_timestamp: Tue Apr 1 11:25:38 2025
access_timestamp: Tue Apr 1 11:25:38 2025
modify_timestamp: Tue Apr 1 11:25:38 2025
[root@ceph141 ~]#
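RBD images are thin-provisioned, so the 2 GiB above is an upper bound rather than allocated space; rbd du shows provisioned versus actually used capacity:
# provisioned vs. used space for the image
rbd du violet/linux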
- 6. Grow a block device
[root@ceph141 ~]# rbd resize -s 4G violet/linux
Resizing image: 100% complete...done.
[root@ceph141 ~]#
[root@ceph141 ~]# rbd info violet/linux
rbd image 'linux':
size 4 GiB in 1024 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: d68f96768e8f
block_name_prefix: rbd_data.d68f96768e8f
format: 2
features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
op_features:
flags:
create_timestamp: Tue Apr 1 11:25:38 2025
access_timestamp: Tue Apr 1 11:25:38 2025
modify_timestamp: Tue Apr 1 11:25:38 2025
[root@ceph141 ~]#
- 7. Shrink a block device
[root@ceph141 ~]# rbd resize -s 1G violet/linux --allow-shrink
Resizing image: 100% complete...done.
[root@ceph141 ~]#
[root@ceph141 ~]# rbd info violet/linux
rbd image 'linux':
size 1 GiB in 256 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: d68f96768e8f
block_name_prefix: rbd_data.d68f96768e8f
format: 2
features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
op_features:
flags:
create_timestamp: Tue Apr 1 11:25:38 2025
access_timestamp: Tue Apr 1 11:25:38 2025
modify_timestamp: Tue Apr 1 11:25:38 2025
[root@ceph141 ~]#
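Caution: shrinking below the size of the data or filesystem on the image destroys data, which is why --allow-shrink is required. Before shrinking an image that may be in use, it is worth checking for active clients first:
# list watchers (clients that currently have the image open/mapped)
rbd status violet/linux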
- 8. Rename a block device
[root@ceph141 ~]# rbd ls violet
linux
[root@ceph141 ~]#
[root@ceph141 ~]# rbd rename -p violet linux LINUX
[root@ceph141 ~]# rbd ls violet
LINUX
[root@ceph141 ~]# rbd info violet/LINUX
rbd image 'LINUX':
size 1 GiB in 256 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: d68f96768e8f
block_name_prefix: rbd_data.d68f96768e8f
format: 2
features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
op_features:
flags:
create_timestamp: Tue Apr 1 11:25:38 2025
access_timestamp: Tue Apr 1 11:25:38 2025
modify_timestamp: Tue Apr 1 11:25:38 2025
[root@ceph141 ~]#
- 9. Delete a block device
[root@ceph141 ~]# rbd rm violet/LINUX
Removing image: 100% complete...done.
[root@ceph141 ~]#
[root@ceph141 ~]# rbd ls violet
[root@ceph141 ~]#
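rbd rm is immediate and irreversible. A softer alternative is the RBD trash, from which images can still be restored until they are purged (a sketch; <image-id> is the id reported by the trash listing):
# move to trash instead of deleting outright
rbd trash mv violet/LINUX
# list trashed images and their ids
rbd trash ls violet
# restore by id, or purge permanently
rbd trash restore -p violet <image-id>
rbd trash rm -p violet <image-id>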
Mapping and mounting RBD devices on an Ubuntu client
- 1. Create an RBD device on the Ceph cluster
[root@ceph141 ~]# rbd create -s 4G violet/prometheus-server
[root@ceph141 ~]# rbd info violet/prometheus-server
rbd image 'prometheus-server':
size 4 GiB in 1024 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: d6f5a501cf29
block_name_prefix: rbd_data.d6f5a501cf29
format: 2
features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
op_features:
flags:
create_timestamp: Tue Apr 1 11:37:28 2025
access_timestamp: Tue Apr 1 11:37:28 2025
modify_timestamp: Tue Apr 1 11:37:28 2025
[root@ceph141 ~]#
- 2. Map the RBD device on the Ubuntu client and format it as ext4
2.1 Install the ceph-common package
[root@prometheus-server31 ~]# apt -y install ceph-common
[root@prometheus-server31 ~]# ceph --version
ceph version 17.2.7 (b12291d110049b2f35e32e0de30d70e9a4c060d2) quincy (stable)
[root@prometheus-server31 ~]#
2.2 Copy the Ceph cluster config and authentication files to the client
[root@ceph141 ~]# scp /etc/ceph/{ceph.conf,ceph.client.admin.keyring} 10.0.0.31:/etc/ceph
2.3 Map the RBD device on the client
[root@prometheus-server31 ~]# rbd map violet/prometheus-server
/dev/rbd0
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# fdisk -l /dev/rbd0
Disk /dev/rbd0: 4 GiB, 4294967296 bytes, 8388608 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 65536 bytes / 65536 bytes
[root@prometheus-server31 ~]#
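rbd showmapped lists every image currently mapped on this client, with its pool, image name, and device node:
# show all rbd mappings on this host
rbd showmapped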
2.4 Format the device with an ext4 filesystem
[root@prometheus-server31 ~]# mkfs.ext4 /dev/rbd0
mke2fs 1.46.5 (30-Dec-2021)
Discarding device blocks: done
Creating filesystem with 1048576 4k blocks and 262144 inodes
Filesystem UUID: 96976f6e-fef9-437a-9d4f-6c1490ee0426
Superblock backups stored on blocks:
32768, 98304, 163840, 229376, 294912, 819200, 884736
Allocating group tables: done
Writing inode tables: done
Creating journal (16384 blocks): done
Writing superblocks and filesystem accounting information: done
[root@prometheus-server31 ~]#
2.5 Mount the block device
[root@prometheus-server31 ~]# mount /dev/rbd0 /mnt/
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# df -h | grep mnt
/dev/rbd0 3.9G 24K 3.7G 1% /mnt
[root@prometheus-server31 ~]#
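The mapping and mount above do not survive a reboot. A common approach (a sketch, assuming the rbdmap service shipped with ceph-common; see rbdmap(8) for the exact options) is to register the image in /etc/ceph/rbdmap and mount via the stable udev symlink:
# /etc/ceph/rbdmap -- images to map at boot (admin credentials assumed here)
violet/prometheus-server  id=admin,keyring=/etc/ceph/ceph.client.admin.keyring
# /etc/fstab -- noauto lets rbdmap mount it once the device is mapped
/dev/rbd/violet/prometheus-server  /mnt  ext4  noauto,_netdev  0 0
# enable mapping/mounting at boot
systemctl enable rbdmap.service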
2.6 Write test data
[root@prometheus-server31 ~]# cp grafana-enterprise_9.5.21_amd64.deb violet-install-prometheus-server-v2.53.4.tar.gz /mnt/
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# ll -h /mnt/
total 184M
drwxr-xr-x 3 root root 4.0K Apr 1 11:46 ./
drwxr-xr-x 21 root root 4.0K Apr 1 11:46 ../
-rw-r--r-- 1 root root 85M Apr 1 11:46 grafana-enterprise_9.5.21_amd64.deb
drwx------ 2 root root 16K Apr 1 11:44 lost+found/
-rw-r--r-- 1 root root 99M Apr 1 11:46 violet-install-prometheus-server-v2.53.4.tar.gz
[root@prometheus-server31 ~]#
- 3. Map another RBD device on the Ubuntu client and format it as XFS
3.1 Create the block device on the server side
[root@ceph141 ~]# rbd create -s 8G violet/node-exporter
[root@ceph141 ~]#
[root@ceph141 ~]# rbd info violet/node-exporter
rbd image 'node-exporter':
size 8 GiB in 2048 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: d71ff8cf211d
block_name_prefix: rbd_data.d71ff8cf211d
format: 2
features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
op_features:
flags:
create_timestamp: Tue Apr 1 11:48:55 2025
access_timestamp: Tue Apr 1 11:48:55 2025
modify_timestamp: Tue Apr 1 11:48:55 2025
[root@ceph141 ~]#
3.2 Map it on the client
[root@prometheus-server31 ~]# rbd map violet/node-exporter
/dev/rbd1
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# fdisk -l /dev/rbd1
Disk /dev/rbd1: 8 GiB, 8589934592 bytes, 16777216 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 65536 bytes / 65536 bytes
[root@prometheus-server31 ~]#
3.3 Format the block device
[root@prometheus-server31 ~]# mkfs.xfs /dev/rbd1
meta-data=/dev/rbd1 isize=512 agcount=8, agsize=262144 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=1, sparse=1, rmapbt=0
= reflink=1 bigtime=0 inobtcount=0
data = bsize=4096 blocks=2097152, imaxpct=25
= sunit=16 swidth=16 blks
naming =version 2 bsize=4096 ascii-ci=0, ftype=1
log =internal log bsize=4096 blocks=2560, version=2
= sectsz=512 sunit=16 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
Discarding blocks...Done.
[root@prometheus-server31 ~]#
3.4 Mount the block device
[root@prometheus-server31 ~]# mount /dev/rbd1 /opt/
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# df -h | grep opt
/dev/rbd1 8.0G 90M 8.0G 2% /opt
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]#
3.5 Write test data
[root@prometheus-server31 ~]# cp /etc/os-release /etc/netplan/00-installer-config.yaml /opt/
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# ll /opt/
total 12
drwxr-xr-x 2 root root 56 Apr 1 11:52 ./
drwxr-xr-x 21 root root 4096 Apr 1 11:46 ../
-rw------- 1 root root 319 Apr 1 11:52 00-installer-config.yaml
-rw-r--r-- 1 root root 427 Apr 1 11:52 os-release
[root@prometheus-server31 ~]#
Hot-resizing filesystems on an RBD client
- 1. Grow the devices on the server side
[root@ceph141 ~]# rbd info violet/prometheus-server
rbd image 'prometheus-server':
size 4 GiB in 1024 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: d6f5a501cf29
block_name_prefix: rbd_data.d6f5a501cf29
format: 2
features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
op_features:
flags:
create_timestamp: Tue Apr 1 11:37:28 2025
access_timestamp: Tue Apr 1 11:37:28 2025
modify_timestamp: Tue Apr 1 11:37:28 2025
[root@ceph141 ~]#
[root@ceph141 ~]#
[root@ceph141 ~]# rbd resize -s 40G violet/prometheus-server
Resizing image: 100% complete...done.
[root@ceph141 ~]#
[root@ceph141 ~]# rbd info violet/prometheus-server
rbd image 'prometheus-server':
size 40 GiB in 10240 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: d6f5a501cf29
block_name_prefix: rbd_data.d6f5a501cf29
format: 2
features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
op_features:
flags:
create_timestamp: Tue Apr 1 11:37:28 2025
access_timestamp: Tue Apr 1 11:37:28 2025
modify_timestamp: Tue Apr 1 11:37:28 2025
[root@ceph141 ~]#
[root@ceph141 ~]# rbd info violet/node-exporter
rbd image 'node-exporter':
size 8 GiB in 2048 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: d71ff8cf211d
block_name_prefix: rbd_data.d71ff8cf211d
format: 2
features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
op_features:
flags:
create_timestamp: Tue Apr 1 11:48:55 2025
access_timestamp: Tue Apr 1 11:48:55 2025
modify_timestamp: Tue Apr 1 11:48:55 2025
[root@ceph141 ~]#
[root@ceph141 ~]#
[root@ceph141 ~]# rbd resize -s 20G violet/node-exporter
Resizing image: 100% complete...done.
[root@ceph141 ~]#
[root@ceph141 ~]# rbd info violet/node-exporter
rbd image 'node-exporter':
size 20 GiB in 5120 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: d71ff8cf211d
block_name_prefix: rbd_data.d71ff8cf211d
format: 2
features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
op_features:
flags:
create_timestamp: Tue Apr 1 11:48:55 2025
access_timestamp: Tue Apr 1 11:48:55 2025
modify_timestamp: Tue Apr 1 11:48:55 2025
[root@ceph141 ~]#
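On the client, the mapped devices pick up the new sizes immediately; only the filesystems still need to be grown. The new device sizes can be confirmed before touching the filesystems:
# the block devices should already report 40G / 20G
lsblk /dev/rbd0 /dev/rbd1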
- 2. Grow the ext4 filesystem online with resize2fs
[root@prometheus-server31 ~]# df -h | grep mnt
/dev/rbd0 3.9G 184M 3.5G 5% /mnt
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# resize2fs /dev/rbd0
resize2fs 1.46.5 (30-Dec-2021)
Filesystem at /dev/rbd0 is mounted on /mnt; on-line resizing required
old_desc_blocks = 1, new_desc_blocks = 5
The filesystem on /dev/rbd0 is now 10485760 (4k) blocks long.
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# df -h | grep mnt
/dev/rbd0 40G 184M 38G 1% /mnt
[root@prometheus-server31 ~]#
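Growing ext4 works online, as shown. Shrinking does not: the filesystem must be unmounted, checked, and shrunk before the image is shrunk (a sketch with an assumed 20G target; never shrink below used space):
# offline ext4 shrink -- filesystem first, image second
umount /mnt
e2fsck -f /dev/rbd0
resize2fs /dev/rbd0 20G
rbd resize -s 20G violet/prometheus-server --allow-shrink
mount /dev/rbd0 /mnt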
- 3. Grow the XFS filesystem online with xfs_growfs
[root@prometheus-server31 ~]# df -h | grep opt
/dev/rbd1 8.0G 90M 8.0G 2% /opt
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# xfs_growfs /opt/
meta-data=/dev/rbd1 isize=512 agcount=8, agsize=262144 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=1, sparse=1, rmapbt=0
= reflink=1 bigtime=0 inobtcount=0
data = bsize=4096 blocks=2097152, imaxpct=25
= sunit=16 swidth=16 blks
naming =version 2 bsize=4096 ascii-ci=0, ftype=1
log =internal log bsize=4096 blocks=2560, version=2
= sectsz=512 sunit=16 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
data blocks changed from 2097152 to 5242880
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# df -h | grep opt
/dev/rbd1 20G 177M 20G 1% /opt
[root@prometheus-server31 ~]#
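Note that XFS can only be grown, never shrunk. When a device is no longer needed on this client, unmount and unmap it; a minimal teardown:
# release the device on this client
umount /opt
rbd unmap /dev/rbd1    # or: rbd unmap violet/node-exporter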