
参考链接:
https://docs.ceph.com/en/squid/rbd/rbd-snapshot/
- 1.rbd快照概述
rbd的快照可以进行数据的备份,恢复。
- 2.创建快照
[root@ceph141 ~]# rbd snap create -p violet --image node-exporter --snap xixi
Creating snap: 100% complete...done.
[root@ceph141 ~]#
[root@ceph141 ~]# rbd snap create violet/prometheus-server@haha
Creating snap: 100% complete...done.
[root@ceph141 ~]#
- 3.查看快照信息
[root@ceph141 ~]# rbd snap ls violet/prometheus-server
SNAPID NAME SIZE PROTECTED TIMESTAMP
4 haha 40 GiB Tue Apr 1 14:45:51 2025
[root@ceph141 ~]#
[root@ceph141 ~]# rbd snap ls -p violet --image node-exporter
SNAPID NAME SIZE PROTECTED TIMESTAMP
3 xixi 20 GiB Tue Apr 1 14:44:52 2025
[root@ceph141 ~]#
- 4.客户端篡改数据
[root@prometheus-server31 ~]# ll /opt/
total 12
drwxr-xr-x 2 root root 56 Apr 1 11:52 ./
drwxr-xr-x 21 root root 4096 Apr 1 11:46 ../
-rw------- 1 root root 319 Apr 1 11:52 00-installer-config.yaml
-rw-r--r-- 1 root root 427 Apr 1 11:52 os-release
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# rm -f /opt/os-release /opt/00-installer-config.yaml
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# cp /etc/hostname /etc/fstab /opt/
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# ll /opt/
total 12
drwxr-xr-x 2 root root 35 Apr 1 14:49 ./
drwxr-xr-x 21 root root 4096 Apr 1 11:46 ../
-rw-r--r-- 1 root root 657 Apr 1 14:49 fstab
-rw-r--r-- 1 root root 20 Apr 1 14:48 hostname
[root@prometheus-server31 ~]#
- 5.客户端准备恢复数据前要删除块设备映射
[root@prometheus-server31 ~]# umount /opt
[root@prometheus-server31 ~]# ll /opt/
total 8
drwxr-xr-x 2 root root 4096 Feb 17 2024 ./
drwxr-xr-x 21 root root 4096 Apr 1 11:46 ../
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# ls /dev/rbd1
/dev/rbd1
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# rbd unmap /dev/rbd1
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# ll /opt/
total 8
drwxr-xr-x 2 root root 4096 Feb 17 2024 ./
drwxr-xr-x 21 root root 4096 Apr 1 11:46 ../
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# ls /dev/rbd1
ls: cannot access '/dev/rbd1': No such file or directory
[root@prometheus-server31 ~]#
- 6.服务端开始回滚数据
[root@ceph141 ~]# rbd snap rollback violet/node-exporter@xixi
Rolling back to snapshot: 100% complete...done.
[root@ceph141 ~]#
- 7.客户端重新挂载测试,验证数据是否恢复
[root@prometheus-server31 ~]# rbd map violet/node-exporter
/dev/rbd1
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# mount /dev/rbd1 /opt/
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# ll /opt/
total 12
drwxr-xr-x 2 root root 56 Apr 1 11:52 ./
drwxr-xr-x 21 root root 4096 Apr 1 11:46 ../
-rw------- 1 root root 319 Apr 1 11:52 00-installer-config.yaml
-rw-r--r-- 1 root root 427 Apr 1 11:52 os-release
[root@prometheus-server31 ~]#
删除快照,快照保护,基于克隆快照恢复数据实战
- 1.未被保护的快照可以被删除
[root@ceph141 ~]# rbd snap ls violet/prometheus-server
SNAPID NAME SIZE PROTECTED TIMESTAMP
4 haha 40 GiB Tue Apr 1 14:45:51 2025
[root@ceph141 ~]#
[root@ceph141 ~]#
[root@ceph141 ~]# rbd snap rm violet/prometheus-server@haha
Removing snap: 100% complete...done.
[root@ceph141 ~]#
[root@ceph141 ~]# rbd snap ls violet/prometheus-server
[root@ceph141 ~]#
- 2.保护快照
[root@ceph141 ~]# rbd snap ls violet/node-exporter
SNAPID NAME SIZE PROTECTED TIMESTAMP
3 xixi 20 GiB Tue Apr 1 14:44:52 2025
[root@ceph141 ~]#
[root@ceph141 ~]# rbd snap protect violet/node-exporter@xixi
[root@ceph141 ~]#
[root@ceph141 ~]# rbd snap ls violet/node-exporter
SNAPID NAME SIZE PROTECTED TIMESTAMP
3 xixi 20 GiB yes Tue Apr 1 14:44:52 2025
[root@ceph141 ~]#
- 3.无法删除被保护的快照
[root@ceph141 ~]# rbd snap rm violet/node-exporter@xixi
Removing snap: 0% complete...failed.
rbd: snapshot 'xixi' is protected from removal.
2025-04-01T15:31:06.252+0800 7fa96c82e640 -1 librbd::Operations: snapshot is protected
- 4.克隆快照
[root@ceph141 ~]# rbd clone violet/node-exporter@xixi violet/child-xixi-001
[root@ceph141 ~]#
- 5.查看快照是否有子镜像
[root@ceph141 ~]# rbd snap ls violet/node-exporter
SNAPID NAME SIZE PROTECTED TIMESTAMP
3 xixi 20 GiB yes Tue Apr 1 14:44:52 2025
[root@ceph141 ~]#
[root@ceph141 ~]# rbd children violet/node-exporter@xixi
violet/child-xixi-001
[root@ceph141 ~]#
[root@ceph141 ~]# rbd ls violet -l
NAME SIZE PARENT FMT PROT LOCK
child-xixi-001 20 GiB violet/node-exporter@xixi 2
node-exporter 20 GiB 2
node-exporter@xixi 20 GiB 2 yes
prometheus-server 40 GiB 2
[root@ceph141 ~]#
- 6.基于克隆的子镜像恢复数据实战【相比于回滚快照速度更快】
将镜像回滚到快照意味着用快照中的数据覆盖镜像的当前版本。执行回滚所需的时间随着镜像的大小而增加。
从快照克隆比将镜像回滚到快照更快。从快照克隆是返回到预先存在状态的首选方法。
[root@prometheus-server31 ~]# ll /opt/
total 12
drwxr-xr-x 2 root root 56 Apr 1 11:52 ./
drwxr-xr-x 21 root root 4096 Apr 1 11:46 ../
-rw------- 1 root root 319 Apr 1 11:52 00-installer-config.yaml
-rw-r--r-- 1 root root 427 Apr 1 11:52 os-release
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# rm -f /opt/os-release
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# ll /opt/
total 8
drwxr-xr-x 2 root root 38 Apr 1 15:36 ./
drwxr-xr-x 21 root root 4096 Apr 1 11:46 ../
-rw------- 1 root root 319 Apr 1 11:52 00-installer-config.yaml
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# umount /opt
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# rbd unmap /dev/rbd1
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# rbd map violet/child-xixi-001 # 此镜像是从父快照克隆而来,几乎无需恢复时间(速度比rollback更快!)。
/dev/rbd1
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# mount /dev/rbd1 /opt/
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# ll /opt/
total 12
drwxr-xr-x 2 root root 56 Apr 1 11:52 ./
drwxr-xr-x 21 root root 4096 Apr 1 11:46 ../
-rw------- 1 root root 319 Apr 1 11:52 00-installer-config.yaml
-rw-r--r-- 1 root root 427 Apr 1 11:52 os-release
[root@prometheus-server31 ~]#
取消快照保护,子镜像独立案例
- 1.无法移除被保护的快照
[root@ceph141 ~]# rbd ls violet -l
NAME SIZE PARENT FMT PROT LOCK
child-xixi-001 20 GiB violet/node-exporter@xixi 2 excl
node-exporter 20 GiB 2
node-exporter@xixi 20 GiB 2 yes
prometheus-server 40 GiB 2
[root@ceph141 ~]#
[root@ceph141 ~]# rbd snap rm violet/node-exporter@xixi
2025-04-01T15:44:42.934+0800 7fa9b4e2f640 -1 librbd::Operations: snapshot is protected
Removing snap: 0% complete...failed.
rbd: snapshot 'xixi' is protected from removal.
[root@ceph141 ~]#
- 2.如果被保护的快照有子镜像则无法取消保护
[root@ceph141 ~]# rbd children violet/node-exporter@xixi
violet/child-xixi-001
[root@ceph141 ~]#
[root@ceph141 ~]# rbd info violet/child-xixi-001
rbd image 'child-xixi-001':
size 20 GiB in 5120 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: d83350e0ece5
block_name_prefix: rbd_data.d83350e0ece5
format: 2
features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
op_features:
flags:
create_timestamp: Tue Apr 1 15:33:32 2025
access_timestamp: Tue Apr 1 15:33:32 2025
modify_timestamp: Tue Apr 1 15:33:32 2025
parent: violet/node-exporter@xixi
overlap: 20 GiB
[root@ceph141 ~]#
[root@ceph141 ~]# rbd snap unprotect violet/node-exporter@xixi
2025-04-01T16:07:35.692+0800 7fd2b328f640 -1 librbd::SnapshotUnprotectRequest: cannot unprotect: at least 1 child(ren) [d83350e0ece5] in pool 'violet'
2025-04-01T16:07:35.692+0800 7fd2b2a8e640 -1 librbd::SnapshotUnprotectRequest: encountered error: (16) Device or resource busy
2025-04-01T16:07:35.692+0800 7fd2b2a8e640 -1 librbd::SnapshotUnprotectRequest: 0x55685c80f2a0 should_complete_error: ret_val=-16
2025-04-01T16:07:35.696+0800 7fd2b328f640 -1 librbd::SnapshotUnprotectRequest: 0x55685c80f2a0 should_complete_error: ret_val=-16
rbd: unprotecting snap failed: (16) Device or resource busy
[root@ceph141 ~]#
- 3.基于flatten取消父镜像和子镜像的关联(说白了,就是让子镜像从父镜像依赖的文件重复复制一份,独立出来,这样就和父镜像无关)
[root@ceph141 ~]# rbd children violet/node-exporter@xixi
violet/child-xixi-001
[root@ceph141 ~]#
[root@ceph141 ~]# rbd flatten violet/child-xixi-001 # 如果父镜像数据过大,可能需要较长的时间拷贝数据
Image flatten: 100% complete...done.
[root@ceph141 ~]#
[root@ceph141 ~]# rbd children violet/node-exporter@xixi
[root@ceph141 ~]#
- 4.取消快照保护
[root@ceph141 ~]# rbd ls violet -l
NAME SIZE PARENT FMT PROT LOCK
child-xixi-001 20 GiB 2 excl
node-exporter 20 GiB 2
node-exporter@xixi 20 GiB 2 yes
prometheus-server 40 GiB 2
[root@ceph141 ~]#
[root@ceph141 ~]# rbd snap unprotect violet/node-exporter@xixi
[root@ceph141 ~]#
[root@ceph141 ~]# rbd ls violet -l
NAME SIZE PARENT FMT PROT LOCK
child-xixi-001 20 GiB 2 excl
node-exporter 20 GiB 2
node-exporter@xixi 20 GiB 2
prometheus-server 40 GiB 2
[root@ceph141 ~]#
- 5.取消保护后,就可以移除快照
[root@ceph141 ~]# rbd ls violet -l
NAME SIZE PARENT FMT PROT LOCK
child-xixi-001 20 GiB 2 excl
node-exporter 20 GiB 2
node-exporter@xixi 20 GiB 2
prometheus-server 40 GiB 2
[root@ceph141 ~]#
[root@ceph141 ~]# rbd snap rm violet/node-exporter@xixi
Removing snap: 100% complete...done.
[root@ceph141 ~]#
[root@ceph141 ~]# rbd ls violet -l
NAME SIZE PARENT FMT PROT LOCK
child-xixi-001 20 GiB 2 excl
node-exporter 20 GiB 2
prometheus-server 40 GiB 2
[root@ceph141 ~]#
卸载rbd设备
- 1.查看本地rbd块设备的映射信息
[root@prometheus-server31 ~]# rbd showmapped
id pool namespace image snap device
0 violet prometheus-server - /dev/rbd0
1 violet child-xixi-001 - /dev/rbd1
[root@prometheus-server31 ~]#
- 2.查看本地的挂载信息
[root@prometheus-server31 ~]# df -h | grep rbd
/dev/rbd0 40G 184M 38G 1% /mnt
/dev/rbd1 20G 177M 20G 1% /opt
[root@prometheus-server31 ~]#
- 3.取消挂载点
[root@prometheus-server31 ~]# umount /opt
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# df -h | grep rbd
/dev/rbd0 40G 184M 38G 1% /mnt
[root@prometheus-server31 ~]#
- 4.取消映射关系
[root@prometheus-server31 ~]# rbd showmapped
id pool namespace image snap device
0 violet prometheus-server - /dev/rbd0
1 violet child-xixi-001 - /dev/rbd1
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# rbd unmap -p violet --image child-xixi-001
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# rbd showmapped
id pool namespace image snap device
0 violet prometheus-server - /dev/rbd0
[root@prometheus-server31 ~]#
- 5.另一种卸载方式
[root@prometheus-server31 ~]# umount /mnt
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# rbd showmapped
id pool namespace image snap device
0 violet prometheus-server - /dev/rbd0
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# df -h | grep rbd
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# rbd unmap violet/prometheus-server
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# rbd showmapped
[root@prometheus-server31 ~]#
rbd的开机挂载
- 1.编写开机启动脚本
[root@prometheus-server31 ~]# cat /etc/rc.local
#!/bin/bash
rbd map violet/prometheus-server
rbd map violet/child-xixi-001
mount /dev/rbd0 /mnt
mount /dev/rbd1 /opt
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# chmod +x /etc/rc.local
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# ll /etc/rc.local
-rwxr-xr-x 1 root root 111 Apr 1 16:33 /etc/rc.local*
[root@prometheus-server31 ~]#
- 2.重启服务器
[root@prometheus-server31 ~]# reboot
- 3.验证测试
[root@prometheus-server31 ~]# df -h | grep rbd
/dev/rbd0 40G 184M 38G 1% /mnt
/dev/rbd1 20G 177M 20G 1% /opt
[root@prometheus-server31 ~]#
多个节点无法同时使用同一块设备案例
- 1.LOCK字段中'excl'标记着该设备正在被使用
[root@ceph141 ~]# rbd ls violet -l
NAME SIZE PARENT FMT PROT LOCK
child-xixi-001 20 GiB 2 excl
node-exporter 20 GiB 2
prometheus-server 40 GiB 2 excl
[root@ceph141 ~]#
[root@ceph141 ~]#
- 2.客户端取消映射
[root@prometheus-server31 ~]# rbd showmapped
id pool namespace image snap device
0 violet prometheus-server - /dev/rbd0
1 violet child-xixi-001 - /dev/rbd1
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# df -h | grep rbd
/dev/rbd0 40G 184M 38G 1% /mnt
/dev/rbd1 20G 177M 20G 1% /opt
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# umount /opt
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# rbd unmap /dev/rbd1
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# df -h | grep rbd
/dev/rbd0 40G 184M 38G 1% /mnt
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# rbd showmapped
id pool namespace image snap device
0 violet prometheus-server - /dev/rbd0
[root@prometheus-server31 ~]#
- 3.再次查看服务端
[root@ceph141 ~]# rbd ls violet -l
NAME SIZE PARENT FMT PROT LOCK
child-xixi-001 20 GiB 2
node-exporter 20 GiB 2
prometheus-server 40 GiB 2 excl
[root@ceph141 ~]#
- 4.客户端挂载设备
4.1 终端1挂载设备
[root@elk93 ~]# apt -y install ceph-common
4.2 拷贝认证文件
[root@ceph141 ~]# scp /etc/ceph/ceph{.client.admin.keyring,.conf} 10.0.0.93:/etc/ceph
4.3 挂载设备
[root@elk93 ~]# rbd map violet/child-xixi-001
/dev/rbd0
[root@elk93 ~]#
[root@elk93 ~]# rbd showmapped
id pool namespace image snap device
0 violet child-xixi-001 - /dev/rbd0
[root@elk93 ~]#
4.4 测试数据
[root@elk93 ~]# mount /dev/rbd0 /mnt/
[root@elk93 ~]# ll /mnt/
total 12
drwxr-xr-x 2 root root 56 Apr 1 11:52 ./
drwxr-xr-x 22 root root 4096 Mar 13 11:57 ../
-rw------- 1 root root 319 Apr 1 11:52 00-installer-config.yaml
-rw-r--r-- 1 root root 427 Apr 1 11:52 os-release
[root@elk93 ~]#
[root@elk93 ~]# cp /etc/hostname /mnt/
[root@elk93 ~]#
[root@elk93 ~]# ll /mnt/
total 16
drwxr-xr-x 2 root root 72 Apr 1 16:58 ./
drwxr-xr-x 22 root root 4096 Mar 13 11:57 ../
-rw------- 1 root root 319 Apr 1 11:52 00-installer-config.yaml
-rw-r--r-- 1 root root 6 Apr 1 16:58 hostname
-rw-r--r-- 1 root root 427 Apr 1 11:52 os-release
[root@elk93 ~]#
4.5 服务端查看块设备挂载情况
[root@ceph141 ~]# rbd ls violet -l
NAME SIZE PARENT FMT PROT LOCK
child-xixi-001 20 GiB 2 excl
node-exporter 20 GiB 2
prometheus-server 40 GiB 2 excl
[root@ceph141 ~]#
4.6 终端2继续挂载一个带有excl锁标记的设备
[root@prometheus-server31 ~]# rbd map violet/child-xixi-001
/dev/rbd1
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# rbd showmapped
id pool namespace image snap device
0 violet prometheus-server - /dev/rbd0
1 violet child-xixi-001 - /dev/rbd1
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# df -h | grep rbd
/dev/rbd0 40G 184M 38G 1% /mnt
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# mount /dev/rbd1 /opt/
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# ll /opt/
total 16
drwxr-xr-x 2 root root 72 Apr 1 16:58 ./
drwxr-xr-x 21 root root 4096 Apr 1 11:46 ../
-rw------- 1 root root 319 Apr 1 11:52 00-installer-config.yaml
-rw-r--r-- 1 root root 6 Apr 1 16:58 hostname
-rw-r--r-- 1 root root 427 Apr 1 11:52 os-release
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# cp /etc/fstab /opt/
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# ll /opt/
total 20
drwxr-xr-x 2 root root 85 Apr 1 17:00 ./
drwxr-xr-x 21 root root 4096 Apr 1 11:46 ../
-rw------- 1 root root 319 Apr 1 11:52 00-installer-config.yaml
-rw-r--r-- 1 root root 657 Apr 1 17:00 fstab
-rw-r--r-- 1 root root 6 Apr 1 16:58 hostname
-rw-r--r-- 1 root root 427 Apr 1 11:52 os-release
[root@prometheus-server31 ~]#
4.7 再次切回终端1发现数据没有任何变化【此时数据已经开始冲突了,因此生产环境中,不要出现2个主机同时使用同一个镜像的情况!】
[root@elk93 ~]# ll /mnt/
total 16
drwxr-xr-x 2 root root 72 Apr 1 16:58 ./
drwxr-xr-x 22 root root 4096 Mar 13 11:57 ../
-rw------- 1 root root 319 Apr 1 11:52 00-installer-config.yaml
-rw-r--r-- 1 root root 6 Apr 1 16:58 hostname
-rw-r--r-- 1 root root 427 Apr 1 11:52 os-release
[root@elk93 ~]#
rbd块设备实战案例MySQL
- 1.ceph集群创建镜像设备
[root@ceph141 ~]# rbd create -s 500G violet/mysql80
[root@ceph141 ~]#
[root@ceph141 ~]# rbd info violet/mysql80
rbd image 'mysql80':
size 500 GiB in 128000 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: d92626b31f2c
block_name_prefix: rbd_data.d92626b31f2c
format: 2
features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
op_features:
flags:
create_timestamp: Tue Apr 1 17:07:43 2025
access_timestamp: Tue Apr 1 17:07:43 2025
modify_timestamp: Tue Apr 1 17:07:43 2025
[root@ceph141 ~]#
- 2.MySQL客户端添加挂载ceph的镜像(块)设备
[root@elk93 ~]# rbd map violet/mysql80
/dev/rbd1
[root@elk93 ~]#
[root@elk93 ~]# rbd showmapped
id pool namespace image snap device
0 violet child-xixi-001 - /dev/rbd0
1 violet mysql80 - /dev/rbd1
[root@elk93 ~]#
[root@elk93 ~]# fdisk -l /dev/rbd1
Disk /dev/rbd1: 500 GiB, 536870912000 bytes, 1048576000 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 65536 bytes / 65536 bytes
[root@elk93 ~]#
[root@elk93 ~]# mkfs.xfs /dev/rbd1
meta-data=/dev/rbd1 isize=512 agcount=16, agsize=8192000 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=1, sparse=1, rmapbt=0
= reflink=1 bigtime=0 inobtcount=0
data = bsize=4096 blocks=131072000, imaxpct=25
= sunit=16 swidth=16 blks
naming =version 2 bsize=4096 ascii-ci=0, ftype=1
log =internal log bsize=4096 blocks=64000, version=2
= sectsz=512 sunit=16 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
Discarding blocks...Done.
[root@elk93 ~]#
[root@elk93 ~]# install -d /violet/data/mysql80 -o mysql -g mysql
[root@elk93 ~]#
[root@elk93 ~]# ll -d /violet/data/mysql80/
drwxr-xr-x 2 mysql mysql 4096 Apr 1 17:09 /violet/data/mysql80//
[root@elk93 ~]#
[root@elk93 ~]# mount /dev/rbd1 /violet/data/mysql80/
[root@elk93 ~]#
[root@elk93 ~]# df -h | grep rbd1
/dev/rbd1 500G 3.6G 497G 1% /violet/data/mysql80
[root@elk93 ~]#
- 3.修改MySQL数据的存储目录
[root@elk93 ~]# grep ^datadir= /etc/init.d/mysql.server
datadir=/var/lib/mysql
[root@elk93 ~]#
[root@elk93 ~]#
[root@elk93 ~]# ss -ntl | grep 3306
[root@elk93 ~]#
[root@elk93 ~]# sed -ri '/^datadir=/s#/var/lib/mysql#/violet/data/mysql80#' /etc/init.d/mysql.server
[root@elk93 ~]#
[root@elk93 ~]# grep ^datadir= /etc/init.d/mysql.server
datadir=/violet/data/mysql80
[root@elk93 ~]#
[root@elk93 ~]#
- 4.初始化MySQL服务
[root@elk93 ~]# cat /etc/my.cnf
[mysqld]
basedir=/usr/local/mysql844
#datadir=/var/lib/mysql
datadir=/violet/data/mysql80
socket=/tmp/mysql80.sock
port=3306
mysql_native_password=on
[client]
socket=/tmp/mysql80.sock
[root@elk93 ~]#
[root@elk93 ~]# mysqld --initialize-insecure --user=mysql --datadir=/violet/data/mysql80 --basedir=/usr/local/mysql844
2025-04-01T09:16:28.520902Z 0 [System] [MY-015017] [Server] MySQL Server Initialization - start.
2025-04-01T09:16:28.521958Z 0 [System] [MY-013169] [Server] /usr/local/mysql-8.4.4-linux-glibc2.28-x86_64/bin/mysqld (mysqld 8.4.4) initializing of server in progress as process 14977
2025-04-01T09:16:28.638158Z 1 [System] [MY-013576] [InnoDB] InnoDB initialization has started.
2025-04-01T09:16:30.492556Z 1 [System] [MY-013577] [InnoDB] InnoDB initialization has ended.
2025-04-01T09:16:33.803855Z 6 [Warning] [MY-010453] [Server] root@localhost is created with an empty password ! Please consider switching off the --initialize-insecure option.
2025-04-01T09:16:39.039116Z 0 [System] [MY-015018] [Server] MySQL Server Initialization - end.
[root@elk93 ~]#
[root@elk93 ~]# ll /violet/data/mysql80/
total 91208
drwxr-xr-x 7 mysql mysql 4096 Apr 1 17:16 ./
drwxr-xr-x 6 root root 4096 Apr 1 17:09 ../
-rw-r----- 1 mysql mysql 56 Apr 1 17:16 auto.cnf
-rw------- 1 mysql mysql 1705 Apr 1 17:16 ca-key.pem
-rw-r--r-- 1 mysql mysql 1108 Apr 1 17:16 ca.pem
-rw-r--r-- 1 mysql mysql 1108 Apr 1 17:16 client-cert.pem
-rw------- 1 mysql mysql 1705 Apr 1 17:16 client-key.pem
-rw-r----- 1 mysql mysql 6291456 Apr 1 17:16 '#ib_16384_0.dblwr'
-rw-r----- 1 mysql mysql 14680064 Apr 1 17:16 '#ib_16384_1.dblwr'
-rw-r----- 1 mysql mysql 5739 Apr 1 17:16 ib_buffer_pool
-rw-r----- 1 mysql mysql 12582912 Apr 1 17:16 ibdata1
drwxr-x--- 2 mysql mysql 4096 Apr 1 17:16 '#innodb_redo'/
drwxr-x--- 2 mysql mysql 6 Apr 1 17:16 '#innodb_temp'/
drwxr-x--- 2 mysql mysql 143 Apr 1 17:16 mysql/
-rw-r----- 1 mysql mysql 26214400 Apr 1 17:16 mysql.ibd
-rw-r----- 1 mysql mysql 124 Apr 1 17:16 mysql_upgrade_history
drwxr-x--- 2 mysql mysql 8192 Apr 1 17:16 performance_schema/
-rw------- 1 mysql mysql 1705 Apr 1 17:16 private_key.pem
-rw-r--r-- 1 mysql mysql 452 Apr 1 17:16 public_key.pem
-rw-r--r-- 1 mysql mysql 1108 Apr 1 17:16 server-cert.pem
-rw------- 1 mysql mysql 1709 Apr 1 17:16 server-key.pem
drwxr-x--- 2 mysql mysql 28 Apr 1 17:16 sys/
-rw-r----- 1 mysql mysql 16777216 Apr 1 17:16 undo_001
-rw-r----- 1 mysql mysql 16777216 Apr 1 17:16 undo_002
[root@elk93 ~]#
- 5.启动MySQL服务
[root@elk93 ~]# /etc/init.d/mysql.server start
Starting mysql.server (via systemctl): mysql.server.service.
[root@elk93 ~]#
[root@elk93 ~]# ss -ntl | grep 3306
LISTEN 0 70 *:33060 *:*
LISTEN 0 151 *:3306 *:*
[root@elk93 ~]#
- 6.验证测试
[root@elk93 ~]# mysql
Welcome to the MySQL monitor. Commands end with ; or \g.
Your MySQL connection id is 9
Server version: 8.4.4 MySQL Community Server - GPL
Copyright (c) 2000, 2025, Oracle and/or its affiliates.
Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.
Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
mysql> CREATE DATABASE db01;
Query OK, 1 row affected (0.02 sec)
mysql> CREATE DATABASE db02;
Query OK, 1 row affected (0.01 sec)
mysql> CREATE DATABASE db03;
Query OK, 1 row affected (0.01 sec)
mysql> QUIT
Bye
[root@elk93 ~]#
[root@elk93 ~]# ll /violet/data/mysql80/
total 103512
drwxr-xr-x 10 mysql mysql 4096 Apr 1 17:19 ./
drwxr-xr-x 6 root root 4096 Apr 1 17:09 ../
-rw-r----- 1 mysql mysql 56 Apr 1 17:16 auto.cnf
-rw-r----- 1 mysql mysql 181 Apr 1 17:17 binlog.000001
-rw-r----- 1 mysql mysql 713 Apr 1 17:19 binlog.000002
-rw-r----- 1 mysql mysql 32 Apr 1 17:18 binlog.index
-rw------- 1 mysql mysql 1705 Apr 1 17:16 ca-key.pem
-rw-r--r-- 1 mysql mysql 1108 Apr 1 17:16 ca.pem
-rw-r--r-- 1 mysql mysql 1108 Apr 1 17:16 client-cert.pem
-rw------- 1 mysql mysql 1705 Apr 1 17:16 client-key.pem
drwxr-x--- 2 mysql mysql 6 Apr 1 17:19 db01/
drwxr-x--- 2 mysql mysql 6 Apr 1 17:19 db02/
drwxr-x--- 2 mysql mysql 6 Apr 1 17:19 db03/
-rw-r----- 1 mysql mysql 3001 Apr 1 17:18 elk93.err
-rw-r----- 1 mysql mysql 6 Apr 1 17:18 elk93.pid
-rw-r----- 1 mysql mysql 6291456 Apr 1 17:19 '#ib_16384_0.dblwr'
-rw-r----- 1 mysql mysql 14680064 Apr 1 17:16 '#ib_16384_1.dblwr'
-rw-r----- 1 mysql mysql 3461 Apr 1 17:17 ib_buffer_pool
-rw-r----- 1 mysql mysql 12582912 Apr 1 17:19 ibdata1
-rw-r----- 1 mysql mysql 12582912 Apr 1 17:18 ibtmp1
drwxr-x--- 2 mysql mysql 4096 Apr 1 17:18 '#innodb_redo'/
drwxr-x--- 2 mysql mysql 187 Apr 1 17:18 '#innodb_temp'/
drwxr-x--- 2 mysql mysql 143 Apr 1 17:16 mysql/
-rw-r----- 1 mysql mysql 26214400 Apr 1 17:19 mysql.ibd
-rw-r----- 1 mysql mysql 124 Apr 1 17:16 mysql_upgrade_history
drwxr-x--- 2 mysql mysql 8192 Apr 1 17:16 performance_schema/
-rw------- 1 mysql mysql 1705 Apr 1 17:16 private_key.pem
-rw-r--r-- 1 mysql mysql 452 Apr 1 17:16 public_key.pem
-rw-r--r-- 1 mysql mysql 1108 Apr 1 17:16 server-cert.pem
-rw------- 1 mysql mysql 1709 Apr 1 17:16 server-key.pem
drwxr-x--- 2 mysql mysql 28 Apr 1 17:16 sys/
-rw-r----- 1 mysql mysql 16777216 Apr 1 17:19 undo_001
-rw-r----- 1 mysql mysql 16777216 Apr 1 17:19 undo_002
[root@elk93 ~]#
rbd块设备实战案例harbor
- 1.创建块设备文件
[root@ceph141 ~]# ceph -s
cluster:
id: 11e66474-0e02-11f0-82d6-4dcae3d59070
health: HEALTH_OK
services:
mon: 3 daemons, quorum ceph141,ceph142,ceph143 (age 23h)
mgr: ceph141.mbakds(active, since 23h), standbys: ceph142.qgifwo
osd: 9 osds: 9 up (since 23h), 9 in (since 23h)
data:
pools: 2 pools, 9 pgs
objects: 214 objects, 475 MiB
usage: 2.1 GiB used, 5.3 TiB / 5.3 TiB avail
pgs: 9 active+clean
[root@ceph141 ~]#
[root@ceph141 ~]# rbd create -s 1T violet/harbor
[root@ceph141 ~]#
[root@ceph141 ~]# rbd info violet/harbor
rbd image 'harbor':
size 1 TiB in 262144 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: d93e3b45ac4
block_name_prefix: rbd_data.d93e3b45ac4
format: 2
features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
op_features:
flags:
create_timestamp: Tue Apr 1 17:22:56 2025
access_timestamp: Tue Apr 1 17:22:56 2025
modify_timestamp: Tue Apr 1 17:22:56 2025
[root@ceph141 ~]#
- 2.harbor服务器挂载ceph的设备
[root@elk93 harbor]# rbd map violet/harbor
/dev/rbd2
[root@elk93 harbor]#
[root@elk93 harbor]# rbd showmapped
id pool namespace image snap device
0 violet child-xixi-001 - /dev/rbd0
1 violet mysql80 - /dev/rbd1
2 violet harbor - /dev/rbd2
[root@elk93 harbor]#
[root@elk93 harbor]# mkfs.xfs /dev/rbd2
meta-data=/dev/rbd2 isize=512 agcount=32, agsize=8388608 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=1, sparse=1, rmapbt=0
= reflink=1 bigtime=0 inobtcount=0
data = bsize=4096 blocks=268435456, imaxpct=5
= sunit=16 swidth=16 blks
naming =version 2 bsize=4096 ascii-ci=0, ftype=1
log =internal log bsize=4096 blocks=131072, version=2
= sectsz=512 sunit=16 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
Discarding blocks...Done.
[root@elk93 harbor]#
[root@elk93 harbor]# install -d /violet/data/harbor-2025
[root@elk93 harbor]#
[root@elk93 harbor]# mount /dev/rbd2 /violet/data/harbor-2025
[root@elk93 harbor]#
[root@elk93 harbor]# df -h | grep harbor
/dev/rbd2 1.0T 7.2G 1017G 1% /violet/data/harbor-2025
[root@elk93 harbor]#
- 3.修改harbor的配置文件
[root@elk93 ~]# cd /usr/local/harbor/
[root@elk93 harbor]#
[root@elk93 harbor]# grep ^data_volume harbor.yml
data_volume: /violet/data/harbor
[root@elk93 harbor]#
[root@elk93 harbor]#
[root@elk93 harbor]# sed -ri '/^data_volume/s#(/violet/data/harbor)#\1-2025#' harbor.yml
[root@elk93 harbor]#
[root@elk93 harbor]# grep ^data_volume harbor.yml
data_volume: /violet/data/harbor-2025
[root@elk93 harbor]#
[root@elk93 harbor]# ./install.sh
[root@elk93 harbor]#
[root@elk93 harbor]# ll /violet/data/harbor-2025/
total 4
drwxr-xr-x 8 root root 100 Apr 1 17:26 ./
drwxr-xr-x 7 root root 4096 Apr 1 17:25 ../
drwxr-xr-x 2 10000 10000 6 Apr 1 17:26 ca_download/
drwx------ 3 lxd logstash 18 Apr 1 17:26 database/
drwxr-xr-x 2 10000 10000 6 Apr 1 17:26 job_logs/
drwxr-xr-x 2 lxd logstash 6 Apr 1 17:26 redis/
drwxr-xr-x 2 10000 10000 6 Apr 1 17:26 registry/
drwxr-xr-x 6 root root 58 Apr 1 17:26 secret/
[root@elk93 harbor]#
- 4.验证harbor是否可用
略
- 5.验证ceph集群的使用空间大小
[root@ceph141 ~]# ceph -s
cluster:
id: 11e66474-0e02-11f0-82d6-4dcae3d59070
health: HEALTH_OK
services:
mon: 3 daemons, quorum ceph141,ceph142,ceph143 (age 23h)
mgr: ceph141.mbakds(active, since 23h), standbys: ceph142.qgifwo
osd: 9 osds: 9 up (since 23h), 9 in (since 23h)
data:
pools: 2 pools, 9 pgs
objects: 265 objects, 553 MiB
usage: 2.5 GiB used, 5.3 TiB / 5.3 TiB avail
pgs: 9 active+clean
[root@ceph141 ~]#
rbd块设备实战案例之Prometheus
- 1.服务端创建块设备
[root@ceph141 ~]# rbd create -s 500M violet/prometheus
[root@ceph141 ~]#
[root@ceph141 ~]# rbd info violet/prometheus
rbd image 'prometheus':
size 500 MiB in 125 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: d9626db6f982
block_name_prefix: rbd_data.d9626db6f982
format: 2
features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
op_features:
flags:
create_timestamp: Tue Apr 1 17:34:09 2025
access_timestamp: Tue Apr 1 17:34:09 2025
modify_timestamp: Tue Apr 1 17:34:09 2025
[root@ceph141 ~]#
- 2.客户端开机挂载测试(略,此处为了省事,我就手动挂载了)
[root@prometheus-server31 prometheus-2.53.4.linux-amd64]# rbd map violet/prometheus
/dev/rbd2
[root@prometheus-server31 prometheus-2.53.4.linux-amd64]#
[root@prometheus-server31 prometheus-2.53.4.linux-amd64]# rbd showmapped
id pool namespace image snap device
0 violet prometheus-server - /dev/rbd0
1 violet child-xixi-001 - /dev/rbd1
2 violet prometheus - /dev/rbd2
[root@prometheus-server31 prometheus-2.53.4.linux-amd64]#
[root@prometheus-server31 prometheus-2.53.4.linux-amd64]# mkfs.xfs /dev/rbd2
meta-data=/dev/rbd2 isize=512 agcount=8, agsize=16000 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=1, sparse=1, rmapbt=0
= reflink=1 bigtime=0 inobtcount=0
data = bsize=4096 blocks=128000, imaxpct=25
= sunit=16 swidth=16 blks
naming =version 2 bsize=4096 ascii-ci=0, ftype=1
log =internal log bsize=4096 blocks=1872, version=2
= sectsz=512 sunit=16 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
Discarding blocks...Done.
[root@prometheus-server31 prometheus-2.53.4.linux-amd64]#
[root@prometheus-server31 prometheus-2.53.4.linux-amd64]# mkdir /violet/data/prometheus-2025
[root@prometheus-server31 prometheus-2.53.4.linux-amd64]#
[root@prometheus-server31 prometheus-2.53.4.linux-amd64]# mount /dev/rbd2 /violet/data/prometheus-2025
[root@prometheus-server31 prometheus-2.53.4.linux-amd64]#
[root@prometheus-server31 prometheus-2.53.4.linux-amd64]# df -h | grep prom
/dev/rbd2 493M 29M 464M 6% /violet/data/prometheus-2025
[root@prometheus-server31 prometheus-2.53.4.linux-amd64]#
- 3.Prometheus指定数据目录
[root@prometheus-server31 ~]# cd /violet/softwares/prometheus-2.53.4.linux-amd64/
[root@prometheus-server31 prometheus-2.53.4.linux-amd64]#
[root@prometheus-server31 prometheus-2.53.4.linux-amd64]# ll /violet/data/prometheus-2025/
total 4
drwxr-xr-x 2 root root 6 Apr 1 17:35 ./
drwxr-xr-x 4 root root 4096 Apr 1 17:36 ../
[root@prometheus-server31 prometheus-2.53.4.linux-amd64]#
[root@prometheus-server31 prometheus-2.53.4.linux-amd64]#
[root@prometheus-server31 prometheus-2.53.4.linux-amd64]# ./prometheus --storage.tsdb.path="/violet/data/prometheus-2025/" --web.listen-address="0.0.0.0:19090"
- 4.访问测试
http://10.0.0.31:19090/
- 5.验证数据是否写入成功
[root@prometheus-server31 ~]# ll /violet/data/prometheus-2025/
total 24
drwxr-xr-x 4 root root 58 Apr 1 17:39 ./
drwxr-xr-x 4 root root 4096 Apr 1 17:36 ../
drwxr-xr-x 2 root root 6 Apr 1 17:38 chunks_head/
-rw-r--r-- 1 root root 20001 Apr 1 17:39 queries.active
drwxr-xr-x 2 root root 22 Apr 1 17:38 wal/
[root@prometheus-server31 ~]#
rbd存储池的资源配额实战
官网链接:
https://docs.ceph.com/en/latest/rados/operations/pools/#setting-pool-quotas
- 1.存储池资源配额概述
ceph集群官方支持基于对象存储数量和数据存储的大小两种方式限制存储资源配额。
- 2.创建存储池
[root@ceph141 ~]# ceph osd pool create linux 32 32 --size 3 --autoscale_mode off
pool 'linux' created
[root@ceph141 ~]#
[root@ceph141 ~]# ceph osd pool ls detail
pool 1 '.mgr' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 1 pgp_num 1 autoscale_mode on last_change 21 flags hashpspool stripe_width 0 pg_num_max 32 pg_num_min 1 application mgr read_balance_score 9.09
pool 8 'violet' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 8 pgp_num 8 autoscale_mode off last_change 358 flags hashpspool,selfmanaged_snaps stripe_width 0 application rbd read_balance_score 3.38
pool 9 'linux' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 32 pgp_num 32 autoscale_mode off last_change 362 flags hashpspool stripe_width 0 read_balance_score 2.25
[root@ceph141 ~]#
- 3.查看存储池的资源限制信息
[root@ceph141 ~]# ceph osd pool get-quota linux
quotas for pool 'linux':
max objects: N/A
max bytes : N/A
[root@ceph141 ~]#
- 4.限制存储池最大上限为30000个对象
[root@ceph141 ~]# ceph osd pool set-quota linux max_objects 30000
set-quota max_objects = 30000 for pool linux
[root@ceph141 ~]#
[root@ceph141 ~]# ceph osd pool get-quota linux
quotas for pool 'linux':
max objects: 30k objects (current num objects: 0 objects)
max bytes : N/A
[root@ceph141 ~]#
- 5.限制存储池最大存储10M大小
[root@ceph141 ~]# echo 10*1024*1024| bc
10485760
[root@ceph141 ~]#
[root@ceph141 ~]# ceph osd pool set-quota linux max_bytes 10485760
set-quota max_bytes = 10485760 for pool linux
[root@ceph141 ~]#
[root@ceph141 ~]# ceph osd pool get-quota linux
quotas for pool 'linux':
max objects: 30k objects (current num objects: 0 objects)
max bytes : 10 MiB (current num bytes: 0 bytes)
[root@ceph141 ~]#
- 6.验证数据存储的上限
[root@ceph141 ~]# rados put file01 ./install-docker.sh -p linux
[root@ceph141 ~]#
[root@ceph141 ~]# ceph osd pool get-quota linux
quotas for pool 'linux':
max objects: 30k objects (current num objects: 1 objects)
max bytes : 10 MiB (current num bytes: 3513 bytes) # 大小不足10MB
[root@ceph141 ~]#
[root@ceph141 ~]# rados put file02 ./violet-node-exporter-v1.7.0.tar.gz -p linux
[root@ceph141 ~]#
[root@ceph141 ~]# ceph osd pool get-quota linux
quotas for pool 'linux':
max objects: 30k objects (current num objects: 2 objects)
max bytes : 10 MiB (current num bytes: 23871929 bytes) # 由于在上传文件前,其大小还不足10M,因此判断此文件可以上传!
[root@ceph141 ~]#
[root@ceph141 ~]# rados put file03 ./violet-node-exporter-v1.7.0.tar.gz -p linux # 你会发现无法上传成功啦!因为已经超过10MB啦~(目前将近22MB)
- 7.清除资源限制
[root@ceph141 ~]# ceph osd pool set-quota linux max_objects 0
set-quota max_objects = 0 for pool linux
[root@ceph141 ~]#
[root@ceph141 ~]# ceph osd pool set-quota linux max_bytes 0
set-quota max_bytes = 0 for pool linux
[root@ceph141 ~]#
[root@ceph141 ~]# ceph osd pool get-quota linux
quotas for pool 'linux':
max objects: N/A
max bytes : N/A
[root@ceph141 ~]#
块设备打多个快照恢复指定版本
- 1.创建存储池
[root@ceph141 ~]# ceph osd pool create laonanhai
pool 'laonanhai' created
[root@ceph141 ~]#
- 2.创建块设备
[root@ceph141 ~]# rbd create -s 1M ubuntu-2404-lts -p laonanhai
[root@ceph141 ~]#
[root@ceph141 ~]# rbd info laonanhai/ubuntu-2404-lts
rbd image 'ubuntu-2404-lts':
size 1 MiB in 1 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: d7dd78861a29
block_name_prefix: rbd_data.d7dd78861a29
format: 2
features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
op_features:
flags:
create_timestamp: Wed Apr 2 09:15:41 2025
access_timestamp: Wed Apr 2 09:15:41 2025
modify_timestamp: Wed Apr 2 09:15:41 2025
[root@ceph141 ~]#
- 3.客户端挂载
[root@prometheus-server31 ~]# rbd map laonanhai/ubuntu-2404-lts
/dev/rbd3
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# rbd showmapped
id pool namespace image snap device
0 violet prometheus-server - /dev/rbd0
1 violet child-xixi-001 - /dev/rbd1
2 violet prometheus - /dev/rbd2
3 laonanhai ubuntu-2404-lts - /dev/rbd3
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# fdisk -l /dev/rbd3
Disk /dev/rbd3: 1 MiB, 1048576 bytes, 2048 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 65536 bytes / 65536 bytes
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# mkfs.ext4 /dev/rbd3
mke2fs 1.46.5 (30-Dec-2021)
Filesystem too small for a journal
Discarding device blocks: done
Creating filesystem with 256 4k blocks and 128 inodes
Allocating group tables: done
Writing inode tables: done
Writing superblocks and filesystem accounting information: done
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# mkdir /violet/ubuntu
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# mount /dev/rbd3 /violet/ubuntu
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# df -h | grep rbd3
/dev/rbd3 976K 24K 884K 3% /violet/ubuntu
[root@prometheus-server31 ~]#
- 4.第一次写入测试数据
[root@prometheus-server31 ~]# cp /etc/hosts /etc/fstab /violet/ubuntu/
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# ll /violet/ubuntu/
total 32
drwxr-xr-x 3 root root 4096 Apr 2 09:24 ./
drwxr-xr-x 6 root root 4096 Apr 2 09:22 ../
-rw-r--r-- 1 root root 657 Apr 2 09:24 fstab
-rw-r--r-- 1 root root 226 Apr 2 09:24 hosts
drwx------ 2 root root 16384 Apr 2 09:21 lost+found/
[root@prometheus-server31 ~]#
- 5.服务端拍快照
[root@ceph141 ~]# rbd snap create laonanhai/ubuntu-2404-lts@2025-04-02-first
Creating snap: 100% complete...done.
[root@ceph141 ~]#
[root@ceph141 ~]# rbd snap ls laonanhai/ubuntu-2404-lts
SNAPID NAME SIZE PROTECTED TIMESTAMP
3 2025-04-02-first 1 MiB Wed Apr 2 09:25:55 2025
[root@ceph141 ~]#
- 6.第二次写入测试数据
[root@prometheus-server31 ~]# cp /etc/os-release /etc/hostname /violet/ubuntu/
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# ll /violet/ubuntu/
total 40
drwxr-xr-x 3 root root 4096 Apr 2 09:26 ./
drwxr-xr-x 6 root root 4096 Apr 2 09:22 ../
-rw-r--r-- 1 root root 657 Apr 2 09:24 fstab
-rw-r--r-- 1 root root 20 Apr 2 09:26 hostname
-rw-r--r-- 1 root root 226 Apr 2 09:24 hosts
drwx------ 2 root root 16384 Apr 2 09:21 lost+found/
-rw-r--r-- 1 root root 427 Apr 2 09:26 os-release
[root@prometheus-server31 ~]#
- 7.服务端再拍快照
[root@ceph141 ~]# rbd snap create laonanhai/ubuntu-2404-lts@2025-04-02-second
Creating snap: 100% complete...done.
[root@ceph141 ~]#
[root@ceph141 ~]# rbd snap ls laonanhai/ubuntu-2404-lts
SNAPID NAME SIZE PROTECTED TIMESTAMP
3 2025-04-02-first 1 MiB Wed Apr 2 09:25:55 2025
4 2025-04-02-second 1 MiB Wed Apr 2 09:27:14 2025
[root@ceph141 ~]#
- 8.第三次写入测试数据
[root@prometheus-server31 ~]# cp /etc/netplan/00-installer-config.yaml /violet/ubuntu/
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# cp /etc/passwd /violet/ubuntu/
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# ll /violet/ubuntu/
total 48
drwxr-xr-x 3 root root 4096 Apr 2 09:27 ./
drwxr-xr-x 6 root root 4096 Apr 2 09:22 ../
-rw------- 1 root root 319 Apr 2 09:27 00-installer-config.yaml
-rw-r--r-- 1 root root 657 Apr 2 09:24 fstab
-rw-r--r-- 1 root root 20 Apr 2 09:26 hostname
-rw-r--r-- 1 root root 226 Apr 2 09:24 hosts
drwx------ 2 root root 16384 Apr 2 09:21 lost+found/
-rw-r--r-- 1 root root 427 Apr 2 09:26 os-release
-rw-r--r-- 1 root root 2620 Apr 2 09:27 passwd
[root@prometheus-server31 ~]#
- 9.服务端再拍快照
[root@ceph141 ~]# rbd snap create laonanhai/ubuntu-2404-lts@2025-04-02-third
Creating snap: 100% complete...done.
[root@ceph141 ~]#
[root@ceph141 ~]# rbd snap ls laonanhai/ubuntu-2404-lts
SNAPID NAME SIZE PROTECTED TIMESTAMP
3 2025-04-02-first 1 MiB Wed Apr 2 09:25:55 2025
4 2025-04-02-second 1 MiB Wed Apr 2 09:27:14 2025
5 2025-04-02-third 1 MiB Wed Apr 2 09:28:29 2025
[root@ceph141 ~]#
- 10.第四次写入测试数据
[root@prometheus-server31 ~]# rm -rf /violet/ubuntu/*
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# ll /violet/ubuntu/
total 8
drwxr-xr-x 2 root root 4096 Apr 2 09:29 ./
drwxr-xr-x 6 root root 4096 Apr 2 09:22 ../
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# cp /etc/rc.local /violet/ubuntu/
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# cp /etc/shadow /violet/ubuntu/
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# ll /violet/ubuntu/
total 16
drwxr-xr-x 2 root root 4096 Apr 2 09:29 ./
drwxr-xr-x 6 root root 4096 Apr 2 09:22 ../
-rwxr-xr-x 1 root root 124 Apr 2 09:29 rc.local*
-rw-r----- 1 root root 1473 Apr 2 09:29 shadow
[root@prometheus-server31 ~]#
- 11.服务端再拍快照
[root@ceph141 ~]# rbd snap create laonanhai/ubuntu-2404-lts@2025-04-02-fourth
Creating snap: 100% complete...done.
[root@ceph141 ~]#
[root@ceph141 ~]# rbd snap ls laonanhai/ubuntu-2404-lts
SNAPID NAME SIZE PROTECTED TIMESTAMP
3 2025-04-02-first 1 MiB Wed Apr 2 09:25:55 2025
4 2025-04-02-second 1 MiB Wed Apr 2 09:27:14 2025
5 2025-04-02-third 1 MiB Wed Apr 2 09:28:29 2025
6 2025-04-02-fourth 1 MiB Wed Apr 2 09:30:30 2025
[root@ceph141 ~]#
[root@ceph141 ~]#
- 12.客户端移除映射
[root@prometheus-server31 ~]# umount /violet/ubuntu/
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# ll /violet/ubuntu/
total 8
drwxr-xr-x 2 root root 4096 Apr 2 09:22 ./
drwxr-xr-x 6 root root 4096 Apr 2 09:22 ../
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# df -h | grep rbd3
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# rbd showmapped
id pool namespace image snap device
0 violet prometheus-server - /dev/rbd0
1 violet child-xixi-001 - /dev/rbd1
2 violet prometheus - /dev/rbd2
3 laonanhai ubuntu-2404-lts - /dev/rbd3
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# rbd unmap /dev/rbd3
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# rbd showmapped
id pool namespace image snap device
0 violet prometheus-server - /dev/rbd0
1 violet child-xixi-001 - /dev/rbd1
2 violet prometheus - /dev/rbd2
[root@prometheus-server31 ~]#
- 13.回滚到指定快照
[root@ceph141 ~]# rbd snap ls laonanhai/ubuntu-2404-lts
SNAPID NAME SIZE PROTECTED TIMESTAMP
3 2025-04-02-first 1 MiB Wed Apr 2 09:25:55 2025
4 2025-04-02-second 1 MiB Wed Apr 2 09:27:14 2025
5 2025-04-02-third 1 MiB Wed Apr 2 09:28:29 2025
6 2025-04-02-fourth 1 MiB Wed Apr 2 09:30:30 2025
[root@ceph141 ~]#
[root@ceph141 ~]# rbd snap rollback laonanhai/ubuntu-2404-lts@2025-04-02-second
Rolling back to snapshot: 100% complete...done.
[root@ceph141 ~]#
[root@ceph141 ~]# rbd snap ls laonanhai/ubuntu-2404-lts
SNAPID NAME SIZE PROTECTED TIMESTAMP
3 2025-04-02-first 1 MiB Wed Apr 2 09:25:55 2025
4 2025-04-02-second 1 MiB Wed Apr 2 09:27:14 2025
5 2025-04-02-third 1 MiB Wed Apr 2 09:28:29 2025
6 2025-04-02-fourth 1 MiB Wed Apr 2 09:30:30 2025
[root@ceph141 ~]#
- 14.客户端重新挂载
[root@prometheus-server31 ~]# rbd map laonanhai/ubuntu-2404-lts
/dev/rbd3
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# mount /dev/rbd3 /violet/ubuntu/
[root@prometheus-server31 ~]#
[root@prometheus-server31 ~]# ll /violet/ubuntu/
total 40
drwxr-xr-x 3 root root 4096 Apr 2 09:26 ./
drwxr-xr-x 6 root root 4096 Apr 2 09:22 ../
-rw-r--r-- 1 root root 657 Apr 2 09:24 fstab
-rw-r--r-- 1 root root 20 Apr 2 09:26 hostname
-rw-r--r-- 1 root root 226 Apr 2 09:24 hosts
drwx------ 2 root root 16384 Apr 2 09:21 lost+found/
-rw-r--r-- 1 root root 427 Apr 2 09:26 os-release
[root@prometheus-server31 ~]#
ceph块设备的回收站机制【推荐使用】
- 1.创建测试块设备
[root@ceph141 ~]# rbd ls violet
child-xixi-001
harbor
mysql80
node-exporter
prometheus
prometheus-server
ubuntu-2204
[root@ceph141 ~]#
- 2.将块设备移到回收站模拟删除效果
[root@ceph141 ~]# rbd trash move violet/child-xixi-001
[root@ceph141 ~]#
- 3.查看指定存储池回收站列表
[root@ceph141 ~]# rbd trash ls -p violet
d83350e0ece5 child-xixi-001
[root@ceph141 ~]#
[root@ceph141 ~]# rbd trash ls -p violet -l
ID NAME SOURCE DELETED_AT STATUS PARENT
d83350e0ece5 child-xixi-001 USER Wed Apr 2 09:59:48 2025 expired at Wed Apr 2 09:59:48 2025
[root@ceph141 ~]#
- 4.再次查看存储池的信息列表【发现块设备没有了】
[root@ceph141 ~]# rbd ls violet
harbor
mysql80
node-exporter
prometheus
prometheus-server
ubuntu-2204
[root@ceph141 ~]#
- 5.恢复块设备
[root@ceph141 ~]# rbd trash restore -p violet --image child-xixi-001 --image-id d83350e0ece5
[root@ceph141 ~]#
- 6.验证是否恢复成功
[root@ceph141 ~]# rbd ls violet
child-xixi-001
harbor
mysql80
node-exporter
prometheus
prometheus-server
ubuntu-2204
[root@ceph141 ~]#
[root@ceph141 ~]# rbd info violet/child-xixi-001
rbd image 'child-xixi-001':
size 20 GiB in 5120 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: d83350e0ece5
block_name_prefix: rbd_data.d83350e0ece5
format: 2
features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
op_features:
flags:
create_timestamp: Tue Apr 1 15:33:32 2025
access_timestamp: Tue Apr 1 15:33:32 2025
modify_timestamp: Tue Apr 1 15:33:32 2025
[root@ceph141 ~]#
RBD的块设备的快照数量限制
- 1 创建指定数量的快照
1.1 为添加限制前
[root@ceph141 ~]# rbd snap ls laonanhai/ubuntu-2404-lts
SNAPID NAME SIZE PROTECTED TIMESTAMP
3 2025-04-02-first 1 MiB Wed Apr 2 09:25:55 2025
4 2025-04-02-second 1 MiB Wed Apr 2 09:27:14 2025
5 2025-04-02-third 1 MiB Wed Apr 2 09:28:29 2025
6 2025-04-02-fourth 1 MiB Wed Apr 2 09:30:30 2025
[root@ceph141 ~]#
[root@ceph141 ~]# rbd info laonanhai/ubuntu-2404-lts
rbd image 'ubuntu-2404-lts':
size 1 MiB in 1 objects
order 22 (4 MiB objects)
snapshot_count: 4
id: d7dd78861a29
block_name_prefix: rbd_data.d7dd78861a29
format: 2
features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
op_features:
flags:
create_timestamp: Wed Apr 2 09:15:41 2025
access_timestamp: Wed Apr 2 09:15:41 2025
modify_timestamp: Wed Apr 2 09:15:41 2025
[root@ceph141 ~]#
1.2 添加快照限制
[root@ceph141 ~]# rbd snap limit set laonanhai/ubuntu-2404-lts --limit 5
[root@ceph141 ~]#
[root@ceph141 ~]# rbd info laonanhai/ubuntu-2404-lts
rbd image 'ubuntu-2404-lts':
size 1 MiB in 1 objects
order 22 (4 MiB objects)
snapshot_count: 4
id: d7dd78861a29
block_name_prefix: rbd_data.d7dd78861a29
format: 2
features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
op_features:
flags:
create_timestamp: Wed Apr 2 09:15:41 2025
access_timestamp: Wed Apr 2 09:15:41 2025
modify_timestamp: Wed Apr 2 09:15:41 2025
snapshot_limit: 5
[root@ceph141 ~]#
- 2.创建快照测试
[root@ceph141 ~]# rbd snap create laonanhai/ubuntu-2404-lts@xixi
Creating snap: 100% complete...done.
[root@ceph141 ~]#
[root@ceph141 ~]# rbd snap ls laonanhai/ubuntu-2404-lts
SNAPID NAME SIZE PROTECTED TIMESTAMP
3 2025-04-02-first 1 MiB Wed Apr 2 09:25:55 2025
4 2025-04-02-second 1 MiB Wed Apr 2 09:27:14 2025
5 2025-04-02-third 1 MiB Wed Apr 2 09:28:29 2025
6 2025-04-02-fourth 1 MiB Wed Apr 2 09:30:30 2025
7 xixi 1 MiB Wed Apr 2 10:05:43 2025
[root@ceph141 ~]#
[root@ceph141 ~]# rbd snap create laonanhai/ubuntu-2404-lts@haha
Creating snap: 10% complete...failed.
rbd: failed to create snapshot: (122) Disk quota exceeded
[root@ceph141 ~]#
[root@ceph141 ~]# rbd info laonanhai/ubuntu-2404-lts
rbd image 'ubuntu-2404-lts':
size 1 MiB in 1 objects
order 22 (4 MiB objects)
snapshot_count: 5
id: d7dd78861a29
block_name_prefix: rbd_data.d7dd78861a29
format: 2
features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
op_features:
flags:
create_timestamp: Wed Apr 2 09:15:41 2025
access_timestamp: Wed Apr 2 09:15:41 2025
modify_timestamp: Wed Apr 2 09:15:41 2025
snapshot_limit: 5
[root@ceph141 ~]#
- 3.清除快照数量限制
[root@ceph141 ~]# rbd snap limit clear laonanhai/ubuntu-2404-lts
[root@ceph141 ~]#
[root@ceph141 ~]# rbd info laonanhai/ubuntu-2404-lts
rbd image 'ubuntu-2404-lts':
size 1 MiB in 1 objects
order 22 (4 MiB objects)
snapshot_count: 5
id: d7dd78861a29
block_name_prefix: rbd_data.d7dd78861a29
format: 2
features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
op_features:
flags:
create_timestamp: Wed Apr 2 09:15:41 2025
access_timestamp: Wed Apr 2 09:15:41 2025
modify_timestamp: Wed Apr 2 09:15:41 2025
[root@ceph141 ~]#
- 4.再次测试验证,发现能够创建快照
[root@ceph141 ~]# rbd snap create laonanhai/ubuntu-2404-lts@haha
Creating snap: 100% complete...done.
[root@ceph141 ~]#
[root@ceph141 ~]# rbd snap ls laonanhai/ubuntu-2404-lts
SNAPID NAME SIZE PROTECTED TIMESTAMP
3 2025-04-02-first 1 MiB Wed Apr 2 09:25:55 2025
4 2025-04-02-second 1 MiB Wed Apr 2 09:27:14 2025
5 2025-04-02-third 1 MiB Wed Apr 2 09:28:29 2025
6 2025-04-02-fourth 1 MiB Wed Apr 2 09:30:30 2025
7 xixi 1 MiB Wed Apr 2 10:05:43 2025
9 haha 1 MiB Wed Apr 2 10:08:14 2025
[root@ceph141 ~]#
- 5.删除所有快照
[root@ceph141 ~]# rbd snap rm laonanhai/ubuntu-2404-lts@xixi # 删除单个快照
Removing snap: 100% complete...done.
[root@ceph141 ~]#
[root@ceph141 ~]# rbd snap ls laonanhai/ubuntu-2404-lts
SNAPID NAME SIZE PROTECTED TIMESTAMP
3 2025-04-02-first 1 MiB Wed Apr 2 09:25:55 2025
4 2025-04-02-second 1 MiB Wed Apr 2 09:27:14 2025
5 2025-04-02-third 1 MiB Wed Apr 2 09:28:29 2025
6 2025-04-02-fourth 1 MiB Wed Apr 2 09:30:30 2025
9 haha 1 MiB Wed Apr 2 10:08:14 2025
[root@ceph141 ~]#
[root@ceph141 ~]#
[root@ceph141 ~]# rbd snap rm violet/mysql80 --snap oldboyedu-linux94-v2
Removing snap: 100% complete...done.
[root@ceph141 ~]#
[root@ceph141 ~]# rbd snap ls violet/mysql80
SNAPID NAME SIZE PROTECTED TIMESTAMP
3 oldboyedu-snap-rbd-v1-20241205 50 GiB Thu Dec 5 19:08:46 2024
4 oldboyedu-linux94-xixi 50 GiB Thu Dec 5 19:19:49 2024
5 oldboyedu-linux94-haha 50 GiB Thu Dec 5 19:21:49 2024
7 oldboyedu-linux94-hehe 50 GiB Thu Dec 5 19:35:11 2024
8 oldboyedu-linux94-v1 50 GiB Thu Dec 5 19:35:14 2024
[root@ceph141 ~]#
[root@ceph141 ~]# rbd snap purge laonanhai/ubuntu-2404-lts # 删除所有的快照
Removing all snapshots: 100% complete...done.
[root@ceph141 ~]#
[root@ceph141 ~]# rbd snap ls laonanhai/ubuntu-2404-lts
[root@ceph141 ~]#