Version

[root@clienta ~]# ceph -v
ceph version 16.2.0-117.el8cp (0e34bb74700060ebfaa22d99b7d2cdc037b28a57) pacific (stable)
[root@clienta ~]#

RBD advanced features

Create a block device image

[root@clienta ~]# ceph osd pool create rbd
pool 'rbd' created
[root@clienta ~]# rbd pool init rbd
[root@clienta ~]# ceph osd pool ls detail | grep rbd
pool 6 'rbd' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 32 pgp_num 32 autoscale_mode on last_change 226 flags hashpspool,selfmanaged_snaps stripe_width 0 application rbd
[root@clienta ~]#
[root@clienta ~]# rbd create --size 1G -p rbd image1
[root@clienta ~]# rbd -p rbd ls
image1
[root@clienta ~]# rbd info image1
rbd image 'image1':
size 1 GiB in 256 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: fab9c898fcc1
block_name_prefix: rbd_data.fab9c898fcc1
format: 2
features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
op_features:
flags:
create_timestamp: Sun Aug 14 05:10:43 2022
access_timestamp: Sun Aug 14 05:10:43 2022
modify_timestamp: Sun Aug 14 05:10:43 2022
[root@clienta ~]#

Default features

[root@clienta ~]# ceph config ls | grep feature
enable_experimental_unrecoverable_data_corrupting_features
mon_debug_no_initial_persistent_features
rbd_default_features
[root@clienta ~]# ceph config get osd rbd_default_features
layering,exclusive-lock,object-map,fast-diff,deep-flatten
[root@clienta ~]#

layering: supports cloning
striping: striping across objects (RAID-like)
exclusive-lock: a distributed exclusive lock, mainly to stop multiple clients writing the same image at once and corrupting data (consistency is guaranteed; performance may drop under multi-client access)
object-map: thin provisioning; space is not allocated up front, only when data is written (requires exclusive-lock)
fast-diff: speeds up diff/usage calculation (requires object-map)
deep-flatten: flattens all snapshots of an RBD image
journaling: journal support; the mirroring feature depends on it
data-pool: erasure-coded data pool support

[root@clienta ~]# rbd feature enable rbd/image1 journaling
[root@clienta ~]# rbd info image1
rbd image 'image1':
size 1 GiB in 256 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: fab9c898fcc1
block_name_prefix: rbd_data.fab9c898fcc1
format: 2
features: layering, exclusive-lock, object-map, fast-diff, deep-flatten, journaling
op_features:

Enabling a new feature (my build does not support the data-pool feature).

[root@clienta ~]# rbd feature disable rbd/image1 journaling

Some errors report the unsupported feature as a bitmask such as [0x40]; you can identify the feature from that ID.

rbd_default_features=24 (8 object-map + 16 fast-diff)
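
Each feature maps to a bit, so a numeric rbd_default_features is just the sum of the enabled bits. A minimal sketch (these are the standard RBD feature bit values; imagex is a hypothetical image name):

# RBD feature bits:
#   layering=1  striping=2  exclusive-lock=4  object-map=8
#   fast-diff=16  deep-flatten=32  journaling=64
# 24 = 8 (object-map) + 16 (fast-diff)
ceph config set client rbd_default_features 24

# or pick features per image at creation time:
rbd create --size 1G --image-feature layering,exclusive-lock rbd/imagex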



striping (similar to RAID striping)

stripe-unit=1M: the size of each stripe unit

stripe-count=4: the number of objects to stripe across

so 4 MiB of data can be written concurrently across 4 objects

[root@clienta ~]# rbd create --size 1G --stripe-unit=1M --stripe-count=4 image2
[root@clienta ~]# rbd info image2
rbd image 'image2':
size 1 GiB in 256 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: fad5ece2e6b6
block_name_prefix: rbd_data.fad5ece2e6b6
format: 2
features: layering, striping, exclusive-lock, object-map, fast-diff, deep-flatten
op_features:
flags:
create_timestamp: Sun Aug 14 05:38:12 2022
access_timestamp: Sun Aug 14 05:38:12 2022
modify_timestamp: Sun Aug 14 05:38:12 2022
stripe unit: 1 MiB
stripe count: 4
[root@clienta ~]#

By default, data is written one object at a time, and each object is filled to its full 4 MiB before the next one is used; striping spreads the writes across objects instead.
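
To see the striping at work, one hedged option is to write a few megabytes and list the backing RADOS objects (the rbd_data prefix is the one rbd info printed for image2 above; /dev/rbd0 is whatever device name rbd map returns):

rbd map rbd/image2
dd if=/dev/zero of=/dev/rbd0 bs=4M count=1 oflag=direct   # one 4 MiB sequential write
# with stripe-unit=1M and stripe-count=4, the four 1 MiB stripe units land in
# four different objects instead of filling a single 4 MiB object first
rados -p rbd ls | grep rbd_data.fad5ece2e6b6 | sort
rbd unmap rbd/image2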

[root@clienta ~]# rbd map rbd/image2
/dev/rbd0

1. RBD snapshots

[root@clienta ~]# rbd showmapped
id  pool  namespace  image   snap  device
0   rbd              image2  -     /dev/rbd0
[root@clienta ~]# rbd unmap rbd/image2
[root@clienta ~]# rbd rm image1
Removing image: 100% complete...done.
[root@clienta ~]# rbd rm image2
Removing image: 100% complete...done.
[root@clienta ~]# rbd create rbd/image1 --size 1G
[root@clienta ~]# rbd map image1
/dev/rbd0
[root@clienta ~]# mkfs.xfs /dev/rbd0
meta-data=/dev/rbd0 isize=512 agcount=8, agsize=32768 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=1, sparse=1, rmapbt=0
= reflink=1
data = bsize=4096 blocks=262144, imaxpct=25
= sunit=16 swidth=16 blks
naming =version 2 bsize=4096 ascii-ci=0, ftype=1
log =internal log bsize=4096 blocks=2560, version=2
= sectsz=512 sunit=16 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
Discarding blocks...Done.
[root@clienta ~]# mkdir /mnt/rbddev
[root@clienta ~]# mount /dev/rbd0 /mnt/rbddev/
[root@clienta ~]# cp /etc/passwd /etc/profile /mnt/rbddev/
[root@clienta ~]# cd /mnt/rbddev/
[root@clienta rbddev]# dd if=/dev/zero of=file1 bs=1M count=10
10+0 records in
10+0 records out
10485760 bytes (10 MB, 10 MiB) copied, 0.0169812 s, 617 MB/s
[root@clienta rbddev]# ls
file1 passwd profile
[root@clienta rbddev]# sync
[root@clienta rbddev]# rbd snap create rbd/image1@snap1
Creating snap: 100% complete...done.
[root@clienta rbddev]# rbd info image1
rbd image 'image1':
size 1 GiB in 256 objects
order 22 (4 MiB objects)
snapshot_count: 1
id: fae5f58d3b62
block_name_prefix: rbd_data.fae5f58d3b62
format: 2
features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
op_features:
flags:
create_timestamp: Sun Aug 14 05:42:02 2022
access_timestamp: Sun Aug 14 05:42:02 2022
modify_timestamp: Sun Aug 14 05:42:02 2022
[root@clienta rbddev]# rbd snap ls image1
SNAPID  NAME   SIZE   PROTECTED  TIMESTAMP
4       snap1  1 GiB             Sun Aug 14 05:48:07 2022

A snapshot cannot be mounted (it is read-only, so the mount is refused).

The snapshot shows the same metadata as the original image.
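
As a hedged side note: a snapshot can usually still be inspected by mapping it read-only and mounting with options that skip log replay (norecovery is XFS-specific; the /dev/rbd1 device name is an assumption):

rbd map --read-only rbd/image1@snap1             # snapshots map as read-only devices
mkdir -p /mnt/snapview
mount -o ro,norecovery /dev/rbd1 /mnt/snapview   # a plain ro mount fails if the XFS log is dirty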

Testing the snapshot

[root@clienta rbddev]# ls
file1 passwd profile
[root@clienta rbddev]# rm -rf passwd
[root@clienta rbddev]# ls
file1 profile
[root@clienta ~]# umount /mnt/rbddev
[root@clienta ~]# rbd unmap /dev/rbd0
[root@clienta ~]# rbd snap rollback rbd/image1@snap1
Rolling back to snapshot: 100% complete...done.
[root@clienta ~]#

The image has to be unmounted and unmapped before it can be rolled back.

[root@clienta ~]# rbd showmapped
id  pool  namespace  image   snap  device
0   rbd              image1  -     /dev/rbd0
[root@clienta ~]# rbd unmap /dev/rbd0
rbd: sysfs write failed

The rollback succeeded:

[root@clienta ~]# mount /dev/rbd0 /mnt/rbddev/
[root@clienta ~]# cd /mnt/rbddev/
[root@clienta rbddev]# ls
file1 passwd profile

A pool snapshot lets you roll back a single object (for example, if one object in the pool is lost); an RBD snapshot rollback instead restores the whole image to its state at snapshot time.
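
For contrast, single-object rollback against a pool snapshot looks roughly like this; a minimal sketch with hypothetical names (testpool, obj1, poolsnap1), applicable only to plain RADOS pools, since RBD pools use self-managed snapshots:

rados -p testpool put obj1 /etc/hosts       # create an object
rados -p testpool mksnap poolsnap1          # pool-level snapshot
rados -p testpool put obj1 /etc/passwd      # modify the object afterwards
rados -p testpool rollback obj1 poolsnap1   # roll back just this one object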

How snapshots work

When taking a snapshot, it is best to pause access to the filesystem (stop the workload).

A snapshot is a read-only copy of the source image; copy-on-write (COW) keeps it space-efficient.



A snapshot takes no space at first; when original data is about to be modified, the old blocks are first copied into the snapshot and only then overwritten (copy-before-write).
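
You can watch that cost with rbd du: right after the snapshot it reports almost nothing used by the snapshot, and the number grows as the original image is overwritten. A minimal sketch (the output shape is illustrative):

rbd du rbd/image1
# NAME          PROVISIONED  USED
# image1@snap1  1 GiB        16 MiB   <- grows as copied-out blocks accumulate
# image1        1 GiB        40 MiB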

With some snapshot implementations, a snapshot is consumed after a single use; here, the snapshot still exists after you use it:

SNAPID  NAME   SIZE   PROTECTED  TIMESTAMP
4       snap1  1 GiB             Sun Aug 14 05:48:07 2022
[root@clienta rbddev]#

The more snapshots an image has, the slower it gets.

A quick example: deleting a snapshot.

[root@clienta rbddev]# rbd snap rm image1@snap1
Removing snap: 100% complete...done.
[root@clienta rbddev]# rbd snap ls image1
[root@clienta rbddev]#

An RBD snapshot is a read-only copy of the original image.

RBD clone feature (depends on snapshots)

Snapshot data never changes, so a clone is built on top of a snapshot.

2. Image cloning

A clone is readable and writable.

[root@clienta rbddev]# rbd clone rbd/image1@snap1 rbd/clone1
2022-08-14T06:30:28.220-0400 7febbdb2f700 -1 librbd::image::CloneRequest: 0x55f9906c6b00 validate_parent: parent snapshot must be protected
rbd: clone error: (22) Invalid argument
[root@clienta rbddev]# rbd snap protect rbd/image1@snap1
[root@clienta rbddev]# rbd clone rbd/image1@snap1 rbd/clone1
[root@clienta rbddev]# rbd ls
clone1
image1
[root@clienta ~]# blkid
/dev/vda1: PARTUUID="fac7f1fb-3e8d-4137-a512-961de09a5549"
/dev/vda2: SEC_TYPE="msdos" UUID="7B77-95E7" BLOCK_SIZE="512" TYPE="vfat" PARTUUID="68b2905b-df3e-4fb3-80fa-49d1e773aa33"
/dev/vda3: LABEL="root" UUID="d47ead13-ec24-428e-9175-46aefa764b26" BLOCK_SIZE="512" TYPE="xfs" PARTUUID="6264d520-3fb9-423f-8ab8-7a0a8e3d3562"
/dev/rbd1: UUID="4d62ee62-03dc-4acc-b925-eeea77238c1b" BLOCK_SIZE="512" TYPE="xfs"
/dev/rbd0: UUID="4d62ee62-03dc-4acc-b925-eeea77238c1b" BLOCK_SIZE="512" TYPE="xfs"
[root@clienta ~]#
The clone looks exactly like the original image.

You can keep writing files to a clone; it is a read-write copy (built on an RBD snapshot). Until you modify it, it is still a COW mapping, so what you see is the original image's data. A clone can also be made independent of the original image.

Snapshots use COW (copy-on-write); clones additionally support COR (copy-on-read).

Enable COR, otherwise every read is served from the parent image and its load grows; it is better to copy data into the clone on first read.
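
Copy-on-read is off by default; it is governed by the rbd_clone_copy_on_read client option. A minimal sketch for turning it on:

ceph config set client rbd_clone_copy_on_read true
ceph config get client rbd_clone_copy_on_read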

List the child images, then make the child (the clone) independent:

[root@clienta ~]# rbd children image1@snap1
rbd/clone1
[root@clienta ~]# rbd flatten rbd/clone1
Image flatten: 100% complete...done.
[root@clienta ~]# rbd children image1@snap1
[root@clienta ~]#

3. RBD cache

On the server side, writes go straight to disk; the data gets no cache-level aggregation.

With the client-side cache, writes land in client memory first and are flushed to the cluster afterwards. Caching is enabled by default.

Both the first and the sixth option below must be on before write-back actually starts.

They can be listed:

[root@clienta ~]# ceph config ls | grep rbd_cache
rbd_cache
rbd_cache_policy
rbd_cache_writethrough_until_flush
rbd_cache_size
rbd_cache_max_dirty
rbd_cache_target_dirty
rbd_cache_max_dirty_age
rbd_cache_max_dirty_object
rbd_cache_block_writes_upfront
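
A hedged sketch of inspecting and tuning the cache; rbd_cache_writethrough_until_flush keeps the cache in safe write-through mode until the client issues its first flush, after which write-back takes over. The values below are illustrative, not recommendations:

ceph config get client rbd_cache                            # true by default
ceph config get client rbd_cache_writethrough_until_flush   # true by default
ceph config set client rbd_cache_size 67108864              # 64 MiB cache
ceph config set client rbd_cache_max_dirty 50331648         # allow up to 48 MiB dirty (write-back)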

4. Image export and import

This works like a backup of the image.

On the first Ceph cluster:

[root@clienta ~]# rbd ls
clone1
image1
[root@clienta ~]# rbd snap ls image1
SNAPID  NAME   SIZE   PROTECTED  TIMESTAMP
6       snap1  1 GiB  yes        Sun Aug 14 06:29:50 2022
[root@clienta ~]#
[root@clienta ~]# rbd export rbd/image1 image1-v1
Exporting image: 100% complete...done.
[root@clienta ~]# ls
image1-v1
[root@clienta ~]# rsync image1-v1 root@serverf:~
Warning: Permanently added 'serverf,172.25.250.15' (ECDSA) to the list of known hosts.
[root@clienta ~]#

On the second Ceph cluster:

[root@serverf ~]# ceph osd pool create f-rbd
pool 'f-rbd' created
[root@serverf ~]# rbd pool init f-rbd
[root@serverf ~]#
[root@serverf ~]# rbd import image1-v1 f-rbd/image1
Importing image: 100% complete...done.
[root@serverf ~]# rbd info f-rbd/image1
rbd image 'image1':
size 1 GiB in 256 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: ac64adf717b3
block_name_prefix: rbd_data.ac64adf717b3
format: 2
features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
op_features:
flags:
create_timestamp: Sun Aug 14 07:42:46 2022
access_timestamp: Sun Aug 14 07:42:46 2022
modify_timestamp: Sun Aug 14 07:42:46 2022
[root@serverf ~]# rbd map f-rbd/image1
/dev/rbd0
[root@serverf ~]# mount /dev/rbd0 /mnt/
[root@serverf ~]# cd /mnt/
[root@serverf mnt]# ls
file1 passwd profile
[root@serverf mnt]#

The point-in-time backup succeeded. So what if I keep modifying the image on clienta?

[root@clienta ~]# rbd map image1
/dev/rbd0
[root@clienta ~]# mount /dev/rbd0 /mnt/rbddev/
[root@clienta ~]#
[root@clienta rbddev]# cp /etc/group .
[root@clienta rbddev]# ls
file1 group passwd profile

This may be a bad example: you should export from a snapshot, which pins the point in time.

Take two:

[root@clienta ~]# rbd snap  ls image1
SNAPID  NAME   SIZE   PROTECTED  TIMESTAMP
6       snap1  1 GiB  yes        Sun Aug 14 06:29:50 2022
[root@clienta ~]# rbd export rbd/image1@snap1 image-snap1
Exporting image: 100% complete...done.
[root@clienta ~]#
[root@clienta ~]# rsync image-snap1 root@serverf:~
[root@clienta ~]#
[root@serverf ~]# rbd import image-snap1 f-rbd/image1
Importing image: 100% complete...done.
[root@serverf ~]# rbd map f-rbd/image1
/dev/rbd0
[root@serverf ~]# mount /dev/rbd0 /mnt
[root@serverf ~]# cd /mnt
[root@serverf mnt]# ls
file1 passwd profile
[root@clienta ~]# rbd export-diff --from-snap snap1 rbd/image1 image1-v1-v2
[root@clienta ~]# rsync image1-v1-v2 root@serverf:~

On the backup cluster, on top of the earlier full backup, first take a snapshot marking the same point:
[root@serverf ~]# rbd snap create f-rbd/image1@snap1
Creating snap: 100% complete...done.
[root@serverf ~]# rbd import-diff image1-v1-v2 f-rbd/image1
Importing image diff: 100% complete...done.
[root@serverf ~]# mount /dev/rbd0 /mnt
[root@serverf ~]# cd /mnt
[root@serverf mnt]# ls
file1 group passwd profile

Walking through the whole flow again (incremental backup)

[root@clienta ~]# rbd create test  --size 1G --pool rbd
[root@clienta ~]# rbd ls
clone1
image1
test
[root@clienta ~]# rbd map test
/dev/rbd0
[root@clienta ~]# mkfs.xfs /dev/rbd0
meta-data=/dev/rbd0 isize=512 agcount=8, agsize=32768 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=1, sparse=1, rmapbt=0
= reflink=1
data = bsize=4096 blocks=262144, imaxpct=25
= sunit=16 swidth=16 blks
naming =version 2 bsize=4096 ascii-ci=0, ftype=1
log =internal log bsize=4096 blocks=2560, version=2
= sectsz=512 sunit=16 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
Discarding blocks...Done.
[root@clienta ~]# mount /dev/rbd0 /mnt/rbddev/
[root@clienta ~]# cd /mnt/rbddev/
[root@clienta rbddev]# ls
[root@clienta rbddev]# touch file{1..10}
[root@clienta rbddev]# ls
file1 file10 file2 file3 file4 file5 file6 file7 file8 file9
[root@clienta rbddev]#
[root@clienta ~]# umount /mnt/rbddev
[root@clienta ~]# rbd unmap rbd/test
[root@clienta ~]# rbd showmapped
[root@clienta ~]# rbd export rbd/test test-v1
Exporting image: 100% complete...done.
[root@clienta ~]#
[root@clienta ~]# ls
test-v1
[root@clienta ~]# rsync test-v1 root@serverf:~
[root@serverf ~]# ls
ceph  test-v1
[root@serverf ~]# rbd import test-v1 f-rbd/test1
Importing image: 100% complete...done.
[root@serverf ~]# rbd map f-rbd/test1
/dev/rbd0
[root@serverf ~]# mount /dev/rbd0 /mnt/
[root@serverf ~]# cd /mnt/
[root@serverf mnt]# ls
file1 file10 file2 file3 file4 file5 file6 file7 file8 file9
[root@serverf mnt]#
Unmount and unmap:
[root@serverf mnt]# cd
[root@serverf ~]#
[root@serverf ~]# umount /mnt
[root@serverf ~]# rbd unmap f-rbd/test1
[root@serverf ~]#

That completes the first full backup.

Snapshot both sides at the same point:

[root@clienta ~]# rbd snap  create test@testsnap1
Creating snap: 100% complete...done.
[root@serverf ~]# rbd snap create f-rbd/test1@testsnap1   # the snapshot names on both sides must match
Creating snap: 100% complete...done.

If the names differ, the last step errors out:

[root@serverf ~]# rbd import-diff snap1-snap2 f-rbd/test1
start snapshot 'testsnap1' does not exist in the image, aborting
Importing image diff: 0% complete...failed.

Now make incremental changes on the primary:

[root@clienta ~]# rbd map rbd/test
/dev/rbd0
[root@clienta ~]# mount /dev/rbd0 /mnt/rbddev/
[root@clienta ~]# cd /mnt/rbddev/
[root@clienta rbddev]# ls
file1 file10 file2 file3 file4 file5 file6 file7 file8 file9
[root@clienta rbddev]# touch hello
[root@clienta rbddev]# touch mqy
[root@clienta rbddev]# cp /etc/passwd .
[root@clienta rbddev]# ls
file1 file10 file2 file3 file4 file5 file6 file7 file8 file9 hello mqy passwd
[root@clienta rbddev]#
[root@clienta ~]# umount /mnt/rbddev
[root@clienta ~]# rbd unmap rbd/test
[root@clienta ~]#

Create the second snapshot, export the diff, and ship it over:

[root@clienta ~]# rbd snap create rbd/test@testsnap2
Creating snap: 100% complete...done.
[root@clienta ~]# rbd export-diff --from-snap testsnap1 rbd/test@testsnap2 snap1-snap2
Exporting image: 100% complete...done.
[root@clienta ~]# rsync snap1-snap2 root@serverf:~
[root@clienta ~]#

On the backup node, apply the incremental data:

[root@serverf ~]# rbd import-diff snap1-snap2 f-rbd/test1
Importing image diff: 100% complete...done.
[root@serverf ~]# rbd map f-rbd/test1
/dev/rbd0
[root@serverf ~]# mount /dev/rbd0 /mnt/
[root@serverf ~]# cd /mnt/
[root@serverf mnt]# ls
file1 file10 file2 file3 file4 file5 file6 file7 file8 file9 hello mqy passwd

In this version, serverf insists that the node receiving the increment carry a matching snapshot: import-diff aborts unless the start snapshot exists in the target image (earlier versions reportedly did not require this).

To guard against cluster failure, keep copies: back the data up.
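
Pulling the flow together, a minimal backup-script sketch; the pool, image, and host names come from this lab, while the snapshot naming and /tmp staging paths are my own assumptions:

#!/bin/bash
# incremental RBD backup: snapshot -> export-diff -> rsync -> import-diff
SRC_IMG=rbd/test
DST_HOST=serverf
DST_IMG=f-rbd/test1
PREV=testsnap1    # must already exist on BOTH images
CUR=testsnap2

rbd snap create ${SRC_IMG}@${CUR}
rbd export-diff --from-snap ${PREV} ${SRC_IMG}@${CUR} /tmp/${PREV}-${CUR}
rsync /tmp/${PREV}-${CUR} root@${DST_HOST}:/tmp/
ssh root@${DST_HOST} "rbd import-diff /tmp/${PREV}-${CUR} ${DST_IMG}"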

5. RBD mirroring (one-way sync)

Disaster recovery: the primary cluster dies outright and becomes unreachable; you switch to the standby cluster seamlessly.

It does not solve human error: if the primary is written by mistake, the standby gets the bad write too.

Human mistakes can only be fixed through backups; if something goes wrong, import the backup again.

Strong consistency across both sites would make synchronization latency very high: the standby would have to write successfully and report back before the primary performed its own write.

RBD-mirror

This feature requires journaling to be enabled, which costs a little performance since writes hit the journal first. The standby cluster actively pulls the journal over, replays it, and updates its copy of the image. Replication therefore rides on the journal rather than on the original slow wait for all three replicas to be written.

Image-level mirroring: configure 1-to-1 mirroring for individual images.

Pool-level mirroring: one pool mirrored to another, one to one. One-way versus two-way comes down to where rbd-mirror is installed.

For two-way mirroring, both sides run rbd-mirror; an image-level sketch follows below.
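
For the image-level variant, the pool is enabled in image mode and each image is opted in individually; a hedged sketch of the commands (journal-based, matching this chapter):

rbd mirror pool enable rbd image             # pool carries per-image mirroring settings
rbd mirror image enable rbd/image1 journal   # opt this one image in
rbd mirror image status rbd/image1           # check its replication state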

Hands-on

[root@clienta ~]# ceph osd pool create rbd
pool 'rbd' created
[root@clienta ~]# rbd pool init rbd
[root@serverf ~]# ceph osd pool create rbd
pool 'rbd' created
[root@serverf ~]# rbd pool init rbd

The standby cluster needs rbd-mirror to fetch data from the primary:

[root@serverf ~]# ceph orch apply rbd-mirror --placement=serverf.lab.example.com
Scheduled rbd-mirror update...
[root@serverf ~]# ceph -s
cluster:
id: 0bf7c358-25e1-11ec-ae02-52540000fa0f
health: HEALTH_OK

services:
mon: 1 daemons, quorum serverf.lab.example.com (age 9m)
mgr: serverf.lab.example.com.vuoooq(active, since 7m)
osd: 5 osds: 5 up (since 8m), 5 in (since 10m)
rbd-mirror: 1 daemon active (1 hosts)
rgw: 1 daemon active (1 hosts, 1 zones)

data:
pools: 6 pools, 137 pgs
objects: 222 objects, 4.9 KiB
usage: 96 MiB used, 50 GiB / 50 GiB avail
pgs: 137 active+clean
An rbd-mirror daemon now shows up.

Create an image on the primary cluster:

[root@clienta ~]# rbd create image1 --size 1024 --pool rbd --image-feature exclusive-lock,journaling
[root@clienta ~]# rbd info image1
rbd image 'image1':
size 1 GiB in 256 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: d3bf73566d45
block_name_prefix: rbd_data.d3bf73566d45
format: 2
features: exclusive-lock, journaling
op_features:
flags:
create_timestamp: Sun Aug 14 11:12:54 2022
access_timestamp: Sun Aug 14 11:12:54 2022
modify_timestamp: Sun Aug 14 11:12:54 2022
journal: d3bf73566d45
mirroring state: disabled
[root@clienta ~]#

Enable pool mode:

[root@clienta ~]# rbd mirror pool enable rbd pool
[root@clienta ~]# rbd info image1
rbd image 'image1':
size 1 GiB in 256 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: d3bf73566d45
block_name_prefix: rbd_data.d3bf73566d45
format: 2
features: exclusive-lock, journaling
op_features:
flags:
create_timestamp: Sun Aug 14 11:12:54 2022
access_timestamp: Sun Aug 14 11:12:54 2022
modify_timestamp: Sun Aug 14 11:12:54 2022
journal: d3bf73566d45
mirroring state: enabled
mirroring mode: journal
mirroring global id: 431aba12-fbe6-488f-b57e-320ab526d47a
mirroring primary: true
[root@clienta ~]#

Establish the peering.

Name the primary site prod and export the bootstrap token:

[root@clienta ~]# rbd mirror pool peer bootstrap create --site-name prod rbd > /root/prod
[root@clienta ~]# cat prod
eyJmc2lkIjoiMmFlNmQwNWEtMjI5YS0xMWVjLTkyNWUtNTI1NDAwMDBmYTBjIiwiY2xpZW50X2lkIjoicmJkLW1pcnJvci1wZWVyIiwia2V5IjoiQVFBSEV2bGlISnVwQ0JBQVZBZUk3Wnc3d215eEI3TytCZTR5V2c9PSIsIm1vbl9ob3N0IjoiW3YyOjE3Mi4yNS4yNTAuMTI6MzMwMC8wLHYxOjE3Mi4yNS4yNTAuMTI6Njc4OS8wXSJ9
[root@clienta ~]# rbd mirror pool info rbd
Mode: pool
Site Name: prod

Peer Sites: none
[root@clienta ~]#
[root@clienta ~]# rsync prod root@serverf:~
Warning: Permanently added 'serverf,172.25.250.15' (ECDSA) to the list of known hosts.
[root@clienta ~]#
[root@serverf ~]# rbd mirror pool peer bootstrap import --site-name bup --direction rx-only rbd prod
2022-08-14T11:21:20.321-0400 7f777f0392c0 -1 auth: unable to find a keyring on /etc/ceph/..keyring,/etc/ceph/.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,: (2) No such file or directory
2022-08-14T11:21:20.323-0400 7f777f0392c0 -1 auth: unable to find a keyring on /etc/ceph/..keyring,/etc/ceph/.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,: (2) No such file or directory
2022-08-14T11:21:20.323-0400 7f777f0392c0 -1 auth: unable to find a keyring on /etc/ceph/..keyring,/etc/ceph/.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,: (2) No such file or directory
[root@serverf ~]# rbd mirror pool info rbd
Mode: pool
Site Name: bup

Peer Sites:

UUID: cd5db8bb-4f03-449b-ae29-c04ac38df226
Name: prod
Direction: rx-only
Client: client.rbd-mirror-peer

[root@clienta ~]# rbd create image3 --size 1024 --pool rbd --image-feature exclusive-lock,journaling
[root@serverf ~]# rbd ls
image1
image3

[root@serverf ~]# rbd map image3
rbd: sysfs write failed
RBD image feature set mismatch. You can disable features unsupported by the kernel with "rbd feature disable image3 journaling".
In some cases useful info is found in syslog - try "dmesg | tail".
rbd: map failed: (6) No such device or address
[root@serverf ~]# dmesg | tail
[ 2442.874097] Key type dns_resolver registered
[ 2442.963090] Key type ceph registered
[ 2442.974459] libceph: loaded (mon/osd proto 15/24)
[ 2442.996598] rbd: loaded (major 251)
[ 2443.015744] libceph: mon0 (1)172.25.250.15:6789 session established
[ 2443.018969] libceph: mon0 (1)172.25.250.15:6789 socket closed (con state OPEN)
[ 2443.019780] libceph: mon0 (1)172.25.250.15:6789 session lost, hunting for new mon
[ 2443.024317] libceph: mon0 (1)172.25.250.15:6789 session established
[ 2443.026864] libceph: client34233 fsid 0bf7c358-25e1-11ec-ae02-52540000fa0f
[ 2443.053943] rbd: image image3: image uses unsupported features: 0x40
[root@serverf ~]#
The 0x40 error: with journaling enabled, the kernel client can no longer map the image.

You can create the image first and enable the journaling feature afterwards, as sketched below.
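
A hedged sketch of that ordering, with image4 as a hypothetical name: create and use the image with kernel-friendly features only, then switch journaling on once mirroring is actually needed:

rbd create image4 --size 1G --pool rbd --image-feature exclusive-lock
rbd map rbd/image4                         # works: the journaling bit is not set yet
rbd unmap rbd/image4
rbd feature enable rbd/image4 journaling   # from now on it can be mirrored (kernel map will fail again)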

Suppose there is an old cluster A being retired: enable pool mode, sync one-way, and import everything into cluster B in one go?

So this feature does not seem to be used all that often.

Summary (one-way sync)

On the primary cluster:
ceph osd pool create rbd
rbd pool init rbd
rbd create image1 --size 1024 --pool rbd --image-feature exclusive-lock,journaling
rbd mirror pool enable rbd pool
rbd mirror pool peer bootstrap create --site-name prod rbd > /root/prod
rsync prod root@serverf:~

On the standby cluster:
ceph osd pool create rbd
rbd pool init rbd
ceph orch apply rbd-mirror --placement=serverf.lab.example.com
rbd mirror pool peer bootstrap import --site-name bup --direction rx-only rbd /root/prod
rbd ls

The official documentation is genuinely useful:

https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/5/html/block_device_guide/mirroring-ceph-block-devices

scp can run into security issues, hence rsync?

As an open-source jack-of-all-trades among distributed storage systems, Ceph is bound to fall short of more specialized systems in some features, but the principles carry over.

  10. leetcode二叉树题目总结