ceph-cluster map

We know the cluster topology because of these 5 kinds of cluster map.
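For reference, the five maps are: the monitor map, the OSD map, the PG map, the CRUSH map, and the MDS map. Each one carries an epoch that is bumped whenever the map changes.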

======================================
Related commands
Tab completion is available, just like on a switch CLI:
ceph mon dump
ceph osd dump
ceph fs dump
ceph pg dump
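All of the dump commands also take a --format flag when machine-readable output is wanted, for example:

ceph mon dump --format json-pretty
ceph osd dump --format json-pretty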

The CRUSH map is binary; it needs to be decompiled to get readable text:
ceph osd getcrushmap -o crush

crushtool -d crush -o crush1
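The reverse direction works as well: after editing the decompiled text it can be recompiled and injected back into the cluster. Be careful, as this may trigger data movement:

crushtool -c crush1 -o crush.new
ceph osd setcrushmap -i crush.new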

[root@ali- dd]# ceph mon dump
dumped monmap epoch
epoch
fsid 69e6081b-075f-4f39-8cf3-f1e5bd68908b
last_changed -- ::31.228140
created -- ::21.704124
: 192.168.3.51:/ mon.ali-
: 192.168.3.52:/ mon.ali-
: 192.168.3.53:/ mon.ali-
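The monmap can also be fetched as a binary blob and printed offline with monmaptool, which helps when diagnosing monitor problems (the output file name here is arbitrary):

ceph mon getmap -o monmap.bin
monmaptool --print monmap.bin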

======================================
[root@ali- dd]# ceph fs dump
dumped fsmap epoch
e1
enable_multiple, ever_enabled_multiple: ,
compat: compat={},rocompat={},incompat={=base v0.,=client writeable ranges,=default file layouts on dirs,=dir inode in separate object,=mds uses versioned encoding,=dirfrag is stored in omap,=file layout v2}
legacy client fscid: -

No filesystems configured
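The fsmap is empty because no CephFS filesystem has been created yet. A minimal sketch to create one so that ceph fs dump has something to show (pool names and PG counts are illustrative):

ceph osd pool create cephfs_data 64
ceph osd pool create cephfs_metadata 32
ceph fs new cephfs cephfs_metadata cephfs_data
ceph fs dump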

======================================

[root@ali- dd]# ceph pg dump
dumped all
version
stamp -- ::24.077612
last_osdmap_epoch
last_pg_scan
full_ratio 0.9
nearfull_ratio 0.8
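The full pg dump is very verbose; when only the PG-to-OSD mapping matters, the brief variant is enough:

ceph pg dump pgs_brief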

[root@ceph1 ~]# ceph pg ls
PG OBJECTS DEGRADED MISPLACED UNFOUND BYTES LOG STATE STATE_STAMP VERSION REPORTED UP ACTING SCRUB_STAMP DEEP_SCRUB_STAMP
1.0 active+clean -- ::54.430131 '2 57:95 [1,2,0]p1 [1,2,0]p1 2019-03-28 02:42:54.430020 2019-03-28 02:42:54.430020
1.1 active+clean -- ::33.846731 '0 57:78 [2,0,1]p2 [2,0,1]p2 2019-03-27 20:42:33.846600 2019-03-27 20:42:33.846600
1.2 active+clean -- ::31.853254 '0 57:92 [1,0,2]p1 [1,0,2]p1 2019-03-27 20:02:31.853127 2019-03-21 18:53:07.286885
1.3 active+clean -- ::29.499574 '0 57:94 [0,1,2]p0 [0,1,2]p0 2019-03-28 01:04:29.499476 2019-03-21 18:53:07.286885
1.4 active+clean -- ::42.694788 '0 57:77 [2,1,0]p2 [2,1,0]p2 2019-03-28 10:17:42.694658 2019-03-21 18:53:07.286885
1.5 active+clean -- ::49.922515 '0 57:78 [2,0,1]p2 [2,0,1]p2 2019-03-28 14:33:49.922414 2019-03-21 18:53:07.286885
1.6 active+clean -- ::08.897114 '0 57:78 [2,1,0]p2 [2,1,0]p2 2019-03-28 08:33:08.897044 2019-03-25 19:51:32.716535
1.7 active+clean -- ::16.417698 '0 57:92 [1,2,0]p1 [1,2,0]p1 2019-03-27 21:37:16.417553 2019-03-22 23:05:53.863908
2.0 active+clean -- ::09.127196 '1 57:155 [1,2,0]p1 [1,2,0]p1 2019-03-27 15:07:09.127107 2019-03-22 15:05:32.211389
2.1 active+clean -- ::41.958378 '0 57:89 [0,2,1]p0 [0,2,1]p0 2019-03-27 20:55:41.958328 2019-03-27 20:55:41.958328
2.2 active+clean -- ::45.117140 '0 57:87 [1,0,2]p1 [1,0,2]p1 2019-03-28 03:09:45.117036 2019-03-28 03:09:45.117036
2.3 active+clean -- ::17.944907 '0 57:87 [1,0,2]p1 [1,0,2]p1 2019-03-27 08:54:17.944792 2019-03-26 05:44:21.586541
2.4 active+clean -- ::52.040458 '0 57:89 [0,2,1]p0 [0,2,1]p0 2019-03-27 23:42:52.040353 2019-03-22 15:05:32.211389
2.5 active+clean -- ::15.908085 '0 57:73 [2,0,1]p2 [2,0,1]p2 2019-03-27 14:26:15.908022 2019-03-22 15:05:32.211389
2.6 active+clean -- ::22.282027 '2 57:161 [0,2,1]p0 [0,2,1]p0 2019-03-28 15:00:22.281923 2019-03-26 05:39:41.395132
2.7 active+clean -- ::39.415262 '4 57:253 [1,2,0]p1 [1,2,0]p1 2019-03-27 17:09:39.415167 2019-03-27 17:09:39.415167
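On a large cluster the listing can be filtered, for example by pool or by OSD (the pool name and OSD id below are illustrative):

ceph pg ls-by-pool rbdpool
ceph pg ls-by-osd osd.1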

[root@ceph1 rbdpool]# ceph pg map 8.13
osdmap e55 pg 8.13 (8.13) -> up [,,] acting [,,]

A pg id is made up of {pool-num}.{pg-id}; list the pool numbers and names with:
ceph osd lspools
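So in pg 8.13 above, 8 is the pool number and 13 is the PG within that pool (the pg-id part is hexadecimal). Detailed state for a single PG is available via query:

ceph pg 8.13 query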

[root@ceph1 rbdpool]# ceph pg stat
pgs: active+clean; GiB data, GiB used, 8.4 GiB / GiB avail
[root@client mnt]# rm -rf a*
Only after the delete above do the PGs below start to clean up:
[root@ceph1 rbdpool]# ceph pg stat
pgs: active+clean; 2.5 MiB data, 3.5 GiB used, GiB / GiB avail; 8.7 KiB/s rd, B/s wr, op/s
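To watch the space actually being reclaimed, ceph df shows per-pool usage alongside the cluster totals:

ceph df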

======================================

[root@ali- dd]# ceph osd getcrushmap -o crush

[root@ali- dd]# file crush
crush: MS Windows icon resource - icons, -colors

[root@ali- dd]# crushtool -d crush -o crush1
[root@ali- dd]# file crush1
crush1: ASCII text

[root@ali- dd]# cat crush1

# begin crush map

tunable choose_local_tries
tunable choose_local_fallback_tries
tunable choose_total_tries
……
rule pool-d83c6154956b44aea7639c7bd4c45c65-rule {
id
type replicated
min_size
max_size
step take pool-d83c6154956b44aea7639c7bd4c45c65-root
step chooseleaf firstn type rack
step emit
}

# end crush map

[root@ali- dd]#
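crushtool can also exercise a map offline, simulating where a rule would place replicas without touching the cluster (rule id 0 and 3 replicas are illustrative):

crushtool -i crush --test --rule 0 --num-rep 3 --show-mappings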

======================================

[root@ali- dd]# ceph osd dump
epoch
fsid 69e6081b-075f-4f39-8cf3-f1e5bd68908b
created -- ::22.409031
modified -- ::38.522821
flags nodeep-scrub,sortbitwise,recovery_deletes,purged_snapdirs
crush_version
full_ratio 0.9
backfillfull_ratio 0.85
nearfull_ratio 0.8
omap_full_ratio 0.9
omap_backfillfull_ratio 0.85
omap_nearfull_ratio 0.8
require_min_compat_client luminous
min_compat_client luminous
require_osd_release luminous
pool 'pool-d83c6154956b44aea7639c7bd4c45c65' replicated size min_size crush_rule object_hash rjenkins pg_num pgp_num last_change flags hashpspool stripe_width async_recovery_max_updates osd_backfillfull_ratio 0.85 osd_full_ratio 0.9 osd_nearfull_ratio 0.8 osd_omap_backfillfull_ratio 0.85 osd_omap_nearfull_ratio 0.8
removed_snaps [~]
max_osd
osd. down out weight up_from up_thru down_at last_clean_interval [,) 192.168.3.53:/ 192.168.3.53:/ 192.168.3.53:/ 192.168.3.53:/ 192.168.1.53:/ autoout,exists 54e32850-b1ef-44e1-8df9-d3c93bfe4807
osd. down out weight up_from up_thru down_at last_clean_interval [,) 192.168.3.53:/ 192.168.3.53:/ 192.168.3.53:/ 192.168.3.53:/ 192.168.1.53:/ autoout,exists 17af8207-2a25-405b-b87d-1c6d7806cc8d
osd. down out weight up_from up_thru down_at last_clean_interval [,) 192.168.3.53:/ 192.168.3.53:/ 192.168.3.53:/ 192.168.3.53:/ 192.168.1.53:/ autoout,exists 06cf6578-e516-4e4a-a494-10423b8999cd
osd. down out weight up_from up_thru down_at last_clean_interval [,) 192.168.3.53:/ 192.168.3.53:/ 192.168.3.53:/ 192.168.3.53:/ 192.168.1.53:/ autoout,exists bc31e4ab-a135--81b3-e92969921ba7
osd. up in weight up_from up_thru down_at last_clean_interval [,) 192.168.3.53:/ 192.168.3.53:/ 192.168.3.53:/ 192.168.3.53:/ 192.168.1.53:/ exists,up 62edd341-50b8-4cca-852f-852a51f96760
osd. up in weight up_from up_thru down_at last_clean_interval [,) 192.168.3.53:/ 192.168.3.53:/ 192.168.3.53:/ 192.168.3.53:/ 192.168.1.53:/ exists,up 00d0cd89-2e74--b4b4-6deaf465b97e
osd. up in weight up_from up_thru down_at last_clean_interval [,) 192.168.3.53:/ 192.168.3.53:/ 192.168.3.53:/ 192.168.3.53:/ 192.168.1.53:/ exists,up 8ed2597f-1a92-4b90--43b7953cffea
osd. up in weight up_from up_thru down_at last_clean_interval [,) 192.168.3.53:/ 192.168.3.53:/ 192.168.3.53:/ 192.168.3.53:/ 192.168.1.53:/ exists,up f5723232-3f04-4c22--bdc69d7bcff6

osd. up in weight up_from up_thru down_at last_clean_interval [,) 192.168.3.52:/ 192.168.3.52:/ 192.168.3.52:/ 192.168.3.52:/ 192.168.1.52:/ exists,up f75a6ee5-cd79-499c--db400f0bed93
osd. up in weight up_from up_thru down_at last_clean_interval [,) 192.168.3.52:/ 192.168.3.52:/ 192.168.3.52:/ 192.168.3.52:/ 192.168.1.52:/ exists,up 30431fd9-306c--a5bd-cf6b9bc77ca1
osd. up in weight up_from up_thru down_at last_clean_interval [,) 192.168.3.52:/ 192.168.3.52:/ 192.168.3.52:/ 192.168.3.52:/ 192.168.1.52:/ exists,up 6ed49e4d-d640--957e-94d2f4ba055f
osd. up in weight up_from up_thru down_at last_clean_interval [,) 192.168.3.52:/ 192.168.3.52:/ 192.168.3.52:/ 192.168.3.52:/ 192.168.1.52:/ exists,up -5c5e-475c-8b41-d58980da3f43
osd. up in weight up_from up_thru down_at last_clean_interval [,) 192.168.3.52:/ 192.168.3.52:/ 192.168.3.52:/ 192.168.3.52:/ 192.168.1.52:/ exists,up 6168f2cd-de56--8fe5-c80e93f134cd
osd. up in weight up_from up_thru down_at last_clean_interval [,) 192.168.3.52:/ 192.168.3.52:/ 192.168.3.52:/ 192.168.3.52:/ 192.168.1.52:/ exists,up 26e54a1c-601a-4f3b-afdc-a0c5b140affc
osd. up in weight up_from up_thru down_at last_clean_interval [,) 192.168.3.52:/ 192.168.3.52:/ 192.168.3.52:/ 192.168.3.52:/ 192.168.1.52:/ exists,up fa366bda-3ac8---b156acffb4aa
osd. up in weight up_from up_thru down_at last_clean_interval [,) 192.168.3.52:/ 192.168.3.52:/ 192.168.3.52:/ 192.168.3.52:/ 192.168.1.52:/ exists,up e9a16507--465c-af80-9d371a9018ad

osd. down out weight up_from up_thru down_at last_clean_interval [,) 192.168.3.51:/ 192.168.3.51:/ 192.168.3.51:/ 192.168.3.51:/ 192.168.1.51:/ autoout,exists c39c2030-4ad2-49b2-a2bd-d6f26d9cc2c8
osd. down out weight up_from up_thru down_at last_clean_interval [,) 192.168.3.51:/ 192.168.3.51:/ 192.168.3.51:/ 192.168.3.51:/ 192.168.1.51:/ autoout,exists 9fa68652-dda8-485a--92d109bc7283
osd. down out weight up_from up_thru down_at last_clean_interval [,) 192.168.3.51:/ 192.168.3.51:/ 192.168.3.51:/ 192.168.3.51:/ 192.168.1.51:/ autoout,exists f91dc889-379d-427a--9525deb70603
osd. up in weight up_from up_thru down_at last_clean_interval [,) 192.168.3.51:/ 192.168.3.51:/ 192.168.3.51:/ 192.168.3.51:/ 192.168.1.51:/ exists,up 254c1dc1-c5aa-406d-a144-408c757f6b34
osd. down out weight up_from up_thru down_at last_clean_interval [,) 192.168.3.51:/ 192.168.3.51:/ 192.168.3.51:/ 192.168.3.51:/ 192.168.1.51:/ autoout,exists c13c44fd-397f-465d-bc14-917e8899e2fd
osd. up in weight up_from up_thru down_at last_clean_interval [,) 192.168.3.51:/ 192.168.3.51:/ 192.168.3.51:/ 192.168.3.51:/ 192.168.1.51:/ exists,up c5028149-28ec-4bd4-a5fe-3d13bdb82c6a
osd. up in weight up_from up_thru down_at last_clean_interval [,) 192.168.3.51:/ 192.168.3.51:/ 192.168.3.51:/ 192.168.3.51:/ 192.168.1.51:/ exists,up 27c2a32e-eef3-41c9--15246fb20ac4
osd. up in weight up_from up_thru down_at last_clean_interval [,) 192.168.3.51:/ 192.168.3.51:/ 192.168.3.51:/ 192.168.3.51:/ 192.168.1.51:/ exists,up 4f877615-df0d-40d0-a351-a21dc518c3f4
pg_upmap_items 1.1 [,]
pg_upmap_items 1.2 [,,,]
pg_upmap_items 1.3 [,]
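The pg_upmap_items entries at the end are explicit PG remappings layered on top of what CRUSH computes; the upmap balancer manages them, and they require require_min_compat_client luminous (visible in the dump above). They can also be set and removed by hand; a sketch with illustrative OSD ids:

ceph osd pg-upmap-items 1.1 2 5
ceph osd rm-pg-upmap-items 1.1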