After adjusting the pool's pg_num, the changes we observed are as follows:
[root@ceph1 ~]# ceph osd pool set default.rgw.meta pg_num 64
set pool 5 pg_num to 64
[root@ceph1 ~]# ceph osd pool set default.rgw.meta pgp_num 64
set pool 5 pgp_num to 64
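As an extra check (not shown in the original output), the pg_autoscaler mgr module can be asked directly what pg_num it wants for each pool; because autoscale_mode is on for these pools, that target is what the cluster will drift back to:

[root@ceph1 ~]# ceph osd pool autoscale-status

The listing shows, per pool, the current pg_num and the pg_num the autoscaler intends to converge to.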
The cluster status at this point:
[root@ceph1 ~]# ceph -s
  cluster:
    id:     433d3d2a-8e4a-11ed-b84b-000c29377297
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum ceph1,ceph2,ceph3 (age 4m)
    mgr: ceph2.twhubv(active, since 3m), standbys: ceph1.zmducz, ceph3.dkkmft
    mds: cephfs:1 {0=cephfs.ceph3.idylsu=up:active} 1 up:standby
    osd: 3 osds: 3 up (since 3m), 3 in (since 11h)
    rgw: 3 daemons active (ceph1, ceph2, ceph3)

  task status:

  data:
    pools:   8 pools, 233 pgs
    objects: 240 objects, 157 KiB
    usage:   3.0 GiB used, 57 GiB / 60 GiB avail
    pgs:     233 active+clean
The total PG count has reached 233, which matches the sum of the per-pool pg_num values (1 + 32 + 32 + 32 + 64 + 32 + 32 + 8 = 233).
Check the detailed pool records:
[root@ceph1 ~]# ceph osd pool ls detail
pool 1 'device_health_metrics' replicated size 2 min_size 1 crush_rule 0 object_hash rjenkins pg_num 1 pgp_num 1 autoscale_mode on last_change 375 flags hashpspool,selfmanaged_snaps stripe_width 0 pg_num_min 1 application mgr_devicehealth
pool 2 '.rgw.root' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 32 pgp_num 32 autoscale_mode on last_change 35 flags hashpspool stripe_width 0 application rgw
pool 3 'default.rgw.log' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 32 pgp_num 32 autoscale_mode on last_change 37 flags hashpspool stripe_width 0 application rgw
pool 4 'default.rgw.control' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 32 pgp_num 32 autoscale_mode on last_change 39 flags hashpspool stripe_width 0 application rgw
pool 5 'default.rgw.meta' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 64 pgp_num 64 autoscale_mode on last_change 381 lfor 0/156/379 flags hashpspool stripe_width 0 pg_autoscale_bias 4 pg_num_min 8 application rgw
pool 6 'cephfs.cephfs.meta' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 32 pgp_num 32 autoscale_mode on last_change 377 flags hashpspool stripe_width 0 pg_autoscale_bias 4 pg_num_min 16 recovery_priority 5 application cephfs
pool 7 'cephfs.cephfs.data' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 32 pgp_num 32 autoscale_mode on last_change 163 flags hashpspool stripe_width 0 application cephfs
pool 8 'default.rgw.buckets.index' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 8 pgp_num 8 autoscale_mode on last_change 297 lfor 0/297/295 flags hashpspool stripe_width 0 pg_autoscale_bias 4 pg_num_min 8 application rgw
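Note that every pool above has autoscale_mode on, so the manual pg_num 64 on pool 5 will not stick: the autoscaler will walk it back down to its own target, which turns out to be 8 in the output below. If the manual value were meant to be kept, autoscaling would first have to be disabled for that pool, for example (not done in this test):

[root@ceph1 ~]# ceph osd pool set default.rgw.meta pg_autoscale_mode off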
Then a change takes place: pool 5 now reports pg_num_target 8 and pgp_num_target 8, and its pg_num starts to drop:
[root@ceph1 ~]# ceph osd pool ls detail
pool 1 'device_health_metrics' replicated size 2 min_size 1 crush_rule 0 object_hash rjenkins pg_num 1 pgp_num 1 autoscale_mode on last_change 375 flags hashpspool,selfmanaged_snaps stripe_width 0 pg_num_min 1 application mgr_devicehealth
pool 2 '.rgw.root' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 32 pgp_num 32 autoscale_mode on last_change 35 flags hashpspool stripe_width 0 application rgw
pool 3 'default.rgw.log' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 32 pgp_num 32 autoscale_mode on last_change 37 flags hashpspool stripe_width 0 application rgw
pool 4 'default.rgw.control' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 32 pgp_num 32 autoscale_mode on last_change 39 flags hashpspool stripe_width 0 application rgw
pool 5 'default.rgw.meta' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 61 pgp_num 59 pg_num_target 8 pgp_num_target 8 autoscale_mode on last_change 400 lfor 0/400/398 flags hashpspool stripe_width 0 pg_autoscale_bias 4 pg_num_min 8 application rgw
pool 6 'cephfs.cephfs.meta' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 32 pgp_num 32 autoscale_mode on last_change 377 flags hashpspool stripe_width 0 pg_autoscale_bias 4 pg_num_min 16 recovery_priority 5 application cephfs
pool 7 'cephfs.cephfs.data' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 32 pgp_num 32 autoscale_mode on last_change 163 flags hashpspool stripe_width 0 application cephfs
pool 8 'default.rgw.buckets.index' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 8 pgp_num 8 autoscale_mode on last_change 297 lfor 0/297/295 flags hashpspool stripe_width 0 pg_autoscale_bias 4 pg_num_min 8 application rgw
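To follow only pool 5 while it converges, the individual values can also be queried directly instead of reading the full detail listing (an optional convenience, not part of the original steps):

[root@ceph1 ~]# ceph osd pool get default.rgw.meta pg_num
[root@ceph1 ~]# ceph osd pool get default.rgw.meta pgp_num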
The pgs count in the cluster status is dropping as well:
[root@ceph1 ~]# ceph -s
  cluster:
    id:     433d3d2a-8e4a-11ed-b84b-000c29377297
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum ceph1,ceph2,ceph3 (age 5m)
    mgr: ceph2.twhubv(active, since 4m), standbys: ceph1.zmducz, ceph3.dkkmft
    mds: cephfs:1 {0=cephfs.ceph3.idylsu=up:active} 1 up:standby
    osd: 3 osds: 3 up (since 4m), 3 in (since 11h)
    rgw: 3 daemons active (ceph1, ceph2, ceph3)

  task status:

  data:
    pools:   8 pools, 228 pgs
    objects: 240 objects, 157 KiB
    usage:   3.0 GiB used, 57 GiB / 60 GiB avail
    pgs:     228 active+clean

  progress:
    PG autoscaler decreasing pool 5 PGs from 64 to 8 (0s)
      [............................]
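The progress section above comes from the mgr progress module. The simplest way to keep an eye on the decrease is to re-run the status command periodically, for example:

[root@ceph1 ~]# watch -n 5 ceph -s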
After waiting a while for the sync to proceed:
[root@ceph1 ~]# ceph -s
  cluster:
    id:     433d3d2a-8e4a-11ed-b84b-000c29377297
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum ceph1,ceph2,ceph3 (age 10m)
    mgr: ceph2.twhubv(active, since 9m), standbys: ceph1.zmducz, ceph3.dkkmft
    mds: cephfs:1 {0=cephfs.ceph3.idylsu=up:active} 1 up:standby
    osd: 3 osds: 3 up (since 9m), 3 in (since 11h)
    rgw: 3 daemons active (ceph1, ceph2, ceph3)

  task status:

  data:
    pools:   8 pools, 185 pgs
    objects: 240 objects, 157 KiB
    usage:   3.0 GiB used, 57 GiB / 60 GiB avail
    pgs:     185 active+clean

  progress:
    PG autoscaler decreasing pool 5 PGs from 64 to 8 (5m)
      [======================......] (remaining: 81s)
The total has automatically dropped to 185 PGs; since these nodes are virtual machines, the decrease is fairly slow.
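Assuming no other pool is rescaled in the meantime, the total should settle at 1 + 32 + 32 + 32 + 8 + 32 + 32 + 8 = 177 PGs once pool 5 reaches its target of 8.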