OP | Posted on 2023-1-9 17:03:34
[root@ceph1 mnt]# ceph -s
  cluster:
    id:     433d3d2a-8e4a-11ed-b84b-000c29377297
    health: HEALTH_WARN
            too many PGs per OSD (272 > max 250)

  services:
    mon: 3 daemons, quorum ceph1,ceph2,ceph3 (age 4m)
    mgr: ceph2.twhubv(active, since 6h), standbys: ceph3.dkkmft, ceph1.zmducz
    mds: cephfs:1 {0=cephfs.ceph3.idylsu=up:active} 1 up:standby
    osd: 3 osds: 3 up (since 7h), 3 in (since 19h)
    rgw: 3 daemons active (ceph1, ceph2, ceph3)

  task status:

  data:
    pools:   8 pools, 273 pgs
    objects: 240 objects, 162 KiB
    usage:   3.0 GiB used, 57 GiB / 60 GiB avail
    pgs:     273 active+clean
[root@ceph1 mnt]# ceph config get mon
WHO     MASK  LEVEL     OPTION                                  VALUE                  RO
mon           advanced  auth_allow_insecure_global_id_reclaim   false
mon           advanced  cluster_network                         162.96.90.0/24         *
global        basic     container_image                         quay.io/ceph/ceph:v15  *
mon           advanced  mon_allow_pool_delete                   true
mon           advanced  mon_max_pg_per_osd                      500
global        advanced  mon_target_pg_per_osd                   500
global        advanced  mon_warn_on_pool_no_redundancy          false
mon           advanced  osd_max_pg_per_osd_hard_ratio           10.000000
mon           advanced  public_network                          192.168.13.0/24        *
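Note that this dump already shows mon_max_pg_per_osd = 500, but only scoped to WHO = mon, while the warning still quotes "max 250". That suggests the health check is not reading the mon-scoped override. If I am reading ceph config get right, what other sections would pick up can be inspected with something like:

ceph config get mgr mon_max_pg_per_osd    # value as seen by the mgr section
ceph config get osd.0 mon_max_pg_per_osd  # value as seen by an individual OSD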
[root@ceph1 mnt]# ceph config set global mon_pg_per_osd 500
Error EINVAL: unrecognized config option 'mon_pg_per_osd'
[root@ceph1 mnt]# ceph config set global mon_max_pg_per_osd 500
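The EINVAL is just a typo in the option name: there is no mon_pg_per_osd. The option that drives this warning is mon_max_pg_per_osd (default 250, exactly the ceiling quoted in the HEALTH_WARN). When in doubt about a name, the built-in help is handy:

ceph config help mon_max_pg_per_osd   # description, type and default
ceph config ls | grep pg_per_osd      # list every option matching the name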
[root@ceph1 mnt]# ceph config get mon
WHO     MASK  LEVEL     OPTION                                  VALUE                  RO
mon           advanced  auth_allow_insecure_global_id_reclaim   false
mon           advanced  cluster_network                         162.96.90.0/24         *
global        basic     container_image                         quay.io/ceph/ceph:v15  *
mon           advanced  mon_allow_pool_delete                   true
mon           advanced  mon_max_pg_per_osd                      500
global        advanced  mon_target_pg_per_osd                   500
global        advanced  mon_warn_on_pool_no_redundancy          false
mon           advanced  osd_max_pg_per_osd_hard_ratio           10.000000
mon           advanced  public_network                          192.168.13.0/24        *
[root@ceph1 mnt]# ceph config set global mon_max_pg_per_osd 500
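Setting the value in the global section is what finally makes the difference here: my read is that this health check is not evaluated by the monitors alone, so the earlier mon-scoped 500 never reached the daemon doing the math, while a global override applies everywhere. Raising the limit only silences the warning, though; on a small 3-OSD lab like this, the cleaner long-term options are letting the autoscaler trim oversized pools or adding OSDs, roughly along these lines (pool name is a placeholder):

ceph osd pool autoscale-status                 # recommended pg_num per pool
ceph osd pool set <pool> pg_autoscale_mode on  # let the autoscaler adjust it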
[root@ceph1 mnt]# ceph -s
  cluster:
    id:     433d3d2a-8e4a-11ed-b84b-000c29377297
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum ceph1,ceph2,ceph3 (age 5m)
    mgr: ceph2.twhubv(active, since 6h), standbys: ceph3.dkkmft, ceph1.zmducz
    mds: cephfs:1 {0=cephfs.ceph3.idylsu=up:active} 1 up:standby
    osd: 3 osds: 3 up (since 7h), 3 in (since 19h)
    rgw: 3 daemons active (ceph1, ceph2, ceph3)

  task status:

  data:
    pools:   8 pools, 273 pgs
    objects: 240 objects, 162 KiB
    usage:   3.0 GiB used, 57 GiB / 60 GiB avail
    pgs:     273 active+clean
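With the warning gone, the end state can be double-checked like this:

ceph health detail                           # should report no active health checks
ceph config dump | grep mon_max_pg_per_osd   # confirms which section holds the override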