Environment Introduction
" J+ a9 e0 Y; v8 l: BIP地址 配置 主机名 Ceph版本. F6 u K& w6 W# c+ ~. T O; R
10.15.253.161 c2m8h300 cephnode01 Octopus 15.2.48 p# ?7 e7 ~) B: G. Z% @: O
10.15.253.193 c2m8h300 cephnode02 Octopus 15.2.4" I& \4 s7 h) K4 @ i
10.15.253.225 c2m8h300 cephnode03 Octopus 15.2.4
# Linux distribution and kernel version
[root@cephnode01 ~]# cat /etc/redhat-release
CentOS Linux release 8.2.2004 (Core)
[root@cephnode01 ~]# uname -r
4.18.0-193.14.2.el8_2.x86_64
# Network design: keep each network on a separate segment where possible
10.15.253.0/24   # Public Network
172.31.253.0/24  # Cluster Network
# Besides the system disk, each Ceph node has at least two identical large-capacity disks attached; no partitioning is needed
[root@cephnode01 ~]# lsblk
NAME   MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sda      8:0    0   20G  0 disk
├─sda1   8:1    0  200M  0 part /boot
├─sda2   8:2    0    1G  0 part [SWAP]
└─sda3   8:3    0 18.8G  0 part /
sdb      8:16   0   20G  0 disk
2.1.1 Ceph Installation and Version Selection
https://docs.ceph.com/docs/master/install/
ceph-deploy is a tool for quickly deploying a cluster. The community no longer actively maintains it: it supports only Ceph releases up to Nautilus, and it does not support RHEL 8, CentOS 8, or newer operating systems.
Since this environment runs CentOS 8, the cephadm deployment tool has to be used to deploy the Octopus release of Ceph.
2.1.2 Basic Environment Preparation
Run these steps on all Ceph nodes; cephnode01 is shown as the example.
#(1) Disable the firewall:
systemctl stop firewalld
systemctl disable firewalld
#(2) Disable SELinux:
sed -i 's/enforcing/disabled/' /etc/selinux/config
setenforce 0
#(3) On cephnode01, configure passwordless SSH to cephnode02 and cephnode03
dnf install sshpass -y
ssh-keygen -t rsa -f ~/.ssh/id_rsa -P ''
for ip in 161 193 225 ;do sshpass -pZxzn@2020 ssh-copy-id -o StrictHostKeyChecking=no 10.15.253.$ip ;done
#(4) On cephnode01, add the hostnames (skip if already configured)
cat >>/etc/hosts <<EOF
10.15.253.161 cephnode01
10.15.253.193 cephnode02
10.15.253.225 cephnode03
EOF
for ip in 193 225 ;do scp -rp /etc/hosts root@10.15.253.$ip:/etc/hosts ;done
#(5) Raise the maximum number of open file descriptors
echo "ulimit -SHn 102400" >> /etc/rc.local
cat >> /etc/security/limits.conf << EOF
* soft nofile 65535
* hard nofile 65535
EOF
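Note that on CentOS 8 /etc/rc.local is not executable by default, so the ulimit line above will not run at boot until:

chmod +x /etc/rc.d/rc.local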
#(6) Kernel parameter tuning
echo 'net.ipv4.ip_forward = 1' >>/etc/sysctl.conf
echo 'kernel.pid_max = 4194303' >>/etc/sysctl.conf
# swappiness = 0: fall back to swap only under severe memory pressure
echo "vm.swappiness = 0" >>/etc/sysctl.conf
sysctl -p
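The applied values can be re-checked at any time:

sysctl net.ipv4.ip_forward kernel.pid_max vm.swappiness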
#(7) Synchronize network time and set the timezone (skip if already configured)
# Install chrony; cephnode02/03 synchronize against the cephnode01 node
yum install chrony -y
# on cephnode02/03, edit /etc/chrony.conf to point at cephnode01:
vim /etc/chrony.conf
server cephnode01 iburst
---
systemctl restart chronyd.service
systemctl enable chronyd.service
chronyc sources
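The manual edit above can also be scripted. A minimal sketch, assuming cephnode01 serves time to the public network and the other nodes still carry the stock CentOS pool line:

# on cephnode01: allow clients from the public network
echo "allow 10.15.253.0/24" >> /etc/chrony.conf
# on cephnode02/03: replace the default pool with cephnode01
sed -i 's/^pool .*/server cephnode01 iburst/' /etc/chrony.conf
systemctl restart chronyd.service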
#(8) read_ahead: prefetch data into memory to speed up disk read operations
echo "8192" > /sys/block/sda/queue/read_ahead_kb
#(9) I/O scheduler: on CentOS 8 the kernel uses blk-mq, so the scheduler names are "none" (for SSDs, replacing the old noop elevator) and "mq-deadline" (for SATA/SAS, replacing the old deadline scheduler)
#https://blog.csdn.net/shipeng1022/article/details/78604910
echo "mq-deadline" >/sys/block/sda/queue/scheduler
echo "mq-deadline" >/sys/block/sdb/queue/scheduler
#echo "none" >/sys/block/sd[x]/queue/scheduler
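Both of these sysfs settings are reset at reboot. A minimal sketch of persisting them with a udev rule (the rule file name is an arbitrary choice):

cat > /etc/udev/rules.d/60-ceph-disk-tuning.rules <<'EOF'
# reapply read_ahead and scheduler to sda/sdb on every add/change event
ACTION=="add|change", KERNEL=="sd[ab]", ATTR{queue/read_ahead_kb}="8192", ATTR{queue/scheduler}="mq-deadline"
EOF
udevadm control --reload-rules && udevadm trigger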
3. Add the Octopus yum Repository

# note the quoted 'EOF': $basearch must be written literally so yum can expand it
cat >>/etc/yum.repos.d/ceph.repo <<'EOF'
[Ceph]
name=Ceph packages for $basearch
baseurl=https://mirrors.aliyun.com/ceph/rpm-octopus/el8/$basearch
enabled=1
gpgcheck=0
type=rpm-md
[Ceph-noarch]
name=Ceph noarch packages
baseurl=https://mirrors.aliyun.com/ceph/rpm-octopus/el8/noarch
enabled=1
gpgcheck=0
type=rpm-md
[ceph-source]
name=Ceph source packages
baseurl=https://mirrors.aliyun.com/ceph/rpm-octopus/el8/SRPMS
enabled=1
gpgcheck=0
type=rpm-md
EOF
yum clean all && yum makecache
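A quick sanity check that the new repos are visible (output abridged):

dnf repolist | grep -i ceph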
# Install basic tools
yum install net-tools wget vim bash-completion lrzsz unzip zip -y
4. Deploying with the cephadm Tool
https://docs.ceph.com/docs/master/cephadm/install/
Starting with version 15, deployment with cephadm is supported; ceph-deploy is only supported through version 14.
4.1 Pull the Latest cephadm and Grant Execute Permission
Configure on the cephnode01 node.
[root@cephnode01 ~]# curl --silent --remote-name --location https://github.com/ceph/ceph/raw/octopus/src/cephadm/cephadm
[root@cephnode01 ~]# chmod +x cephadm
[root@cephnode01 ~]# ll
-rwxr-xr-x. 1 root root 184653 Sep 10 12:01 cephadm
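Optionally, confirm the script works; cephadm version pulls the default container image if it is not yet present and reports its Ceph version (output as seen in this environment):

[root@cephnode01 ~]# ./cephadm version
ceph version 15.2.4 (7447c15c6ff58d7fce91843b705a268a1917325c) octopus (stable)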
4.2 Fetch and Install the Latest Octopus Release with cephadm
The domestic yum mirror has already been configured manually, so there is no need to add the repository again per the official documentation.
# Install on all ceph nodes
[root@cephnode01 ~]# dnf install python3 podman -y
[root@cephnode01 ~]# ./cephadm install
...
[root@cephnode01 ~]# which cephadm
/usr/sbin/cephadm
5. Create a New Ceph Cluster
5.1 Designate the Management Node
Bootstrap on a network reachable by any host that will access the Ceph cluster, specify the mon IP, and have the generated configuration files written to /etc/ceph.
[root@cephnode01 ~]# mkdir -p /etc/ceph
[root@cephnode01 ~]# cephadm bootstrap --mon-ip 10.15.253.161
...
URL: https://cephnode01:8443/
User: admin
Password: 6v7xazcbwk
...
Log in at https://cephnode01:8443/ to verify; the password must be changed on first login.
5.2 Map the ceph Command to the Local Host
Cephadm does not require any Ceph packages to be installed on the host. However, enabling easy access to the ceph command is recommended.
The cephadm shell command starts a bash shell in a container with all the Ceph packages installed. By default, if config and keyring files are found in /etc/ceph on the host, they are passed into the container environment so the shell is fully functional.
[root@cephnode01 ~]# cephadm shell
[root@cephnode01 ~]# alias ceph='cephadm shell -- ceph'
[root@cephnode01 ~]# exit
# Install the ceph-common package, which provides the ceph, rbd, and mount.ceph commands
[root@cephnode01 ~]# cephadm install ceph-common
# Check the version
[root@cephnode01 ~]# ceph -v
ceph version 15.2.4 (7447c15c6ff58d7fce91843b705a268a1917325c) octopus (stable)
# Check the cluster status
[root@cephnode01 ~]# ceph status
  cluster:
    id:     8a4fdb4e-f31c-11ea-be33-000c29358c7a
    health: HEALTH_WARN
            Reduced data availability: 1 pg inactive
            OSD count 0 < osd_pool_default_size 3

  services:
    mon: 1 daemons, quorum cephnode01 (age 14m)
    mgr: cephnode01.oesega(active, since 10m)
    osd: 0 osds: 0 up, 0 in

  data:
    pools:   1 pools, 1 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:     100.000% pgs unknown
             1 unknown
5.3 Add New Nodes to the Ceph Cluster

[root@cephnode01 ~]# ssh-copy-id -f -i /etc/ceph/ceph.pub root@cephnode02
[root@cephnode01 ~]# ssh-copy-id -f -i /etc/ceph/ceph.pub root@cephnode03
[root@cephnode01 ~]# ceph orch host add cephnode02
Added host 'cephnode02'
[root@cephnode01 ~]# ceph orch host add cephnode03
Added host 'cephnode03'
5.4 Deploy Additional Monitors
Label the nodes that should run a mon; here all three are selected.

[root@cephnode01 ~]# ceph orch host label add cephnode01 mon
Added label mon to host cephnode01
[root@cephnode01 ~]# ceph orch host label add cephnode02 mon
Added label mon to host cephnode02
[root@cephnode01 ~]# ceph orch host label add cephnode03 mon
Added label mon to host cephnode03
[root@cephnode01 ~]# ceph orch host ls
HOST        ADDR        LABELS  STATUS
cephnode01  cephnode01  mon
cephnode02  cephnode02  mon
cephnode03  cephnode03  mon
Tell cephadm to deploy mons according to the label; this step takes a while as each node pulls the image and starts its container.

[root@cephnode01 ~]# ceph orch apply mon label:mon
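While the containers deploy, progress can be watched from cephnode01 (a hedged verification; once finished, all three mons should appear in the quorum):

[root@cephnode01 ~]# ceph orch ps
[root@cephnode01 ~]# ceph mon stat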
To verify the installation completed, check the other two nodes:

[root@cephnode02 ~]# podman ps -a
...
[root@cephnode02 ~]# podman images
REPOSITORY                    TAG      IMAGE ID      CREATED        SIZE
docker.io/ceph/ceph           v15      852b28cb10de  3 weeks ago    1 GB
docker.io/prom/node-exporter  v0.18.1  e5a616e4b9cf  15 months ago  24.3 MB
6. Deploy OSDs
6.1 List the Usable Disks

[root@cephnode01 ~]# ceph orch device ls
HOST        PATH      TYPE  SIZE   DEVICE  AVAIL  REJECT REASONS
cephnode01  /dev/sda  hdd   20.0G          False  locked, Insufficient space (<5GB) on vgs, LVM detected
cephnode01  /dev/sdb  hdd   20.0G          True
cephnode02  /dev/sda  hdd   20.0G          False  Insufficient space (<5GB) on vgs, LVM detected, locked
cephnode02  /dev/sdb  hdd   20.0G          True
cephnode03  /dev/sda  hdd   20.0G          False  locked, Insufficient space (<5GB) on vgs, LVM detected
cephnode03  /dev/sdb  hdd   20.0G          True
6.2 Use All Available Disks

[root@cephnode01 ~]# ceph orch apply osd --all-available-devices
To add a single disk instead:

[root@cephnode01 ~]# ceph orch daemon add osd cephnode02:/dev/sdc
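Beyond --all-available-devices, an OSD service spec gives finer control over which disks are consumed. A minimal sketch, in which the file name, service_id, and host_pattern are illustrative assumptions:

cat > osd_spec.yml <<'EOF'
service_type: osd
service_id: default_drive_group   # arbitrary identifier
placement:
  host_pattern: 'cephnode*'       # match all three nodes
data_devices:
  all: true                       # consume every eligible device
EOF
[root@cephnode01 ~]# ceph orch apply osd -i osd_spec.yml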
6.3 Verify the Deployment

[root@cephnode01 ~]# ceph osd df
ID  CLASS  WEIGHT   REWEIGHT  SIZE    RAW USE  DATA     OMAP     META      AVAIL   %USE  VAR   PGS  STATUS
 0  hdd    0.01949  1.00000   20 GiB  1.0 GiB  3.8 MiB  1 KiB    1024 MiB  19 GiB  5.02  1.00    1  up
 1  hdd    0.01949  1.00000   20 GiB  1.0 GiB  3.8 MiB  1 KiB    1024 MiB  19 GiB  5.02  1.00    1  up
 2  hdd    0.01949  1.00000   20 GiB  1.0 GiB  3.8 MiB  1 KiB    1024 MiB  19 GiB  5.02  1.00    1  up
                    TOTAL     60 GiB  3.0 GiB   11 MiB  4.2 KiB  3.0 GiB   57 GiB  5.02
MIN/MAX VAR: 1.00/1.00  STDDEV: 0
( u" v) D3 N6 K/ U+ N% K7. 存储部署. L' z1 v e; ], }
7.1 CephFS部署
8 W2 k* U% K4 ~/ n+ L部署cephfs的mds服务,指定集群名及mds的数量. Y4 m, j) H& `; P& a* d: v& f1 T
5 C, K S- l3 `! I5 f" y
[root@cephnode01 ~]# ceph orch apply mds fs-cluster --placement=3
! K. [- v# o$ Y- }
[root@cephnode01 ~]# ceph -s
  cluster:
    id:     8a4fdb4e-f31c-11ea-be33-000c29358c7a
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum cephnode01,cephnode02,cephnode03 (age 1m)
    mgr: cephnode01.oesega(active, since 49m), standbys: cephnode02.lphrtb, cephnode03.wkthtb
    mds: 3 up:standby
    osd: 3 osds: 3 up (since 51m), 3 in (since 30m)

  data:
    pools:   1 pools, 1 pgs
    objects: 0 objects, 0 B
    usage:   3.0 GiB used, 57 GiB / 60 GiB avail
    pgs:     1 active+clean
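All three mds daemons report up:standby because no filesystem exists yet; they only go active once one is created. A minimal sketch, keeping the name consistent with the mds service deployed above (ceph fs volume create makes the data and metadata pools itself):

[root@cephnode01 ~]# ceph fs volume create fs-cluster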
7.2 Deploy RGW
Create a realm:

[root@cephnode01 ~]# radosgw-admin realm create --rgw-realm=rgw-org --default
{
    "id": "43dc34c0-6b5b-411c-9e23-687a29c8bd00",
    "name": "rgw-org",
    "current_period": "ea3dd54c-2dfe-4180-bf11-4415be6ccafd",
    "epoch": 1
}
Create a zonegroup:

[root@cephnode01 ~]# radosgw-admin zonegroup create --rgw-zonegroup=rgwgroup --master --default
{
    "id": "1878ecaa-216b-4c99-ad4e-b72f4fa9193f",
    "name": "rgwgroup",
    "api_name": "rgwgroup",
    "is_master": "true",
    "endpoints": [],
    "hostnames": [],
    "hostnames_s3website": [],
    "master_zone": "",
    "zones": [],
    "placement_targets": [],
    "default_placement": "",
    "realm_id": "43dc34c0-6b5b-411c-9e23-687a29c8bd00",
    "sync_policy": {
        "groups": []
    }
}
Create a zone:

[root@cephnode01 ~]# radosgw-admin zone create --rgw-zonegroup=rgwgroup --rgw-zone=zone-dc1 --master --default
{
    "id": "fbdc5f83-9022-4675-b98e-39738920bb57",
    "name": "zone-dc1",
    "domain_root": "zone-dc1.rgw.meta:root",
    "control_pool": "zone-dc1.rgw.control",
    "gc_pool": "zone-dc1.rgw.log:gc",
    "lc_pool": "zone-dc1.rgw.log:lc",
    "log_pool": "zone-dc1.rgw.log",
    "intent_log_pool": "zone-dc1.rgw.log:intent",
    "usage_log_pool": "zone-dc1.rgw.log:usage",
    "roles_pool": "zone-dc1.rgw.meta:roles",
    "reshard_pool": "zone-dc1.rgw.log:reshard",
    "user_keys_pool": "zone-dc1.rgw.meta:users.keys",
    "user_email_pool": "zone-dc1.rgw.meta:users.email",
    "user_swift_pool": "zone-dc1.rgw.meta:users.swift",
    "user_uid_pool": "zone-dc1.rgw.meta:users.uid",
    "otp_pool": "zone-dc1.rgw.otp",
    "system_key": {
        "access_key": "",
        "secret_key": ""
    },
    "placement_pools": [
        {
            "key": "default-placement",
            "val": {
                "index_pool": "zone-dc1.rgw.buckets.index",
                "storage_classes": {
                    "STANDARD": {
                        "data_pool": "zone-dc1.rgw.buckets.data"
                    }
                },
                "data_extra_pool": "zone-dc1.rgw.buckets.non-ec",
                "index_type": 0
            }
        }
    ],
    "realm_id": "43dc34c0-6b5b-411c-9e23-687a29c8bd00"
}
Deploy a set of radosgw daemons for this realm and zone; here only two nodes are designated to run rgw:

[root@cephnode01 ~]# ceph orch apply rgw rgw-org zone-dc1 --placement="2 cephnode02 cephnode03"
Verify:
" R F6 Q/ ]" w[root@cephnode01 ~]# ceph -s& h# Z( b. m: P$ [% G) b
cluster:% Y+ @9 K: F9 A( f6 @5 _
id: 8a4fdb4e-f31c-11ea-be33-000c29358c7a! r+ c) ]" v8 F6 F2 }
health: HEALTH_OK
! b( F9 H# Y1 U% ^
. l; `& b+ k% ]; U9 P K services:
$ [; J4 L7 ?8 Z0 b8 d mon: 3 daemons, quorum cephnode01,cephnode02,cephnode03 (age 1m)3 s4 ]2 w$ C1 T) C; v
mgr: cephnode01.oesega(active, since 49m), standbys: cephnode02.lphrtb, cephnode03.wkthtb
4 Y5 V5 u# \0 W+ V9 r1 Z9 r mds: 3 up:standby8 [$ ]/ `8 v: ]. |% m7 r C" i
osd: 3 osds: 3 up (since 51m), 3 in (since 30m)
5 H: X* m0 ~5 o! K+ w& V. {' [0 ] rgw: 2 daemons active (rgw-org.zone-dc1.cephnode02.cdgjsi, rgw-org.zone-dc1.cephnode03.nmbbsz)
4 A+ b4 v# O3 i; P data:
! v6 J \* [. N: a# J pools: 1 pools, 1 pgs
" T+ _( R; w, a( s% Z objects: 0 objects, 0 B$ F' u; [1 K9 {& S6 ?8 l" d
usage: 3.0 GiB used, 57 GiB / 60 GiB avail
9 H; j" L: P0 ^" d G pgs: 1 active+clean" q" o! Z! S& ?% s8 A9 c1 [7 F+ a* g
Enable the dashboard for RGW:

# Create an rgw management user
[root@cephnode01 ~]# radosgw-admin user create --uid=admin --display-name=admin --system
{
    "user_id": "admin",
    "display_name": "admin",
    "email": "",
    "suspended": 0,
    "max_buckets": 1000,
    "subusers": [],
    "keys": [
        {
            "user": "admin",
            "access_key": "WG9W5O9O11TGGOLU6OD2",
            "secret_key": "h2DfrWvlS4NMkdgGin4g6OB6Z50F1VNmhRCRQo3W"
        }
    ],
    "swift_keys": [],
    "caps": [],
    "op_mask": "read, write, delete",
    "system": "true",
    "default_placement": "",
    "default_storage_class": "",
    "placement_tags": [],
    "bucket_quota": {
        "enabled": false,
        "check_on_raw": false,
        "max_size": -1,
        "max_size_kb": 0,
        "max_objects": -1
    },
    "user_quota": {
        "enabled": false,
        "check_on_raw": false,
        "max_size": -1,
        "max_size_kb": 0,
        "max_objects": -1
    },
    "temp_url_keys": [],
    "type": "rgw",
    "mfa_ids": []
}
Set the dashboard credentials:

[root@cephnode01 ~]# ceph dashboard set-rgw-api-access-key WG9W5O9O11TGGOLU6OD2
Option RGW_API_ACCESS_KEY updated
[root@cephnode01 ~]# ceph dashboard set-rgw-api-secret-key h2DfrWvlS4NMkdgGin4g6OB6Z50F1VNmhRCRQo3W
Option RGW_API_SECRET_KEY updated
Disable certificate verification, set plain HTTP access, point the dashboard at an rgw host, and use the admin account:

ceph dashboard set-rgw-api-ssl-verify False
ceph dashboard set-rgw-api-scheme http
ceph dashboard set-rgw-api-host 10.15.253.225
ceph dashboard set-rgw-api-port 80
ceph dashboard set-rgw-api-user-id admin
Restart RGW:

ceph orch restart rgw
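To confirm the gateways come back up, list the daemons and issue a plain GET against one endpoint; rgw answers anonymous requests with an S3 XML listing (a hedged check; the host below is the one configured for the dashboard API above):

ceph orch ps | grep rgw
curl http://10.15.253.225:80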