Environment Overview

IP Address       Spec      Hostname     Ceph Version
10.15.253.161    c2m8h300  cephnode01   Octopus 15.2.4
10.15.253.193    c2m8h300  cephnode02   Octopus 15.2.4
10.15.253.225    c2m8h300  cephnode03   Octopus 15.2.4
#Linux distribution and kernel version
[root@cephnode01 ~]# cat /etc/redhat-release
CentOS Linux release 8.2.2004 (Core)
[root@cephnode01 ~]# uname -r
4.18.0-193.14.2.el8_2.x86_64
#Network design: a separate network for each role is recommended
10.15.253.0/24   #Public Network
172.31.253.0/24  #Cluster Network
#Besides the system disk, each Ceph node should have at least two identical large-capacity disks attached; do not partition them
[root@cephnode01 ~]# lsblk
NAME   MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sda      8:0    0   20G  0 disk
├─sda1   8:1    0  200M  0 part /boot
├─sda2   8:2    0    1G  0 part [SWAP]
└─sda3   8:3    0 18.8G  0 part /
sdb      8:16   0   20G  0 disk
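If a data disk was used before (old partition table, LVM metadata, or a filesystem signature), Ceph/cephadm will later refuse to consume it. This step is not part of the original post; a minimal sketch for wiping /dev/sdb, assuming the disk holds no data you need:

#Run on the node that owns the disk to clear leftover signatures
wipefs -a /dev/sdb
sgdisk --zap-all /dev/sdb
#Or, once the cluster is bootstrapped, let the orchestrator do it:
#ceph orch device zap cephnode02 /dev/sdb --force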
2.1.1 Ceph Installation and Version Selection
https://docs.ceph.com/docs/master/install/
ceph-deploy is a tool for quickly deploying a cluster, but the community no longer actively maintains it. It only supports Ceph releases up to Nautilus and does not support RHEL 8, CentOS 8, or newer operating systems.
Since the systems here run CentOS 8, the cephadm deployment tool is used to deploy the Octopus release of Ceph.
2.1.2 Base Environment Preparation
Run the following on all Ceph nodes; cephnode01 is shown as the example.
#(1) Stop and disable the firewall:
systemctl stop firewalld
systemctl disable firewalld
#(2) Disable SELinux:
sed -i 's/enforcing/disabled/' /etc/selinux/config
setenforce 0
#(3) On cephnode01, configure passwordless SSH to cephnode02 and cephnode03:
dnf install sshpass -y
ssh-keygen -t rsa -f ~/.ssh/id_rsa -P ''
for ip in 161 193 225 ;do sshpass -pZxzn@2020 ssh-copy-id -o StrictHostKeyChecking=no 10.15.253.$ip ;done
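A quick check (not in the original post) to confirm key-based login works before continuing:

#Verify passwordless login to every node
for ip in 161 193 225 ;do ssh -o BatchMode=yes 10.15.253.$ip hostname ;done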
#(4) On cephnode01, add the hostnames (skip if already configured):
cat >>/etc/hosts <<EOF
10.15.253.161 cephnode01
10.15.253.193 cephnode02
10.15.253.225 cephnode03
EOF
for ip in 193 225 ;do scp -rp /etc/hosts root@10.15.253.$ip:/etc/hosts ;done
#(5) Raise the maximum number of open file descriptors
echo "ulimit -SHn 102400" >> /etc/rc.local
cat >> /etc/security/limits.conf << EOF
* soft nofile 65535
* hard nofile 65535
EOF
#(6) Kernel parameter tuning
echo 'net.ipv4.ip_forward = 1' >>/etc/sysctl.conf
echo 'kernel.pid_max = 4194303' >>/etc/sysctl.conf
#Only fall back to swap when free memory is nearly exhausted
echo "vm.swappiness = 0" >>/etc/sysctl.conf
sysctl -p
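Optionally confirm the values are active (not in the original post):

#Check that the kernel parameters took effect
sysctl net.ipv4.ip_forward kernel.pid_max vm.swappiness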
#(7) Sync network time and set the timezone (skip if already configured)
#Install chrony and synchronize against the cephnode01 node
yum install chrony -y
vim /etc/chrony.conf
server cephnode01 iburst
---
systemctl restart chronyd.service
systemctl enable chronyd.service
chronyc sources
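The "server cephnode01 iburst" line belongs in chrony.conf on cephnode02 and cephnode03; cephnode01 itself also has to allow the other nodes to sync from it. A minimal sketch of that missing piece (not shown in the original post), assuming cephnode01 keeps its default upstream time servers:

#On cephnode01 only: let the other Ceph nodes use it as a time source
echo "allow 10.15.253.0/24" >> /etc/chrony.conf
systemctl restart chronyd.service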
#(8) read_ahead: improve disk read performance by prefetching data into RAM
echo "8192" > /sys/block/sda/queue/read_ahead_kb
#(9) I/O scheduler: use noop (elevator) for SSDs and deadline for SATA/SAS disks
#https://blog.csdn.net/shipeng1022/article/details/78604910
echo "deadline" >/sys/block/sda/queue/scheduler
echo "deadline" >/sys/block/sdb/queue/scheduler
#echo "noop" >/sys/block/sd[x]/queue/scheduler
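Note that "deadline" and "noop" are the CentOS 7 scheduler names; the CentOS 8 kernel used here (4.18, blk-mq only) exposes mq-deadline/kyber/bfq/none instead, so the echoes above will be rejected. A sketch of the equivalent setting, plus a udev rule to persist it across reboots (not in the original post):

#Check which schedulers the kernel actually offers, then pick one of them
cat /sys/block/sda/queue/scheduler
echo "mq-deadline" > /sys/block/sda/queue/scheduler
#Persist via udev: mq-deadline for rotational disks, none for SSDs
cat > /etc/udev/rules.d/60-io-scheduler.rules <<'EOF'
ACTION=="add|change", KERNEL=="sd[a-z]", ATTR{queue/rotational}=="1", ATTR{queue/scheduler}="mq-deadline"
ACTION=="add|change", KERNEL=="sd[a-z]", ATTR{queue/rotational}=="0", ATTR{queue/scheduler}="none"
EOF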
3. Add the Octopus yum repository
#Quote the heredoc delimiter so the shell passes $basearch through to yum unexpanded
cat >>/etc/yum.repos.d/ceph.repo <<'EOF'
[Ceph]
name=Ceph packages for $basearch
baseurl=https://mirrors.aliyun.com/ceph/rpm-octopus/el8/$basearch
enabled=1
gpgcheck=0
type=rpm-md
[Ceph-noarch]
name=Ceph noarch packages
baseurl=https://mirrors.aliyun.com/ceph/rpm-octopus/el8/noarch
enabled=1
gpgcheck=0
type=rpm-md
[ceph-source]
name=Ceph source packages
baseurl=https://mirrors.aliyun.com/ceph/rpm-octopus/el8/SRPMS
enabled=1
gpgcheck=0
type=rpm-md
EOF
yum clean all && yum makecache
#Install basic utilities
yum install net-tools wget vim bash-completion lrzsz unzip zip -y
4. Deploying with the cephadm tool
https://docs.ceph.com/docs/master/cephadm/install/
Deployment with cephadm is supported starting from version 15 (Octopus); ceph-deploy only supports releases up to version 14 (Nautilus).
4.1 Fetch the latest cephadm and make it executable
Run on the cephnode01 node:

[root@cephnode01 ~]# curl --silent --remote-name --location https://github.com/ceph/ceph/raw/octopus/src/cephadm/cephadm
[root@cephnode01 ~]# chmod +x cephadm
[root@cephnode01 ~]# ll
-rwxr-xr-x. 1 root root 184653 Sep 10 12:01 cephadm
4.2 Install the latest Octopus release with cephadm
The Aliyun yum mirror was already configured above, so there is no need to add the repository again as the official documentation describes.

#Install on all Ceph nodes
[root@cephnode01 ~]# dnf install python3 podman -y
[root@cephnode01 ~]# ./cephadm install
...
[root@cephnode01 ~]# which cephadm
/usr/sbin/cephadm
5. Create a new Ceph cluster
5.1 Bootstrap the admin node
Bootstrap the first monitor with its mon IP on a network reachable by every host that will access the Ceph cluster; cephadm writes the generated configuration files into /etc/ceph.

[root@cephnode01 ~]# mkdir -p /etc/ceph
[root@cephnode01 ~]# cephadm bootstrap --mon-ip 10.15.253.161
...
             URL: https://cephnode01:8443/
            User: admin
        Password: 6v7xazcbwk
...
You can log in at https://cephnode01:8443/ to verify; the dashboard forces a password change on first login.
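The environment section defines a separate cluster network (172.31.253.0/24), but the bootstrap command above never references it. A minimal sketch (not in the original post) for pointing OSD replication traffic at that network:

#Send OSD replication/heartbeat traffic over the dedicated cluster network
ceph config set global cluster_network 172.31.253.0/24
#The public network is normally recorded by bootstrap; set it explicitly if needed
ceph config set global public_network 10.15.253.0/24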
5.2 Map the ceph command onto the local host
Cephadm does not require any Ceph packages to be installed on the host, but it is convenient to enable simple access to the ceph command.
The cephadm shell command launches a bash shell in a container with all Ceph packages installed. By default, if configuration and keyring files are found in /etc/ceph on the host, they are passed into the container environment so the shell is fully functional.
[root@cephnode01 ~]# cephadm shell
[root@cephnode01 ~]# alias ceph='cephadm shell -- ceph'
[root@cephnode01 ~]# exit
#Install the ceph-common package, which provides the ceph, rbd and mount.ceph commands
[root@cephnode01 ~]# cephadm install ceph-common
#Check the version
[root@cephnode01 ~]# ceph -v
ceph version 15.2.4 (7447c15c6ff58d7fce91843b705a268a1917325c) octopus (stable)
Check the status

[root@cephnode01 ~]# ceph status
  cluster:
    id:     8a4fdb4e-f31c-11ea-be33-000c29358c7a
    health: HEALTH_WARN
            Reduced data availability: 1 pg inactive
            OSD count 0 < osd_pool_default_size 3

  services:
    mon: 1 daemons, quorum ceph135 (age 14m)
    mgr: ceph03.oesega(active, since 10m)
    osd: 0 osds: 0 up (since 31m), 0 in

  data:
    pools:   1 pools, 1 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:     100.000% pgs unknown
             1 unknown
5.3 Add new nodes to the Ceph cluster

[root@cephnode01 ~]# ssh-copy-id -f -i /etc/ceph/ceph.pub root@cephnode02
[root@cephnode01 ~]# ssh-copy-id -f -i /etc/ceph/ceph.pub root@cephnode03
[root@cephnode01 ~]# ceph orch host add cephnode02
Added host 'cephnode02'
[root@cephnode01 ~]# ceph orch host add cephnode03
Added host 'cephnode03'
5.4 Deploy additional monitors
Label the nodes that should run a mon; here all of them are selected.

[root@cephnode01 ~]# ceph orch host label add cephnode01 mon
Added label mon to host cephnode01
[root@cephnode01 ~]# ceph orch host label add cephnode02 mon
Added label mon to host cephnode02
[root@cephnode01 ~]# ceph orch host label add cephnode03 mon
Added label mon to host cephnode03
[root@cephnode01 ~]# ceph orch host ls
HOST        ADDR        LABELS  STATUS
cephnode01  cephnode01  mon
cephnode02  cephnode02  mon
cephnode03  cephnode03  mon
Tell cephadm to deploy mons according to the label; this step takes a while as each node pulls the image and starts the container.

[root@cephnode01 ~]# ceph orch apply mon label:mon
To verify the deployment, check the containers on the other two nodes:

[root@cephnode02 ~]# podman ps -a
...
[root@cephnode02 ~]# podman images
REPOSITORY                    TAG      IMAGE ID      CREATED        SIZE
docker.io/ceph/ceph           v15      852b28cb10de  3 weeks ago    1 GB
docker.io/prom/node-exporter  v0.18.1  e5a616e4b9cf  15 months ago  24.3 MB
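The orchestrator can also report the mon daemons directly from the admin node (not shown in the original post):

[root@cephnode01 ~]# ceph orch ps --daemon-type mon
[root@cephnode01 ~]# ceph -s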
6. Deploy OSDs
6.1 List the available disks

[root@cephnode01 ~]# ceph orch device ls
HOST    PATH      TYPE  SIZE   DEVICE  AVAIL  REJECT REASONS
ceph01  /dev/sda  hdd   20.0G          False  locked, Insufficient space (<5GB) on vgs, LVM detected
ceph01  /dev/sdb  hdd   20.0G          True
ceph02  /dev/sda  hdd   20.0G          False  Insufficient space (<5GB) on vgs, LVM detected, locked
ceph02  /dev/sdb  hdd   20.0G          True
ceph03  /dev/sda  hdd   20.0G          False  locked, Insufficient space (<5GB) on vgs, LVM detected
ceph03  /dev/sdb  hdd   20.0G          True
6.2 Use all available disks

[root@cephnode01 ~]# ceph orch apply osd --all-available-devices

To add a single disk instead:

[root@cephnode01 ~]# ceph orch daemon add osd cephnode02:/dev/sdc
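For finer control than --all-available-devices, cephadm also accepts an OSD service specification. A minimal sketch (the file name osd_spec.yml and the filters are illustrative, not from the original post) that only consumes rotational 20 GB disks on the three nodes:

#Write an OSD service spec and hand it to the orchestrator
cat > osd_spec.yml <<'EOF'
service_type: osd
service_id: default_drive_group
placement:
  hosts:
    - cephnode01
    - cephnode02
    - cephnode03
data_devices:
  rotational: 1
  size: '20G'
EOF
[root@cephnode01 ~]# ceph orch apply osd -i osd_spec.yml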
6.3 Verify the deployment

[root@cephnode01 ~]# ceph osd df
ID  CLASS  WEIGHT   REWEIGHT  SIZE    RAW USE  DATA     OMAP     META      AVAIL   %USE  VAR   PGS  STATUS
 0  hdd    0.01949   1.00000  20 GiB  1.0 GiB  3.8 MiB    1 KiB  1024 MiB  19 GiB  5.02  1.00    1  up
 1  hdd    0.01949   1.00000  20 GiB  1.0 GiB  3.8 MiB    1 KiB  1024 MiB  19 GiB  5.02  1.00    1  up
 2  hdd    0.01949   1.00000  20 GiB  1.0 GiB  3.8 MiB    1 KiB  1024 MiB  19 GiB  5.02  1.00    1  up
                     TOTAL    60 GiB  3.0 GiB   11 MiB  4.2 KiB   3.0 GiB  57 GiB  5.02
MIN/MAX VAR: 1.00/1.00  STDDEV: 0
7. Deploy storage services
7.1 Deploy CephFS
Deploy the MDS service for CephFS, specifying the service name and the number of MDS daemons.

[root@cephnode01 ~]# ceph orch apply mds fs-cluster --placement=3
[root@cephnode01 ~]# ceph -s
  cluster:
    id:     8a4fdb4e-f31c-11ea-be33-000c29358c7a
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum cephnode01,cephnode02,cephnode03 (age 1m)
    mgr: cephnode01.oesega(active, since 49m), standbys: cephnode02.lphrtb, cephnode03.wkthtb
    mds: 3 up:standby
    osd: 3 osds: 3 up (since 51m), 3 in (since 30m)

  data:
    pools:   1 pools, 1 pgs
    objects: 0 objects, 0 B
    usage:   3.0 GiB used, 57 GiB / 60 GiB avail
    pgs:     1 active+clean
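Note that all three MDS daemons show as standby because no CephFS filesystem exists yet; the original post stops at deploying the MDS service. A minimal sketch of creating a filesystem (the names cephfs, cephfs_data and cephfs_metadata are illustrative):

[root@cephnode01 ~]# ceph osd pool create cephfs_data 32
[root@cephnode01 ~]# ceph osd pool create cephfs_metadata 32
[root@cephnode01 ~]# ceph fs new cephfs cephfs_metadata cephfs_data
#Alternatively, a single command creates the pools and the filesystem in one step:
#ceph fs volume create cephfs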
7.2 Deploy RGW
Create a realm

[root@cephnode01 ~]# radosgw-admin realm create --rgw-realm=rgw-org --default
{
    "id": "43dc34c0-6b5b-411c-9e23-687a29c8bd00",
    "name": "rgw-org",
    "current_period": "ea3dd54c-2dfe-4180-bf11-4415be6ccafd",
    "epoch": 1
}
Create a zonegroup

[root@cephnode01 ~]# radosgw-admin zonegroup create --rgw-zonegroup=rgwgroup --master --default
{
    "id": "1878ecaa-216b-4c99-ad4e-b72f4fa9193f",
    "name": "rgwgroup",
    "api_name": "rgwgroup",
    "is_master": "true",
    "endpoints": [],
    "hostnames": [],
    "hostnames_s3website": [],
    "master_zone": "",
    "zones": [],
    "placement_targets": [],
    "default_placement": "",
    "realm_id": "43dc34c0-6b5b-411c-9e23-687a29c8bd00",
    "sync_policy": {
        "groups": []
    }
}
Create a zone

[root@cephnode01 ~]# radosgw-admin zone create --rgw-zonegroup=rgwgroup --rgw-zone=zone-dc1 --master --default
{
    "id": "fbdc5f83-9022-4675-b98e-39738920bb57",
    "name": "zone-dc1",
    "domain_root": "zone-dc1.rgw.meta:root",
    "control_pool": "zone-dc1.rgw.control",
    "gc_pool": "zone-dc1.rgw.log:gc",
    "lc_pool": "zone-dc1.rgw.log:lc",
    "log_pool": "zone-dc1.rgw.log",
    "intent_log_pool": "zone-dc1.rgw.log:intent",
    "usage_log_pool": "zone-dc1.rgw.log:usage",
    "roles_pool": "zone-dc1.rgw.meta:roles",
    "reshard_pool": "zone-dc1.rgw.log:reshard",
    "user_keys_pool": "zone-dc1.rgw.meta:users.keys",
    "user_email_pool": "zone-dc1.rgw.meta:users.email",
    "user_swift_pool": "zone-dc1.rgw.meta:users.swift",
    "user_uid_pool": "zone-dc1.rgw.meta:users.uid",
    "otp_pool": "zone-dc1.rgw.otp",
    "system_key": {
        "access_key": "",
        "secret_key": ""
    },
    "placement_pools": [
        {
            "key": "default-placement",
            "val": {
                "index_pool": "zone-dc1.rgw.buckets.index",
                "storage_classes": {
                    "STANDARD": {
                        "data_pool": "zone-dc1.rgw.buckets.data"
                    }
                },
                "data_extra_pool": "zone-dc1.rgw.buckets.non-ec",
                "index_type": 0
            }
        }
    ],
    "realm_id": "43dc34c0-6b5b-411c-9e23-687a29c8bd00"
}
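After creating or changing the realm, zonegroup, and zone, the period normally has to be committed so the RGW daemons pick up the new configuration; the original post omits this step. A minimal sketch:

[root@cephnode01 ~]# radosgw-admin period update --commit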
Deploy a set of radosgw daemons for this realm and zone; here only two nodes are given RGW daemons.

[root@cephnode01 ~]# ceph orch apply rgw rgw-org zone-dc1 --placement="2 cephnode02 cephnode03"
Verify

[root@cephnode01 ~]# ceph -s
  cluster:
    id:     8a4fdb4e-f31c-11ea-be33-000c29358c7a
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum cephnode01,cephnode02,cephnode03 (age 1m)
    mgr: cephnode01.oesega(active, since 49m), standbys: cephnode02.lphrtb, cephnode03.wkthtb
    mds: 3 up:standby
    osd: 3 osds: 3 up (since 51m), 3 in (since 30m)
    rgw: 2 daemons active (rgw-org.zone-dc1.cephnode02.cdgjsi, rgw-org.zone-dc1.cephnode03.nmbbsz)

  data:
    pools:   1 pools, 1 pgs
    objects: 0 objects, 0 B
    usage:   3.0 GiB used, 57 GiB / 60 GiB avail
    pgs:     1 active+clean
Enable the dashboard for RGW

#Create an RGW management user
[root@cephnode01 ~]# radosgw-admin user create --uid=admin --display-name=admin --system
{
    "user_id": "admin",
    "display_name": "admin",
    "email": "",
    "suspended": 0,
    "max_buckets": 1000,
    "subusers": [],
    "keys": [
        {
            "user": "admin",
            "access_key": "WG9W5O9O11TGGOLU6OD2",
            "secret_key": "h2DfrWvlS4NMkdgGin4g6OB6Z50F1VNmhRCRQo3W"
        }
    ],
    "swift_keys": [],
    "caps": [],
    "op_mask": "read, write, delete",
    "system": "true",
    "default_placement": "",
    "default_storage_class": "",
    "placement_tags": [],
    "bucket_quota": {
        "enabled": false,
        "check_on_raw": false,
        "max_size": -1,
        "max_size_kb": 0,
        "max_objects": -1
    },
    "user_quota": {
        "enabled": false,
        "check_on_raw": false,
        "max_size": -1,
        "max_size_kb": 0,
        "max_objects": -1
    },
    "temp_url_keys": [],
    "type": "rgw",
    "mfa_ids": []
}
Set the dashboard credentials

[root@cephnode01 ~]# ceph dashboard set-rgw-api-access-key WG9W5O9O11TGGOLU6OD2
Option RGW_API_ACCESS_KEY updated
[root@cephnode01 ~]# ceph dashboard set-rgw-api-secret-key h2DfrWvlS4NMkdgGin4g6OB6Z50F1VNmhRCRQo3W
Option RGW_API_SECRET_KEY updated
Disable certificate verification, use plain HTTP, and point the dashboard at the RGW host and the admin account

ceph dashboard set-rgw-api-ssl-verify False
ceph dashboard set-rgw-api-scheme http
ceph dashboard set-rgw-api-host 10.15.253.225
ceph dashboard set-rgw-api-port 80
ceph dashboard set-rgw-api-user-id admin
Restart RGW

ceph orch restart rgw
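If "ceph orch restart rgw" does not match a service, list the RGW services first and restart by the exact name; assuming cephadm named the service with the usual rgw.<realm>.<zone> pattern, that would look like:

[root@cephnode01 ~]# ceph orch ls --service-type rgw
[root@cephnode01 ~]# ceph orch restart rgw.rgw-org.zone-dc1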