Ceph Cluster Installation and Deployment

Table of Contents

1. Environment preparation
  1.1 Environment overview
  1.2 Configure hosts resolution (all nodes)
  1.3 Configure time synchronization
2. Install Docker (all nodes)
3. Configure images
  3.1 Pull the Ceph images (all nodes)
  3.2 Set up a local registry (ceph-01)
  3.3 Configure the private registry (all nodes)
  3.4 Tag the Docker image (ceph-01)
4. Install the Ceph tools (all nodes)
5. Bootstrap the cluster (ceph-01)
6. Add hosts to the cluster (ceph-01)
7. Deploy OSDs for data storage (ceph-01)
8. Access the dashboard to check status

1. Environment preparation

1.1 Environment overview

| Hostname | IP             | Disk 1 | Disk 2 | Disk 3 | CPU | Memory | OS           | Hypervisor |
|----------|----------------|--------|--------|--------|-----|--------|--------------|------------|
| ceph-01  | 192.168.200.33 | 100G   | 50G    | 50G    | 2C  | 4G     | Ubuntu 22.04 | VMware 15  |
| ceph-02  | 192.168.200.34 | 100G   | 50G    | 50G    | 2C  | 4G     | Ubuntu 22.04 | VMware 15  |
| ceph-03  | 192.168.200.35 | 100G   | 50G    | 50G    | 2C  | 4G     | Ubuntu 22.04 | VMware 15  |

1.2 Configure hosts resolution (all nodes)

root@ceph-01:~# cat >> /etc/hosts << EOF
192.168.200.33 ceph-01
192.168.200.34 ceph-02
192.168.200.35 ceph-03
EOF
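Before moving on, it is worth confirming that each name resolves on every node; getent reads /etc/hosts through NSS, so a quick loop shows what the resolver will actually return:

root@ceph-01:~# for h in ceph-01 ceph-02 ceph-03; do getent hosts $h; done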
1.3 Configure time synchronization

On all nodes:

# enable NTP
root@ceph-01:~# timedatectl set-ntp true
# set the Asia/Shanghai time zone
root@ceph-01:~# timedatectl set-timezone Asia/Shanghai
# write the system time to the hardware clock
root@ceph-01:~# hwclock --systohc

On ceph-01:

# install chrony
root@ceph-01:~# apt install -y chrony
# append the server configuration
root@ceph-01:~# cat >> /etc/chrony/chrony.conf << EOF
server controller iburst
allow all
local stratum 10
EOF
# restart the service
root@ceph-01:~# systemctl restart chronyd

On ceph-02 and ceph-03:

# install chrony
root@ceph-02:~# apt install -y chrony
# append the server configuration
root@ceph-02:~# cat >> /etc/chrony/chrony.conf << EOF
pool ceph-01 iburst maxsources 4
EOF
# restart the service
root@ceph-02:~# systemctl restart chronyd
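To verify that time synchronization is actually working before continuing, chrony can report its sources; on ceph-02 and ceph-03 the listing should show ceph-01 once the configuration above is in place:

root@ceph-02:~# chronyc sources -v
root@ceph-02:~# timedatectl status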
2. Install Docker (all nodes)

# install prerequisites
root@ceph-01:~# apt -y install ca-certificates curl gnupg lsb-release
# add Docker's official GPG key
root@ceph-01:~# mkdir -p /etc/apt/keyrings
root@ceph-01:~# curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg
# set up the Docker apt repository
root@ceph-01:~# echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null
# refresh the package index
root@ceph-01:~# apt update
# install docker
root@ceph-01:~# apt -y install docker-ce
# enable docker at boot
root@ceph-01:~# systemctl enable docker
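A quick sanity check that the daemon came up and will survive a reboot, before any images are pulled:

root@ceph-01:~# systemctl is-enabled docker
root@ceph-01:~# systemctl is-active docker
root@ceph-01:~# docker version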
3. Configure images

3.1 Pull the Ceph images (all nodes)

root@ceph-01:~# docker pull quay.io/ceph/ceph:v17
root@ceph-01:~# docker pull quay.io/ceph/ceph-grafana:8.3.5
root@ceph-01:~# docker pull quay.io/prometheus/prometheus:v2.33.4
root@ceph-01:~# docker pull quay.io/prometheus/node-exporter:v1.3.1
root@ceph-01:~# docker pull quay.io/prometheus/alertmanager:v0.23.0
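Repeating five pulls on three machines is tedious; assuming passwordless root SSH between the nodes (it is needed later for ssh-copy-id anyway), a small loop run from ceph-01 is one way to drive the pulls on the other two nodes:

for node in ceph-02 ceph-03; do
  for img in quay.io/ceph/ceph:v17 \
             quay.io/ceph/ceph-grafana:8.3.5 \
             quay.io/prometheus/prometheus:v2.33.4 \
             quay.io/prometheus/node-exporter:v1.3.1 \
             quay.io/prometheus/alertmanager:v0.23.0; do
    ssh root@$node "docker pull $img"   # pull each image on the remote node
  done
done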
3.2 Set up a local registry (ceph-01)

# pull the registry image
root@ceph-01:~# docker pull registry
# start the registry container
root@ceph-01:~# docker run -d --name registry -p 5000:5000 --restart always 3a0f7b0a13ef
root@ceph-01:~# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
04b3ea9c4c08   3a0f7b0a13ef   "/entrypoint.sh /etc…"   1 second ago   Up Less than a second   0.0.0.0:5000->5000/tcp, :::5000->5000/tcp   registry

3.3 Configure the private registry (all nodes)

# trust the local registry address
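The container exposes the standard Docker Registry v2 HTTP API, so a curl against it confirms the registry is answering (the catalog stays empty until the push in 3.4):

root@ceph-01:~# curl http://192.168.200.33:5000/v2/_catalog
{"repositories":[]}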
root@ceph-01:~# cat > /etc/docker/daemon.json << EOF
{
  "insecure-registries": ["192.168.200.33:5000"]
}
EOF
# restart docker
root@ceph-01:~# systemctl daemon-reload
root@ceph-01:~# systemctl restart docker
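docker info lists the registries the daemon now treats as insecure, which confirms daemon.json was picked up after the restart:

root@ceph-01:~# docker info | grep -A 2 "Insecure Registries"
 Insecure Registries:
  192.168.200.33:5000
  127.0.0.0/8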
3.4 Tag the Docker image (ceph-01)

# tag the image with the local registry address
root@ceph-01:~# docker tag 0912465dcea5 192.168.200.33:5000/ceph:v17
# check the new tag
root@ceph-01:~# docker images | grep 0912465dcea5
192.168.200.33:5000/ceph   v17   0912465dcea5   12 months ago   1.34GB
quay.io/ceph/ceph          v17   0912465dcea5   12 months ago   1.34GB
# push the image to the local registry
root@ceph-01:~# docker push 192.168.200.33:5000/ceph:v17
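After the push, the image should show up in the registry's catalog and tag list:

root@ceph-01:~# curl http://192.168.200.33:5000/v2/_catalog
{"repositories":["ceph"]}
root@ceph-01:~# curl http://192.168.200.33:5000/v2/ceph/tags/list
{"name":"ceph","tags":["v17"]}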
4. Install the Ceph tools (all nodes)

root@ceph-01:~# apt install -y cephadm ceph-common

5. Bootstrap the cluster (ceph-01)

# initialize the mon node
root@ceph-01:~# mkdir -p /etc/ceph
root@ceph-01:~# cephadm --image 192.168.200.33:5000/ceph:v17 bootstrap --mon-ip 192.168.200.33 --initial-dashboard-user admin --initial-dashboard-password 000000 --skip-pull
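Bootstrap prints the dashboard URL and admin credentials when it finishes. At this point the cluster exists with a single mon and mgr, which can be confirmed before adding the other hosts:

root@ceph-01:~# ceph -s        # one mon and one mgr; expect a warning until OSDs are added
root@ceph-01:~# ceph orch ps   # containers cephadm has deployed on ceph-01 so far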
6. Add hosts to the cluster (ceph-01)

# distribute the cluster SSH public key
root@ceph-01:~# ssh-copy-id -f -i /etc/ceph/ceph.pub ceph-02
root@ceph-01:~# ssh-copy-id -f -i /etc/ceph/ceph.pub ceph-03
# add the hosts to the cluster
root@ceph-01:~# ceph orch host add ceph-02
root@ceph-01:~# ceph orch host add ceph-03
# list the hosts
root@ceph-01:~# ceph orch host ls
HOST ADDR LABELS STATUS
ceph-01 192.168.200.33 _admin
ceph-02 192.168.200.34
ceph-03 192.168.200.35
3 hosts in cluster

PS:
# to deploy additional monitors
ceph orch apply mon ceph-01,ceph-02,ceph-03
# to delete the cluster
cephadm rm-cluster --fsid d92b85c0-3ecd-11ed-a617-3f7cf3e2d6d8 --force
7. Deploy OSDs for data storage (ceph-01)

# list the available disk devices
root@ceph-01:~# ceph orch device ls
HOST PATH TYPE DEVICE ID SIZE AVAILABLE REFRESHED REJECT REASONS
ceph-01 /dev/sdb hdd 53.6G Yes 18m ago
ceph-01 /dev/sdc hdd 53.6G Yes 18m ago
ceph-02 /dev/sdb hdd 53.6G Yes 45s ago
ceph-02 /dev/sdc hdd 53.6G Yes 45s ago
ceph-03 /dev/sdb hdd 53.6G Yes 32s ago
ceph-03 /dev/sdc hdd 53.6G Yes 32s ago
# create OSDs automatically on any unused devices and add them to the cluster
root@ceph-01:~# ceph orch apply osd --all-available-devices

PS:
# to create OSDs from specific devices on specific hosts
ceph orch daemon add osd ceph-01:/dev/sdb
ceph orch daemon add osd ceph-02:/dev/sdb
ceph orch daemon add osd ceph-03:/dev/sdb
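Whichever method is used, the individual OSD daemons can be inspected before checking overall cluster health:

root@ceph-01:~# ceph osd tree                    # OSD placement and up/in state per host
root@ceph-01:~# ceph orch ps --daemon-type osd   # the osd containers cephadm deployed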
# check the cluster and OSD status
root@ceph-01:~# ceph -s
  cluster:
    id:     cf4e18fa-36a8-11ee-b041-03777440eaac
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum ceph-01,ceph-02,ceph-03 (age 3m)
    mgr: ceph-01.ydxjzm(active, since 7m), standbys: ceph-02.zpbmp
    osd: 6 osds: 6 up, 6 in (since 1.16241s)

  data:
    pools:   1 pools, 1 pgs
    objects: 2 objects, 449 KiB
    usage:   120 MiB used, 300 GiB / 300 GiB avail
    pgs:     1 active+clean

root@ceph-01:~# ceph df
--- RAW STORAGE ---
CLASS SIZE AVAIL USED RAW USED %RAW USED
hdd 300 GiB 300 GiB 120 MiB 120 MiB 0.04
TOTAL 300 GiB 300 GiB 120 MiB 120 MiB 0.04

--- POOLS ---
POOL ID PGS STORED OBJECTS USED %USED MAX AVAIL
.mgr 1 1 449 KiB 2 449 KiB 0 95 GiB

8. Access the dashboard to check status

Visit https://192.168.200.33:8443/ (the Ceph dashboard; log in with the admin / 000000 credentials set at bootstrap).
Visit https://192.168.200.33:3000/ (the Grafana monitoring dashboard).
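If the dashboard lands on a different host or port in your deployment, the mgr can report the exact endpoints it is serving; the output is a JSON map along these lines:

root@ceph-01:~# ceph mgr services
{
    "dashboard": "https://192.168.200.33:8443/",
    "prometheus": "http://192.168.200.33:9283/"
}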