
Main topics:

Keepalived high availability; deploying Ceph distributed storage

Part 1: Advanced Website Architecture Project Cases

Case 1: Keepalived High Availability

  • 1) Use keepalived to make the two proxy servers highly available (192.168.2.5 / 192.168.2.6)
  • 2) Configure the VIP as 192.168.2.80
  • 3) Update the corresponding DNS resolution records

Lab topology diagram:

Host configuration table:

Step 1: Configure the second proxy server, proxy2 (to remove the single point of failure)

1) Deploy HAProxy (load balancing)

[root@proxy2 ~]# yum -y install haproxy
[root@proxy2 ~]# vim /etc/haproxy/haproxy.cfg
listen wordpress *:80      //listen on port 80
  balance roundrobin      //round-robin scheduling algorithm
  server web1 192.168.2.11:80 check inter 2000 rise 2 fall 3
  server web2 192.168.2.12:80 check inter 2000 rise 2 fall 3
  server web3 192.168.2.13:80 check inter 2000 rise 2 fall 3
[root@proxy2 ~]# systemctl start haproxy
[root@proxy2 ~]# systemctl enable haproxy
[root@proxy2 ~]# ss -nlptu | grep :80
tcp    LISTEN     0      128       *:80                    *:*                   users:(("haproxy",pid=684,fd=7))
[root@proxy2 ~]# firewall-cmd --set-default-zone=trusted
[root@proxy2 ~]# setenforce 0
[root@proxy2 ~]# sed -i '/SELINUX/s/enforcing/permissive/' /etc/selinux/config
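
To confirm proxy2 answers on port 80, a quick check can be run from any host on the 192.168.2.0/24 network (the client prompt below is an assumption; any machine with curl works). Since all three backends serve the same content from shared storage, identical responses are expected; per-backend distribution can be confirmed from HAProxy's own counters if needed.

[root@client ~]# for i in 1 2 3
> do
> curl -s http://192.168.2.6/ | head -1     //each request should return the page's first line
> done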

Step 2: Configure keepalived on both proxy servers (high availability) -> VIP: 192.168.2.80

1) Configure the first proxy server, proxy (192.168.2.5)

[root@proxy ~]# yum install -y keepalived
[root@proxy ~]# vim /etc/keepalived/keepalived.conf
global_defs {
  router_id proxy1      //set the router ID
  vrrp_iptables         //keep keepalived from adding iptables rules that would block the VIP (add this line manually)
}
vrrp_instance VI_1 {
  state MASTER          //MASTER on the primary (change to BACKUP on the backup server)
  interface eth1        //NIC name
  virtual_router_id 51
  priority 100          //server priority; the higher priority claims the VIP first
  advert_int 1
  authentication {
    auth_type PASS
    auth_pass 1111      //password must be identical on master and backup
  }
  virtual_ipaddress {   //VIP address
    192.168.2.80
  }
}
[root@proxy ~]# systemctl start keepalived
[root@proxy ~]# systemctl enable keepalived

2) Configure the second proxy server, proxy2 (192.168.2.6)

[root@proxy2 ~]# yum install -y keepalived
[root@proxy2 ~]# vim /etc/keepalived/keepalived.conf
global_defs {
  router_id proxy2      //set the router ID
  vrrp_iptables         //keep keepalived from adding iptables rules that would block the VIP (add this line manually)
}
vrrp_instance VI_1 {
  state BACKUP          //MASTER on the primary (BACKUP on the backup server)
  interface eth1        //NIC name
  virtual_router_id 51
  priority 95           //server priority; the higher priority claims the VIP first
  advert_int 1
  authentication {
    auth_type PASS
    auth_pass 1111      //password must be identical on master and backup
  }
  virtual_ipaddress {   //VIP address
    192.168.2.80
  }
}
[root@proxy2 ~]# systemctl start keepalived
[root@proxy2 ~]# systemctl enable keepalived

Verify high availability:

1) On the higher-priority MASTER host, check whether it holds the VIP

[root@proxy ~]# ip add show eth1 | grep 192.168.2.80    //check the VIP on the MASTER
inet 192.168.2.80/32 scope global eth1

Open http://192.168.2.80 in a browser and confirm the web page loads.

2) Stop the keepalived service on the higher-priority MASTER host and check whether the lower-priority BACKUP host now holds the VIP; then open http://192.168.2.80 in a browser and confirm the web page still loads.

[root@proxy2 ~]# ip add show eth1 | grep 192.168.2.80
    inet 192.168.2.80/32 scope global eth1

3) Start the keepalived service again on the higher-priority MASTER host and check whether the VIP moves back to the primary server (see the sketch below).
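
Both instances run in the default preempt mode, so the higher-priority node reclaims the VIP automatically once its keepalived comes back. One way to watch the handback (a sketch; the 1-second interval is arbitrary):

[root@proxy ~]# systemctl start keepalived
[root@proxy ~]# watch -n 1 'ip add show eth1 | grep 192.168.2.80'    //the VIP should reappear within a few advertisement intervals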

Step 3: Update the DNS server

1) Modify the resolution records for the site's domain so it also resolves to the VIP

[root@dns ~]# vim /var/named/lab.com.zone
$TTL 1D
@       IN SOA  @ rname.invalid. (
                                        0       ; serial
                                        1D      ; refresh
                                        1H      ; retry
                                        1W      ; expire
                                        3H )    ; minimum
@       NS      dns.lab.com.
dns     A       192.168.2.10
www     A       192.168.2.10
www     A       192.168.2.80    //add a record resolving to the VIP

2) Restart the DNS service

[root@dns ~]# systemctl restart named

Verify:

[root@proxy ~]# vim /etc/resolv.conf
nameserver 192.168.2.10
[root@proxy ~]# nslookup www.lab.com
Server:               192.168.2.10
Address:   192.168.2.10#53
 
Name:      www.lab.com
Address: 192.168.2.10
Name:      www.lab.com
Address: 192.168.2.80

[root@proxy ~]# ping www.lab.com
PING www.lab.com (192.168.2.80) 56(84) bytes of data.
64 bytes from proxy (192.168.2.80): icmp_seq=1 ttl=255 time=0.014 ms
64 bytes from proxy (192.168.2.80): icmp_seq=2 ttl=255 time=0.023 ms


Common problem: fixing the NFS error "clnt_create: RPC: Program not registered"

Running showmount -e can fail with clnt_create: RPC: Program not registered.
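
This usually means rpcbind restarted (or was not running) after the nfs service registered itself, so the NFS programs are no longer known to the portmapper. A typical fix on the NFS server is to restart rpcbind first and then nfs (service names assume CentOS 7):

[root@nfs ~]# systemctl restart rpcbind     //re-register with the portmapper first
[root@nfs ~]# systemctl restart nfs         //then nfs re-registers its programs
[root@nfs ~]# showmount -e localhost        //the export list should now display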


Case 2: Deploy Ceph Distributed Storage

Deploy Ceph distributed storage to achieve the following:

  • 1) Deploy Ceph distributed storage across three servers
  • 2) Provide a shared Ceph file system
  • 3) Migrate the website data from NFS to Ceph storage

Lab topology diagram

Host configuration table

Step 1: Prepare the lab environment

1) Add disks to the virtual machines (on node1, node2, node3)

KVM virtual machines: add two 20 GB disks to each of the three ceph node hosts

[root@node1 ~]# lsblk     //check the two new disks (node1 shown as an example)
NAME   MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
vda    253:0    0  30G  0 disk
└─vda1 253:1    0  30G  0 part /
vdb    253:16   0  20G  0 disk
vdc    253:32   0  20G  0 disk
[root@node2 ~]# lsblk
[root@node3 ~]# lsblk

2) Add a CD-ROM device to the three ceph servers (node1, node2, node3)

  • Method 1: add a CD-ROM device and load ceph10.iso
[root@node1 ~]# blkid /dev/cdrom
/dev/cdrom: UUID="2019-01-24-18-15-06-00" LABEL="CDROM" TYPE="iso9660"
[root@node1 ~]# mount /dev/cdrom /mnt/
[root@node1 ~]# ls /mnt/
EULA  GPL  MON  OSD  README  RPM-GPG-KEY-redhat-release  Tools  TRANS.TBL
[root@node1 ~]# echo "/dev/cdrom /mnt iso9660 defaults 0 0" >> /etc/fstab
[root@node1 ~]# mount -a
Or:
[root@node1 ~]# echo "mount /dev/cdrom /mnt" >> /etc/rc.local     //note: /etc/rc.local must be executable (chmod +x)
  • Method 2: mount the stage-2 material ceph10.iso on /var/ftp/ceph to serve as a YUM repository for all virtual machines
[root@localhost Desktop]# mkdir /var/ftp/ceph
[root@localhost Desktop]# mount -t iso9660 /linux-soft/2/ceph10.iso /var/ftp/ceph/
[root@localhost Desktop]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/loop2      284M  284M     0  100% /var/ftp/ceph

3) Configure the firewall and SELinux (on all hosts)

[root@node1 ~]# firewall-cmd --set-default-zone=trusted
[root@node1 ~]# sed -i '/SELINUX/s/enforcing/permissive/' /etc/selinux/config
[root@node1 ~]# setenforce 0
[root@node2 ~]# firewall-cmd --set-default-zone=trusted
[root@node2 ~]# sed -i '/SELINUX/s/enforcing/permissive/' /etc/selinux/config
[root@node2 ~]# setenforce 0
[root@node3 ~]# firewall-cmd --set-default-zone=trusted
[root@node3 ~]# sed -i '/SELINUX/s/enforcing/permissive/' /etc/selinux/config
[root@node3 ~]# setenforce 0

4) Configure SSH keys on node1 so node1 can connect to node1, node2, and node3 without a password

[root@node1 ~]# ssh-keygen -f /root/.ssh/id_rsa -N ''
[root@node1 ~]# for i in {41..43}
> do
> ssh-copy-id 192.168.2.$i   //copy the key to 192.168.2.41, .42 and .43
> done

Explanation: ssh-keygen -N '' generates a passphrase-free RSA key pair, and ssh-copy-id appends the public key to each node's ~/.ssh/authorized_keys; ceph-deploy relies on these password-less logins in the later steps.

5) Add name-resolution records to /etc/hosts (do not delete the file's existing entries) and sync the file to all ceph nodes

[root@node1 ~]# vim /etc/hosts
192.168.2.41  node1
192.168.2.42  node2
192.168.2.43  node3
[root@node1 ~]# for i in {41..43}
> do
> scp /etc/hosts 192.168.2.$i:/etc
> done

6) Configure the YUM repository for ceph10.iso on all nodes and sync it to all ceph nodes

[root@node1 ~]# vim /etc/yum.repos.d/ceph.repo
[mon]
name=mon
baseurl=file:///mnt/MON
gpgcheck=0
 
[osd]
name=osd
baseurl=file:///mnt/OSD
gpgcheck=0
 
[tools]
name=tools
baseurl=file:///mnt/Tools
gpgcheck=0
[root@node1 ~]# yum repolist
repo id          repo name            status
local_repo    CentOS-7 - Base         9,911
mon               mon                  41
osd               osd                  28
tools            tools                 33
repolist: 10,013
[root@node1 ~]# for i in {42,43}     //remotely add the fstab entry and mount persistently
> do
> ssh 192.168.2.$i 'echo "/dev/cdrom /mnt iso9660 defaults 0 0" >> /etc/fstab ; mount -a'
> done
[root@node1 ~]# for i in {42,43}
> do
> scp /etc/yum.repos.d/ceph.repo 192.168.2.$i:/etc/yum.repos.d/
> done
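
A quick check that the repository also resolves on node2 and node3 (a hedged suggestion; the tail of yum repolist is just its summary line):

[root@node1 ~]# for i in {42,43}
> do
> ssh 192.168.2.$i "yum repolist | tail -1"     //expect the same repolist total as node1
> done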

7) Configure an NTP server to synchronize time

  • node1 as the NTP server
[root@node1 ~]# vim /etc/chrony.conf
allow 192.168.2.0/24      //edit line 26
local stratum 10          //edit line 29 (just uncomment it)
[root@node1 ~]# systemctl restart chronyd
  • node2 and node3 as NTP clients
[root@node2 ~]# vim /etc/chrony.conf
server 192.168.2.41 iburst   
[root@node2 ~]# systemctl restart chronyd
[root@node2 ~]# chronyc sources -v      //check the sync result; look for the ^* marker
210 Number of sources = 1
 
  .-- Source mode  '^' = server, '=' = peer, '#' = local clock.
 / .- Source state '*' = current synced, '+' = combined , '-' = not combined,
| /   '?' = unreachable, 'x' = time may be in error, '~' = time too variable.
||                                                 .- xxxx [ yyyy ] +/- zzzz
||      Reachability register (octal) -.           |  xxxx = adjusted offset,
||      Log2(Polling interval) --.      |          |  yyyy = measured offset,
||                                \     |          |  zzzz = estimated error.
||                                 |    |           \
MS Name/IP address         Stratum Poll Reach LastRx Last sample              
===============================================================================
^* node1                        10   6    37    40     -5ns[ -470us] +/-  168us
 
[root@node3 ~]# vim /etc/chrony.conf
server 192.168.2.41 iburst         
[root@node3 ~]# systemctl restart chronyd
[root@node3 ~]# chronyc sources -v     //check the sync result; look for the ^* marker

Step 2: Deploy the ceph cluster, a distributed storage system (OSD, MON)

1) Install the ceph-deploy management tool on node1, then create and enter a working directory

[root@node1 ~]# yum -y install ceph-deploy
[root@node1 ~]# mkdir ceph-cluster ; cd ceph-cluster
[root@node1 ceph-cluster]#

2) Install the ceph packages on all ceph nodes

[root@node1 ceph-cluster]# for i in node{1..3}
> do
> ssh $i "yum -y install ceph-mon ceph-osd ceph-mds"
> done

3) Initialize the MON service

[root@node1 ceph-cluster]# ceph-deploy new node1 node2 node3  //generate the ceph configuration file
[root@node1 ceph-cluster]# ceph-deploy mon create-initial     //copy the ceph config to node1, node2, node3 and start the mon service on every node
[root@node1 ceph-cluster]# ls /etc/ceph/
ceph.client.admin.keyring  ceph.conf  rbdmap  tmp8O0CCk
[root@node2 ~]# ls /etc/ceph/
ceph.client.admin.keyring  ceph.conf  rbdmap  tmpA_W5Rj
[root@node3 ~]# ls /etc/ceph/
ceph.client.admin.keyring  ceph.conf  rbdmap  tmpnWxN6H

Check the ceph status (failure at this point is expected):

[root@node1 ceph-cluster]# ceph -s
    cluster b752cdca-c028-4b0c-ae1b-c8bf7dac0b48
     health HEALTH_ERR
            no osds
     monmap e1: 3 mons at {node1=192.168.2.41:6789/0,node2=192.168.2.42:6789/0,node3=192.168.2.43:6789/0}
            election epoch 6, quorum 0,1,2 node1,node2,node3
     osdmap e1: 0 osds: 0 up, 0 in
            flags sortbitwise
      pgmap v2: 64 pgs, 1 pools, 0 bytes data, 0 objects
            0 kB used, 0 kB / 0 kB avail
                  64 creating
[root@node1 ceph-cluster]# systemctl status ceph-mon@node1
[root@node2 ~]# systemctl status ceph-mon@node2
[root@node3 ~]# systemctl status ceph-mon@node3

4) Use the ceph-deploy tool to initialize the data disks

[root@node1 ceph-cluster]# ceph-deploy disk zap node1:vdb node1:vdc \
node2:vdb node2:vdc node3:vdb node3:vdc

5) Initialize the OSD cluster and create the OSD storage space

[root@node1 ceph-cluster]# ceph-deploy osd create node1:vdb node1:vdc \
node2:vdb node2:vdc node3:vdb node3:vdc

Check the ceph status:

[root@node1 ceph-cluster]# ceph osd tree
ID WEIGHT  TYPE NAME      UP/DOWN REWEIGHT PRIMARY-AFFINITY
-1 0.08752 root default                                    
-2 0.02917     host node1                                  
 0 0.01459         osd.0       up  1.00000          1.00000
 1 0.01459         osd.1       up  1.00000          1.00000
-3 0.02917     host node2                                  
 2 0.01459         osd.2       up  1.00000          1.00000
 3 0.01459         osd.3       up  1.00000          1.00000
-4 0.02917     host node3                                  
 4 0.01459         osd.4       up  1.00000          1.00000
 5 0.01459         osd.5     down        0          1.00000
 
[root@node1 ceph-cluster]# ceph -s
    cluster b752cdca-c028-4b0c-ae1b-c8bf7dac0b48
     health HEALTH_OK
     monmap e1: 3 mons at {node1=192.168.2.41:6789/0,node2=192.168.2.42:6789/0,node3=192.168.2.43:6789/0}
            election epoch 6, quorum 0,1,2 node1,node2,node3
     osdmap e44: 6 osds: 6 up, 6 in
            flags sortbitwise
      pgmap v75: 64 pgs, 1 pools, 0 bytes data, 0 objects
            203 MB used, 91890 MB / 92093 MB avail
                  64 active+clean
                  
[root@node1 ceph-cluster]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/vdb1      15G   35M   15G    1% /var/lib/ceph/osd/ceph-0
/dev/vdc1      15G   34M   15G    1% /var/lib/ceph/osd/ceph-1

Step 3: Deploy the ceph file system

1) Start the mds service (it can be started on node1, node2, or node3, and on multiple hosts at once)

[root@node1 ceph-cluster]# ceph-deploy mds create node3

2) Create the storage pools (like a file system's inodes and blocks, metadata and data get separate pools)

[root@node1 ceph-cluster]# ceph osd pool create cephfs_data 64
pool 'cephfs_data' created
[root@node1 ceph-cluster]# ceph osd pool create cephfs_metadata 64
pool 'cephfs_metadata' created
[root@node1 ceph-cluster]# ceph osd lspools     //list the pools
0 rbd,1 cephfs_data,2 cephfs_metadata,
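
The placement-group count chosen at creation can be read back for confirmation (a quick sanity check; 64 PGs per pool is a reasonable size for this small six-OSD lab cluster):

[root@node1 ceph-cluster]# ceph osd pool get cephfs_data pg_num
pg_num: 64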

3) Create the file system

[root@node1 ceph-cluster]# ceph fs new myfs1 cephfs_metadata cephfs_data
new fs with metadata pool 2 and data pool 1
[root@node1 ceph-cluster]# ceph fs ls    //list the file systems
name: myfs1, metadata pool: cephfs_metadata, data pools: [cephfs_data ]
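
Once the file system exists, the mds on node3 should leave standby and go active; this can be confirmed with ceph mds stat (the epoch number will vary; the output shape below assumes jewel-era ceph):

[root@node1 ceph-cluster]# ceph mds stat
e5: 1/1/1 up {0=node3=up:active}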

Step 4: Migrate the website data to the ceph cluster

1) Unmount the NFS share on web1, web2, web3

[root@web1 ~]# systemctl stop nginx.service      //stop the Nginx service
[root@web2 ~]# systemctl stop nginx.service
[root@web3 ~]# systemctl stop nginx.service
[root@web1 ~]# umount /usr/local/nginx/html      //unmount the share
[root@web2 ~]# umount /usr/local/nginx/html
[root@web3 ~]# umount /usr/local/nginx/html
[root@web1 ~]# vim /etc/fstab     //comment out the boot-time mount in /etc/fstab
#192.168.2.31:/web_share/html /usr/local/nginx/html/ nfs defaults 0 0
[root@web2 ~]# vim /etc/fstab
#192.168.2.31:/web_share/html /usr/local/nginx/html/ nfs defaults 0 0
[root@web3 ~]# vim /etc/fstab
#192.168.2.31:/web_share/html /usr/local/nginx/html/ nfs defaults 0 0

2) Persistently mount the Ceph file system on the web servers (required on web1, web2, and web3)

① On any ceph node, e.g. node1, look up the ceph account name and key

[root@node1 ceph-cluster]# cat /etc/ceph/ceph.client.admin.keyring
[client.admin]
         key = AQB6r7BgH3uCFhAAOA2h9lK8D6N7huL0bBJmyA==

② Install the ceph-common client package

[root@web1 ~]# yum -y install ceph-common
[root@web2 ~]# yum -y install ceph-common
[root@web3 ~]# yum -y install ceph-common

③ Mount the Ceph file system

[root@web1 ~]# mount -t ceph 192.168.2.41:6789:/ /usr/local/nginx/html/ -o name=admin,secret=AQB6r7BgH3uCFhAAOA2h9lK8D6N7huL0bBJmyA==
[root@web1 ~]# echo 'mount -t ceph 192.168.2.41:6789:/ /usr/local/nginx/html/ -o name=admin,secret=AQB6r7BgH3uCFhAAOA2h9lK8D6N7huL0bBJmyA==' >> /etc/rc.local
[root@web1 ~]# chmod +x /etc/rc.local
 
[root@web2 ~]# mount -t ceph 192.168.2.41:6789:/ /usr/local/nginx/html/ -o name=admin,secret=AQB6r7BgH3uCFhAAOA2h9lK8D6N7huL0bBJmyA==
[root@web2 ~]# echo 'mount -t ceph 192.168.2.41:6789:/ /usr/local/nginx/html/ -o name=admin,secret=AQB6r7BgH3uCFhAAOA2h9lK8D6N7huL0bBJmyA==' >> /etc/rc.local
[root@web2 ~]# chmod +x /etc/rc.local
 
[root@web3 ~]# mount -t ceph 192.168.2.41:6789:/ /usr/local/nginx/html/ -o name=admin,secret=AQB6r7BgH3uCFhAAOA2h9lK8D6N7huL0bBJmyA==
[root@web3 ~]# echo 'mount -t ceph 192.168.2.41:6789:/ /usr/local/nginx/html/ -o name=admin,secret=AQB6r7BgH3uCFhAAOA2h9lK8D6N7huL0bBJmyA==' >> /etc/rc.local
[root@web3 ~]# chmod +x /etc/rc.local

Explanation: in the mount options, name= is the cephx account (client.admin here) and secret= is that account's key from /etc/ceph/ceph.client.admin.keyring; the chmod +x is needed because /etc/rc.local only runs at boot when it is executable.
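
Putting the key on the command line leaves it in the shell history; mount.ceph also accepts a secretfile option that reads the key from a file instead (a sketch; the file path is arbitrary):

[root@web1 ~]# echo 'AQB6r7BgH3uCFhAAOA2h9lK8D6N7huL0bBJmyA==' > /etc/ceph/admin.secret
[root@web1 ~]# chmod 600 /etc/ceph/admin.secret     //keep the key readable by root only
[root@web1 ~]# mount -t ceph 192.168.2.41:6789:/ /usr/local/nginx/html/ -o name=admin,secretfile=/etc/ceph/admin.secret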

Verify the Ceph file system:

[root@web1 ~]# df -h /usr/local/nginx/html
Filesystem           Size  Used Avail Use% Mounted on
192.168.2.41:6789:/   90G  208M   90G    1% /usr/local/nginx/html
[root@web2 ~]# df -h /usr/local/nginx/html
Filesystem           Size  Used Avail Use% Mounted on
192.168.2.41:6789:/   90G  208M   90G    1% /usr/local/nginx/html
[root@web3 ~]# df -h /usr/local/nginx/html/
Filesystem           Size  Used Avail Use% Mounted on
192.168.2.41:6789:/   90G  208M   90G    1% /usr/local/nginx/html

Supplement: an alternative approach, persistent mounting via fstab

Tip: to mount persistently via fstab, the client needs the additional libcephfs1 package.

[root@web1 ~]# yum -y install libcephfs1
[root@web1 ~]# vim /etc/fstab
… …
192.168.2.41:6789:/ /usr/local/nginx/html/    ceph   defaults,_netdev,name=admin,secret=AQCVcu9cWXkgKhAAWSa7qCFnFVbNCTB2DwGIOA== 0 0

Supplement: for high availability, multiple monitor IPs can be listed in a single mount

  • One-off command:
[root@web1 ~]# mount -t ceph  \
192.168.2.41:6789,192.168.2.42:6789,192.168.2.43:6789:/ /usr/local/nginx/html  \
-o name=admin,secret=<key>
  • Persistent change:
[root@web1 ~]# vim /etc/fstab
192.168.2.41:6789,192.168.2.42:6789,192.168.2.43:6789:/ /usr/local/nginx/html/ \
ceph defaults,_netdev,name=admin,secret=<key> 0 0

3) Migrate the data from the NFS server to Ceph storage

[root@nfs ~]# cd /web_share/
[root@nfs web_share]# tar -czpf /root/html.tar.gz ./*
[root@nfs web_share]# scp /root/html.tar.gz 192.168.2.11:/usr/local/nginx/html/
[root@web1 ~]# ls /usr/local/nginx/html
html.tar.gz
[root@web1 ~]# cd /usr/local/nginx/html/
[root@web1 html]# tar -xf html.tar.gz
[root@web1 ~]# ls /usr/local/nginx/html
50x.html     license.txt         wp-comments-post.php  wp-includes        wp-settings.php
a.html       readme.html         wp-config.php         wp-links-opml.php  wp-signup.php
html.tar.gz  wp-activate.php     wp-config-sample.php  wp-load.php        wp-trackback.php
index.html   wp-admin            wp-content            wp-login.php       xmlrpc.php
index.php    wp-blog-header.php  wp-cron.php          wp-mail.php
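
Because /usr/local/nginx/html is now a shared CephFS mount, the files extracted on web1 are immediately visible on web2 and web3 as well; a quick check plus optional cleanup of the archive (both hedged suggestions):

[root@web2 ~]# ls /usr/local/nginx/html | wc -l            //should list the same WordPress files as web1
[root@web1 ~]# rm -f /usr/local/nginx/html/html.tar.gz     //optional: remove the archive after extracting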

4) Restore the Nginx service on the web servers

[root@web1 ~]# systemctl start nginx.service
[root@web2 ~]# systemctl start nginx.service
[root@web3 ~]# systemctl start nginx.service
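
An end-to-end check now exercises the whole chain (DNS -> keepalived VIP -> HAProxy -> web servers -> CephFS). The client host is an assumption; any machine that uses 192.168.2.10 as its resolver works:

[root@client ~]# curl -sI http://www.lab.com/     //expect an HTTP 200 response from one of the web servers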


Extended knowledge (common interview question 1)

Extended knowledge (common interview question 2)

Extended knowledge (common interview question 3)

 

Summary:

This chapter contains the study notes for [Stage 3] PROJECT1-DAY3; it gives a first look at Keepalived high availability and deploying Ceph distributed storage.


Tip: two people's combined insight beats one person's. If you don't understand this chapter or need the related notes or videos, you can message Xiao'an; don't be shy or avoid it — ask others, and spend the time until you truly understand.
