
Ceph Storage Methods

Ceph Block Storage

Image snapshots
  • A snapshot preserves the state of an image's data at a specific point in time
  • A snapshot is a read-only logical copy of an image taken at that point in time
  • If you want to return to an earlier state, you can roll back to the snapshot
  • A combined example of working with images and snapshots follows

## Managing Ceph image snapshots

# Create an image
[root@client ~]# rbd ls
[root@client ~]# rbd create img1 --size 10G         #create img1 with a size of 10G
[root@client ~]# rbd ls
img1
[root@client ~]# rbd status img1
Watchers: none
[root@client ~]# rbd info img1
rbd image 'img1':
        size 10 GiB in 2560 objects
        ...

# Map the image locally, then format and mount it
[root@client ~]# rbd map img1                       #map the img1 image to a local device
/dev/rbd0
[root@client ~]# rbd status img1
Watchers:
        watcher=192.168.88.10:0/3111235769 client.14964 cookie=18446462598732840961
[root@client ~]# mkfs -t xfs /dev/rbd0              #format the rbd0 device
[root@client ~]# mount /dev/rbd0 /mnt/              #mount the rbd0 device on /mnt
[root@client ~]# df -hT | grep mnt
/dev/rbd0      xfs        10G  105M  9.9G   2% /mnt
[root@client ~]# cp /etc/hosts /etc/passwd /mnt/    #store some data
[root@client ~]# ls /mnt/
hosts  passwd
[root@client ~]# 

# Create a snapshot
[root@client ~]# rbd snap ls img1                       #list existing snapshots of img1
[root@client ~]# rbd snap create img1 --snap img1-sn1   #create a snapshot
Creating snap: 100% complete...done.
[root@client ~]# rbd snap ls img1                       #list img1's snapshots again
SNAPID  NAME      SIZE    PROTECTED  TIMESTAMP               
     4  img1-sn1  10 GiB             Tue Apr  2 15:55:00 2024
[root@client ~]# 

# Test rolling the data back from the snapshot
[root@client ~]# rm -rf /mnt/*                          #simulate accidental deletion of the data
[root@client ~]# ls /mnt/

[root@client ~]# umount /mnt                            #unmount rbd0; it must be unmounted before rolling back
[root@client ~]# rbd unmap img1                         #remove the mapping
[root@client ~]# rbd snap rollback img1 --snap img1-sn1 #roll back to the specified snapshot

[root@client ~]# rbd map img1                           #map the img1 image locally again
/dev/rbd0
[root@client ~]# mount /dev/rbd0 /mnt/                  #mount the image
[root@client ~]# ls /mnt/                               #confirm the data has been restored
hosts  passwd
[root@client ~]# 

# Protect the snapshot against accidental deletion
[root@client ~]# rbd snap protect img1 --snap img1-sn1  #protect the specified snapshot
[root@client ~]# rbd snap ls img1
SNAPID  NAME      SIZE    PROTECTED  TIMESTAMP               
     4  img1-sn1  10 GiB  yes        Tue Apr  2 15:55:00 2024
[root@client ~]# rbd snap rm img1 --snap img1-sn1       #deletion fails
2024-04-02T16:03:20.741+0800 7f22a13eb700 -1 librbd::Operations: snapshot is protected
Removing snap: 0% complete...failed.
rbd: snapshot 'img1-sn1' is protected from removal.
[root@client ~]# 

# Delete the snapshot and the image
[root@client ~]# rbd snap ls img1                           #list the image's snapshots
SNAPID  NAME      SIZE    PROTECTED  TIMESTAMP               
     4  img1-sn1  10 GiB  yes        Tue Apr  2 15:55:00 2024
[root@client ~]# rbd snap unprotect img1 --snap img1-sn1    #remove snapshot protection
[root@client ~]# rbd snap ls img1
SNAPID  NAME      SIZE    PROTECTED  TIMESTAMP               
     4  img1-sn1  10 GiB             Tue Apr  2 15:55:00 2024
[root@client ~]# rbd remove img1                            #deleting the image fails; its snapshots must be removed first
Removing image: 0% complete...failed.
rbd: image has snapshots - these must be deleted with 'rbd snap purge' before the image can be removed.
[root@client ~]# rbd snap rm img1 --snap img1-sn1           #delete the specified snapshot
Removing snap: 100% complete...done.
[root@client ~]# umount /dev/rbd0                           #unmount the device
[root@client ~]# rbd unmap img1                             #remove the mapping
[root@client ~]# rbd remove img1                            #delete the image
Removing image: 100% complete...done.
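
To recap, the snapshot lifecycle demonstrated above comes down to a handful of rbd snap subcommands. The list below is only a condensed reminder of the commands used in this section, plus rbd snap purge, which the error message above suggests for removing all of an image's snapshots at once:

rbd snap create img1 --snap img1-sn1     # take a snapshot
rbd snap ls img1                         # list snapshots
rbd snap rollback img1 --snap img1-sn1   # roll back (unmount and unmap the image first)
rbd snap protect img1 --snap img1-sn1    # protect a snapshot
rbd snap unprotect img1 --snap img1-sn1  # remove protection
rbd snap rm img1 --snap img1-sn1         # delete one snapshot
rbd snap purge img1                      # delete all (unprotected) snapshots of an image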

Cloning image snapshots
  • An image must not be mapped on multiple nodes at the same time; doing so will corrupt its data
  • If several nodes need data disks with identical content, use cloning
  • A clone is made from a snapshot; an image cannot be cloned directly
  • Only a protected snapshot can be cloned
  • Clone workflow (see the condensed sketch below)
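
The original article showed the clone workflow as a figure; as a stand-in, here is a minimal sketch of the same sequence, using the image and snapshot names from the walkthrough that follows:

rbd snap create img2 --snap img2-sn1        # 1. snapshot the source image
rbd snap protect img2 --snap img2-sn1       # 2. protect the snapshot
rbd clone img2 --snap img2-sn1 img2-sn1-1   # 3. clone the protected snapshot
rbd flatten img2-sn1-1                      # 4. (optional) make the clone independent of its parent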



## Giving multiple clients identical data disks

# Create an image
[root@client ~]# rbd ls
[root@client ~]# rbd create img2 --size 10G     #create the img2 image
[root@client ~]# rbd ls
img2
[root@client ~]# rbd info img2                  #show information about the img2 image
rbd image 'img2':
        size 10 GiB in 2560 objects
        ...
        
# Write data into the image
[root@client ~]# rbd map img2                   #map the img2 image to a local device
/dev/rbd0
[root@client ~]# mkfs.xfs /dev/rbd0             #format the rbd device
[root@client ~]# mount /dev/rbd0 /data/         #mount the rbd device
[root@client ~]# for i in {1..10}               #write some data
> do
> echo "hello world $i" > /data/file$i.txt
> done
[root@client ~]# ls /data/
file10.txt  file1.txt  file2.txt  file3.txt  file4.txt  file5.txt  file6.txt  file7.txt  file8.txt  file9.txt
[root@client ~]# umount /data                   #unmount the rbd device
[root@client ~]# rbd unmap img2                 #remove the rbd device mapping

# Create a snapshot
[root@client ~]# rbd snap ls img2
[root@client ~]# rbd snap create img2 --snap img2-sn1   #create the img2-sn1 snapshot
Creating snap: 100% complete...done.
[root@client ~]# rbd snap ls img2
SNAPID  NAME      SIZE    PROTECTED  TIMESTAMP               
     6  img2-sn1  10 GiB             Tue Apr  2 16:26:13 2024
[root@client ~]# 

# Protect the snapshot
[root@client ~]# rbd snap protect img2 --snap img2-sn1
[root@client ~]# rbd snap ls img2
SNAPID  NAME      SIZE    PROTECTED  TIMESTAMP               
     6  img2-sn1  10 GiB  yes        Tue Apr  2 16:26:13 2024
[root@client ~]# 

# Clone the image
[root@client ~]# rbd clone img2 --snap img2-sn1 img2-sn1-1  #create the clone img2-sn1-1
[root@client ~]# rbd clone img2 --snap img2-sn1 img2-sn1-2  #create the clone img2-sn1-2
[root@client ~]# rbd ls
img2
img2-sn1-1
img2-sn1-2
[root@client ~]# 

# Test the cloned images on the clients
[root@client ~]# rbd map img2-sn1-1
/dev/rbd0
[root@client ~]# mount /dev/rbd0 /data/     #mount directly, no formatting needed
[root@client ~]# ls /data/                  #inspect the data in the rbd image
file10.txt  file1.txt  file2.txt  file3.txt  file4.txt  file5.txt  file6.txt  file7.txt  file8.txt  file9.txt
[root@client ~]# 

[root@ceph1 ~]# rbd map img2-sn1-2
/dev/rbd0
[root@ceph1 ~]# mount /dev/rbd0 /mnt/       #mount directly, no formatting needed
[root@ceph1 ~]# ls /mnt/                    #inspect the data in the rbd image
file10.txt  file1.txt  file2.txt  file3.txt  file4.txt  file5.txt  file6.txt  file7.txt  file8.txt  file9.txt
[root@ceph1 ~]# 

# Inspect the snapshot and the cloned images
[root@client ~]# rbd info img2 --snap img2-sn1
rbd image 'img2':
        size 10 GiB in 2560 objects
        order 22 (4 MiB objects)
        snapshot_count: 1
        id: 3ad721c736ad
        block_name_prefix: rbd_data.3ad721c736ad
        format: 2
        features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
        op_features: 
        flags: 
        create_timestamp: Tue Apr  2 16:22:26 2024
        access_timestamp: Tue Apr  2 16:22:26 2024
        modify_timestamp: Tue Apr  2 16:22:26 2024
        protected: True             #a protected snapshot
[root@client ~]# rbd info img2-sn1-1
rbd image 'img2-sn1-1':
        size 10 GiB in 2560 objects
        order 22 (4 MiB objects)
        snapshot_count: 0
        id: 876e6f355f21
        block_name_prefix: rbd_data.876e6f355f21
        format: 2
        features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
        op_features: 
        flags: 
        create_timestamp: Tue Apr  2 16:27:23 2024
        access_timestamp: Tue Apr  2 16:27:23 2024
        modify_timestamp: Tue Apr  2 16:27:23 2024
        parent: rbd/img2@img2-sn1   #parent image and snapshot
        overlap: 10 GiB
[root@client ~]# rbd info img2-sn1-2
rbd image 'img2-sn1-2':
        size 10 GiB in 2560 objects
        order 22 (4 MiB objects)
        snapshot_count: 0
        id: 3b10254dc187
        block_name_prefix: rbd_data.3b10254dc187
        format: 2
        features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
        op_features: 
        flags: 
        create_timestamp: Tue Apr  2 16:27:26 2024
        access_timestamp: Tue Apr  2 16:27:26 2024
        modify_timestamp: Tue Apr  2 16:27:26 2024
        parent: rbd/img2@img2-sn1   #parent image and snapshot
        overlap: 10 GiB
[root@client ~]# 

# Flatten the clone (merge the parent's data into the child)
[root@client ~]# rbd flatten img2-sn1-2             #copy all of img2's data into the clone
Image flatten: 100% complete...done.
[root@client ~]# rbd info img2-sn1-2                #the parent information is now gone
rbd image 'img2-sn1-2':
        size 10 GiB in 2560 objects
        order 22 (4 MiB objects)
        snapshot_count: 0
        id: 3b10254dc187
        block_name_prefix: rbd_data.3b10254dc187
        format: 2
        features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
        op_features: 
        flags: 
        create_timestamp: Tue Apr  2 16:27:26 2024
        access_timestamp: Tue Apr  2 16:27:26 2024
        modify_timestamp: Tue Apr  2 16:27:26 2024
[root@client ~]# 

# Clean up images and snapshots
[root@client ~]# umount /data                               #unmount the rbd device
[root@client ~]# rbd unmap img2-sn1-1                       #remove the mapping
[root@client ~]# rbd rm img2-sn1-1                          #delete the clone image
[root@client ~]# 

[root@client ~]# rbd snap unprotect img2 --snap img2-sn1    #remove snapshot protection
[root@client ~]# rbd snap rm img2 --snap img2-sn1           #delete the snapshot
[root@client ~]# rbd rm img2                                #delete the image
[root@client ~]# 

[root@client ~]# rbd ls                                     #list images
img2-sn1-2      #now an independent image; deleting img2 has no effect on it
[root@ceph1 ~]# cat /mnt/file1.txt 
hello world 1

Auto-mounting images at boot


## Configuring an rbd device to map and mount automatically at boot

# Create a test image
[root@client ~]# rbd create img1 --size 10G
[root@client ~]# rbd map img1
/dev/rbd0
[root@client ~]# mkfs.xfs /dev/rbd0

# Configure the image to be mapped automatically at boot
[root@client ~]# vim /etc/ceph/rbdmap 
rbd/img1        id=admin,keyring=/etc/ceph/ceph.client.admin.keyring
[root@client ~]# systemctl enable rbdmap.service --now

# Configure the rbd device to be mounted automatically at boot
[root@client ~]# vim /etc/fstab 
/dev/rbd/rbd/img1   /data/  xfs    noauto 0 0

# Test that the rbd device is mapped and mounted automatically after a reboot
[root@client ~]# reboot

[root@client ~]# df -hT | grep data
/dev/rbd0      xfs        10G  105M  9.9G   2% /data
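
As a small optional check not shown in the original walkthrough, the mapping itself can also be verified after the reboot:

rbd showmapped     # should list img1 mapped to /dev/rbd0
findmnt /data      # should show /dev/rbd0 mounted on /data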

Ceph File Storage

File systems
  • A file system is the method and data structure an operating system uses to keep track of files on a storage device or partition
  • In other words, it is how files are organized on the storage device
  • The software component of an operating system responsible for managing and storing file information is called the file management system, or file system for short
Data and metadata
  • Data is the actual content of a file
  • Metadata is the additional information describing that data, such as owner, group, and permissions (see the short illustration below)
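
To make the distinction concrete, here is a quick illustration on any Linux host (a generic example; nothing Ceph-specific is assumed): the file's content is its data, while stat reports the metadata kept about it.

cat /etc/hosts      # the data: the file's content itself
stat /etc/hosts     # the metadata: size, owner, group, permissions, timestamps, inode
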
CephFS configuration and usage
  • Creating a CephFS file system


## In CephFS, data and metadata are stored in separate pools

# Create the pools (one metadata pool can serve multiple data pools)
[root@ceph1 ~]# ceph -s
  cluster:
    id:     2ca9f32a-f0a3-11ee-83c6-52540081f933
    health: HEALTH_OK
...
 
[root@ceph1 ~]# ceph osd pool ls                        #list existing pools
.mgr
rbd
[root@ceph1 ~]# ceph osd pool create data01 128         #create the data pool
pool 'data01' created
[root@ceph1 ~]# ceph osd pool create metadata01 128     #create the metadata pool
pool 'metadata01' created

# Create the file system
[root@ceph1 ~]# ceph fs new myfs01 metadata01 data01
new fs with metadata pool 4 and data pool 3
[root@ceph1 ~]# ceph fs ls
name: myfs01, metadata pool: metadata01, data pools: [data01 ]
[root@ceph1 ~]# 

# Enable the MDS component
[root@ceph1 ~]# ceph -s
  cluster:
    id:     2ca9f32a-f0a3-11ee-83c6-52540081f933
    health: HEALTH_ERR  #the ERR state is caused by the MDS not having been started yet
            1 filesystem is offline
            1 filesystem is online with fewer MDS than max_mds
    ...
    
[root@ceph1 ~]# ceph orch apply mds myfs01 --placement='3 ceph1 ceph2 ceph3'
[root@ceph1 ~]# ceph mds stat                   #check the MDS status
myfs01:1 {0=myfs01.ceph1.kbxnvr=up:active} 2 up:standby
[root@ceph1 ~]# ceph orch ps --daemon-type=mds  #shows 3 daemons
[root@ceph1 ~]# ceph -s
  cluster:
    id:     2ca9f32a-f0a3-11ee-83c6-52540081f933
    health: HEALTH_OK
    ...
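
As an extra optional check that is not part of the original transcript, the file system's MDS ranks and its pools can also be inspected with ceph fs status:

ceph fs status myfs01    # shows active/standby MDS daemons plus the metadata and data pools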

  • Mounting and using the CephFS file system from a client


## Mounting the CephFS file system

# Confirm the cephfs support packages are installed
[root@ceph1 ~]# rpm -qa | grep cephfs
python3-cephfs-17.2.5-0.el8.x86_64
libcephfs2-17.2.5-0.el8.x86_64      #package providing client-side cephfs support
[root@ceph1 ~]# rpm -ql libcephfs2 
/etc/ceph
/usr/lib/.build-id
/usr/lib/.build-id/41/6cc757a4911a0f804d12adfccc951a4168b210
/usr/lib64/libcephfs.so.2
/usr/lib64/libcephfs.so.2.0.0       #cephfs driver library
[root@ceph1 ~]#

# Linux has good native support for CephFS and can mount it directly; use the key you looked up yourself!
[root@client ~]# cat /etc/ceph/ceph.client.admin.keyring 
[client.admin]
        key = AQAVfwtmJmI/CRAAKg1mVOsRIHcTvQckllYZsA==
        ...
[root@client ~]# mkdir /mydata
[root@client ~]# mount -t ceph 192.168.88.11:/ /mydata \
> -o name=admin,secret=AQAVfwtmJmI/CRAAKg1mVOsRIHcTvQckllYZsA==
[root@client ~]# df -hT | grep ceph
192.168.88.11:/ ceph       57G     0   57G   0% /mydata
[root@client ~]# umount /mydata
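
A small aside, not in the original walkthrough: mount.ceph also accepts a secretfile= option, so the key does not have to appear on the command line or in the shell history. A minimal sketch, assuming the key is first saved to a file such as /etc/ceph/admin.secret (a name chosen here for illustration):

awk '/key/ {print $3}' /etc/ceph/ceph.client.admin.keyring > /etc/ceph/admin.secret
chmod 600 /etc/ceph/admin.secret
mount -t ceph 192.168.88.11:/ /mydata -o name=admin,secretfile=/etc/ceph/admin.secret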

  • Mounting multiple file systems (extended exercise; practice on your own)


## Testing mounts with multiple file systems

# Create a new file system
[root@ceph1 ~]# ceph osd  pool create data02 64
[root@ceph1 ~]# ceph osd  pool create metadata02 64
[root@ceph1 ~]# ceph fs new myfs02 metadata02 data02
new fs with metadata pool 6 and data pool 5
[root@ceph1 ~]# ceph fs ls
name: myfs01, metadata pool: metadata01, data pools: [data01 ]
name: myfs02, metadata pool: metadata02, data pools: [data02 ]
[root@ceph1 ~]# 

# Test from the client
[root@client ~]# ls /mydata/
[root@client ~]# mount -t ceph 192.168.88.11:/ /mydata -o name=admin,secret=AQAVfwtmJmI/CRAAKg1mVOsRIHcTvQckllYZsA==,fs=myfs01
[root@client ~]# cp /etc/hosts /mydata/
[root@client ~]# ls /mydata/
hosts
[root@client ~]# umount /mydata 

[root@client ~]# ls /mydata/
[root@client ~]# mount -t ceph 192.168.88.11:/ /mydata -o name=admin,secret=AQAVfwtmJmI/CRAAKg1mVOsRIHcTvQckllYZsA==,fs=myfs02
[root@client ~]# ls /mydata/
[root@client ~]# cp /etc/passwd /mydata/
[root@client ~]# ls /mydata/
passwd
[root@client ~]# umount /mydata 

Ceph Object Storage

Object storage
  • Concept
  • Object storage is an architecture for storing large amounts of unstructured data
  • It uses the plain HTTP or HTTPS protocol for access rather than a traditional file-system API
  • Unlike traditional file-system storage, object storage does not place data in directories or folders; each piece of data is stored as an independent object containing the data itself, metadata (attributes describing the data), and a unique identifier
  • Ceph object storage
  • RGW (RADOS Gateway)
  • RGW is the gateway for Ceph object storage
  • It presents the storage interface to client applications
  • It provides a RESTful API for access
Ceph RGW configuration and usage
  • Starting the RGW service


## Configuring RGW gateways on the cluster

[root@ceph1 ~]# ceph -s
  cluster:
    id:     2ca9f32a-f0a3-11ee-83c6-52540081f933
    health: HEALTH_OK
    ...
[root@ceph1 ~]# ceph orch apply \
> rgw myrgw --placement="3 ceph1 ceph2 ceph3" \
> --port 8080
Scheduled rgw.myrgw update...
[root@ceph1 ~]# ceph orch ls
NAME           PORTS        RUNNING  REFRESHED  AGE  PLACEMENT                  
alertmanager   ?:9093,9094      1/1  7m ago     6h   count:1                    
crash                           3/3  8m ago     6h   *                          
grafana        ?:3000           1/1  7m ago     6h   count:1                    
mds.myfs01                      3/3  8m ago     31m  ceph1;ceph2;ceph3;count:3  
mgr                             3/3  8m ago     3h   ceph1;ceph2;ceph3;count:3  
mon                             3/3  8m ago     3h   ceph1;ceph2;ceph3;count:3  
node-exporter  ?:9100           3/3  8m ago     6h   *                          
osd                               9  8m ago     -    <unmanaged>                
prometheus     ?:9095           1/1  7m ago     6h   count:1                    
rgw.myrgw      ?:8080           3/3  -          8s   ceph1;ceph2;ceph3;count:3  
[root@ceph1 ~]# ceph orch ps --daemon-type=rgw
NAME                    HOST   PORTS   STATUS        REFRESHED  AGE  MEM USE  MEM LIM  VERSION  IMAGE ID      CONTAINER ID  
rgw.myrgw.ceph1.crmbzg  ceph1  *:8080  running (4m)    48s ago   4m    92.5M        -  17.2.5   cc65afd6173a  e031e8b48d62  
rgw.myrgw.ceph2.egxwux  ceph2  *:8080  running (5m)    65s ago   4m    96.2M        -  17.2.5   cc65afd6173a  4e607d332229  
rgw.myrgw.ceph3.lzissq  ceph3  *:8080  running (3m)    66s ago   3m    97.1M        -  17.2.5   cc65afd6173a  9b7dd5baf2fc  
[root@ceph1 ~]# 
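
Once the gateways are running, a quick sanity check (optional, not shown in the original transcript) is to hit the RGW port anonymously; if RGW is up, it answers with an S3-style ListAllMyBucketsResult XML document:

curl http://ceph1:8080/    # expect a short XML response listing the (currently zero) buckets of the anonymous user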

  • Using RGW from a client
  • Ceph object storage provides an interface compatible with Amazon S3 (Amazon Simple Storage Service)
  • In S3, objects are stored in containers called buckets, much as local files are stored in directories

## Using the s3cmd or awscli tools on the client

# Create an object storage user
[root@client ~]# radosgw-admin user create \
> --uid=testuser --display-name="Test User" \
> --email=test@tedu.cn \
> --access-key=12345 --secret-key=67890
[root@client ~]# radosgw-admin user info --uid=testuser

# Install the Amazon S3 CLI tool
[root@client ~]# yum -y install awscli.noarch

# Configure the S3 CLI tool
[root@client ~]# aws configure --profile=ceph
AWS Access Key ID [None]: 12345         #enter the access_key
AWS Secret Access Key [None]: 67890     #enter the secret_key
Default region name [None]: #press Enter
Default output format [None]: #press Enter
[root@client ~]# cat /root/.aws/config 
[profile ceph]
[root@client ~]# cat /root/.aws/credentials 
[ceph]
aws_access_key_id = 12345
aws_secret_access_key = 67890
[root@client ~]# 

# Create a bucket
[root@client ~]# aws --profile=ceph --endpoint=http://ceph1:8080 s3 mb s3://testbucket
make_bucket: testbucket

# Test uploading and downloading a file
[root@client ~]# aws --profile=ceph --endpoint=http://ceph1:8080 --acl=public-read-write s3 cp /etc/hosts s3://testbucket/hosts.txt
upload: ../etc/hosts to s3://testbucket/hosts.txt    

[root@client ~]# aws --profile=ceph --endpoint=http://ceph1:8080 s3 ls s3://testbucket/
2024-04-02 18:05:34        321 hosts.txt

[root@client ~]# curl http://ceph1:8080/testbucket/hosts.txt
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
# BEGIN ANSIBLE MANAGED BLOCK
192.168.88.10 client
192.168.88.11 ceph1
192.168.88.12 ceph2
192.168.88.13 ceph3
192.168.88.240  quay.io
# END ANSIBLE MANAGED BLOCK

[root@client ~]# yum -y install wget
[root@client ~]# wget -O zhuji.txt http://ceph1:8080/testbucket/hosts.txt
[root@client ~]# cat zhuji.txt 
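
The heading above also mentions s3cmd as an alternative client. A minimal sketch of the equivalent steps with s3cmd, assuming the same testuser credentials and RGW endpoint (the exact ~/.s3cfg options can vary between s3cmd versions, and the bucket name testbucket2 is just an example):

yum -y install s3cmd                     # install the tool (package availability depends on your repos)
s3cmd --configure                        # interactive setup: enter access key 12345 and secret key 67890
# then point ~/.s3cfg at the RGW endpoint instead of Amazon:
#   host_base = ceph1:8080
#   host_bucket = ceph1:8080
#   use_https = False
s3cmd mb s3://testbucket2                # create a bucket
s3cmd put /etc/hosts s3://testbucket2/   # upload a file
s3cmd ls s3://testbucket2                # list the bucket's contents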

  • A Ceph object storage application (extended exercise; practice on your own)


## Using Ceph to store pictures
[root@server1 ~]# scp /etc/skel/Pictures/Wallpapers/desktop.jpg root@192.168.88.10:/root

[root@client ~]# ls desktop.jpg 
desktop.jpg
[root@client ~]# aws --profile=ceph --endpoint=http://ceph1:8080 --acl=public-read-write s3 cp /root/desktop.jpg s3://testbucket/desktop.jpg
upload: ./desktop.jpg to s3://testbucket/desktop.jpg             
[root@client ~]# 

[root@client ~]# yum -y install nginx
[root@client ~]# systemctl start nginx.service 
[root@client ~]# vim /usr/share/nginx/html/index.html 
<html lang="en">  
<head>  
    <meta charset="UTF-8">  
    <title>My simple page</title>  
</head>  
<body>  
    <h1>Testing Ceph object storage</h1>  
    <img src="http://192.168.88.11:8080/testbucket/desktop.jpg">
</body>  
</html>
[root@client ~]# 
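
To confirm that the page can actually pull the picture from the object store (a small optional check), the embedded image URL can be fetched directly; the object was uploaded with a public-read-write ACL, so anonymous access should succeed:

curl -I http://192.168.88.11:8080/testbucket/desktop.jpg    # expect an HTTP 200 response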


Ceph Dashboard

  • In a browser, open https://192.168.88.11:8443; the username is admin, and the password is the one set at installation time (123456)