- [root@ceph1 ~]# ll /dev/sda
- brw-rw---- 1 root disk 8, 0 Dec 12 13:15 /dev/sda
- # b indicates a block device
-
- [root@ceph1 ~]# ll /dev/tty
- crw-rw-rw- 1 root tty 5, 0 Dec 12 13:31 /dev/tty
- # c indicates a character device
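For reference, the major and minor numbers shown above (8, 0 for sda and 5, 0 for tty) come from the kernel's device registry, which can be inspected on any Linux host:
- [root@ceph1 ~]# cat /proc/devices # lists registered character and block devices with their major numbers
-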
- # List the pools. By default there is a single pool named .mgr with ID 1
- [ceph: root@ceph1 /]# ceph osd lspools
- 1 .mgr
-
- # Show detailed storage usage
- [ceph: root@ceph1 /]# ceph df
- --- RAW STORAGE ---
- CLASS SIZE AVAIL USED RAW USED %RAW USED
- hdd 180 GiB 180 GiB 187 MiB 187 MiB 0.10
- TOTAL 180 GiB 180 GiB 187 MiB 187 MiB 0.10
-
- --- POOLS ---
- POOL ID PGS STORED OBJECTS USED %USED MAX AVAIL
- .mgr 1 1 449 KiB 2 449 KiB 0 57 GiB
-
- # Check the replica count of the .mgr pool
- [ceph: root@ceph1 /]# ceph osd pool get .mgr size
- size: 3
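The same get/set interface can also change a pool's replica count; as a sketch with placeholder values:
- # Sketch only: <pool-name> and the value 2 are examples
- [ceph: root@ceph1 /]# ceph osd pool set <pool-name> size 2
-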
- # Run a listing without specifying a pool; it reports that the default pool named rbd does not exist
- [ceph: root@ceph1 /]# rbd ls
- rbd: error opening default pool 'rbd'
- Ensure that the default pool has been created or specify an alternate pool name.
- rbd: listing images failed: (2) No such file or directory
- # 1. Create a pool named rbd
- [ceph: root@ceph1 /]# ceph osd pool create rbd 100
- pool 'rbd' created
-
- # 2. Set the application type of the rbd pool to rbd (it could also be rgw or cephfs)
- # Syntax: ceph osd pool application enable <pool-name> <app-name>
- [ceph: root@ceph1 /]# ceph osd pool application enable rbd rbd
-
- # 3. Verify
- [ceph: root@ceph1 /]# ceph osd pool ls
- .mgr
- rbd
- [ceph: root@ceph1 /]# ceph df
- --- RAW STORAGE ---
- CLASS SIZE AVAIL USED RAW USED %RAW USED
- hdd 180 GiB 180 GiB 191 MiB 191 MiB 0.10
- TOTAL 180 GiB 180 GiB 191 MiB 191 MiB 0.10
-
- --- POOLS ---
- POOL ID PGS STORED OBJECTS USED %USED MAX AVAIL
- .mgr 1 1 897 KiB 2 2.6 MiB 0 57 GiB
- rbd 2 99 0 B 0 0 B 0 57 GiB
-
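Note that the PG count reported by ceph df (99 here) can differ from the 100 requested at creation: PG changes are applied gradually, and the pg_autoscaler module, enabled by default, may adjust the count later (which would explain why the rbd pool shows 32 PGs further below). Its view can be checked with:
- [ceph: root@ceph1 /]# ceph osd pool autoscale-status # shows current and target PG counts per pool
-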
- # 4. Run the command again. With no pool specified, rbd operates on the pool named rbd by default.
- [ceph: root@ceph1 /]# rbd ls # no output and no error
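If the images live in a pool with a different name, the pool can be selected explicitly:
- [ceph: root@ceph1 /]# rbd ls --pool rbd # -p/--pool selects the pool; here it is equivalent to the default
-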
- # 1. List the images in the rbd pool
- [ceph: root@ceph1 /]# rbd ls
-
- # 2. Create an image named img1, 10 GB in size
- [ceph: root@ceph1 /]# rbd create img1 --size 10G
-
- # 3. List the images in the pool
- [ceph: root@ceph1 /]# rbd ls
- img1
-
- # 4. Show the image details
- [ceph: root@ceph1 /]# rbd info img1
- rbd image 'img1':
- size 10 GiB in 2560 objects
- ...output omitted...
-
- # 5. Resize. The size is only a provisioned limit; space is not allocated up front, so the value can exceed the cluster's raw capacity (see the rbd du check below).
- [ceph: root@ceph1 /]# rbd resize img1 --size 200G
- Resizing image: 100% complete...done.
- [ceph: root@ceph1 /]# rbd info img1
- rbd image 'img1':
- size 200 GiB in 51200 objects
- ...output omitted...
-
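Thin provisioning can be confirmed with rbd du, which compares the provisioned size with the space actually consumed (exact figures will vary):
- [ceph: root@ceph1 /]# rbd du img1 # prints PROVISIONED and USED columns for the image
-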
- # 6. Delete the image
- [ceph: root@ceph1 /]# rbd rm img1
- Removing image: 100% complete...done.
Issues a client must address before it can use Ceph block storage:
- # 1. Copy all rpm packages from /linux-soft/s2/zzg/ceph_soft/cephclient-rpm/ to /var/ftp/rpms on pubserver
- # 2. Rebuild the yum repository
- [root@pubserver ~]# createrepo /var/ftp/rpms/
-
- # 3. Install the Ceph client packages
- [root@client1 ~]# yum install -y ceph-common
-
- # 4. Copy the configuration file and the keyring from ceph1 to the client
- [root@ceph1 ceph_soft]# scp /etc/ceph/ceph.client.admin.keyring /etc/ceph/ceph.conf 192.168.88.10:/etc/ceph/
-
- # 5. Verify on the client that Ceph can be operated
- [root@client1 ~]# rbd create img1 --size 10G
- [root@client1 ~]# rbd ls
- img1
- [root@client1 ~]# rbd info img1
- rbd image 'img1':
- size 10 GiB in 2560 objects
- ...output omitted...
-
- # 6. Map the Ceph image as a local disk
- [root@client1 ~]# rbd map img1
- /dev/rbd0 # rbd is a fixed prefix; 0 is the device number
-
- # 7. Check the result
- [root@client1 ~]# lsblk
- NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
- sda 8:0 0 60G 0 disk
- └─sda1 8:1 0 60G 0 part /
- sr0 11:0 1 10.5G 0 rom
- rbd0 253:0 0 10G 0 disk # rbd0 is backed by the Ceph image
-
- [root@client1 ~]# rbd showmapped # image img1 is mapped to the local disk rbd0
- id pool namespace image snap device
- 0 rbd img1 - /dev/rbd0
-
- # 8. Use it
- [root@client1 ~]# mkdir /data
- [root@client1 ~]# mkfs.xfs /dev/rbd0
- [root@client1 ~]# mount /dev/rbd0 /data/
- [root@client1 ~]# df -h /data/
- Filesystem Size Used Avail Use% Mounted on
- /dev/rbd0 10G 105M 9.9G 2% /data
- [root@client1 ~]# cp /etc/hosts /data/
- [root@client1 ~]# ls /data/
- hosts
- # Check the status of img1
- [root@client1 ~]# rbd status img1
-
- # Remove img1 with the following steps
- [root@client1 ~]# umount /dev/rbd0
- [root@client1 ~]# rbd unmap img1
- [root@client1 ~]# rbd rm img1
- Removing image: 100% complete...done.
- # 1. Create a 10 GB image named img1 in the rbd pool
- [root@client1 ~]# rbd --help # list subcommands
- [root@client1 ~]# rbd help create # help for the create subcommand
- [root@client1 ~]# rbd create img1 --size 10G
- [root@client1 ~]# rbd list
- img1
- [root@client1 ~]# rbd info img1
- rbd image 'img1':
- size 10 GiB in 2560 objects
- order 22 (4 MiB objects)
- snapshot_count: 0
- id: fa91208bfdaf
- block_name_prefix: rbd_data.fa91208bfdaf
- format: 2
- features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
- op_features:
- flags:
- create_timestamp: Sat Dec 17 10:44:17 2022
- access_timestamp: Sat Dec 17 10:44:17 2022
- modify_timestamp: Sat Dec 17 10:44:17 2022
-
-
- # 2. Use image img1 on the client and mount it at /mnt
- [root@client1 ~]# rbd list
- img1
- [root@client1 ~]# rbd map img1
- /dev/rbd0
- [root@client1 ~]# mkfs.xfs /dev/rbd0
- [root@client1 ~]# mount /dev/rbd0 /mnt/
- [root@client1 ~]# rbd showmapped
- id pool namespace image snap device
- 0 rbd img1 - /dev/rbd0
- [root@client1 ~]# df -h /mnt/
- Filesystem Size Used Avail Use% Mounted on
- /dev/rbd0 10G 105M 9.9G 2% /mnt
-
-
- # 3. Write data to /mnt
- [root@client1 ~]# cp /etc/hosts /mnt/
- [root@client1 ~]# cp /etc/passwd /mnt/
- [root@client1 ~]# ls /mnt/
- hosts passwd
-
- # 4. Create a snapshot of img1 named img1-sn1
- [root@client1 ~]# rbd snap create img1 --snap img1-sn1
- Creating snap: 100% complete...done.
- [root@client1 ~]# rbd snap ls img1
- SNAPID NAME SIZE PROTECTED TIMESTAMP
- 4 img1-sn1 10 GiB Sat Dec 17 10:46:07 2022
-
- # 5. Delete the data under /mnt/
- [root@client1 ~]# rm -f /mnt/*
-
- # 6. Restore the data from the snapshot
- [root@client1 ~]# umount /mnt/
- [root@client1 ~]# rbd unmap /dev/rbd0
- [root@client1 ~]# rbd help snap rollback # view the subcommand help
- # Roll img1 back to snapshot img1-sn1
- [root@client1 ~]# rbd snap rollback img1 --snap img1-sn1
- # Remount
- [root@client1 ~]# rbd map img1
- /dev/rbd0
- [root@client1 ~]# mount /dev/rbd0 /mnt/
- [root@client1 ~]# ls /mnt/ # the data has been restored
- hosts passwd
- [root@client1 ~]# rbd help snap protect
- # Protect snapshot img1-sn1 of image img1
- [root@client1 ~]# rbd snap protect img1 --snap img1-sn1
- [root@client1 ~]# rbd snap rm img1 --snap img1-sn1 # fails: a protected snapshot cannot be deleted
- # 1. Remove the protection from the snapshot
- [root@client1 ~]# rbd snap unprotect img1 --snap img1-sn1
-
- # 2. Delete the snapshot
- [root@client1 ~]# rbd snap rm img1 --snap img1-sn1
-
- # 3. Unmount the block device
- [root@client1 ~]# umount /dev/rbd0
-
- # 4. Unmap the image
- [root@client1 ~]# rbd unmap img1
-
- # 5. Delete the image
- [root@client1 ~]# rbd rm img1
- # 1. Create an image named img2, 10 GB in size
- [root@client1 ~]# rbd create img2 --size 10G
-
- # 2. Write data to the image
- [root@client1 ~]# rbd map img2
- /dev/rbd0
- [root@client1 ~]# mkfs.xfs /dev/rbd0
- [root@client1 ~]# mount /dev/rbd0 /mnt/
- [root@client1 ~]# for i in {1..20}
- > do
- > echo "Hello World $i" > /mnt/file$i.txt
- > done
- [root@client1 ~]# ls /mnt/
- file10.txt file15.txt file1.txt file5.txt
- file11.txt file16.txt file20.txt file6.txt
- file12.txt file17.txt file2.txt file7.txt
- file13.txt file18.txt file3.txt file8.txt
- file14.txt file19.txt file4.txt file9.txt
-
- # 3. Unmount and unmap the image
- [root@client1 ~]# umount /mnt/
- [root@client1 ~]# rbd unmap img2
-
- # 4. Create a snapshot of img2 named img2-sn1
- [root@client1 ~]# rbd snap create img2 --snap img2-sn1
-
- # 5. Protect the img2-sn1 snapshot
- [root@client1 ~]# rbd snap protect img2 --snap img2-sn1
-
- # 6. Create clone images from the protected snapshot img2-sn1
- [root@client1 ~]# rbd clone img2 --snap img2-sn1 img2-sn1-1
- [root@client1 ~]# rbd clone img2 --snap img2-sn1 img2-sn1-2
- # 7. List the cloned images that were created
- [root@client1 ~]# rbd ls
- img2
- img2-sn1-1
- img2-sn1-2
-
- # 8. Different clients map different clone images and see the same data
- [root@client1 ~]# rbd map img2-sn1-1
- /dev/rbd0
- [root@client1 ~]# mkdir /data
- [root@client1 ~]# mount /dev/rbd0 /data
- [root@client1 ~]# ls /data
- file10.txt file15.txt file1.txt file5.txt
- file11.txt file16.txt file20.txt file6.txt
- file12.txt file17.txt file2.txt file7.txt
- file13.txt file18.txt file3.txt file8.txt
- file14.txt file19.txt file4.txt file9.txt
-
- [root@ceph1 ~]# yum install -y ceph-common
- [root@ceph1 ~]# rbd map img2-sn1-2
- /dev/rbd0
- [root@ceph1 ~]# mkdir /data
- [root@ceph1 ~]# mount /dev/rbd0 /data/
- [root@ceph1 ~]# ls /data/
- file10.txt file15.txt file1.txt file5.txt
- file11.txt file16.txt file20.txt file6.txt
- file12.txt file17.txt file2.txt file7.txt
- file13.txt file18.txt file3.txt file8.txt
- file14.txt file19.txt file4.txt file9.txt
- # Show the snapshot information
- [root@client1 ~]# rbd info img2 --snap img2-sn1
- rbd image 'img2':
- size 10 GiB in 2560 objects
- order 22 (4 MiB objects)
- snapshot_count: 1
- id: d46eed84bb61
- block_name_prefix: rbd_data.d46eed84bb61
- format: 2
- features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
- op_features:
- flags:
- create_timestamp: Sat Dec 17 10:58:05 2022
- access_timestamp: Sat Dec 17 10:58:05 2022
- modify_timestamp: Sat Dec 17 10:58:05 2022
- protected: True # the snapshot is protected
-
- # Inspect the cloned image
- [root@client1 ~]# rbd info img2-sn1-2
- rbd image 'img2-sn1-2':
- size 10 GiB in 2560 objects
- order 22 (4 MiB objects)
- snapshot_count: 0
- id: d48fe3d6559e
- block_name_prefix: rbd_data.d48fe3d6559e
- format: 2
- features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
- op_features:
- flags:
- create_timestamp: Sat Dec 17 10:59:53 2022
- access_timestamp: Sat Dec 17 10:59:53 2022
- modify_timestamp: Sat Dec 17 10:59:53 2022
- parent: rbd/img2@img2-sn1 # the parent is snapshot img2-sn1 of image img2 in the rbd pool
- overlap: 10 GiB
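To go the other direction, from a snapshot to its clones, the children of the snapshot can be listed; a sketch:
- [root@client1 ~]# rbd children img2 --snap img2-sn1 # expected to list img2-sn1-1 and img2-sn1-2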
Merging the parent and child images
- # Merge the data of img2 into the child image img2-sn1-2
- [root@client1 ~]# rbd flatten img2-sn1-2
- # Check its status; it no longer has a parent image
- [root@client1 ~]# rbd info img2-sn1-2
- rbd image 'img2-sn1-2':
- size 10 GiB in 2560 objects
- order 22 (4 MiB objects)
- snapshot_count: 0
- id: d48fe3d6559e
- block_name_prefix: rbd_data.d48fe3d6559e
- format: 2
- features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
- op_features:
- flags:
- create_timestamp: Sat Dec 17 10:59:53 2022
- access_timestamp: Sat Dec 17 10:59:53 2022
- modify_timestamp: Sat Dec 17 10:59:53 2022
-
- # Delete the parent image. If an image is still in use, release it (unmount and unmap) first
- [root@client1 ~]# umount /data/
- [root@client1 ~]# rbd unmap img2-sn1-1
- # 1. Delete image img2-sn1-1
- [root@client1 ~]# rbd rm img2-sn1-1
- # 2. Remove the protection from img2-sn1
- [root@client1 ~]# rbd snap unprotect img2 --snap img2-sn1
- # 3. Delete the img2-sn1 snapshot
- [root@client1 ~]# rbd snap rm img2 --snap img2-sn1
- # 4. Delete img2
- [root@client1 ~]# rbd rm img2
-
- # Because img2-sn1-2 is now an independent image, it can still be used
- # The copy in use on ceph1 is unaffected
- [root@ceph1 ~]# cat /data/file1.txt
- Hello World 1
- # 1. Prepare the image
- [root@client1 ~]# rbd create img1 --size 10G
- [root@client1 ~]# rbd map img1
- /dev/rbd0
- [root@client1 ~]# mkfs.xfs /dev/rbd0
-
- # 2. Configure automatic mapping and mounting at boot
- [root@client1 ~]# vim /etc/ceph/rbdmap # specify the image to map plus the user name and keyring
- rbd/img1 id=admin,keyring=/etc/ceph/ceph.client.admin.keyring
- [root@client1 ~]# vim /etc/fstab # append the line below
- /dev/rbd/rbd/img1 /data xfs noauto 0 0
- # noauto means the mount is performed only after the rbdmap service has started
-
- # 3. Enable and start the rbdmap service
- [root@client1 ~]# systemctl enable rbdmap --now
-
- # 4. Check the result after rebooting
- [root@client1 ~]# df -h /data/
- Filesystem Size Used Avail Use% Mounted on
- /dev/rbd0 10G 105M 9.9G 2% /data
Metadata are the attributes that describe data, such as owner, group, and permissions (a quick illustration follows).
In a Ceph file system, data and metadata are stored separately.
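As an illustration of file metadata, stat on any Linux host prints exactly these attributes (the path below is just an example):
- [root@client1 ~]# stat /etc/hosts # shows size, permissions, Uid/Gid and timestamps, i.e. the file's metadata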
Create the pools
Create the Ceph file system
- # 1. Create a pool named data01 to hold file data, with 100 PGs
- [root@client1 ~]# ceph osd pool create data01 100
-
- # 2. Create a pool named metadata01 to hold the metadata
- [root@client1 ~]# ceph osd pool create metadata01 100
-
- # 3. Create a CephFS named myfs01; its data is stored in data01 and its metadata in metadata01
- [root@client1 ~]# ceph fs new myfs01 metadata01 data01
-
- # 4. List the pools
- [root@client1 ~]# ceph osd lspools
- 1 .mgr
- 2 rbd
- 3 data01
- 4 metadata01
- [root@client1 ~]# ceph df
- --- RAW STORAGE ---
- CLASS SIZE AVAIL USED RAW USED %RAW USED
- hdd 180 GiB 180 GiB 206 MiB 206 MiB 0.11
- TOTAL 180 GiB 180 GiB 206 MiB 206 MiB 0.11
-
- --- POOLS ---
- POOL ID PGS STORED OBJECTS USED %USED MAX AVAIL
- .mgr 1 1 449 KiB 2 1.3 MiB 0 57 GiB
- rbd 2 32 7.1 MiB 43 22 MiB 0.01 57 GiB
- data01 3 94 0 B 0 0 B 0 57 GiB
- metadata01 4 94 0 B 0 0 B 0 57 GiB
-
- # 5. List the file systems
- [root@client1 ~]# ceph fs ls
- name: myfs01, metadata pool: metadata01, data pools: [data01 ]
-
- # 6. Deploy the MDS service
- [root@client1 ~]# ceph orch apply mds myfs01 --placement="2 ceph1 ceph2"
-
- # 7. Check the deployment
- [root@client1 ~]# ceph -s
- cluster:
- id: a4b69ab4-79dd-11ed-ae7b-000c2953b002
- health: HEALTH_OK
-
- services:
- mon: 3 daemons, quorum ceph1,ceph3,ceph2 (age 92m)
- mgr: ceph1.gmqorm(active, since 92m), standbys: ceph3.giqaph
- mds: 1/1 daemons up, 1 standby # MDS service information
- osd: 9 osds: 9 up (since 92m), 9 in (since 4d)
- ...output omitted...
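The orchestrator can also report where the MDS daemons were placed:
- [root@client1 ~]# ceph orch ps --daemon-type mds # lists the mds daemons and the hosts they run on
-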
- # Mounting the file system requires the secret key. Look it up
- [root@client1 ~]# cat /etc/ceph/ceph.client.admin.keyring
- [client.admin]
- key = AQBmhINh1IZjHBAAvgk8m/FhyLiH4DCCrnrdPQ==
-
- # -t specifies the file system type; -o supplies options, here the user name and the secret key
- [root@client1 ~]# mkdir /mydata
- [root@client1 ~]# mount.ceph 192.168.88.13:/ /mydata -o name=admin,secret=AQC5u5ZjnTA1ERAAruLAI8F1W1nyOgxZSx0UXw==
- [root@client1 ~]# df -h /mydata/
- Filesystem Size Used Avail Use% Mounted on
- 192.168.88.13:/ 57G 0 57G 0% /mydata
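If the CephFS mount should survive a reboot, a common approach is an fstab entry for the kernel client; the secretfile path below is an assumption, and that file must contain only the key string:
- # Sketch: append to /etc/fstab (the secretfile path is hypothetical)
- 192.168.88.13:/ /mydata ceph name=admin,secretfile=/etc/ceph/admin.secret,_netdev 0 0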
- # 1. Deploy the rgw service named myrgw on ceph1 and ceph2
- [root@client1 ~]# ceph orch apply rgw myrgw --placement="2 ceph1 ceph2" --port 8080
-
- [root@client1 ~]# ceph -s
- cluster:
- id: a4b69ab4-79dd-11ed-ae7b-000c2953b002
- health: HEALTH_OK
-
- services:
- mon: 3 daemons, quorum ceph1,ceph3,ceph2 (age 101m)
- mgr: ceph1.gmqorm(active, since 6h), standbys: ceph3.giqaph
- mds: 1/1 daemons up, 1 standby
- osd: 9 osds: 9 up (since 6h), 9 in (since 5d); 1 remapped pgs
- rgw: 2 daemons active (2 hosts, 1 zones) # rgw information
- ...output omitted...
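Before configuring an S3 client, a quick reachability check against one of the gateways can be done with curl; an anonymous request to the gateway root normally returns a short ListAllMyBuckets XML document:
- [root@client1 ~]# curl http://192.168.88.11:8080 # 192.168.88.11 is ceph1; expect an XML response from radosgw
-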
- # 1. Install the Amazon S3 CLI tool (client-side tool)
- [root@client1 ~]# yum install -y awscli
-
- # 2. Create a user in Ceph
- [root@client1 ~]# radosgw-admin user create --uid=testuser --display-name="Test User" --email=test@tedu.cn --access-key=12345 --secret=67890
-
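The access and secret keys of an existing user can be looked up again at any time:
- [root@client1 ~]# radosgw-admin user info --uid=testuser # prints the user's keys and quotas in JSON
-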
- # 3. Initialize the client
- [root@client1 ~]# aws configure --profile=ceph
- AWS Access Key ID [None]: 12345
- AWS Secret Access Key [None]: 67890
- Default region name [None]: # press Enter
- Default output format [None]: # press Enter
-
- # 4. Create a bucket named testbucket to store data
- [root@client1 ~]# vim /etc/hosts # add the following entries
- 192.168.88.11 ceph1
- 192.168.88.12 ceph2
- 192.168.88.13 ceph3
- [root@client1 ~]# aws --profile=ceph --endpoint=http://ceph1:8080 s3 mb s3://testbucket
-
- # 5. Upload a file
- [root@client1 ~]# aws --profile=ceph --endpoint=http://ceph1:8080 --acl=public-read-write s3 cp /etc/hosts s3://testbucket/hosts.txt
-
- # 6. List the objects in the bucket
- [root@client1 ~]# aws --profile=ceph --endpoint=http://ceph1:8080 s3 ls s3://testbucket
- 2022-12-17 17:05:58 241 hosts.txt
-
- # 7. Download the data
- [root@client1 ~]# wget -O zhuji http://ceph1:8080/testbucket/hosts.txt
Access https://192.168.88.11:8443; the user name is admin and the password is the 123456 specified during installation.