IP | hostname | CPU | Memory | Disk | Role |
---|---|---|---|---|---|
192.168.187.156 | monitor1 | 2 cores | 2G | 20G | admin node, monitor node |
192.168.187.157 | monitor2 | 2 cores | 2G | 20G | monitor node |
192.168.187.158 | monitor3 | 2 cores | 2G | 20G | monitor node |
192.168.187.159 | osd1 | 2 cores | 2G | 20G | OSD node, i.e. object storage node |
192.168.187.160 | osd2 | 2 cores | 2G | 20G | OSD node, i.e. object storage node |
Run on every node (each hostnamectl command on its matching node):
hostnamectl set-hostname monitor1
hostnamectl set-hostname monitor2
hostnamectl set-hostname monitor3
hostnamectl set-hostname osd1
hostnamectl set-hostname osd2
cat >> /etc/hosts <<EOF
192.168.187.156 monitor1
192.168.187.157 monitor2
192.168.187.158 monitor3
192.168.187.159 osd1
192.168.187.160 osd2
EOF
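A quick sanity check that every hostname now resolves (run from any node; the names come from the table above):
# Ping each node once; any failure means /etc/hosts is wrong on this machine
for h in monitor1 monitor2 monitor3 osd1 osd2; do
  ping -c 1 -W 1 $h > /dev/null && echo "$h ok" || echo "$h FAILED"
done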
Run on monitor1:
cd
ssh-keygen -t rsa
Just press Enter at every prompt to accept the defaults.
ssh-copy-id -i .ssh/id_rsa.pub root@monitor2
ssh-copy-id -i .ssh/id_rsa.pub root@monitor3
ssh-copy-id -i .ssh/id_rsa.pub root@osd1
ssh-copy-id -i .ssh/id_rsa.pub root@osd2
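Once the keys are copied, passwordless SSH can be verified in one pass (a sketch using the node names above):
# Each line should print the remote hostname without asking for a password;
# BatchMode makes ssh fail instead of prompting if key auth is broken
for h in monitor2 monitor3 osd1 osd2; do
  ssh -o BatchMode=yes root@$h hostname
done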
yum install -y wget net-tools nfs-utils lrzsz gcc gcc-c++ make cmake libxml2-devel openssl-devel curl curl-devel unzip sudo ntp libaio-devel vim ncurses-devel autoconf automake zlib-devel python-devel epel-release openssh-server
yum install -y deltarpm
Run on all nodes:
# Stop the firewalld service
systemctl stop firewalld.service
# Disable the firewalld service at boot
systemctl disable firewalld.service
# Check its status
systemctl status firewalld.service
Unless you plan to use iptables, there is no need to install it; I did not install it here.
yum install -y iptables-services
# Stop the iptables service and disable it at boot
service iptables stop && systemctl disable iptables
# Check its status
service iptables status
# Sync time from a public NTP pool (all nodes)
ntpdate cn.pool.ntp.org
# Start ntpd and enable it at boot
systemctl start ntpd && systemctl enable ntpd
# On monitor2, monitor3, osd1 and osd2: sync against monitor1
ntpdate monitor1
crontab -e
# Resync against monitor1 at the top of every hour
0 */1 * * * /usr/sbin/ntpdate monitor1
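To check whether a node is actually in sync, ntpdate can query the offset without stepping the clock (run on any node other than monitor1):
# -q only queries; an offset near zero means the clock is in sync with monitor1
ntpdate -q monitor1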
yum install -y yum-utils
sudo yum-config-manager --add-repo https://dl.fedoraproject.org/pub/epel/7/x86_64/
sudo yum install --nogpgcheck -y epel-release
sudo rpm --import /etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7
sudo rm /etc/yum.repos.d/dl.fedoraproject.org*
vi /etc/yum.repos.d/ceph.repo
Contents:
[Ceph]
name=Ceph packages for $basearch
baseurl=http://mirrors.aliyun.com/ceph/rpm-jewel/el7/x86_64/
enabled=1
gpgcheck=0
type=rpm-md
gpgkey=https://mirrors.aliyun.com/ceph/keys/release.asc
priority=1
[Ceph-noarch]
name=Ceph noarch packages
baseurl=http://mirrors.aliyun.com/ceph/rpm-jewel/el7/noarch/
enabled=1
gpgcheck=0
type=rpm-md
gpgkey=https://mirrors.aliyun.com/ceph/keys/release.asc
priority=1
[ceph-source]
name=Ceph source packages
baseurl=http://mirrors.aliyun.com/ceph/rpm-jewel/el7/SRPMS/
enabled=1
gpgcheck=0
type=rpm-md
gpgkey=https://mirrors.aliyun.com/ceph/keys/release.asc
priority=1
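The repo file is needed on every node that will install Ceph packages; if you created it only on monitor1, one way to push it out is the following (a sketch, assuming the passwordless SSH set up earlier):
# Copy the Ceph repo definition to the remaining nodes
for h in monitor2 monitor3 osd1 osd2; do
  scp /etc/yum.repos.d/ceph.repo root@$h:/etc/yum.repos.d/
done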
yum update -y
yum install -y ceph-deploy
yum install -y yum-plugin-priorities
yum install -y ceph
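After installation, each node should report the jewel release that ceph.repo points at (jewel is the 10.2.x series):
# Print the installed Ceph version
ceph --version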
Run on monitor1:
# Create a directory to hold the configuration files that ceph-deploy generates
mkdir -p /root/ceph-deploy
# Enter the directory
cd /root/ceph-deploy
# Run ceph-deploy to generate the initial cluster configuration
ceph-deploy new monitor1 monitor2 monitor3
vi ceph.conf
Contents:
[global]
fsid = c9b9252e-b3e5-4ffb-84b5-3a5d68effefe
mon_initial_members = monitor1, monitor2, monitor3
mon_host = 192.168.187.156,192.168.187.157,192.168.187.158
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
# Added: lower the default replica count from 3 to 2 (there are only 2 OSDs)
osd pool default size = 2
Because this environment has only 2 OSDs and Ceph's default pool replica count is 3, it must be lowered to 2.
# Enter the directory
cd /root/ceph-deploy
ceph-deploy install monitor1 monitor2 monitor3 osd1 osd2
ceph-deploy mon create-initial
ll
You can see that a number of keyrings were generated:
ceph.bootstrap-mds.keyring # MDS bootstrap key
ceph.bootstrap-mgr.keyring
ceph.bootstrap-osd.keyring # OSD bootstrap key
ceph.bootstrap-rgw.keyring
ceph.client.admin.keyring # admin key
ceph.conf
ceph-deploy-ceph.log
ceph.mon.keyring
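At this stage the admin keyring has not been pushed to /etc/ceph yet, so to peek at the cluster right away you can point the ceph CLI at the files in the ceph-deploy directory (a minimal sketch):
# Query cluster status using the locally generated conf and admin keyring
ceph -c ceph.conf -k ceph.client.admin.keyring -s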
# On osd1 and osd2: create the OSD data directory (the same path is used on both nodes)
mkdir /var/local/osd1
chmod 777 /var/local/osd1/
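If you prefer to drive this from monitor1, the same two commands can be pushed over SSH (a sketch using the node names above):
# Create and open up the OSD data directory on both OSD nodes
for h in osd1 osd2; do
  ssh root@$h "mkdir -p /var/local/osd1 && chmod 777 /var/local/osd1"
done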
# Enter the directory
cd /root/ceph-deploy
ceph-deploy osd prepare osd1:/var/local/osd1
ceph-deploy osd prepare osd2:/var/local/osd1
# Enter the directory
cd /root/ceph-deploy
ceph-deploy osd activate osd1:/var/local/osd1
ceph-deploy osd activate osd2:/var/local/osd1
# Enter the directory
cd /root/ceph-deploy
# Copy the config file and admin keyring to the admin node and the Ceph nodes
ceph-deploy admin monitor1 monitor2 monitor3 osd1 osd2
# Make the keyring readable (run on every node)
chmod +r /etc/ceph/ceph.client.admin.keyring
ceph health
If it returns HEALTH_OK, the OSDs were added successfully and the cluster is ready for use.
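Besides ceph health, a couple of other read-only commands give a fuller picture:
# Overall cluster status: monitors, OSD count, PG states, capacity
ceph -s
# The CRUSH tree: confirms both OSDs are up and in
ceph osd tree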
# Create a pool named testpool with 256 placement groups
ceph osd pool create testpool 256
# List the existing pools
ceph osd lspools
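To confirm that the osd pool default size = 2 setting took effect, the pool's replica count and PG count can be read back:
# Should print "size: 2" and "pg_num: 256"
ceph osd pool get testpool size
ceph osd pool get testpool pg_num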
# Create an RBD image named myrbd in testpool, 10240 MB (10 GB) in size
rbd create testpool/myrbd --size 10240
# Disable image features the kernel RBD client does not support, otherwise mapping fails
rbd feature disable testpool/myrbd object-map fast-diff deep-flatten
# Map the image as a local block device
rbd map testpool/myrbd
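Before formatting, the mapping can be confirmed:
# Lists mapped RBD images and their local device names (e.g. /dev/rbd0)
rbd showmapped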
# Create a mount point
mkdir /mnt/firstrbd
# Format the device with an XFS filesystem
mkfs.xfs /dev/rbd0
# Mount it
mount /dev/rbd0 /mnt/firstrbd
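A quick write test confirms the block device works end to end; the commented-out pair is what you would use to tear the mapping down again:
# Verify the mount and do a small write
df -h /mnt/firstrbd
echo "hello ceph" > /mnt/firstrbd/test.txt && cat /mnt/firstrbd/test.txt
# To tear down later: unmount the filesystem, then unmap the RBD device
# umount /mnt/firstrbd
# rbd unmap /dev/rbd0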