Integrating the Latest Ceph Luminous with OpenStack Pike

Installing Ceph Luminous

Lab environment

Three servers, each with four disks. On every server the first disk holds the operating system and the remaining three disks are used for Ceph.
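A quick way to confirm this layout on each node before deploying (a hedged check; it assumes the system disk is /dev/sda and the spare disks are /dev/sdb, /dev/sdc and /dev/sdd, so adjust the device names if yours differ):

lsblk                                          # the three spare disks should have no partitions or mountpoints
wipefs --no-act /dev/sdb /dev/sdc /dev/sdd     # dry run: lists any leftover filesystem signatures without erasing them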

1. Run on all servers

# Use the Aliyun mirrors

yum install wget -y

rm -f /etc/yum.repos.d/*

wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo

wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo

sed -i '/aliyuncs.com/d' /etc/yum.repos.d/*.repo # drop the Aliyun internal-network mirror entries

# Create the Ceph repo

echo '# Aliyun Ceph repo

[ceph]

name=ceph

baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/x86_64/

gpgcheck=0

[ceph-noarch]

name=cephnoarch

baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/noarch/

gpgcheck=0

[ceph-source]

name=ceph-source

baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/SRPMS/

gpgcheck=0

#'>/etc/yum.repos.d/ceph.repo

yum clean all && yum makecache # rebuild the yum cache

# Disable the firewall and SELinux

systemctl stop firewalld.service

systemctl disable firewalld.service

firewall-cmd --state

sed -i '/^SELINUX=.*/c SELINUX=disabled' /etc/selinux/config

grep --color=auto '^SELINUX' /etc/selinux/config

setenforce 0

2. Run only on node1

The three servers are node1, node2 and node3. We install Ceph's official deployment tool (ceph-deploy) on node1, so the entire deployment can be driven from node1 alone.

# Add host entries

echo '

10.0.0.11    node1

10.0.0.22    node2

10.0.0.33    node3

10.0.0.44    node4

10.0.0.55    node5

10.0.0.66    node6

'>>/etc/hosts

# Create an SSH key and copy it to the other nodes

ssh-keygen

ssh-copy-id node2

ssh-copy-id node3

# Push node1's hosts file to the other servers

scp /etc/hosts node2:/etc

scp /etc/hosts node3:/etc
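Before running ceph-deploy, it is worth a quick check that name resolution and passwordless SSH both work from node1 (optional sanity check):

for n in node2 node3; do ssh $n hostname; done   # should print node2 and node3 without asking for a password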

3. Install Ceph from node1

# Install the ceph-deploy tool and its Python dependency

yum install -y ceph-deploy

yum install -y python-setuptools

# Create the configuration directory

mkdir /etc/ceph

cd /etc/ceph/

# Initialize the monitor configuration

ceph-deploy new node1

# Set the default replica count to 2 and adjust the journal size and PG defaults

# Configure the public/cluster networks (skip this if there is only one NIC)

echo '

public network = 10.0.0.0/24

cluster network = 10.0.0.0/24

mon_clock_drift_allowed = 2    

osd_journal_size = 4086

osd_pool_default_pg_num = 128

osd_pool_default_pgp_num = 128

osd pool default size = 2

osd pool default min size = 1

rbd_default_features = 1

client_quota = true

'>>./ceph.conf
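The pg_num/pgp_num value of 128 follows the usual rule of thumb of roughly 100 PGs per OSD divided by the replica count, spread across the planned pools; a rough sketch of the arithmetic for this 9-OSD, 2-replica cluster:

# (OSDs * 100) / replicas, split over the pools, rounded down to a power of two
OSDS=9; REPLICAS=2; POOLS=3
echo $(( OSDS * 100 / REPLICAS / POOLS ))   # ≈ 150, so 128 is the nearest power of two that does not overshoot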

# Install Ceph

ceph-deploy install node1 node2 node3

#yum install -y ceph ceph-radosgw # ceph-deploy install effectively installs these two RPMs

# Initialize the monitors and gather the keys

cd /etc/ceph/

ceph-deploy --overwrite-conf mon create-initial

# Create the OSDs

ceph-deploy osd create --data /dev/sdb node1

ceph-deploy osd create --data /dev/sdc node1

ceph-deploy osd create --data /dev/sdd node1

ceph-deploy osd create --data /dev/sdb node2

ceph-deploy osd create --data /dev/sdc node2

ceph-deploy osd create --data /dev/sdd node2

ceph-deploy osd create --data /dev/sdb node3

ceph-deploy osd create --data /dev/sdc node3

ceph-deploy osd create --data /dev/sdd node3
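The nine commands above can equally be written as a loop (same node names and device paths assumed):

for node in node1 node2 node3; do
  for dev in /dev/sdb /dev/sdc /dev/sdd; do
    ceph-deploy osd create --data $dev $node
  done
done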

# Push the config and admin keyring to all nodes

ceph-deploy admin node1 node2 node3

chmod 644 /etc/ceph/ceph.client.admin.keyring

# (Re)create the mon on node1 and push the admin config

ceph-deploy --overwrite-conf mon create node1

ceph-deploy --overwrite-conf admin node1

# Add a mgr daemon

# Since Ceph 12 (Luminous), every cluster needs at least one mgr alongside the monitors

ceph-deploy mgr create node1:mon_mgr

# Enable the dashboard module (run on node1)

ceph mgr module enable dashboard

Browse to http://10.0.0.11:7000 to view the cluster status in the dashboard.
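By default the Luminous dashboard listens on port 7000 on all addresses; if you need to pin it to a specific address or port, the mgr config-key settings below should do it (10.0.0.11 is this lab's node1, adjust to your environment):

ceph config-key set mgr/dashboard/server_addr 10.0.0.11
ceph config-key set mgr/dashboard/server_port 7000
ceph mgr module disable dashboard && ceph mgr module enable dashboard   # reload the module to pick up the change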

# Useful status commands

ceph health

ceph -s

ceph osd tree

ceph df

ceph mon stat

ceph osd stat

ceph pg stat

ceph osd lspools

ceph auth list

Integrating Ceph Luminous with OpenStack Pike

Environment: the OpenStack Pike controller, compute and cinder nodes are already installed and working.

node4 is the controller

node5 runs nova-compute and cinder

1. Environment preparation (run on all OpenStack hosts)

# Add host entries on every OpenStack node

echo '

10.0.0.11    node1

10.0.0.22    node2

10.0.0.33    node3

10.0.0.44    node4

10.0.0.55    node5

10.0.0.66    node6

'>>/etc/hosts

# Create the Ceph repo

echo '# Aliyun Ceph repo

[ceph]

name=ceph

baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/x86_64/

gpgcheck=0

[ceph-noarch]

name=cephnoarch

baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/noarch/

gpgcheck=0

[ceph-source]

name=ceph-source

baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/SRPMS/

gpgcheck=0

#'>/etc/yum.repos.d/ceph.repo

yum clean all && yum makecache # rebuild the yum cache

# Disable the firewall and SELinux

systemctl stop firewalld.service

systemctl disable firewalld.service

firewall-cmd --state

sed -i '/^SELINUX=.*/c SELINUX=disabled' /etc/selinux/config

grep --color=auto '^SELINUX' /etc/selinux/config

setenforce 0

# On the node running glance, install the Ceph Python bindings

yum install -y python-rbd

# On the nodes running nova and cinder, install the Ceph client packages

yum install -y ceph-common

# Create the pools (run the ceph and rbd commands below on the Ceph admin node, node1)

ceph osd pool create volumes 128

ceph osd pool create images 128

ceph osd pool create vms 128

# Initialize the pools for RBD

rbd pool init volumes

rbd pool init images

rbd pool init vms
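rbd pool init also tags each pool with the rbd application, which Luminous expects in order to avoid a health warning; a quick check that the pools are ready:

ceph osd lspools                        # expect volumes, images and vms in the list
ceph osd pool application get volumes   # expect the rbd application to be enabled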

# Push the Ceph config file to each OpenStack node

ssh node4 tee /etc/ceph/ceph.conf </etc/ceph/ceph.conf

ssh node5 tee /etc/ceph/ceph.conf </etc/ceph/ceph.conf

# Create the Ceph users and their keys

ceph auth get-or-create client.glance mon 'profile rbd' osd 'profile rbd pool=images'

ceph auth get-or-create client.cinder mon 'profile rbd' osd 'profile rbd pool=volumes, profile rbd pool=vms, profile rbd-read-only pool=images'

ceph auth get-or-create client.cinder-backup mon 'profile rbd' osd 'profile rbd pool=backups'

# Export the keys and write the keyring files onto the OpenStack nodes

ceph auth get-or-create client.glance | ssh node4  tee /etc/ceph/ceph.client.glance.keyring

ssh node4 chown glance:glance /etc/ceph/ceph.client.glance.keyring

ceph auth get-or-create client.cinder | ssh node5  tee /etc/ceph/ceph.client.cinder.keyring

ssh node5  chown cinder:cinder /etc/ceph/ceph.client.cinder.keyring

ceph auth get-or-create client.cinder-backup | ssh node5  tee /etc/ceph/ceph.client.cinder-backup.keyring

ssh node5  chown cinder:cinder /etc/ceph/ceph.client.cinder-backup.keyring

ceph auth get-key client.cinder | ssh node5 tee client.cinder.key
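A quick check that the users were created with the expected capabilities and that the keyrings landed on the right nodes:

ceph auth get client.glance   # shows the key and its mon/osd caps
ssh node4 ls -l /etc/ceph/ceph.client.glance.keyring
ssh node5 ls -l /etc/ceph/ceph.client.cinder.keyring /etc/ceph/ceph.client.cinder-backup.keyring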

Integrating Ceph with Glance

# Change the Glance default store to Ceph

# Edit /etc/glance/glance-api.conf and add the following

[DEFAULT]

default_store = rbd

show_image_direct_url = True

[glance_store]

#stores = file,http

#default_store = file

#filesystem_store_datadir = /var/lib/glance/images/

stores = rbd

default_store = rbd

rbd_store_pool = images

rbd_store_user = glance

rbd_store_ceph_conf = /etc/ceph/ceph.conf

rbd_store_chunk_size = 8

# Restart the Glance services

systemctl restart openstack-glance-api openstack-glance-registry

Note: on the glance node, check that the glance keyring is present under /etc/ceph.
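To verify the integration, upload a small test image and check that it lands in the images pool (raw format is recommended for Ceph-backed Glance; cirros.raw and cirros-ceph below are only placeholder names):

openstack image create --disk-format raw --container-format bare --file cirros.raw cirros-ceph
rbd ls -p images --id glance   # the new image's UUID should appear here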

Integrating Ceph with Nova (run on the compute node)

# Add the cinder key to libvirt as a secret

uuidgen # generate a UUID; the example below uses ff2e1190-30f5-4849-9c1c-886b1e1ee181, substitute your own

cat > secret.xml <<EOF
<secret ephemeral='no' private='no'>
  <uuid>ff2e1190-30f5-4849-9c1c-886b1e1ee181</uuid>
  <usage type='ceph'>
    <name>client.cinder secret</name>
  </usage>
</secret>
EOF

virsh secret-define --file secret.xml

virsh secret-set-value --secret ff2e1190-30f5-4849-9c1c-886b1e1ee181 --base64 $(cat client.cinder.key) && rm client.cinder.key secret.xml

virsh secret-list

UUID                                  Usage

--------------------------------------------------------------------------------

ff2e1190-30f5-4849-9c1c-886b1e1ee181  ceph client.cinder secret

Add the following to the Ceph configuration file on the compute node

ls -l /etc/ceph/

# Append client-side settings to ceph.conf

echo '

[client]

rbd cache = true

rbd cache writethrough until flush = true

admin socket = /var/run/ceph/guests/$cluster-$type.$id.$pid.$cctid.asok

log file = /var/log/qemu/qemu-guest-$pid.log

rbd concurrent management ops = 20

[client.cinder]

keyring = /etc/ceph/ceph.client.cinder.keyring

'>>/etc/ceph/ceph.conf

mkdir -p /var/run/ceph/guests/ /var/log/qemu/

chown qemu:libvirt /var/run/ceph/guests /var/log/qemu/
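/var/run is a tmpfs, so the guests/ directory created above disappears after a reboot; a small tmpfiles.d entry (same paths and ownership as above) keeps it across reboots:

echo 'd /var/run/ceph/guests 0770 qemu libvirt -' > /etc/tmpfiles.d/ceph-guests.conf
systemd-tmpfiles --create /etc/tmpfiles.d/ceph-guests.conf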

# Use Ceph for VM disks: add the following to /etc/nova/nova.conf

[libvirt]

virt_type = qemu

images_type = rbd

images_rbd_pool = vms

images_rbd_ceph_conf = /etc/ceph/ceph.conf

rbd_user = cinder

rbd_secret_uuid = ff2e1190-30f5-4849-9c1c-886b1e1ee181

disk_cachemodes="network=writeback"

live_migration_flag="VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST,VIR_MIGRATE_TUNNELLED"

inject_password = false

inject_key = false

inject_partition = -2

# Restart the services

systemctl restart libvirtd.service openstack-nova-compute.service
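After the restart, boot a test instance and confirm its disk was created in the vms pool rather than under /var/lib/nova/instances (run the rbd command on a node that has the cinder keyring):

rbd ls -p vms --id cinder   # expect <instance-uuid>_disk entries once an instance is running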

Integrating Ceph with Cinder

# Edit /etc/cinder/cinder.conf; note: remove the old LVM backend configuration first

[DEFAULT]

#enabled_backends = lvm

enabled_backends = ceph

[ceph]

volume_driver = cinder.volume.drivers.rbd.RBDDriver

rbd_pool = volumes

rbd_ceph_conf = /etc/ceph/ceph.conf

rbd_flatten_volume_from_snapshot = false

rbd_max_clone_depth = 5

rbd_store_chunk_size = 4

rados_connect_timeout = -1

glance_api_version = 2

rbd_user = cinder

rbd_secret_uuid = ff2e1190-30f5-4849-9c1c-886b1e1ee181

# Restart the service

systemctl restart openstack-cinder-volume.service
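Finally, create a test volume and confirm it appears as an RBD image in the volumes pool (the volume name below is only an example):

openstack volume create --size 1 ceph-test-vol
rbd ls -p volumes --id cinder      # expect volume-<UUID>
openstack volume service list      # cinder-volume on node5@ceph should be reported as up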