OpenStack Learning Series Part 12: Installing Ceph and Integrating It with OpenStack

    Ceph is a unified, distributed storage system designed for excellent performance, reliability, and scalability. "Unified" means it provides file, block, and object storage in a single system; "distributed" means it can scale out dynamically. In many production clouds in China, Ceph is used as the sole storage backend for OpenStack to improve data access efficiency.

1. Installing Ceph (version: Nautilus)

# Install ceph on node1, node2, and node3 and form them into a cluster
yum -y install ceph

Monitor (mon) setup (performed on node1)

    Provide the configuration file ceph.conf on node1
[root@node1 ~]# cat /etc/ceph/ceph.conf
[global]
fsid = a7f64266-0894-4f1e-a635-d0aeaca0e993
mon initial members = node1
mon host = 192.168.31.101
public_network = 192.168.31.0/24
cluster_network = 172.16.100.0/24
    Create a keyring for the cluster and generate a monitor secret key

ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'
    Generate the administrator keyring, create the client.admin user, and add it to the keyring
sudo ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring \
    --gen-key -n client.admin \
    --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'

    Generate the bootstrap-osd keyring, create the client.bootstrap-osd user, and add it to the keyring

sudo ceph-authtool --create-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring \
    --gen-key -n client.bootstrap-osd \
    --cap mon 'profile bootstrap-osd' --cap mgr 'allow r'
    Add the generated keyrings to ceph.mon.keyring
sudo ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring
sudo ceph-authtool /tmp/ceph.mon.keyring --import-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring
sudo chown ceph:ceph /tmp/ceph.mon.keyring
    Generate the monitor map using the hostname, host IP address, and FSID, and save it as /tmp/monmap
monmaptool --create --add node1 192.168.31.101 --fsid a7f64266-0894-4f1e-a635-d0aeaca0e993 /tmp/monmap
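    As an optional check, the generated map can be printed back to confirm it contains the expected FSID and the node1 entry:
monmaptool --print /tmp/monmap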
    Create the default data directory on the monitor host
sudo -u ceph mkdir /var/lib/ceph/mon/ceph-node1

    Populate the monitor daemon with the monitor map and keyring

sudo -u ceph ceph-mon --mkfs -i node1 --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring

    Start the monitor

sudo systemctl start ceph-mon@node1
sudo systemctl enable ceph-mon@node1
    Verify that the monitor is running
ceph -s
    Enable the v2 messenger protocol (msgr2) for the monitors
ceph mon enable-msgr2
    Disable insecure mode (clears the health warning "mon is allowing insecure global_id reclaim")
ceph config set mon auth_allow_insecure_global_id_reclaim false
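    To confirm the change took effect, the value can be read back; it should report false:
ceph config get mon auth_allow_insecure_global_id_reclaim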
Warning: if you see "Module 'restful' has failed dependency: No module named 'pecan'", install the packages below and then restart the mgr daemon or reboot the system

pip3 install pecan werkzeug
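If the mgr daemon (set up in the next step) is already running when this warning appears, restarting it should be enough to clear the message; the instance name node1 below matches this guide:
systemctl restart ceph-mgr@node1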

Manager daemon (mgr) configuration (performed on node1)

mkdir /var/lib/ceph/mgr/ceph-node1
ceph auth get-or-create mgr.node1 mon 'allow profile mgr' osd 'allow *' mds 'allow *' > \
    /var/lib/ceph/mgr/ceph-node1/keyring
-------------------------------------
systemctl start ceph-mgr@node1 
systemctl enable ceph-mgr@node1 

Adding OSDs (performed on node1)

BlueStore is used as the OSD backend here; it has lower overhead and uses less space than FileStore
# On node1, add three 100 GB disks to Ceph
ceph-volume lvm create --data /dev/sdb
ceph-volume lvm create --data /dev/sdc
ceph-volume lvm create --data /dev/sdd
Add OSDs on the other nodes

# Sync the configuration to the other nodes
scp /etc/ceph/* node2:/etc/ceph/
scp /etc/ceph/* node3:/etc/ceph/
scp /var/lib/ceph/bootstrap-osd/* node2:/var/lib/ceph/bootstrap-osd/
scp /var/lib/ceph/bootstrap-osd/* node3:/var/lib/ceph/bootstrap-osd/
# Add OSDs on node2 and node3
ceph-volume lvm create --data /dev/sdb
ceph-volume lvm create --data /dev/sdc
ceph-volume lvm create --data /dev/sdd

Check the status

[root@node1 ~]# ceph -s
  cluster:
    id:     a7f64266-0894-4f1e-a635-d0aeaca0e993
    health: HEALTH_OK
 
  services:
    mon: 1 daemons, quorum node1 (age 8m)
    mgr: node1(active, since 7m)
    osd: 9 osds: 9 up (since 5s), 9 in (since 7s)
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   9.0 GiB used, 891 GiB / 900 GiB avail
    pgs: 
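As an optional check beyond ceph -s, the CRUSH tree shows how the nine OSDs are spread across the three hosts:
ceph osd tree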

Expanding the monitors (make node2 and node3 monitors as well)

Modify mon initial members, mon host, and public_network in ceph.conf, then sync the file to the other nodes:
[root@node1 ~]# cat /etc/ceph/ceph.conf 
[global]
fsid = a7f64266-0894-4f1e-a635-d0aeaca0e993
mon initial members = node1,node2,node3
mon host = 192.168.31.101,192.168.31.102,192.168.31.103
public_network = 192.168.31.0/24
cluster_network = 172.16.100.0/24
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
osd journal size = 1024
osd pool default size = 3
osd pool default min size = 2
osd pool default pg num = 333
osd pool default pgp num = 333
osd crush chooseleaf type = 1
------------------------------------# Copy the file to the other nodes
scp /etc/ceph/ceph.conf  node2:/etc/ceph/
scp /etc/ceph/ceph.conf  node3:/etc/ceph/
Add monitors on node2 and node3

-------------------------# The steps for node2 are shown here; node3 is similar, just substitute the corresponding names
# Fetch the cluster's existing mon. keyring
ceph auth get mon. -o mon.keyring
# Fetch the cluster's existing monitor map
ceph mon getmap -o mon.map
# Create the monitor data directory; /var/lib/ceph/mon/ceph-node2 is created automatically
ceph-mon -i node2 --mkfs --monmap mon.map --keyring mon.keyring
chown ceph.ceph /var/lib/ceph/mon/ceph-node2 -R
# Start the mon
systemctl start ceph-mon@node2
systemctl enable ceph-mon@node2

-------------------------------------------
# Check the status
[root@node1 ~]# ceph -s
  cluster:
    id:     a7f64266-0894-4f1e-a635-d0aeaca0e993
    health: HEALTH_OK
 
  services:
    mon: 3 daemons, quorum node1,node2,node3 (age 2m)
    mgr: node1(active, since 20m)
    osd: 9 osds: 9 up (since 29s), 9 in (since 13m)
 
  task status:
 
  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   9.1 GiB used, 891 GiB / 900 GiB avail
    pgs:

Installing the Dashboard

 

# Install the Dashboard on node1
yum install ceph-mgr-dashboard -y
# Enable the dashboard mgr module
ceph mgr module enable dashboard
# Generate and install a self-signed certificate
ceph dashboard create-self-signed-cert  
# Create a dashboard login user (username: ceph, password: 123456)
echo 123456 > ceph-dashboard-password.txt
ceph dashboard ac-user-create ceph -i ceph-dashboard-password.txt administrator 
# Check how the service is exposed
[root@node1 ~]# ceph mgr services
{
    "dashboard": "https://node1:8443/"
}
Access the dashboard at https://node1:8443/ and log in with the account created above (ceph / 123456)

2. Integrating OpenStack backend storage with Ceph

 

---------------------------------------------# Preparation before integrating OpenStack with Ceph
# The first three nodes (node1, node2, node3) form the Ceph cluster and already have the packages installed, so install ceph-common on node4 and node5
yum -y install ceph-common

--- Copy the configuration files from node1 to all Nova nodes
for i in $(seq 2 5); do scp /etc/ceph/* node$i:/etc/ceph;done

--- Create the storage pools (pg_num = 64)
ceph osd pool create images 64
ceph osd pool create vms 64
ceph osd pool create volumes 64
ceph osd pool application enable images rbd
ceph osd pool application enable vms rbd
ceph osd pool application enable volumes rbd
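To verify that the pools exist and are tagged for rbd, the pool details can be listed; each of the three pools should show the rbd application:
ceph osd pool ls detail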

--- Configure authentication
ceph auth get-or-create client.cinder mon 'allow r' \
    osd 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=vms, allow rx pool=images' \
    -o /etc/ceph/ceph.client.cinder.keyring
# ceph auth caps client.cinder mon 'allow *' osd 'allow *'    # Grants all permissions; these two commented commands split the single command above into separate steps
# ceph auth get client.cinder -o /etc/ceph/ceph.client.cinder.keyring   # Export the keyring for the service to use
ceph auth get-or-create client.glance mon 'allow r' \
    osd 'allow class-read object_prefix rbd_children, allow rwx pool=images' \
    -o /etc/ceph/ceph.client.glance.keyring
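The capabilities just granted can be read back on any Ceph node; the output should match the caps specified above:
ceph auth get client.glance
ceph auth get client.cinder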
    
--- Copy the generated keyrings to all Nova nodes
for i in $(seq 2 5); do scp /etc/ceph/*.keyring node$i:/etc/ceph;done
--- Fix the ownership of the keyring files copied to all Nova nodes
for i in $(seq 2 5); do ssh node$i chown glance:glance /etc/ceph/ceph.client.glance.keyring ;done      # These two files are used by glance and cinder
for i in $(seq 2 5); do ssh node$i chown cinder:cinder /etc/ceph/ceph.client.cinder.keyring;done

--- Add the secret to libvirt; run on all Nova nodes (node2, node3, node4, node5)
ceph auth get-key client.cinder | tee client.cinder.key
uuidgen # Generate a UUID; the UUID from this document is reused here
cat > secret.xml <<EOF
<secret ephemeral='no' private='no'>
  <uuid>ae3d9d0a-df88-4168-b292-c07cdc2d8f02</uuid>
  <usage type='ceph'>
    <name>client.cinder secret</name>
  </usage>
</secret>
EOF

virsh secret-define --file secret.xml

virsh secret-set-value --secret ae3d9d0a-df88-4168-b292-c07cdc2d8f02 --base64 $(cat client.cinder.key) && rm client.cinder.key secret.xml
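On each Nova node the libvirt secret can then be verified; the listed UUID should match the one in secret.xml and secret-get-value should return the base64 key:
virsh secret-list
virsh secret-get-value ae3d9d0a-df88-4168-b292-c07cdc2d8f02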

---------------------------------------------# Integrate OpenStack with Ceph: configure the Glance node (node1); existing images must be re-uploaded after the switch
# crudini --set /etc/glance/glance-api.conf DEFAULT "show_image_direct_url" "True"
# crudini --set /etc/glance/glance-api.conf glance_store "default_store" "rbd"
# crudini --set /etc/glance/glance-api.conf glance_store "rbd_store_user" "glance"
# crudini --set /etc/glance/glance-api.conf glance_store "rbd_store_pool" "images"
# crudini --set /etc/glance/glance-api.conf glance_store "stores" "glance.store.filesystem.Store, glance.store.http.Store, glance.store.rbd.Store"
# crudini --set /etc/glance/glance-api.conf paste_deploy "flavor" "keystone"
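To confirm Glance now stores images in Ceph, an image can be re-uploaded once glance-api has been restarted (see the restart step below) and then looked for in the images pool; the cirros file and image names here are only examples:
openstack image create --disk-format qcow2 --container-format bare --file cirros-0.5.2-x86_64-disk.img --public cirros-ceph
rbd -p images ls -l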

---------------------------------------------# Integrate OpenStack with Ceph: configure the Cinder nodes (node4, node5)
crudini --set /etc/cinder/cinder.conf DEFAULT "enabled_backends" "lvm,nfs,ceph"   # Support the lvm, nfs, and ceph backends at the same time
crudini --set /etc/cinder/cinder.conf ceph "volume_driver" "cinder.volume.drivers.rbd.RBDDriver"
crudini --set /etc/cinder/cinder.conf ceph "volume_backend_name" "ceph"
crudini --set /etc/cinder/cinder.conf ceph "rbd_pool" "volumes"
crudini --set /etc/cinder/cinder.conf ceph "rbd_ceph_conf" "/etc/ceph/ceph.conf"
crudini --set /etc/cinder/cinder.conf ceph "rbd_flatten_volume_from_snapshot" "false"
crudini --set /etc/cinder/cinder.conf ceph "rbd_max_clone_depth" "5"
crudini --set /etc/cinder/cinder.conf ceph "rados_connect_timeout" "-1"
crudini --set /etc/cinder/cinder.conf ceph "glance_api_version" "2"
crudini --set /etc/cinder/cinder.conf ceph "rbd_user" "cinder"
crudini --set /etc/cinder/cinder.conf ceph "rbd_secret_uuid" "ae3d9d0a-df88-4168-b292-c07cdc2d8f02"
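For reference, after the crudini commands above the [ceph] section of /etc/cinder/cinder.conf should look roughly like this:
[ceph]
volume_driver = cinder.volume.drivers.rbd.RBDDriver
volume_backend_name = ceph
rbd_pool = volumes
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot = false
rbd_max_clone_depth = 5
rados_connect_timeout = -1
glance_api_version = 2
rbd_user = cinder
rbd_secret_uuid = ae3d9d0a-df88-4168-b292-c07cdc2d8f02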

---------------------------------------------# Integrate OpenStack with Ceph: configure the Nova nodes (node2, node3, node4, node5)
crudini --set /etc/nova/nova.conf libvirt "images_type" "rbd"
crudini --set /etc/nova/nova.conf libvirt "images_rbd_pool" "vms"
crudini --set /etc/nova/nova.conf libvirt "images_rbd_ceph_conf" "/etc/ceph/ceph.conf"
crudini --set /etc/nova/nova.conf libvirt "rbd_user" "cinder"
crudini --set /etc/nova/nova.conf libvirt "rbd_secret_uuid" "ae3d9d0a-df88-4168-b292-c07cdc2d8f02"
crudini --set /etc/nova/nova.conf libvirt "inject_password" "false"
crudini --set /etc/nova/nova.conf libvirt "inject_key" "false"
crudini --set /etc/nova/nova.conf libvirt "inject_partition" "-2"
crudini --set /etc/nova/nova.conf libvirt "live_migration_flag" "VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST"
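Likewise, the [libvirt] section of /etc/nova/nova.conf on each Nova node should end up roughly as:
[libvirt]
images_type = rbd
images_rbd_pool = vms
images_rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_user = cinder
rbd_secret_uuid = ae3d9d0a-df88-4168-b292-c07cdc2d8f02
inject_password = false
inject_key = false
inject_partition = -2
live_migration_flag = VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST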

---------------------------------------------# Restart the OpenStack services
--- Controller node
systemctl restart openstack-glance-api openstack-nova-api openstack-cinder-api openstack-cinder-scheduler

--- Compute nodes
for i in $(seq 2 5); do ssh node$i systemctl restart openstack-nova-compute;done

--- Storage nodes
for i in 4 5; do ssh node$i systemctl restart openstack-cinder-volume;done
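After the restarts, the Cinder services can be checked from node1; the volume services for the ceph backend on node4 and node5 (for example node4@ceph) should be reported as up:
openstack volume service list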

---------------------------------------------# OpenStack verification; run on node1
[root@node1 ~]# cinder type-create ceph
+--------------------------------------+------+-------------+-----------+
| ID                                   | Name | Description | Is_Public |
+--------------------------------------+------+-------------+-----------+
| 228269c3-6008-4e62-9408-a2fb04d74c1a | ceph | -           | True      |
+--------------------------------------+------+-------------+-----------+

[root@node1 ~]# cinder type-key ceph set volume_backend_name=ceph 
--- After exercising glance, cinder, and nova, confirm the objects with rbd ls images, rbd ls volumes, and rbd ls vms

Create a volume and then confirm:
[root@node1 ~]# openstack volume create  --size 1 --type ceph volume1
+---------------------+--------------------------------------+
| Field               | Value                                |
+---------------------+--------------------------------------+
| attachments         | []                                   |
| availability_zone   | nova                                 |
| bootable            | false                                |
| consistencygroup_id | None                                 |
| created_at          | 2022-03-01T10:22:13.000000           |
| description         | None                                 |
| encrypted           | False                                |
| id                  | 3a0ee405-ad4b-4453-8b66-029aa67f7af0 |
| migration_status    | None                                 |
| multiattach         | False                                |
| name                | volume1                              |
| properties          |                                      |
| replication_status  | None                                 |
| size                | 1                                    |
| snapshot_id         | None                                 |
| source_volid        | None                                 |
| status              | creating                             |
| type                | ceph                                 |
| updated_at          | None                                 |
| user_id             | 5a44718261844cbd8a65621b9e3cea8d     |
+---------------------+--------------------------------------+
[root@node1 ~]# rbd -p volumes ls -l     # View the created block device in Ceph
NAME                                        SIZE  PARENT FMT PROT LOCK 
volume-3a0ee405-ad4b-4453-8b66-029aa67f7af0 1 GiB          2 
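To also exercise the Nova side, an instance can be booted and the vms pool inspected; the image, flavor, and network names below are placeholders for whatever exists in your environment, and the instance disk should appear as <instance-uuid>_disk:
openstack server create --image cirros-ceph --flavor m1.tiny --network private vm-ceph-test
rbd -p vms ls -l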

 
