ceph 005: authorization supplement + RBD block device mapping

My Ceph version

[root@serverc ~]# ceph -v
ceph version 16.2.0-117.el8cp (0e34bb74700060ebfaa22d99b7d2cdc037b28a57) pacific (stable)

Authentication and authorization

[root@serverc ~]# ceph auth get-or-create  client.user2 mon 'allow rw' osd 'allow rw pool=pool2' > /etc/ceph/ceph.client.user2.keyring
This user can only access the specified pool (pool2).

x: the execute capability (on the mon it also gates auth operations such as creating users, as the failed attempt below shows):
[root@serverc ~]# ceph auth get-or-create client.user2 --id boss
Error EACCES: access denied
[root@serverc ~]# ceph auth get client.boss
[client.boss]
    key = AQBOcfdinDbjNBAAKADdWC1teSs1k+IngZFtLA==
    caps mon = "allow rw"
    caps osd = "allow rw"
exported keyring for client.boss
[root@serverc ~]# 


[root@serverc ~]# ceph auth caps client.boss mon 'allow rwx' osd 'allow rwx'
updated caps for client.boss
[root@serverc ~]# ceph auth get-or-create client.user2 --id boss
[client.user2]
    key = AQCpb/ditbuXExAAI0DbTNL5dJta4DwXsd4pIw==
[root@serverc ~]# ceph auth get-or-create client.user3 --id boss
[client.user3]
    key = AQAgcvdifGyoHxAAyiO5TzBFb7n6ajvE18STRg==
[root@serverc ~]# 

With rwx on the mon, client.boss can now create users (this leans toward an administrator role).

Specifying credentials and capabilities

ceph auth ls --keyring abc --name client.boss
When the keyring is not under the standard /etc/ceph directory, you can point at it explicitly.

ceph auth get-or-create client.user1 mon 'allow rw' osd 'allow rw pool=pool1 namespace=sys' > /etc/ceph/ceph.client.user1.keyring
Capabilities can be scoped down to a specific pool and namespace; if unspecified, the cap applies to all namespaces and all pools.
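
A quick way to see the namespace scoping in action, assuming pool1 exists and client.user1 has the caps above (object and input file names are made up):

rados -p pool1 -N sys put obj1 /etc/hosts --id user1     # allowed: namespace sys is granted
rados -p pool1 -N other put obj2 /etc/hosts --id user1   # denied: outside the granted namespace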

Make sure the user running the ceph commands has read permission on both the keyring file and the config file.
When changing caps, note that ceph auth caps replaces the whole cap set: write everything out each time, you cannot patch it piecemeal (see the sketch below).
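
A minimal sketch of the pitfall, reusing client.boss from above:

# WRONG: replaces the whole cap set, silently dropping the osd cap
ceph auth caps client.boss mon 'allow rwx'
# RIGHT: restate every cap the user should keep
ceph auth caps client.boss mon 'allow rwx' osd 'allow rwx'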

Deleting a user
ceph auth del client.user3
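
A prudent habit: export the entry first so it can be re-imported if the deletion turns out to be a mistake (the backup path is arbitrary):

ceph auth get client.user3 -o /root/client.user3.keyring.bak
ceph auth del client.user3
# restore later with: ceph auth import -i /root/client.user3.keyring.bak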

--id defaults to admin
--name defaults to client.admin
--keyring defaults to /etc/ceph/ceph.client.admin.keyring
--conf defaults to /etc/ceph/ceph.conf
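
A sketch of overriding those defaults, pointing at a key stored in a non-standard path (the path is hypothetical):

ceph -s --id boss --keyring /root/abc.keyring
# equivalent, spelling out the full name instead of the id:
ceph -s --name client.boss --keyring /root/abc.keyring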

profile-based authorization
OSDs use the osd profile capability when they talk to each other.

A profile is a predefined bundle of the capabilities a role needs internally, but it can also be granted to a regular user such as user1,
giving access to rbd, osd, mds, and the like (see the sketch below).
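
For instance, profile rbd can be scoped to a single pool, mirroring the client.rbd user created later in this note; the client name and pool here are made up:

ceph auth get-or-create client.app mon 'profile rbd' mgr 'profile rbd' osd 'profile rbd pool=apppool'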

[root@serverc ceph]# ceph auth get-or-create-key client.user1
AQBifPZijsT7IhAAJa5qCKaMzQX29ni2yJu//Q==

get-or-create-key retrieves just the key, without the caps.

ceph auth get client.xxx shows the capability information as well.

Exporting a user

ceph auth get client.breeze -o /etc/ceph/ceph.client.breeze.keyring

Importing a user

ceph auth import -i /etc/ceph/ceph.client.breeze.keyring

Ceph keyring management

When a client accesses the Ceph cluster it uses a local keyring file, searching the following paths and names in order:
/etc/ceph/$cluster.$name.keyring
/etc/ceph/$cluster.keyring
/etc/ceph/keyring
/etc/ceph/keyring.bin
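
If the keyring lives somewhere none of those paths cover, pass it explicitly on each command, or once per shell via CEPH_ARGS (paths below are hypothetical):

ceph -s --id user2 --keyring /root/keys/ceph.client.user2.keyring
# or for the whole session:
export CEPH_ARGS='--id user2 --keyring /root/keys/ceph.client.user2.keyring'
ceph -s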

Managing RBD block devices

All three Ceph storage types (block, object, file) are built on top of pools.

[root@serverc ceph]# ceph osd pool create rbdpool
pool 'rbdpool' created

[root@serverc ceph]# ceph osd pool application enable rbdpool rbd
enabled application 'rbd' on pool 'rbdpool'
The following achieves the same effect (or run both):
rbd pool init rbdpool

Creating a block device from the pool

The raw device created from a pool is called an image (the thing that gets mapped and mounted).

Create a dedicated user to manage RBD:

[root@serverc ceph]# ceph auth get-or-create client.rbd mon 'profile rbd' mgr 'profile rbd' osd 'profile rbd' > /etc/ceph/ceph.client.rbd.keyring
This user can be handed out to the application side.

[classroom environment]
clienta is the admin node: part of the cluster, but carries no OSDs; it acts as a cluster client.

Application clients are ordinary servers;
e.g. could a virtual machine keep its disks on RBD? (a typical use case)

alias rbd='rbd --id rbd'
Saves retyping --id on every command.

Creating an image

[root@serverc ceph]# rbd -p rbdpool create --size 1G image1 --id rbd

Listing the images in a pool

[root@serverc ceph]# rbd ls -p rbdpool --id rbd
image1

Viewing image details

[root@serverc ceph]# rbd info rbdpool/image1 --id rbd
rbd image 'image1':
    size 1 GiB in 256 objects  
    order 22 (4 MiB objects)
    snapshot_count: 0
    id: fae567c39ea1
    block_name_prefix: rbd_data.fae567c39ea1
    format: 2
    features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
    op_features: 
    flags: 
    create_timestamp: Sat Aug 13 07:59:16 2022
    access_timestamp: Sat Aug 13 07:59:16 2022
    modify_timestamp: Sat Aug 13 07:59:16 2022

256 objects: the image is 1 GiB and order 22 means 4 MiB objects, so 1024 / 4 = 256.

Listing the underlying objects

[root@serverc ceph]# rados -p rbdpool ls
rbd_object_map.fae567c39ea1
rbd_directory
rbd_info
rbd_header.fae567c39ea1
rbd_id.image1

These are metadata objects: they describe the image itself and index its data objects.

Objects are allocated only when written (thin provisioning); creating the image does not immediately consume your 1 GiB, as the sketch below shows.
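
rbd du makes the thin provisioning visible: a freshly created image shows the full size provisioned but almost nothing used:

rbd du rbdpool/image1 --id rbd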

Mapping the image on a server

A client needs three things (prep sketch below):
the rbd command (from the ceph-common package)
a user with the right caps
the ceph config file
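
A minimal prep sketch for a fresh client node; the hostname client1 is made up, and copying assumes root ssh from the admin node:

# on the client: the rbd command comes with ceph-common
yum install -y ceph-common
# from the admin node: hand over the config and the restricted keyring
scp /etc/ceph/ceph.conf /etc/ceph/ceph.client.rbd.keyring root@client1:/etc/ceph/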

Mapping

[root@serverc ceph]# rbd map rbdpool/image1 --id rbd
[root@serverc ceph]# rbd showmapped
id  pool     namespace  image   snap  device   
0   rbdpool             image1  -     /dev/rbd0
1   rbdpool             image1  -     /dev/rbd1

[Mapped it twice by mistake; remove one of the RBD mappings]

[root@serverc ceph]# rbd unmap /dev/rbd1


[root@serverc ceph]# mkfs.xfs /dev/rbd0
meta-data=/dev/rbd0              isize=512    agcount=8, agsize=32768 blks
        =                       sectsz=512   attr=2, projid32bit=1
        =                       crc=1        finobt=1, sparse=1, rmapbt=0
        =                       reflink=1
data     =                       bsize=4096   blocks=262144, imaxpct=25
        =                       sunit=16     swidth=16 blks
naming   =version 2              bsize=4096   ascii-ci=0, ftype=1
log      =internal log           bsize=4096   blocks=2560, version=2
        =                       sectsz=512   sunit=16 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
Discarding blocks...Done.
[root@serverc ceph]# mkdir /mnt/rbd0
[root@serverc ceph]# mount /dev/rbd0 /mnt/rbd0/
[root@serverc ceph]# df -h
/dev/rbd0      1014M   40M  975M   4% /mnt/rbd0


[root@serverc ceph]# rados -p rbdpool ls
rbd_data.fae567c39ea1.0000000000000020
rbd_object_map.fae567c39ea1
rbd_data.fae567c39ea1.0000000000000040
rbd_data.fae567c39ea1.00000000000000c0
rbd_directory
rbd_info
rbd_data.fae567c39ea1.0000000000000080
rbd_data.fae567c39ea1.00000000000000a0
rbd_data.fae567c39ea1.0000000000000060
rbd_data.fae567c39ea1.0000000000000000
rbd_header.fae567c39ea1
rbd_data.fae567c39ea1.00000000000000e0
rbd_data.fae567c39ea1.00000000000000ff
rbd_id.image1

[root@serverc rbd0]# dd if=/dev/zero of=file1 bs=1M count=20


[root@serverc rbd0]# sync
[root@serverc rbd0]# rados -p rbdpool ls
rbd_data.fae567c39ea1.0000000000000020
rbd_object_map.fae567c39ea1
rbd_data.fae567c39ea1.0000000000000040
rbd_data.fae567c39ea1.00000000000000c0
rbd_directory
rbd_data.fae567c39ea1.0000000000000003
rbd_data.fae567c39ea1.0000000000000001
rbd_info
rbd_data.fae567c39ea1.0000000000000080
rbd_data.fae567c39ea1.00000000000000a0
rbd_data.fae567c39ea1.0000000000000060
rbd_data.fae567c39ea1.0000000000000000
rbd_header.fae567c39ea1
rbd_data.fae567c39ea1.00000000000000e0
rbd_data.fae567c39ea1.0000000000000004
rbd_data.fae567c39ea1.0000000000000002
rbd_data.fae567c39ea1.00000000000000ff
rbd_id.image1
rbd_data.fae567c39ea1.0000000000000005
[root@serverc rbd0]# 
Not much can be read from the names alone. The new rbd_data objects (indices 1 through 5) are the five 4 MiB objects allocated by the 20 MiB dd write; replication to three copies happens at the OSD level and does not show up in rados ls, which lists each logical object once.

Growing the image

[root@serverc rbd0]# rbd resize --size 2G rbdpool/image1 --id rbd
Resizing image: 100% complete...done.

[root@serverc rbd0]# rbd du rbdpool/image1
NAME    PROVISIONED  USED  
image1        2 GiB  56 MiB

[root@serverc rbd0]# xfs_growfs /mnt/rbd0/
meta-data=/dev/rbd0              isize=512    agcount=8, agsize=32768 blks
        =                       sectsz=512   attr=2, projid32bit=1
        =                       crc=1        finobt=1, sparse=1, rmapbt=0
        =                       reflink=1
data     =                       bsize=4096   blocks=262144, imaxpct=25
        =                       sunit=16     swidth=16 blks
naming   =version 2              bsize=4096   ascii-ci=0, ftype=1
log      =internal log           bsize=4096   blocks=2560, version=2
        =                       sectsz=512   sunit=16 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
data blocks changed from 262144 to 524288
[root@serverc rbd0]# df -h | tail -n 2
tmpfs           576M     0  576M   0% /run/user/0
/dev/rbd0       2.0G   68M  2.0G   4% /mnt/rbd0

1. Create the RBD pool

ceph osd pool create rbdpool

2. Initialize the RBD pool

rbd pool init rbdpool

3. Create the RBD user

ceph auth get-or-create client.rbd mon 'profile rbd' mgr 'profile rbd' osd 'profile rbd' > /etc/ceph/ceph.client.rbd.keyring
This allows all RBD-related operations.

4. Create an RBD image

alias rbd='rbd --id rbd'
rbd create --size 1G rbdpool/image1

5. Map the image

rbd map rbdpool/image1
rbd showmapped

6. Create a filesystem on the RBD device

mkfs.xfs /dev/rbd0

7. Mount it

mount /dev/rbd0 /mnt/rbd0

8. Mount persistently (fstab)

/dev/rbd0   /mnt/rbd0  xfs  defaults,_netdev 0 0
It is a network device: it has to wait until the network and services are up (without _netdev the boot really can fail). The sketch below verifies the entry without a reboot.
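
A way to test the entry without rebooting (assumes the filesystem is currently mounted by hand):

umount /mnt/rbd0
mount -a            # mounts everything listed in fstab, including the new entry
df -h /mnt/rbd0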

9. Map automatically at boot

[root@serverc ~]# vim /etc/ceph/rbdmap 
[root@serverc ~]# cat /etc/ceph/rbdmap 
# RbdDevice		Parameters
#poolname/imagename	id=client,keyring=/etc/ceph/ceph.client.keyring
rbdpool/image1  id=rbd,keyring=/etc/ceph/ceph.client.rbd.keyring

[root@serverc ~]# systemctl enable rbdmap.service 
Created symlink /etc/systemd/system/multi-user.target.wants/rbdmap.service → /usr/lib/systemd/system/rbdmap.service.
[root@serverc ~]# 

The rbdmap service ships with ceph-common.
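
A rough sanity check that rbdmap will bring the device back, without a full reboot (unmount first; restarting the service unmaps and remaps the configured entries):

umount /mnt/rbd0
systemctl restart rbdmap.service
rbd showmapped      # /dev/rbd0 should reappear; then mount again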

10. Grow

rbd resize --size 2G rbdpool/image1
xfs_growfs /mnt/rbd0/

11. Delete

# comment out the fstab entry first
[root@serverc ~]# umount  /mnt/rbd0 
[root@serverc ~]# rbd unmap rbdpool/image1
[root@serverc ~]# rbd showmapped
[root@serverc ~]# rbd rm rbdpool/image1
Removing image: 100% complete...done.
[root@serverc ~]# 

12. Trash (for images you are not sure you want to delete yet)

[root@serverc ~]# rbd create --size 1G rbdpool/image2 
[root@serverc ~]# rbd trash move rbdpool/image2
[root@serverc ~]# rbd -p rbdpool ls
[root@serverc ~]# rbd trash ls -p rbdpool
fb5f7c2dd404 image2
[root@serverc ~]# rbd trash restore fb5f7c2dd404 -p rbdpool
[root@serverc ~]# rbd -p rbdpool ls
image2
[root@serverc ~]# 

rbd trash purge
Removes all images from the specified pool's trash.
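
rbd trash move also takes a deferment time; until it expires, purge refuses to touch the image (the date is arbitrary):

rbd trash move rbdpool/image2 --expires-at "2022-09-01"
rbd trash purge -p rbdpool    # only removes images whose expiry has passed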