
Ceph CRUSH Tuning Example: Two Types of Disks in the Same Host


Note: This document covers a test of using CRUSH to tier OSDs when a single host holds two kinds of disks: SSD and SATA. All tests below were run on Ceph 0.94.x.

Test environment:

ceph-mon: 1 node
ceph-osd1: 10G*2, 20G*2
ceph-osd2: 10G*2, 20G*2
ceph-osd3: 10G*1, 20G*1

Assume the 10G disks are SSD and the 20G disks are SATA.

ceph-osd1:
/dev/sdb1                5.0G   34M  5.0G   1% /var/lib/ceph/osd/ceph-0
/dev/sdc1                5.0G   34M  5.0G   1% /var/lib/ceph/osd/ceph-1
/dev/sdd1                 15G   35M   15G   1% /var/lib/ceph/osd/ceph-2
/dev/sde1                 15G   34M   15G   1% /var/lib/ceph/osd/ceph-3


ceph-osd2:
/dev/sdb1                5.0G   34M  5.0G   1% /var/lib/ceph/osd/ceph-4
/dev/sdc1                 15G   34M   15G   1% /var/lib/ceph/osd/ceph-5
/dev/sdd1                 15G   34M   15G   1% /var/lib/ceph/osd/ceph-6
/dev/sde1                5.0G   34M  5.0G   1% /var/lib/ceph/osd/ceph-7

ceph-osd3:
/dev/sdb1                5.0G   34M  5.0G   1% /var/lib/ceph/osd/ceph-8
/dev/sdc1                 15G   34M   15G   1% /var/lib/ceph/osd/ceph-9

$ ceph osd tree
ID WEIGHT  TYPE NAME          UP/DOWN REWEIGHT PRIMARY-AFFINITY 
-1 0.04997 root default                                         
-2 0.01999     host ceph-osd1                                   
 0       0         osd.0           up  1.00000          1.00000 
 1       0         osd.1           up  1.00000          1.00000 
 2 0.00999         osd.2           up  1.00000          1.00000 
 3 0.00999         osd.3           up  1.00000          1.00000 
-3 0.01999     host ceph-osd2                                   
 4       0         osd.4           up  1.00000          1.00000 
 5 0.00999         osd.5           up  1.00000          1.00000 
 6 0.00999         osd.6           up  1.00000          1.00000 
 7       0         osd.7           up  1.00000          1.00000 
-4 0.00999     host ceph-osd3                                   
 8       0         osd.8           up  1.00000          1.00000 
 9 0.00999         osd.9           up  1.00000          1.00000

Steps:

Export the CRUSH map:

$ ceph osd getcrushmap -o crushmap.map

Decompile the map into readable text:

$ crushtool -d crushmap.map -o crushmap.txt
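
The compile step later in this document reads a file named crushmapnew.txt, so in between, make an editable copy of the decompiled map; a minimal sketch (the editor is your choice):

$ cp crushmap.txt crushmapnew.txt
$ vi crushmapnew.txt    # apply the changes shown in the edited map below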

Original CRUSH map:

# begin crush map
tunable choose_local_tries 0
tunable choose_local_fallback_tries 0
tunable choose_total_tries 50
tunable chooseleaf_descend_once 1
tunable straw_calc_version 1

# devices
device 0 osd.0
device 1 osd.1
device 2 osd.2
device 3 osd.3
device 4 osd.4
device 5 osd.5
device 6 osd.6
device 7 osd.7
device 8 osd.8
device 9 osd.9

# types
type 0 osd
type 1 host
type 2 chassis
type 3 rack
type 4 row
type 5 pdu
type 6 pod
type 7 room
type 8 datacenter
type 9 region
type 10 root

# buckets
host ceph-osd1 {
    id -2        # do not change unnecessarily
    # weight 0.020
    alg straw
    hash 0    # rjenkins1
    item osd.0 weight 0.000
    item osd.1 weight 0.000
    item osd.2 weight 0.010
    item osd.3 weight 0.010
}
host ceph-osd2 {
    id -3        # do not change unnecessarily
    # weight 0.020
    alg straw
    hash 0    # rjenkins1
    item osd.4 weight 0.000
    item osd.5 weight 0.010
    item osd.6 weight 0.010
    item osd.7 weight 0.000
}
host ceph-osd3 {
    id -4        # do not change unnecessarily
    # weight 0.010
    alg straw
    hash 0    # rjenkins1
    item osd.8 weight 0.000
    item osd.9 weight 0.010
}
root default {
    id -1        # do not change unnecessarily
    # weight 0.050
    alg straw
    hash 0    # rjenkins1
    item ceph-osd1 weight 0.020
    item ceph-osd2 weight 0.020
    item ceph-osd3 weight 0.010
}

# rules
rule replicated_ruleset {
    ruleset 0
    type replicated
    min_size 1
    max_size 10
    step take default
    step chooseleaf firstn 0 type host
    step emit
}

# end crush map

After editing:
Note: a new type, diskarray, is inserted between osd and host, splitting each host's devices into an SSD group and a SATA group.

# begin crush map
tunable choose_local_tries 0
tunable choose_local_fallback_tries 0
tunable choose_total_tries 50
tunable chooseleaf_descend_once 1
tunable straw_calc_version 1

# devices
device 0 osd.0
device 1 osd.1
device 2 osd.2
device 3 osd.3
device 4 osd.4
device 5 osd.5
device 6 osd.6
device 7 osd.7
device 8 osd.8
device 9 osd.9

# types
type 0 osd
type 1 diskarray
type 2 host
type 3 chassis
type 4 rack
type 5 row
type 6 pdu
type 7 pod
type 8 room
type 9 datacenter
type 10 region
type 11 root

# buckets
diskarray ceph-osd1-ssd {
    id -1
    alg straw
    hash 0
    item osd.0 weight 0.005
    item osd.1 weight 0.005
}

diskarray ceph-osd1-sata {
    id -2
    alg straw
    hash 0
    item osd.2 weight 0.015
    item osd.3 weight 0.015
}

diskarray ceph-osd2-ssd {
    id -3
    alg straw
    hash 0
    item osd.4 weight 0.005
    item osd.7 weight 0.005
}

diskarray ceph-osd2-sata {
    id -4
    alg straw
    hash 0
    item osd.5 weight 0.015
    item osd.6 weight 0.015
}

diskarray ceph-osd3-ssd {
    id -5
    alg straw
    hash 0
    item osd.8 weight 0.005
}

diskarray ceph-osd3-sata {
    id -6
    alg straw
    hash 0
    item osd.9 weight 0.015
}

root ssd {
    id -7
    alg straw
    hash 0
    item ceph-osd1-ssd weight 0.010
    item ceph-osd2-ssd weight 0.010
    item ceph-osd3-ssd weight 0.005
}

root sata {
    id -8
    alg straw
    hash 0
    item ceph-osd1-sata weight 0.030
    item ceph-osd2-sata weight 0.030
    item ceph-osd3-sata weight 0.015
}

# rules
rule ssd_ruleset {
    ruleset 0
    type replicated
    min_size 1
    max_size 4
    step take ssd
    step chooseleaf firstn 0 type diskarray
    step emit
}

rule sata_ruleset {
    ruleset 1
    type replicated
    min_size 1
    max_size 5
    step take sata
    step chooseleaf firstn 0 type diskarray
    step emit
}

# end crush map

Recompile the text back into binary form:

$ crushtool -c crushmapnew.txt -o crushmapnew.map
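
Before importing, the edited map can be sanity-checked offline with crushtool's test mode; the rule numbers below match the ruleset ids defined above, and the flags are standard crushtool options (--num-rep 2 is just an example replica count):

$ crushtool -i crushmapnew.map --test --rule 0 --num-rep 2 --show-mappings
$ crushtool -i crushmapnew.map --test --rule 1 --num-rep 2 --show-utilization

Every mapping printed for rule 0 should only contain OSDs under the ssd root, and rule 1 only OSDs under the sata root.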

Import it into Ceph:

$ ceph osd setcrushmap -i crushmapnew.map
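
After the import, the new hierarchy can be verified (illustrative checks, not captured output from this test cluster):

$ ceph osd tree          # should now show the ssd and sata roots with their diskarray buckets
$ ceph osd crush dump    # full JSON dump of buckets and rules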

Create a pool of each type:

$ ceph osd pool create ssdpool 128 ssd_ruleset
$ ceph osd pool create satapool 128 sata_ruleset
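
To confirm that each pool picked up its rule, or to move a pre-existing pool onto one of the new rules (the pool name rbd below is only an example):

$ ceph osd dump | grep -E 'ssdpool|satapool'   # shows each pool's crush_ruleset
$ ceph osd pool set rbd crush_ruleset 1        # assign an existing pool to sata_ruleset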

Note:
On version 0.94, every Ceph OSD must be configured with:

osd_crush_update_on_start = false

Otherwise, whenever an OSD starts, it will automatically move itself back under its host bucket.
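
A minimal ceph.conf fragment for this (placing it under [osd] is an assumption; [global] works as well, and the option is read when the OSD daemon starts):

[osd]
osd crush update on start = false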

Version 10.2.x has not been tested.

