Ceph installation test notes

2019-10-17  ThirstyZebra

install BBR
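
the note doesn't record the BBR steps; a minimal sketch for CentOS 7, assuming a 4.9+ kernel (e.g. kernel-ml from ELRepo) is already running:

echo "net.core.default_qdisc = fq" | sudo tee /etc/sysctl.d/99-bbr.conf
echo "net.ipv4.tcp_congestion_control = bbr" | sudo tee -a /etc/sysctl.d/99-bbr.conf
sudo sysctl --system
sysctl net.ipv4.tcp_congestion_control    # should print: bbr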

install the OS

disable SELinux

sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
setenforce 0

sshd: disable DNS lookups

sudo vi /etc/ssh/sshd_config

sed -i 's/#UseDNS yes/UseDNS no/g' /etc/ssh/sshd_config
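
restart sshd so the change takes effect (assuming systemd, as on CentOS 7):

sudo systemctl restart sshd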

sudo settings

sudo visudo

edit the Defaults line to add timestamp_timeout=30, so sudo caches credentials for 30 minutes:

Defaults env_reset,timestamp_timeout=30

open the ports Ceph needs

firewall-cmd --list-all

sudo firewall-cmd --zone=public --add-port=6789/tcp --permanent
sudo firewall-cmd --zone=public --add-port=6800-7100/tcp --permanent
sudo firewall-cmd --reload

install gdisk

sudo yum install gdisk

sudo gdisk /dev/sdc
n        # new partition
         # press Enter at the prompts to accept the defaults
L        # list the partition type codes
8e00     # Linux LVM
w        # write the table and exit
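
a non-interactive equivalent using sgdisk (part of the gdisk package); partition number 4 here is an assumption to match the /dev/sdx4 used for vgcreate below:

sudo sgdisk --new=4:0:0 --typecode=4:8e00 /dev/sdc    # new partition 4 over the largest free span, type Linux LVM
sudo sgdisk --print /dev/sdc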

reload gpt table

sudo bash -c "echo 1 > /sys/block/sdc/device/rescan"
partprobe

lv info

sudo vgcreate ceph-journal-0 /dev/sdx4
sudo lvcreate -n ceph-db-0 -L 4G ceph-journal-0
sudo lvcreate -n ceph-db-1 -L 4G ceph-journal-0
sudo lvcreate -n ceph-db-2 -L 4G ceph-journal-0
sudo lvcreate -n ceph-db-3 -L 4G ceph-journal-0
sudo lvcreate -n ceph-wal-0 -L 16G ceph-journal-0
sudo lvcreate -n ceph-wal-1 -L 16G ceph-journal-0
sudo lvcreate -n ceph-wal-2 -L 16G ceph-journal-0
sudo lvcreate -n ceph-wal-3 -l 100%FREE ceph-journal-0

LV         VG             Attr       LSize   Pool Origin Data% Meta% Move Log Cpy%Sync Convert
root       centos         -wi-ao---- 30.00g
swap       centos         -wi-ao---- 7.50g
ceph-db-0  ceph-journal-0 -wi-a----- 4.00g
ceph-db-1  ceph-journal-0 -wi-a----- 4.00g
ceph-db-2  ceph-journal-0 -wi-a----- 4.00g
ceph-db-3  ceph-journal-0 -wi-a----- 4.00g
ceph-wal-0 ceph-journal-0 -wi-a----- 16.00g
ceph-wal-1 ceph-journal-0 -wi-a----- 16.00g
ceph-wal-2 ceph-journal-0 -wi-a----- 16.00g
ceph-wal-3 ceph-journal-0 -wi-a----- <16.54g

hdd

/dev/sda 8t
/dev/sdb 8t

wipe the disks

ceph-deploy disk zap <host> <disk>

or manually:
/usr/sbin/wipefs --all /dev/sdz
/bin/dd if=/dev/zero of=/dev/sdz bs=1M count=10
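
for example, zapping the first data disk on snail-01 (host and device names taken from the deploy commands below):

ceph-deploy disk zap snail-01 /dev/sda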

deploy osd

ceph-deploy osd create snail-01 --data /dev/sda --block-db ceph-journal-0/ceph-db-0 --block-wal ceph-journal-0/ceph-wal-0
ceph-deploy osd create snail-02 --data /dev/sda --block-db ceph-journal-0/ceph-db-0 --block-wal ceph-journal-0/ceph-wal-0

ceph-deploy osd create snail-01 --data /dev/sdb --block-db ceph-journal-0/ceph-db-1 --block-wal ceph-journal-0/ceph-wal-1
ceph-deploy osd create snail-02 --data /dev/sdb --block-db ceph-journal-0/ceph-db-1 --block-wal ceph-journal-0/ceph-wal-1

add more disk

ceph-deploy osd create snail-01 --data /dev/sdc --block-db ceph-journal-0/ceph-db-2 --block-wal ceph-journal-0/ceph-wal-2
ceph-deploy osd create snail-02 --data /dev/sdc --block-db ceph-journal-0/ceph-db-2 --block-wal ceph-journal-0/ceph-wal-2

ceph-deploy osd create snail-01 --data /dev/sde --block-db ceph-journal-0/ceph-db-3 --block-wal ceph-journal-0/ceph-wal-3
ceph-deploy osd create snail-02 --data /dev/sde --block-db ceph-journal-0/ceph-db-3 --block-wal ceph-journal-0/ceph-wal-3
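
note: ceph-journal-0/ceph-db-N and ceph-wal-N refer to the VG/LVs of those names on each target host (ceph-deploy runs ceph-volume on the remote node), which is why the same names repeat across hosts. After deploying, check the cluster and the new OSDs from a node with an admin keyring:

sudo ceph -s
sudo ceph osd tree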

add more

ceph-deploy osd create --dmcrypt snail-03 --data /dev/sda --block-db ceph-journal-0/ceph-db-0 --block-wal ceph-journal-0/ceph-wal-0
ceph-deploy osd create --dmcrypt snail-03 --data /dev/sdb --block-db ceph-journal-0/ceph-db-1 --block-wal ceph-journal-0/ceph-wal-1
ceph-deploy osd create --dmcrypt snail-03 --data /dev/sdc --block-db ceph-journal-0/ceph-db-2 --block-wal ceph-journal-0/ceph-wal-2
ceph-deploy osd create --dmcrypt snail-03 --data /dev/sdd --block-db ceph-journal-0/ceph-db-3 --block-wal ceph-journal-0/ceph-wal-3

ceph-deploy osd create --dmcrypt snail-04 --data /dev/sda --block-db ceph-journal-0/ceph-db-0 --block-wal ceph-journal-0/ceph-wal-0
ceph-deploy osd create --dmcrypt snail-04 --data /dev/sdb --block-db ceph-journal-0/ceph-db-1 --block-wal ceph-journal-0/ceph-wal-1
ceph-deploy osd create --dmcrypt snail-04 --data /dev/sdc --block-db ceph-journal-0/ceph-db-2 --block-wal ceph-journal-0/ceph-wal-2
ceph-deploy osd create --dmcrypt snail-04 --data /dev/sdd --block-db ceph-journal-0/ceph-db-3 --block-wal ceph-journal-0/ceph-wal-3
