# Apply the Ceph repository on each node
[ceph]
name=Ceph packages for $basearch
baseurl=https://download.ceph.com/rpm-octopus/el7/$basearch
enabled=1
priority=2
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc
[ceph-noarch]
name=Ceph noarch packages
baseurl=https://download.ceph.com/rpm-octopus/el7/noarch
enabled=1
priority=2
gpgcheck=1
gpgkey=https://download.ceph.com/keys/release.asc
[ceph-source]
name=Ceph source packages
baseurl=https://download.ceph.com/rpm-octopus/el7/SRPMS
enabled=0
priority=2
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc
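# The block above is the contents of /etc/yum.repos.d/ceph.repo. A minimal sketch for
# putting it in place on a node, assuming it was saved locally as ceph.repo:
$ sudo cp ceph.repo /etc/yum.repos.d/ceph.repo
$ sudo rpm --import https://download.ceph.com/keys/release.asc
$ sudo yum clean all && sudo yum makecache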
$ sudo yum update
$ sudo yum install ceph-deploy
$ sudo yum install ceph-common ceph-mds ceph-mgr -y
$ sudo yum install fcgi -y
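# Optional sanity check that the packages resolved:
$ ceph-deploy --version
$ ceph --version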
### ceph cluster setup
# /etc/hosts.allow (allow sshd access between the nodes)
sshd:192.168.172.43,192.168.172.44
# /etc/hosts (host entries for the nodes)
# Note: a plain "sudo echo ... >> /etc/hosts" fails because the redirection runs as the
# unprivileged user, so pipe through sudo tee -a instead.
$ echo -e "
> 192.168.172.42 v134
> 192.168.172.43 v135
> 192.168.172.44 v136
> " | sudo tee -a /etc/hosts
# ~/.ssh/config (ssh settings for ansible and ceph-deploy; passwordless key setup is sketched after the config)
[asmanager@v134 ~]$ vi ~/.ssh/config   # edit as asmanager, not with sudo, so the file stays owned by asmanager
Host v134
    Hostname v134
    User asmanager
    Port 5501
Host v135
    Hostname v135
    User asmanager
    Port 5501
Host v136
    Hostname v136
    User asmanager
    Port 5501
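# ceph-deploy needs passwordless ssh (and passwordless sudo) to every node as asmanager.
# A minimal sketch, assuming the asmanager account already exists on v135/v136:
$ chmod 600 ~/.ssh/config
$ ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa
$ ssh-copy-id asmanager@v135    # Port 5501 is picked up from ~/.ssh/config above
$ ssh-copy-id asmanager@v136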
$ mkdir ceph-cluster
$ sudo chown asmanager:asmanager ceph-cluster
$ cd ceph-cluster
# Do not run ceph-deploy with sudo; it invokes sudo on the remote nodes itself (via sudoers).
[asmanager@v134 ceph-cluster]$ ceph-deploy new v134
[asmanager@v134 ceph-cluster]$ ceph-deploy install --no-adjust-repos v134 v135 v136
[v136][DEBUG ] Complete!
[v136][INFO ] Running command: sudo ceph --version
[v136][DEBUG ] ceph version 15.2.16 (d46a73d6d0a67a79558054a3a5a72cb561724974) octopus (stable)
[asmanager@v134 ceph-cluster]$ sudo ceph --version
ceph version 15.2.16 (d46a73d6d0a67a79558054a3a5a72cb561724974) octopus (stable)
# Monitor node setup
# On success the following keyring files are created:
[asmanager@v134 ceph-cluster]$ ceph-deploy mon create-initial
Once you complete the process, your local directory should have the following keyrings:
ceph.client.admin.keyring
ceph.bootstrap-mgr.keyring
ceph.bootstrap-osd.keyring
ceph.bootstrap-mds.keyring
ceph.bootstrap-rgw.keyring
ceph.bootstrap-rbd.keyring
# [ceph_deploy.mon][WARNIN] mon.vdicnode01 monitor is not yet in quorum, tries left: 5
# If the install fails with the warning above, check the following:
# 1) Wipe the Ceph installation on all nodes and start over (STARTING OVER; see the sketch below)
# 2)
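# A sketch of the STARTING OVER step from 1), using the same three hosts (run from the admin node):
$ ceph-deploy purge v134 v135 v136
$ ceph-deploy purgedata v134 v135 v136
$ ceph-deploy forgetkeys
$ rm ceph.*    # remove the local ceph.conf and keyrings in ceph-cluster before re-deploying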
[asmanager@v134 ceph-cluster]$ ceph-deploy admin v134 v135 v136
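# If plain ceph commands on a node cannot read the admin keyring, relaxing its
# permissions is the usual fix (run on the node that complains):
$ sudo chmod +r /etc/ceph/ceph.client.admin.keyring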
# After pushing the admin keyring, check the cluster status; ceph -s shows a normal state only once the storage cluster (OSDs) has been created.
[asmanager@v134 ceph-cluster]$ ceph -v
ceph version 15.2.16 (d46a73d6d0a67a79558054a3a5a72cb561724974) octopus (stable)
[asmanager@v134 ceph-cluster]$ sudo ceph -s
  cluster:
    id:     5cdfeff3-0d31-4204-a151-b4eea0e6d575
    health: HEALTH_WARN
            mon is allowing insecure global_id reclaim
            Module 'restful' has failed dependency: No module named 'pecan'
            OSD count 0 < osd_pool_default_size 3

  services:
    mon: 1 daemons, quorum v134 (age 23m)
    mgr: v134(active, since 8m)
    osd: 0 osds: 0 up, 0 in

  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:
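# The warnings above are expected at this stage. A sketch of how they are commonly cleared
# (the package and systemd unit names are assumptions for this CentOS 7 / Octopus setup):
$ sudo yum install python3-pip -y && sudo pip3 install pecan              # clears the 'restful' dependency warning
$ sudo systemctl restart ceph-mgr@v134                                    # assumed mgr unit name; restart so the module reloads
$ sudo ceph config set mon auth_allow_insecure_global_id_reclaim false   # only once all clients/daemons are patched
# The "OSD count 0" warning disappears after the OSDs are created below.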
Deploy a manager daemon. (Required only for luminous+ builds):
# Deploy the manager daemon when running luminous or newer
$ ceph-deploy mgr create v134   # required only for luminous+ builds, i.e. >= 12.x builds
# Create the OSDs
# If this step errors out, check the ownership of the ceph state directory:
# sudo chown -R asmanager:asmanager /var/lib/ceph/
$ ceph-deploy osd create --data /dev/sdc v134
$ ceph-deploy osd create --data /dev/sdc v135
$ ceph-deploy osd create --data /dev/sdc v136
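# Quick check that all three OSDs came up:
$ sudo ceph osd tree
$ sudo ceph -s    # the OSD count warning should now be gone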
$ ceph-deploy mds create v134
$ ceph-deploy mon add v135
$ ceph-deploy mon add v136
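# Verify the added monitors joined the quorum:
$ sudo ceph quorum_status --format json-pretty
$ sudo ceph -s    # mon: should now report 3 daemons in quorum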