Deploying clover
Documentation | |
---|---|
Name: | Deploying clover |
Description: | Raw commands of a live Ceph storage cluster deployment |
Modification date: | 29/07/2018 |
Owner: | dodger |
Notify changes to: | Owner |
Tags: | ceph, object storage |
Escalate to: | Thefuckingbofh |
RAW
CloneWars deploy clover
# OSD nodes (odd numbers) in nuciberterminal
for i in 01 03 05 07 09 11 13 15 17 19 ; do
    bash /home/bofher/scripts/CloneWars/CloneWars.sh -c nuciberterminal -h AVMLP-OSD-0${i} -i 10.20.54.1${i} -d 2048GB -m 20 -O -r 4096 -v 2 -o 2 -F
    rm -fv /etc/salt/pki/master/minions/avmlp-osd-0${i}.ciberterminal.net
done

# OSD nodes (even numbers) in nuciberterminal2
for i in 02 04 06 08 10 12 14 16 18 20 ; do
    bash /home/bofher/scripts/CloneWars/CloneWars.sh -c nuciberterminal2 -h AVMLP-OSD-0${i} -i 10.20.54.1${i} -d 2048GB -m 20 -O -r 4096 -v 2 -o 2 -F
    rm -fv /etc/salt/pki/master/minions/avmlp-osd-0${i}.ciberterminal.net
done

# MON nodes
for i in 01 03 ; do
    bash /home/bofher/scripts/CloneWars/CloneWars.sh -c nuciberterminal -h AVMLP-OSM-0${i} -i 10.20.54.$((${i}+50)) -d 50GB -m 20 -O -r 4096 -v 2 -o 2 -F
    rm -fv /etc/salt/pki/master/minions/avmlp-osm-0${i}.ciberterminal.net
done
for i in 02 04 ; do
    bash /home/bofher/scripts/CloneWars/CloneWars.sh -c nuciberterminal2 -h AVMLP-OSM-0${i} -i 10.20.54.$((${i}+50)) -d 50GB -m 20 -O -r 4096 -v 2 -o 2 -F
    rm -fv /etc/salt/pki/master/minions/avmlp-osm-0${i}.ciberterminal.net
done

# radosgw nodes
for i in 01 03 ; do
    bash /home/bofher/scripts/CloneWars/CloneWars.sh -c nuciberterminal -h AVMLP-OSGW-0${i} -i 10.20.54.$((${i}+10)) -d 50GB -m 20 -O -r 4096 -v 2 -o 2 -F
    rm -fv /etc/salt/pki/master/minions/avmlp-osgw-0${i}.ciberterminal.net
done
for i in 02 04 ; do
    bash /home/bofher/scripts/CloneWars/CloneWars.sh -c nuciberterminal2 -h AVMLP-OSGW-0${i} -i 10.20.54.$((${i}+10)) -d 50GB -m 20 -O -r 4096 -v 2 -o 2 -F
    rm -fv /etc/salt/pki/master/minions/avmlp-osgw-0${i}.ciberterminal.net
done

# MDS (cephfs) nodes
for i in 01 03 ; do
    bash /home/bofher/scripts/CloneWars/CloneWars.sh -c nuciberterminal -h AVMLP-OSFS-0${i} -i 10.20.55.$((${i}+10)) -d 50GB -m 20 -O -r 4096 -v 2 -o 2 -F
    rm -fv /etc/salt/pki/master/minions/avmlp-osfs-0${i}.ciberterminal.net
done
for i in 02 04 ; do
    bash /home/bofher/scripts/CloneWars/CloneWars.sh -c nuciberterminal2 -h AVMLP-OSFS-0${i} -i 10.20.55.$((${i}+10)) -d 50GB -m 20 -O -r 4096 -v 2 -o 2 -F
    rm -fv /etc/salt/pki/master/minions/avmlp-osfs-0${i}.ciberterminal.net
done
salt
export THESERVER="avmlp-os*-0*"
# accept the new minion keys and apply the base states
salt-key -A
salt "${THESERVER}" state.apply
salt "${THESERVER}" state.apply nsupdate
salt "${THESERVER}" test.ping
salt "${THESERVER}" pkg.install yum-plugin-priorities

# ceph user with passwordless sudo on every node
salt "${THESERVER}" user.add ceph 1002
salt "${THESERVER}" file.write /etc/sudoers.d/ceph \
    "ceph ALL = (root) NOPASSWD:ALL"
salt "${THESERVER}" cmd.run "sudo whoami" runas=ceph

# SSH keys for the ceph user, shared across all the nodes
salt "${THESERVER}" cmd.run \
    "ssh-keygen -q -N '' -f /home/ceph/.ssh/id_rsa" \
    runas=ceph
salt "${THESERVER}" cmd.run "cp /home/ceph/.ssh/id_rsa.pub /home/ceph/.ssh/authorized_keys"
export VMNAMESTART="avmlp-os"
salt "${THESERVER}" cmd.run "cat /home/ceph/.ssh/id_rsa.pub" | egrep -v "^${VMNAMESTART}" | sed 's/^[[:space:]]\{1,5\}//g' > all_cluster_nodes.txt
while read LINE ; do
    salt "${THESERVER}" file.append /home/ceph/.ssh/authorized_keys "${LINE}"
done < all_cluster_nodes.txt
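ceph-deploy relies on the ceph user being able to SSH between all the nodes without a password, so a quick check is worth it before going on. A sketch (run from the salt master; avmlp-osm-001 picked here only as an example target):

# every node should print the target's hostname without being prompted for a password
salt "${THESERVER}" cmd.run \
    "ssh -o StrictHostKeyChecking=no -o BatchMode=yes avmlp-osm-001.ciberterminal.net hostname" \
    runas=ceph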
avmlp-osm-001
cat << EOM > ceph.repo
[Ceph]
name=Ceph packages for \$basearch
baseurl=http://download.ceph.com/rpm-nautilus/el7/\$basearch
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc

[Ceph-noarch]
name=Ceph noarch packages
baseurl=http://download.ceph.com/rpm-nautilus/el7/noarch
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc

[ceph-source]
name=Ceph source packages
baseurl=http://download.ceph.com/rpm-nautilus/el7/SRPMS
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc
EOM

export GWSERVERS="avmlp-osgw-004.ciberterminal.net avmlp-osgw-003.ciberterminal.net avmlp-osgw-001.ciberterminal.net avmlp-osgw-002.ciberterminal.net"
export OSDSERVERS="avmlp-osd-001.ciberterminal.net avmlp-osd-002.ciberterminal.net avmlp-osd-003.ciberterminal.net avmlp-osd-004.ciberterminal.net avmlp-osd-005.ciberterminal.net avmlp-osd-006.ciberterminal.net avmlp-osd-007.ciberterminal.net avmlp-osd-008.ciberterminal.net avmlp-osd-009.ciberterminal.net avmlp-osd-010.ciberterminal.net avmlp-osd-011.ciberterminal.net avmlp-osd-012.ciberterminal.net avmlp-osd-013.ciberterminal.net avmlp-osd-014.ciberterminal.net avmlp-osd-015.ciberterminal.net avmlp-osd-016.ciberterminal.net avmlp-osd-017.ciberterminal.net avmlp-osd-018.ciberterminal.net avmlp-osd-019.ciberterminal.net avmlp-osd-020.ciberterminal.net"
export MONSERVERS="avmlp-osm-002.ciberterminal.net avmlp-osm-001.ciberterminal.net avmlp-osm-004.ciberterminal.net avmlp-osm-003.ciberterminal.net avmlp-osm-005.ciberterminal.net avmlp-osm-006.ciberterminal.net"
export MDSSERVERS="avmlp-osfs-002.ciberterminal.net avmlp-osfs-001.ciberterminal.net avmlp-osfs-004.ciberterminal.net avmlp-osfs-003.ciberterminal.net"
export ALLSERVERS="${GWSERVERS} ${OSDSERVERS} ${MONSERVERS} ${MDSSERVERS}"

# distribute the repo file to every node, move it into place and verify
for i in ${ALLSERVERS} ; do
    scp ceph.repo ${i}:/home/ceph/
    ssh ${i} "sudo cp -pfv /home/ceph/ceph.repo /etc/yum.repos.d/"
done
for i in ${ALLSERVERS} ; do ssh ${i} "sudo chown root. /etc/yum.repos.d/ceph.repo" ; done
for i in ${ALLSERVERS} ; do ssh ${i} "sudo ls -l /etc/yum.repos.d/ceph.repo" ; done
salt
export THESERVER="avmlp-os[mdgf]*-0*"
salt "${THESERVER}" cmd.run "yum -y install ceph-deploy"
avmlp-osm-001
ceph-deploy install ${ALLSERVERS} --repo-url https://download.ceph.com/rpm-nautilus/el7/
salt
CHECK
export THESERVER="avmlp-os[mdgf]*-0*"
salt "${THESERVER}" cmd.run "rpm -qa |egrep ceph"
for i in $(salt "${THESERVER}" test.ping | egrep "^a" | awk -F\: '{print $1}' | sort) ; do
    salt "${i}" cmd.run "sudo ceph --version"
done
avmlp-osm-001
ceph-deploy new ${MONSERVERS}
ceph-deploy mon create-initial
for i in ${MONSERVERS} ; do ceph-deploy gatherkeys ${i} ; done
ceph-deploy admin ${MONSERVERS}
sudo ceph mon enable-msgr2
ceph-deploy mgr create ${MONSERVERS}
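A quick sanity check of the monitors/managers at this point (a sketch using standard ceph commands, run on avmlp-osm-001):

# all six monitors should be in quorum and one mgr should be active
sudo ceph -s
sudo ceph quorum_status --format json-pretty | egrep -A 8 "quorum_names"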
OSDs
for THESERVER in ${OSDSERVERS} ; do
    echo "${THESERVER}"
    echo "################### ${THESERVER}: creating disk"
    ceph-deploy osd create ${THESERVER} --data /dev/sdb
    echo "################### ${THESERVER}: listing (check) disk"
    ceph-deploy osd list ${THESERVER}
done
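To verify that all 20 OSDs came up and joined the cluster (standard ceph commands, shown as a sketch):

# expect "20 osds: 20 up, 20 in" and one osd.N per host in the tree
sudo ceph osd stat
sudo ceph osd tree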
CRUSH
mkdir /home/ceph/crush/mod_20190730
cd /home/ceph/crush/mod_20190730
ceph osd getcrushmap -o crushmap.bin
crushtool -d crushmap.bin -o crushmap.txt
- crushmap.txt
# begin crush map
tunable choose_local_tries 0
tunable choose_local_fallback_tries 0
tunable choose_total_tries 50
tunable chooseleaf_descend_once 1
tunable chooseleaf_vary_r 1
tunable chooseleaf_stable 1
tunable straw_calc_version 1
tunable allowed_bucket_algs 54

# devices
device 0 osd.0 class hdd
device 1 osd.1 class hdd
device 2 osd.2 class hdd
device 3 osd.3 class hdd
device 4 osd.4 class hdd
device 5 osd.5 class hdd
device 6 osd.6 class hdd
device 7 osd.7 class hdd
device 8 osd.8 class hdd
device 9 osd.9 class hdd
device 10 osd.10 class hdd
device 11 osd.11 class hdd
device 12 osd.12 class hdd
device 13 osd.13 class hdd
device 14 osd.14 class hdd
device 15 osd.15 class hdd
device 16 osd.16 class hdd
device 17 osd.17 class hdd
device 18 osd.18 class hdd
device 19 osd.19 class hdd

# types
type 0 osd
type 1 host
type 2 chassis
type 3 rack
type 4 row
type 5 pdu
type 6 pod
type 7 room
type 8 datacenter
type 9 zone
type 10 region
type 11 root

# buckets
host avmlp-osd-001 {
    id -3        # do not change unnecessarily
    id -4 class hdd        # do not change unnecessarily
    # weight 1.999
    alg straw2
    hash 0    # rjenkins1
    item osd.0 weight 1.999
}
host avmlp-osd-002 {
    id -5        # do not change unnecessarily
    id -6 class hdd        # do not change unnecessarily
    # weight 1.999
    alg straw2
    hash 0    # rjenkins1
    item osd.1 weight 1.999
}
host avmlp-osd-003 {
    id -7        # do not change unnecessarily
    id -8 class hdd        # do not change unnecessarily
    # weight 1.999
    alg straw2
    hash 0    # rjenkins1
    item osd.2 weight 1.999
}
host avmlp-osd-004 {
    id -9        # do not change unnecessarily
    id -10 class hdd        # do not change unnecessarily
    # weight 1.999
    alg straw2
    hash 0    # rjenkins1
    item osd.3 weight 1.999
}
host avmlp-osd-005 {
    id -11        # do not change unnecessarily
    id -12 class hdd        # do not change unnecessarily
    # weight 1.999
    alg straw2
    hash 0    # rjenkins1
    item osd.4 weight 1.999
}
host avmlp-osd-006 {
    id -13        # do not change unnecessarily
    id -14 class hdd        # do not change unnecessarily
    # weight 1.999
    alg straw2
    hash 0    # rjenkins1
    item osd.5 weight 1.999
}
host avmlp-osd-007 {
    id -15        # do not change unnecessarily
    id -16 class hdd        # do not change unnecessarily
    # weight 1.999
    alg straw2
    hash 0    # rjenkins1
    item osd.6 weight 1.999
}
host avmlp-osd-008 {
    id -17        # do not change unnecessarily
    id -18 class hdd        # do not change unnecessarily
    # weight 1.999
    alg straw2
    hash 0    # rjenkins1
    item osd.7 weight 1.999
}
host avmlp-osd-009 {
    id -19        # do not change unnecessarily
    id -20 class hdd        # do not change unnecessarily
    # weight 1.999
    alg straw2
    hash 0    # rjenkins1
    item osd.8 weight 1.999
}
host avmlp-osd-010 {
    id -21        # do not change unnecessarily
    id -22 class hdd        # do not change unnecessarily
    # weight 1.999
    alg straw2
    hash 0    # rjenkins1
    item osd.9 weight 1.999
}
host avmlp-osd-011 {
    id -23        # do not change unnecessarily
    id -24 class hdd        # do not change unnecessarily
    # weight 1.999
    alg straw2
    hash 0    # rjenkins1
    item osd.10 weight 1.999
}
host avmlp-osd-012 {
    id -25        # do not change unnecessarily
    id -26 class hdd        # do not change unnecessarily
    # weight 1.999
    alg straw2
    hash 0    # rjenkins1
    item osd.11 weight 1.999
}
host avmlp-osd-013 {
    id -27        # do not change unnecessarily
    id -28 class hdd        # do not change unnecessarily
    # weight 1.999
    alg straw2
    hash 0    # rjenkins1
    item osd.12 weight 1.999
}
host avmlp-osd-014 {
    id -29        # do not change unnecessarily
    id -30 class hdd        # do not change unnecessarily
    # weight 1.999
    alg straw2
    hash 0    # rjenkins1
    item osd.13 weight 1.999
}
host avmlp-osd-015 {
    id -31        # do not change unnecessarily
    id -32 class hdd        # do not change unnecessarily
    # weight 1.999
    alg straw2
    hash 0    # rjenkins1
    item osd.14 weight 1.999
}
host avmlp-osd-016 {
    id -33        # do not change unnecessarily
    id -34 class hdd        # do not change unnecessarily
    # weight 1.999
    alg straw2
    hash 0    # rjenkins1
    item osd.15 weight 1.999
}
host avmlp-osd-017 {
    id -35        # do not change unnecessarily
    id -36 class hdd        # do not change unnecessarily
    # weight 1.999
    alg straw2
    hash 0    # rjenkins1
    item osd.16 weight 1.999
}
host avmlp-osd-018 {
    id -37        # do not change unnecessarily
    id -38 class hdd        # do not change unnecessarily
    # weight 1.999
    alg straw2
    hash 0    # rjenkins1
    item osd.17 weight 1.999
}
host avmlp-osd-019 {
    id -39        # do not change unnecessarily
    id -40 class hdd        # do not change unnecessarily
    # weight 1.999
    alg straw2
    hash 0    # rjenkins1
    item osd.18 weight 1.999
}
host avmlp-osd-020 {
    id -41        # do not change unnecessarily
    id -42 class hdd        # do not change unnecessarily
    # weight 1.999
    alg straw2
    hash 0    # rjenkins1
    item osd.19 weight 1.999
}
rack nuciberterminal {
    # weight 19.990
    alg straw2
    hash 0    # rjenkins1
    item avmlp-osd-001 weight 1.999
    item avmlp-osd-003 weight 1.999
    item avmlp-osd-005 weight 1.999
    item avmlp-osd-007 weight 1.999
    item avmlp-osd-009 weight 1.999
    item avmlp-osd-011 weight 1.999
    item avmlp-osd-013 weight 1.999
    item avmlp-osd-015 weight 1.999
    item avmlp-osd-017 weight 1.999
    item avmlp-osd-019 weight 1.999
}
rack nuciberterminal2 {
    # weight 19.990
    alg straw2
    hash 0    # rjenkins1
    item avmlp-osd-002 weight 1.999
    item avmlp-osd-004 weight 1.999
    item avmlp-osd-006 weight 1.999
    item avmlp-osd-008 weight 1.999
    item avmlp-osd-010 weight 1.999
    item avmlp-osd-012 weight 1.999
    item avmlp-osd-014 weight 1.999
    item avmlp-osd-016 weight 1.999
    item avmlp-osd-018 weight 1.999
    item avmlp-osd-020 weight 1.999
}
datacenter mediapro {
    # weight 0.000
    alg straw2
    hash 0    # rjenkins1
    item nuciberterminal2 weight 19.990
}
datacenter itconic {
    # weight 0.000
    alg straw2
    hash 0    # rjenkins1
    item nuciberterminal weight 19.990
}
root default {
    id -1        # do not change unnecessarily
    id -2 class hdd        # do not change unnecessarily
    # weight 39.980
    alg straw2
    hash 0    # rjenkins1
    item mediapro weight 19.990
    item itconic weight 19.990
}

# rules
rule replicated_rule {
    id 0
    type replicated
    min_size 2
    max_size 10
    step take default
    step chooseleaf firstn 0 type datacenter
    step emit
}
rule ciberterminalRule {
    id 1
    type replicated
    min_size 2
    max_size 10
    # begin iterating in the "root" of the crush tree
    step take default
    step choose firstn 4 type datacenter
    step chooseleaf firstn 2 type host
    step emit
}
# end crush map
crushtool -c crushmap.txt -o crushmap_new.bin
crushtool --test -i crushmap_new.bin --show-utilization --rule 1 --num-rep=4
crushtool --test -i crushmap_new.bin --show-choose-tries --rule 1 --num-rep=4
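The recompiled map still has to be injected into the cluster; these notes jump straight to the resulting tree below, so the step was presumably the standard one (shown as a sketch):

# load the recompiled CRUSH map into the cluster; the tree output below reflects the applied map
ceph osd setcrushmap -i crushmap_new.bin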
avmlp-osm-001 /home/ceph/crush/mod_20190730 # ceph osd crush tree
ID  CLASS WEIGHT   TYPE NAME
 -1       39.97998 root default
-46       19.98999     datacenter itconic
-43       19.98999         rack nuciberterminal
 -3        1.99899             host avmlp-osd-001
  0   hdd  1.99899                 osd.0
 -7        1.99899             host avmlp-osd-003
  2   hdd  1.99899                 osd.2
-11        1.99899             host avmlp-osd-005
  4   hdd  1.99899                 osd.4
-15        1.99899             host avmlp-osd-007
  6   hdd  1.99899                 osd.6
-19        1.99899             host avmlp-osd-009
  8   hdd  1.99899                 osd.8
-23        1.99899             host avmlp-osd-011
 10   hdd  1.99899                 osd.10
-27        1.99899             host avmlp-osd-013
 12   hdd  1.99899                 osd.12
-31        1.99899             host avmlp-osd-015
 14   hdd  1.99899                 osd.14
-35        1.99899             host avmlp-osd-017
 16   hdd  1.99899                 osd.16
-39        1.99899             host avmlp-osd-019
 18   hdd  1.99899                 osd.18
-45       19.98999     datacenter mediapro
-44       19.98999         rack nuciberterminal2
 -5        1.99899             host avmlp-osd-002
  1   hdd  1.99899                 osd.1
 -9        1.99899             host avmlp-osd-004
  3   hdd  1.99899                 osd.3
-13        1.99899             host avmlp-osd-006
  5   hdd  1.99899                 osd.5
-17        1.99899             host avmlp-osd-008
  7   hdd  1.99899                 osd.7
-21        1.99899             host avmlp-osd-010
  9   hdd  1.99899                 osd.9
-25        1.99899             host avmlp-osd-012
 11   hdd  1.99899                 osd.11
-29        1.99899             host avmlp-osd-014
 13   hdd  1.99899                 osd.13
-33        1.99899             host avmlp-osd-016
 15   hdd  1.99899                 osd.15
-37        1.99899             host avmlp-osd-018
 17   hdd  1.99899                 osd.17
-41        1.99899             host avmlp-osd-020
 19   hdd  1.99899                 osd.19
recovery tuning
ceph tell 'osd.*' config set osd_recovery_max_single_start 1000
ceph tell 'osd.*' config set osd_max_backfills 100
ceph tell 'osd.*' config set osd_recovery_max_active 300
ceph tell 'osd.*' config set osd_backfill_scan_max 51200
ceph tell 'osd.*' config set osd_backfill_scan_min 6400
ceph tell 'osd.*' config set osd_recovery_sleep_hdd 0
radosgw
ceph-deploy rgw create ${GWSERVERS}
check:
avmlp-osm-001 /home/ceph # ceph -s cluster: id: a3a799ce-f1d3-4230-a915-06e988fee767 health: HEALTH_WARN Degraded data redundancy: 188/564 objects degraded (33.333%), 18 pgs degraded too few PGs per OSD (3 < min 30) services: mon: 6 daemons, quorum avmlp-osm-001,avmlp-osm-002,avmlp-osm-003,avmlp-osm-004,avmlp-osm-005,avmlp-osm-006 (age 3h) mgr: avmlp-osm-002.ciberterminal.net(active, since 4h), standbys: avmlp-osm-004.ciberterminal.net, avmlp-osm-003.ciberterminal.net, avmlp-osm-001.ciberterminal.net, avmlp-osm-005.ciberterminal.net, avmlp-osm-006.ciberterminal.net osd: 20 osds: 20 up (since 4h), 20 in (since 4h) rgw: 4 daemons active (avmlp-osgw-001.ciberterminal.net, avmlp-osgw-002.ciberterminal.net, avmlp-osgw-003.ciberterminal.net, avmlp-osgw-004.ciberterminal.net) data: pools: 4 pools, 32 pgs objects: 188 objects, 2.0 KiB usage: 20 GiB used, 40 TiB / 40 TiB avail pgs: 188/564 objects degraded (33.333%) 18 active+undersized+degraded 14 active+undersized io: client: 60 KiB/s rd, 0 B/s wr, 68 op/s rd, 45 op/s wr avmlp-osm-001 /home/ceph # ceph osd pool ls detail pool 1 '.rgw.root' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 8 pgp_num 8 autoscale_mode warn last_change 85 flags hashpspool stripe_width 0 application rgw pool 2 'default.rgw.control' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 8 pgp_num 8 autoscale_mode warn last_change 87 flags hashpspool stripe_width 0 application rgw pool 3 'default.rgw.meta' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 8 pgp_num 8 autoscale_mode warn last_change 90 flags hashpspool stripe_width 0 application rgw pool 4 'default.rgw.log' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 8 pgp_num 8 autoscale_mode warn last_change 91 flags hashpspool stripe_width 0 application rgw
Special configs:
cat >> ceph.conf <<EOF
##########################################
# HYPERCEPH gateways config
##########################################
[client.rgw.avmlp-osgw-001.ciberterminal.net]
rgw_frontends = civetweb port=80 num_threads=512
rgw_num_rados_handles = 8
rgw_thread_pool_size = 512
rgw_cache_enabled = true
rgw s3 auth aws4 force boto2 compat = false
rgw resolve cname = true
rgw dns name = clover.ciberterminal.net

[client.rgw.avmlp-osgw-002.ciberterminal.net]
rgw_frontends = civetweb port=80 num_threads=512
rgw_num_rados_handles = 8
rgw_thread_pool_size = 512
rgw_cache_enabled = true
rgw s3 auth aws4 force boto2 compat = false
rgw resolve cname = true
rgw dns name = clover.ciberterminal.net

[client.rgw.avmlp-osgw-003.ciberterminal.net]
rgw_frontends = civetweb port=80 num_threads=512
rgw_num_rados_handles = 8
rgw_thread_pool_size = 512
rgw_cache_enabled = true
rgw s3 auth aws4 force boto2 compat = false
rgw resolve cname = true
rgw dns name = clover.ciberterminal.net

[client.rgw.avmlp-osgw-004.ciberterminal.net]
rgw_frontends = civetweb port=80 num_threads=512
rgw_num_rados_handles = 8
rgw_thread_pool_size = 512
rgw_cache_enabled = true
rgw s3 auth aws4 force boto2 compat = false
rgw resolve cname = true
rgw dns name = clover.ciberterminal.net
EOF

ceph-deploy --overwrite-conf config push ${ALLSERVERS}

# deploy admin
ceph-deploy admin ${GWSERVERS}
Restart gateways:
salt "avmlp-osgw-*" service.restart ceph-radosgw.target
salt "avmlp-osgw-*" cmd.run "netstat -nap|egrep radosgw|egrep LISTEN"
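Optional sanity check of one gateway (a sketch, assuming civetweb is listening on port 80 as configured above and that the hostname resolves; an anonymous GET should return an HTTP 200 with an empty bucket listing):

curl -sv http://avmlp-osgw-001.ciberterminal.net/ 2>&1 | egrep "HTTP/1.1|ListAllMyBucketsResult"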
Increase the number of PGs per OSD:
# check
ceph tell 'osd.*' config get mon_max_pg_per_osd
# change
ceph tell 'osd.*' config set mon_max_pg_per_osd ${NEW_PG_PER_OSD_NUMBER}
# check again
ceph tell 'osd.*' config get mon_max_pg_per_osd
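Note that ceph tell only changes the running daemons; to make the value survive restarts it can also be stored in the cluster configuration database. A sketch, reusing the same variable as above:

# persist the setting so it survives daemon restarts
ceph config set global mon_max_pg_per_osd ${NEW_PG_PER_OSD_NUMBER}
ceph config get mon mon_max_pg_per_osd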
Change pool “size”:
for i in $(ceph osd pool ls) ; do ceph osd pool set ${i} size 4 ; done
for i in $(ceph osd pool ls) ; do ceph osd pool set ${i} min_size 4 ; done
for i in $(ceph osd pool ls) ; do ceph osd pool set ${i} pg_num 40 ; done
for i in $(ceph osd pool ls) ; do ceph osd pool set ${i} pgp_num 40 ; done
for i in $(ceph osd pool ls) ; do ceph osd pool set ${i} pg_autoscale_mode on ; done
for i in $(ceph osd pool ls) ; do ceph osd pool set ${i} crush_rule ciberterminalRule ; done
Create data+index pool:
export POOL_NAME="default.rgw.buckets.data"
ceph osd pool create ${POOL_NAME} 512 512 replicated ciberterminalRule
ceph osd pool set ${POOL_NAME} pg_autoscale_mode on
ceph osd pool application enable ${POOL_NAME} rgw
ceph osd pool set ${POOL_NAME} compression_algorithm snappy
ceph osd pool set ${POOL_NAME} compression_mode aggressive
ceph osd pool set ${POOL_NAME} compression_min_blob_size 10240
ceph osd pool set ${POOL_NAME} compression_max_blob_size 4194304

export POOL_NAME="default.rgw.buckets.index"
ceph osd pool create ${POOL_NAME} 128 128 replicated ciberterminalRule
ceph osd pool set ${POOL_NAME} pg_autoscale_mode on
ceph osd pool application enable ${POOL_NAME} rgw
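A verification sketch for the two new pools (pool names as created above; the compression query may complain on the index pool, which has no compression configured):

for POOL in default.rgw.buckets.data default.rgw.buckets.index ; do
    echo "### ${POOL}"
    ceph osd pool get ${POOL} crush_rule
    ceph osd pool get ${POOL} pg_num
    ceph osd pool get ${POOL} compression_algorithm 2>/dev/null
done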
Check CRUSH Maps
Now that there are pools with PGs, it is time to check that the CRUSH map is correct:
ceph pg dump | egrep "^[0-9]" | awk '{print $17}'| less
avmlp-osm-001 /home/ceph # ceph pg dump | egrep "^[0-9]" | awk '{print $17}'| head
dumped all
[8,6,19,7]
[19,15,18,0]
[17,13,10,8]
[6,0,7,13]
[18,4,13,15]
[0,6,15,11]
[4,8,17,1]
[3,5,6,10]
[11,17,12,10]
[13,15,6,2]
Each PG's acting set must span both datacenters (CPDs), i.e. it must contain a mix of even-numbered and odd-numbered OSDs.
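A sketch to check this automatically (it assumes the odd/even OSD-to-datacenter split shown in the CRUSH map above and the same column 17 used by the command before):

# flag any PG whose acting set does not mix odd and even OSD ids
ceph pg dump 2>/dev/null | egrep "^[0-9]" | awk '{print $1, $17}' | \
while read PG ACTING ; do
    ODD=$(echo ${ACTING} | tr -d '[]' | tr ',' '\n' | awk '$1 % 2 == 1' | wc -l)
    EVEN=$(echo ${ACTING} | tr -d '[]' | tr ',' '\n' | awk '$1 % 2 == 0' | wc -l)
    [ ${ODD} -eq 0 -o ${EVEN} -eq 0 ] && echo "PG ${PG} is NOT spread across both datacenters: ${ACTING}"
done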
Deploy MDSs
for i in ${MDSSERVERS} ; do ssh ${i} "sudo rm -fv /etc/ceph/ceph.conf" ; done
ceph-deploy mds create ${MDSSERVERS}

export POOL_NAME="cephfs_data-ftp"
ceph osd pool create ${POOL_NAME} 256 256 replicated ciberterminalRule
ceph osd pool set ${POOL_NAME} size 4
ceph osd pool set ${POOL_NAME} compression_algorithm snappy
ceph osd pool set ${POOL_NAME} compression_mode aggressive
ceph osd pool set ${POOL_NAME} compression_min_blob_size 10240
ceph osd pool set ${POOL_NAME} compression_max_blob_size 4194304
ceph osd pool set ${POOL_NAME} pg_autoscale_mode on

export POOL_NAME="cephfs_metadata-ftp"
ceph osd pool create ${POOL_NAME} 60 60 replicated ciberterminalRule
ceph osd pool set ${POOL_NAME} size 4
ceph osd pool set ${POOL_NAME} pg_autoscale_mode on

ceph fs new cephfs cephfs_metadata-ftp cephfs_data-ftp
ceph fs ls
ceph -s
ceph mds stat
Create client:
export CLIENTNAME="avmlp-sftp"
ceph auth get-or-create client.cephfs-${CLIENTNAME} \
    mon "allow r" \
    mds "allow r path=/, allow rw path=/ftp" \
    osd "allow class-read object_prefix rbd_children, allow rw pool=cephfs_data-ftp" \
    > /etc/ceph/client.cephfs-${CLIENTNAME}.keyring

# to update the caps of an already existing client:
# ceph auth caps client.cephfs-avmlp-sftp mon "allow r" mds "allow r path=/, allow rw path=/ftp" osd "allow class-read object_prefix rbd_children, allow rw pool=cephfs_data-ftp"
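A minimal sketch of how the client could mount the /ftp subtree with the kernel CephFS client; the mount point, the target host and the monitor used are illustrative, not taken from this deployment:

# extract the client key on an admin node and copy it to the client as a secret file
ceph auth get-key client.cephfs-avmlp-sftp > cephfs-avmlp-sftp.secret
scp cephfs-avmlp-sftp.secret avmlp-sftp:/etc/ceph/

# on the client: mount only the /ftp subtree
sudo mkdir -p /mnt/ftp
sudo mount -t ceph avmlp-osm-001.ciberterminal.net:6789:/ftp /mnt/ftp \
    -o name=cephfs-avmlp-sftp,secretfile=/etc/ceph/cephfs-avmlp-sftp.secret
df -h /mnt/ftp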
radosgw User creation
export THEUSERNAME="dodger"
radosgw-admin user create --uid="${THEUSERNAME}" --display-name="${THEUSERNAME} user"
radosgw-admin subuser create --uid=${THEUSERNAME} --subuser=${THEUSERNAME} --access=full
radosgw-admin key create --subuser=${THEUSERNAME}:swift --key-type=swift --gen-secret
radosgw-admin user modify --uid=${THEUSERNAME} --max-buckets=1
radosgw-admin user modify --uid=${THEUSERNAME} --max-buckets=10
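To double-check the generated S3/Swift keys and the bucket limit:

radosgw-admin user info --uid=${THEUSERNAME}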
monitoring
export THEUSERNAME="centreon"
sudo radosgw-admin user create --uid="${THEUSERNAME}" --display-name="${THEUSERNAME} user"
sudo radosgw-admin user modify --uid=${THEUSERNAME} --max-buckets=1
export QUOTASCOPE="user"
sudo radosgw-admin quota set --quota-scope=${QUOTASCOPE} --uid=${THEUSERNAME} --max-objects=10
Added the MDSs to Centreon:
# odd-numbered osfs hosts -> poller Central (the extra "let x++" skips the even numbers)
for ((x=1; x<5; x++)); do
    HOSTNAME="avmlp-osfs-0$(printf "%02d" ${x})"
    HOSTIP=$(dig ${HOSTNAME}.ciberterminal.net | egrep -v "^;|^$" | awk '{print $5}')
    centreon -u ${CENTUSER} -p ${CENTPASSWORD} -o HOST -a ADD -v "${HOSTNAME};${HOSTNAME}.ciberterminal.net;${HOSTIP};ciberterminal-CEPH-osfs;Central;CEPH-PRO"
    let x++
done

# even-numbered osfs hosts -> poller Central2
for ((x=2; x<5; x++)); do
    HOSTNAME="avmlp-osfs-0$(printf "%02d" ${x})"
    HOSTIP=$(dig ${HOSTNAME}.ciberterminal.net | egrep -v "^;|^$" | awk '{print $5}')
    centreon -u ${CENTUSER} -p ${CENTPASSWORD} -o HOST -a ADD -v "${HOSTNAME};${HOSTNAME}.ciberterminal.net;${HOSTIP};ciberterminal-CEPH-osfs;Central2;CEPH-PRO"
    let x++
done

# apply the host template to all of them
for ((x=1; x<5; x++)); do
    HOSTNAME="avmlp-osfs-0$(printf "%02d" ${x})"
    echo centreon -u ${CENTUSER} -p ${CENTPASSWORD} -o HOST -a applytpl -v "${HOSTNAME}"
done
Dashboard
# only for monitors
export THESERVER="a*-osm-00*.ciberterminal.net"
salt "${THESERVER}" pkg.install ceph-mgr-dashboard
ceph mgr module enable dashboard
ceph dashboard create-self-signed-cert
ceph mgr services
ceph dashboard ac-user-create bofher **** administrator
radosgw-admin user create --uid=cephdashboard --display-name=cephdashboard --system
ceph dashboard set-rgw-api-access-key $(cat access_cephdash)
ceph dashboard set-rgw-api-secret-key $(cat secret_cephdash)
ceph dashboard set-rgw-api-host ceph_endpoint.ciberterminal.net
ceph dashboard set-rgw-api-port 80
ceph mgr module disable dashboard
ceph mgr module enable dashboard
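The access_cephdash / secret_cephdash files read above are not created anywhere in these notes; one possible way to populate them from the system user (a sketch, assuming python is available on the monitor):

radosgw-admin user info --uid=cephdashboard > cephdashboard.json
python -c 'import json,sys; print(json.load(sys.stdin)["keys"][0]["access_key"])' < cephdashboard.json > access_cephdash
python -c 'import json,sys; print(json.load(sys.stdin)["keys"][0]["secret_key"])' < cephdashboard.json > secret_cephdash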
Enable pg_autoscaler
ceph mgr module enable pg_autoscaler
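To confirm the autoscaler is active and see its per-pool recommendations:

ceph osd pool autoscale-status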
Additional modules
# only for monitors
export THESERVER="a*-osm-00*.ciberterminal.net"
for i in ceph-mgr-diskprediction-cloud.noarch ceph-mgr-diskprediction-local.noarch ceph-mgr-rook.noarch ceph-mgr-ssh.noarch ceph-medic ceph-grafana-dashboards.noarch ; do
    salt "${THESERVER}" pkg.install ${i}
done
You may need to perform a hard reset of the systemd service if you see this:
acclm-osm-001 /home/ceph/ceph-deploy # systemctl start ceph-mgr@acclm-osm-001.ciberterminal.net.service
Job for ceph-mgr@acclm-osm-001.ciberterminal.net.service failed because start of the service was attempted too often.
See "systemctl status ceph-mgr@acclm-osm-001.ciberterminal.net.service" and "journalctl -xe" for details.
To force a start use "systemctl reset-failed ceph-mgr@acclm-osm-001.ciberterminal.net.service" followed by "systemctl start ceph-mgr@acclm-osm-001.ciberterminal.net.service" again.
I used this line:
for i in 1 2 3 4 ; do
    salt "acclm-osm-00${i}*" cmd.run "systemctl reset-failed ceph-mgr@acclm-osm-00${i}.ciberterminal.net.service"
    salt "acclm-osm-00${i}*" cmd.run "systemctl start ceph-mgr@acclm-osm-00${i}.ciberterminal.net.service"
done