#!/bin/bash
#
# lib/cephadm
# Functions to control the configuration
# and operation of the **Ceph** storage service
# when deployed using the cephadm tool
#
# ``stack.sh`` calls the entry points in this order:
#
# - pre_install_ceph
# - install_ceph
# - configure_ceph
# - init_ceph
# - cleanup_ceph # unstack || clean
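#
# Illustrative local.conf snippet to exercise this library via the
# devstack-plugin-ceph plugin (the URL and the toggles shown are examples,
# not requirements imposed by this file):
#
#   [[local|localrc]]
#   enable_plugin devstack-plugin-ceph https://opendev.org/openstack/devstack-plugin-ceph
#   ENABLE_CEPH_GLANCE=True
#   ENABLE_CEPH_CINDER=True
#   ENABLE_CEPH_NOVA=True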
# Save trace setting
XTRACE=$(set +o | grep xtrace)
set +o xtrace
# GENERIC CEPHADM INTERNAL OPTIONS, DO NOT EDIT
CEPH_RELEASE=${CEPH_RELEASE:-tentacle}
CEPH_PUB_KEY="/etc/ceph/ceph.pub"
CEPH_CONFIG="/etc/ceph/ceph.conf"
BOOTSTRAP_CONFIG="$HOME/bootstrap_ceph.conf"
CEPH_KEYRING="/etc/ceph/ceph.client.admin.keyring"
TARGET_BIN=/usr/bin
# TOGGLED IN THE CI TO SAVE RESOURCES
DISABLE_CEPHADM_POST_DEPLOY=${DISABLE_CEPHADM_POST_DEPLOY:-False}
# DEFAULT OPTIONS
ATTEMPTS=30
CONTAINER_IMAGE=${CONTAINER_IMAGE:-'quay.io/ceph/ceph:v20'}
DEVICES=()
if [[ "$REMOTE_CEPH" = "False" ]]; then
FSID=$(uuidgen)
else
FSID=$(awk '/fsid/ { print $3 }' "$CEPH_CONFIG")
fi
KEY_EXPORT_DIR="/etc/ceph"
KEYS=("client.openstack") # at least the client.openstack default key should be created
MIN_OSDS=1
SERVICES=()
SLEEP=5
CEPHADM_DEV_OSD=${CEPHADM_DEV_OSD:-"True"}
CEPH_LOOPBACK_DISK_SIZE=${CEPH_LOOPBACK_DISK_SIZE:-30G}
TARGET_DEV_OSD_DIR=${TARGET_DEV_OSD_DIR:-"/opt/stack"}
# POOLS
DEFAULT_PG_NUM=${DEFAULT_PG_NUM:-8}
DEFAULT_PGP_NUM=${DEFAULT_PGP_NUM:-8}
# RGW OPTIONS
RGW_PORT=8080
# CLIENT CONFIG
CEPH_CLIENT_CONFIG=$HOME/ceph_client.conf
CEPH_CONF_DIR=${CEPH_CONF_DIR:-/etc/ceph}
# The resulting client config referenced by the other clients
CEPH_CONF_FILE=${CEPH_CONF_FILE:-$CEPH_CONF_DIR/ceph.conf}
# LOG(s) and EXPORTED CONFIG FILES
EXPORT=$HOME/ceph_export.yml
RBD_CLIENT_LOG=/var/log/ceph/qemu-guest-\$pid.log
MDS_LOG_FILE="$HOME/mds_log.conf"
MDS_LOGS=${MDS_LOGS:-"False"}
# MANILA DEFAULTS
MANILA_CEPH_USER=${MANILA_CEPH_USER:-manila}
# NFS OPTIONS: Only apply when ENABLE_CEPH_MANILA=True
# Whether cephadm should deploy/manage NFS-Ganesha. If set to False,
# we'll deploy a "standalone" NFS-Ganesha instead, not managed by cephadm.
CEPHADM_DEPLOY_NFS=${CEPHADM_DEPLOY_NFS:-True}
# Clustered NFS Options
FSNAME=${FSNAME:-'cephfs'}
NFS_PORT=2049
CEPHFS_CLIENT=0
CEPHFS_CLIENT_NAME="client.$MANILA_CEPH_USER"
CEPHFS_CLIENT_LOG="/var/log/ceph-$CEPHFS_CLIENT_NAME.log"
CEPHFS_MULTIPLE_FILESYSTEMS=${CEPHFS_MULTIPLE_FILESYSTEMS:-False}
ENABLE_INGRESS=${ENABLE_INGRESS:-True}
VIP=${CEPH_INGRESS_IP:-$HOST_IP}
# GLANCE DEFAULTS
GLANCE_CEPH_USER=${GLANCE_CEPH_USER:-glance}
GLANCE_CEPH_POOL=${GLANCE_CEPH_POOL:-images}
GLANCE_RGW_BACKEND=${GLANCE_RGW_BACKEND:-False}
# Cinder DEFAULTS
CINDER_CEPH_POOL=${CINDER_CEPH_POOL:-volumes}
CINDER_CEPH_USER=${CINDER_CEPH_USER:-cinder}
CINDER_CEPH_UUID=${CINDER_CEPH_UUID:-$(uuidgen)}
# Enables new features such as Clone v2 API, which allows proper handling of
# deleting snapshots with child clone images.
CEPH_MIN_CLIENT_VERSION=${CEPH_MIN_CLIENT_VERSION:-mimic}
# Cinder Backup DEFAULTS
CINDER_BAK_CEPH_POOL=${CINDER_BAK_CEPH_POOL:-backups}
CINDER_BAK_CEPH_USER=${CINDER_BAK_CEPH_USER:-cinder-bak}
# Nova DEFAULTS
NOVA_CEPH_POOL=${NOVA_CEPH_POOL:-vms}
[ -z "$SUDO" ] && SUDO=sudo
## Admin
# Admin: enable debug mode
function set_debug {
if [ "$DEBUG" -eq 1 ]; then
echo "[CEPHADM] Enabling Debug mode"
$SUDO "$CEPHADM" shell --fsid $FSID --config $CEPH_CONFIG \
--keyring $CEPH_KEYRING -- ceph config set mgr mgr/cephadm/log_to_cluster_level debug
echo "[CEPHADM] See debug logs running: ceph -W cephadm --watch-debug"
fi
}
function enable_verbose_mds_logging {
$SUDO "$CEPHADM" shell --fsid $FSID --config $CEPH_CONFIG \
--keyring $CEPH_KEYRING -- ceph config set mds debug_mds 20
$SUDO "$CEPHADM" shell --fsid $FSID --config $CEPH_CONFIG \
--keyring $CEPH_KEYRING -- ceph config set mds debug_ms 20
$SUDO "$CEPHADM" shell --fsid $FSID --config $CEPH_CONFIG \
--keyring $CEPH_KEYRING -- ceph config set mds debug_client 20
$SUDO "$CEPHADM" shell --fsid $FSID --config $CEPH_CONFIG \
--keyring $CEPH_KEYRING -- ceph config set mds log_to_file true
$SUDO "$CEPHADM" shell --fsid $FSID --config $CEPH_CONFIG \
--keyring $CEPH_KEYRING -- ceph config set global mon_cluster_log_to_file true
$SUDO "$CEPHADM" shell --fsid $FSID --config $CEPH_CONFIG \
--keyring $CEPH_KEYRING -- ceph config set global log_to_file true
touch "$MDS_LOG_FILE"
cat <<EOF > "$MDS_LOG_FILE"
LOG {
COMPONENTS {
ALL = FULL_DEBUG;
}
}
EOF
$SUDO "$CEPHADM" shell --fsid $FSID --config $CEPH_CONFIG \
--keyring $CEPH_KEYRING -m $MDS_LOG_FILE -- ceph nfs cluster config set "$FSNAME" \
-i /mnt/mds_log.conf
}
# Admin: check ceph cluster status
function check_cluster_status {
$SUDO "$CEPHADM" shell --fsid $FSID --config $CEPH_CONFIG \
--keyring $CEPH_KEYRING -- ceph -s -f json-pretty
}
# Admin: export ceph cluster config spec
function export_spec {
$SUDO "$CEPHADM" shell --fsid $FSID --config $CEPH_CONFIG \
--keyring $CEPH_KEYRING -- ceph orch ls --export > "$EXPORT"
echo "Ceph cluster config exported: $EXPORT"
}
# Pre-install ceph: install required dependencies
function install_deps {
if [[ "$REMOTE_CEPH" = "False" ]]; then
install_package python3-cephfs python3-prettytable python3-rados python3-rbd python3-requests
fi
}
# Pre-install ceph: get cephadm binary
function get_cephadm {
# NOTE(gouthamr): cephadm binary here is a python executable, and the
# $os_PACKAGE ("rpm") doesn't really matter. There is no ubuntu/debian
# equivalent being published by the ceph community.
os_release="el9"
ceph_version=$(_get_ceph_version)
case $CEPH_RELEASE in
pacific|octopus)
os_release="el8";;
esac
curl -f -O https://download.ceph.com/rpm-${ceph_version}/${os_release}/noarch/cephadm
$SUDO mv cephadm $TARGET_BIN
$SUDO chmod +x $TARGET_BIN/cephadm
echo "[GET CEPHADM] cephadm is now available"
if [ -z "$CEPHADM" ]; then
CEPHADM=${TARGET_BIN}/cephadm
fi
}
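# For example, with the default "tentacle" release and a container image
# reporting version 20.2.0 (version number illustrative), the curl above
# fetches:
#   https://download.ceph.com/rpm-20.2.0/el9/noarch/cephadm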
# Pre-install ceph: bootstrap config
function bootstrap_config {
cat <<EOF > "$BOOTSTRAP_CONFIG"
[global]
log to file = true
osd crush chooseleaf type = 0
osd_pool_default_size = 1
[mon]
mon_warn_on_pool_no_redundancy = False
[osd]
osd_memory_target_autotune = true
osd_numa_auto_affinity = true
[mgr]
mgr/cephadm/autotune_memory_target_ratio = 0.2
EOF
}
## Install
# Install ceph: run cephadm bootstrap
function start_ceph {
cluster=$(sudo cephadm ls | jq '.[]' | jq 'select(.name | test("^mon")).fsid')
if [ -z "$cluster" ]; then
$SUDO "$CEPHADM" --image "$CONTAINER_IMAGE" \
bootstrap \
--fsid $FSID \
--config "$BOOTSTRAP_CONFIG" \
--output-config $CEPH_CONFIG \
--output-keyring $CEPH_KEYRING \
--output-pub-ssh-key $CEPH_PUB_KEY \
--allow-overwrite \
--allow-fqdn-hostname \
--skip-monitoring-stack \
--skip-dashboard \
--single-host-defaults \
--skip-firewalld \
--skip-mon-network \
--mon-ip "$HOST_IP"
test -e $CEPH_CONFIG
test -e $CEPH_KEYRING
if [ "$CEPHADM_DEV_OSD" == 'True' ]; then
create_osd_dev
fi
# Wait for the cephadm backend to be operational,
# then add OSDs via drivegroups
sleep "$SLEEP"
add_osds
fi
}
# Install ceph: create a loopback device to be used as osd
function create_osd_dev {
sudo dd if=/dev/zero of=$TARGET_DEV_OSD_DIR/ceph-osd.img bs=1 count=0 seek="$CEPH_LOOPBACK_DISK_SIZE"
osd_dev=$(sudo losetup -f --show $TARGET_DEV_OSD_DIR/ceph-osd.img)
sudo pvcreate $osd_dev
sudo vgcreate ceph_vg $osd_dev
sudo lvcreate -n ceph_lv_data -l +100%FREE ceph_vg
DEVICES+=("/dev/ceph_vg/ceph_lv_data")
}
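# The resulting stack can be inspected by hand, e.g. (illustrative commands,
# not executed by this library):
#   sudo losetup -j $TARGET_DEV_OSD_DIR/ceph-osd.img  # backing loop device
#   sudo lvs ceph_vg                                  # shows ceph_lv_data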
# cleanup ceph: delete the osd file and release the loopback device
function delete_osd_dev {
if [ -n "$(sudo lvs --noheadings -o lv_path -S lv_name=ceph_lv_data)" ]; then
sudo lvremove --force /dev/ceph_vg/ceph_lv_data
sudo vgremove --force ceph_vg
osd_dev=$(sudo losetup -j $TARGET_DEV_OSD_DIR/ceph-osd.img -l -n -O NAME)
sudo pvremove --force $osd_dev
sudo losetup -d $osd_dev
sudo rm -f $TARGET_DEV_OSD_DIR/ceph-osd.img
sudo partprobe
DEVICES=()
fi
}
# Install ceph: add osds
function add_osds {
# let's add some osds
if [ -z "$DEVICES" ]; then
echo "Using ALL available devices"
$SUDO "$CEPHADM" shell ceph orch apply osd --all-available-devices
else
for item in "${DEVICES[@]}"; do
echo "Creating osd $item on node $HOSTNAME"
$SUDO "$CEPHADM" shell --fsid $FSID --config $CEPH_CONFIG \
--keyring $CEPH_KEYRING -- ceph orch daemon add osd "$HOSTNAME:$item"
done
fi
while [ "$ATTEMPTS" -ne 0 ]; do
num_osds=$($SUDO "$CEPHADM" shell --fsid $FSID --config $CEPH_CONFIG \
--keyring $CEPH_KEYRING -- ceph -s -f json | jq '.osdmap | .num_up_osds')
if [ "$num_osds" -ge "$MIN_OSDS" ]; then
break;
fi
ATTEMPTS=$(("$ATTEMPTS" - 1))
sleep 1
done
echo "[CEPHADM] OSD(s) deployed: $num_osds"
# [ "$num_osds" -lt "$MIN_OSDS" ] && exit 255
}
# Install ceph: create and enable pools
function add_pools {
[ "${#POOLS[@]}" -eq 0 ] && return;
for pool in "${POOLS[@]}"; do
$SUDO "$CEPHADM" shell --fsid $FSID --config $CEPH_CONFIG \
--keyring $CEPH_KEYRING -- ceph osd pool create "$pool" "$DEFAULT_PG_NUM" \
"$DEFAULT_PGP_NUM" replicated --autoscale-mode on
# set the application to the pool (which also means rbd init the pool)
$SUDO "$CEPHADM" shell --fsid $FSID --config $CEPH_CONFIG \
--keyring $CEPH_KEYRING -- ceph osd pool application enable "$pool" rbd
done
}
# Utility: build caps according to the generated pools
function build_caps {
local CAPS=""
for pool in "${POOLS[@]}"; do
caps="allow rwx pool="$pool
CAPS+=$caps,
done
echo "${CAPS::-1}"
}
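# Worked example: with POOLS=(volumes images), build_caps echoes
#   allow rwx pool=volumes,allow rwx pool=images
# (${CAPS::-1} trims the trailing comma).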
# Install ceph: create a keyring
function _create_key {
local name=$1
local caps
local osd_caps
if [ "${#POOLS[@]}" -eq 0 ]; then
osd_caps="allow *"
else
caps=$(build_caps)
osd_caps="allow class-read object_prefix rbd_children, $caps"
fi
$SUDO "$CEPHADM" shell -v "$KEY_EXPORT_DIR:$KEY_EXPORT_DIR" --fsid $FSID --config $CEPH_CONFIG \
--keyring $CEPH_KEYRING -- ceph auth get-or-create "$name" mgr "allow rw" mon "allow r" osd "$osd_caps" \
-o "$KEY_EXPORT_DIR/ceph.$name.keyring"
$SUDO chown ${STACK_USER}:$(id -g -n $(whoami)) \
${CEPH_CONF_DIR}/ceph.$name.keyring
}
# Install ceph: create one or more keyrings
function create_keys {
for key_name in "${KEYS[@]}"; do
echo "Creating key $key_name"
_create_key "$key_name"
done
}
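# With the defaults above (KEYS=("client.openstack") and
# KEY_EXPORT_DIR=/etc/ceph), this exports
# /etc/ceph/ceph.client.openstack.keyring for the OpenStack services to use.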
# Install ceph: add MDS
function cephfs_config {
# Two pools are generated by this action
# - cephfs.$FSNAME.data
# - cephfs.$FSNAME.meta
# and the mds daemon is deployed
$SUDO "$CEPHADM" shell --fsid $FSID --config $CEPH_CONFIG \
--keyring $CEPH_KEYRING -- ceph fs volume create "$FSNAME"
}
# Get Ceph version
function _get_ceph_version {
local ceph_version_str
ceph_version_str=$(sudo podman run --rm --entrypoint ceph $CONTAINER_IMAGE \
--version | awk '{ print $3 }')
echo $ceph_version_str
}
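# Example: `ceph --version` prints a line like (version string illustrative)
#   ceph version 20.2.0 (0123abcd...) tentacle (stable)
# so the awk above echoes "20.2.0".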
function _install_and_configure_clustered_nfs {
local ceph_version
ceph_version=$(_get_ceph_version)
echo "[CEPHADM] Deploy nfs.$FSNAME backend"
if [[ "${ceph_version%%\.*}" -ge 18 && $ENABLE_INGRESS == "True" ]]; then
$SUDO "$CEPHADM" shell --fsid $FSID --config $CEPH_CONFIG \
--keyring $CEPH_KEYRING -- ceph nfs cluster create \
"$FSNAME" "$HOSTNAME" --port $NFS_PORT --ingress \
--ingress-mode haproxy-protocol --virtual_ip $VIP
else
echo "[CEPHADM] Ingress service is not deployed \
to preserve the ability to apply client restrictions."
$SUDO "$CEPHADM" shell --fsid $FSID --config $CEPH_CONFIG \
--keyring $CEPH_KEYRING -- ceph nfs cluster create \
"$FSNAME" "$HOSTNAME" --port $NFS_PORT
fi
}
function _install_and_configure_standalone_nfs {
source $CEPH_PLUGIN_DIR/lib/nfs-ganesha
install_nfs_ganesha
configure_nfs_ganesha
start_nfs_ganesha
}
# Install ceph: add NFS
function ceph_nfs_config {
if [[ "$CEPHADM_DEPLOY_NFS" == "True" ]]; then
_install_and_configure_clustered_nfs
else
_install_and_configure_standalone_nfs
fi
}
function _create_swift_endpoint {
local swift_service
swift_service=$(get_or_create_service "swift" "object-store" "Swift Service")
local swift_endpoint
swift_endpoint="$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:${CEPH_RGW_PORT}/swift/v1"
get_or_create_endpoint $swift_service \
"$REGION_NAME" $swift_endpoint $swift_endpoint $swift_endpoint
}
# RGW pre config
function configure_ceph_embedded_rgw {
# keystone endpoint for radosgw
_create_swift_endpoint
# Create radosgw service user with admin privileges
create_service_user "radosgw" "admin"
if [ "$CEPH_RGW_KEYSTONE_SSL" = "True" ]; then
# radosgw needs to access keystone's revocation list
sudo mkdir -p ${dest}/nss
sudo openssl x509 -in /etc/keystone/ssl/certs/ca.pem -pubkey | \
sudo certutil -d ${dest}/nss -A -n ca -t "TCu,Cu,Tuw"
sudo openssl x509 -in /etc/keystone/ssl/certs/signing_cert.pem -pubkey | \
sudo certutil -A -d ${dest}/nss -n signing_cert -t "P,P,P"
fi
}
# General Ceph utility to set config options within the monitor's config database
function set_config_key {
local section=$1
local key=$2
local value=$3
$SUDO "$CEPHADM" shell --fsid $FSID --config $CEPH_CONFIG \
ceph config set ${section} ${key} ${value}
}
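# Usage example, mirroring the loop below:
#   set_config_key global rgw_trust_forwarded_https true
# which runs: ceph config set global rgw_trust_forwarded_https true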
# RGW config keys: no iniset anymore, everything is pushed as mgr key/value
function configure_rgw_ceph_section {
# RGW KEYSTONE KEYS
declare -A RGW_CONFIG_KEYS
RGW_CONFIG_KEYS=(['rgw_keystone_api_version']=3
['rgw_keystone_url']="$KEYSTONE_SERVICE_URI"
['rgw_keystone_accepted_roles']="member, _member_, Member, admin"
['rgw_keystone_accepted_admin_roles']="ResellerAdmin"
['rgw_keystone_admin_domain']="$SERVICE_DOMAIN_NAME"
['rgw_keystone_admin_project']="$SERVICE_PROJECT_NAME"
['rgw_keystone_admin_user']="radosgw"
['rgw_s3_auth_use_keystone']="true"
['rgw_keystone_admin_password']="$SERVICE_PASSWORD"
['rgw_keystone_verify_ssl']="false"
['rgw_keystone_implicit_tenants']="true"
['rgw_swift_versioning_enabled']="true"
['rgw_swift_enforce_content_length']="true"
['rgw_swift_account_in_url']="true"
['rgw_trust_forwarded_https']="true"
['rgw_max_attr_name_len']=128
['rgw_max_attrs_num_in_req']=90
['rgw_max_attr_size']=256
)
for k in "${!RGW_CONFIG_KEYS[@]}"; do
set_config_key "global" "$k" "${RGW_CONFIG_KEYS[$k]}"
done
}
# Deploy rbd-mirror on the current site
#
# Note:
#
# One-way Replication
# - When data is mirrored from a primary cluster to a secondary cluster,
# the rbd-mirror daemon runs only on the secondary cluster.
#
# Two-way Replication
# - When data is mirrored from a primary cluster to a secondary cluster,
# but data can be promoted/demoted (switching the role of primary and
# secondary cluster), the rbd-mirror daemon runs on both clusters.
function rbd_mirror {
$SUDO "$CEPHADM" shell --fsid "$FSID" --config "$CEPH_CONFIG" \
--keyring $CEPH_KEYRING -- ceph orch apply rbd-mirror \
"--placement=$HOSTNAME count:1"
}
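# Note: this only deploys the daemon. Enabling mirroring on a given pool is a
# separate step, e.g. (illustrative, not performed by this function):
#   rbd mirror pool enable $CINDER_CEPH_POOL image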
# Install ceph: add RGW
function rgw {
configure_ceph_embedded_rgw
$SUDO "$CEPHADM" shell --fsid $FSID --config $CEPH_CONFIG \
--keyring $CEPH_KEYRING -- ceph orch apply rgw default default default default \
"--placement=$HOSTNAME count:1" --port "$RGW_PORT"
configure_rgw_ceph_section
}
# TODO: (fpantano) Remove this hack
function start_ceph_embedded_rgw {
# noop
:
}
# Configure cephfs and ceph_nfs
function configure_ceph_manila {
# Deploy mds and configure cephfs
cephfs_config
# Deploy and configure ganesha
[ "$MANILA_CEPH_DRIVER" == 'cephfsnfs' ] && ceph_nfs_config
# Add manila keys to the list
KEYS+=("client.$MANILA_CEPH_USER")
}
# Install ceph: services deployment
function enable_services {
for item in "${SERVICES[@]}"; do
case "$item" in
cephfs|CEPHFS)
echo "[CEPHADM] Config cephfs volume on node $HOSTNAME"
cephfs_config
CEPHFS_CLIENT=1
;;
nfs|NFS)
echo "[CEPHADM] Deploying NFS on node $HOSTNAME"
ceph_nfs_config
CEPHFS_CLIENT=1
;;
rgw|RGW)
echo "[CEPHADM] Deploying RGW on node $HOSTNAME"
rgw
;;
rbdmirror|RBDMIRROR)
echo "[CEPHADM] Deploying RBD-MIRROR on node $HOSTNAME"
rbd_mirror
;;
esac
done
}
# Install ceph: client config
function client_config {
echo "Dump the minimal ceph.conf"
cp $CEPH_CONFIG "$CEPH_CLIENT_CONFIG"
cat >> "$CEPH_CLIENT_CONFIG" <<-EOF
[client.libvirt]
admin socket = /var/run/ceph/\$cluster-\$type.\$id.\$pid.\$cctid.asok
log file = $RBD_CLIENT_LOG
EOF
if [ "$CEPHFS_CLIENT" -eq 1 ]; then
cat >> "$CEPH_CLIENT_CONFIG" <<-EOF
[$CEPHFS_CLIENT_NAME]
client mount uid = 0
client mount gid = 0
log file = $CEPHFS_CLIENT_LOG
admin socket = /var/run/ceph/\$cluster-\$type.\$id.\$pid.\$cctid.asok
keyring = $KEY_EXPORT_DIR/ceph.$CEPHFS_CLIENT_NAME.keyring
EOF
echo "Client config exported: $CEPH_CLIENT_CONFIG"
fi
# Nova resolves the keyring using the pattern $cluster.conf
# For this reason we need to override the content of the
# generated (minimal) ceph.conf with the client part.
$SUDO cp $CEPH_CLIENT_CONFIG $CEPH_CONF_FILE
}
## Remove ceph
# Remove ceph: remove cluster and zap osds
function stop_ceph {
local cluster_deleted
local timeout
if ! [ -x "$CEPHADM" ]; then
get_cephadm
CEPHADM=${TARGET_BIN}/cephadm
fi
cluster_deleted=0
timeout=3
while : ; do
CLUSTER_FSID=$(sudo cephadm ls | jq '.[]' | jq 'select(.name | test("^mon")).fsid' | tr -d \")
if [[ -n "$CLUSTER_FSID" ]]; then
sudo cephadm rm-cluster --zap-osds --fsid $CLUSTER_FSID --force
else
cluster_deleted=1
echo "[CEPHADM] Cluster deleted"
fi
(( timeout-- ))
[[ "$cluster_deleted" -eq 0 && "$timeout" -eq 0 ]] && \
echo "[CEPHADM] Error deleting the cluster" && exit 255
[[ "$cluster_deleted" -eq 1 || "$timeout" -eq 0 ]] && break
done
}
## devstack-plugin-ceph functions
function pre_install_ceph {
# Check dependencies for the service.
install_deps
}
function install_ceph {
# Install the service.
bootstrap_config
get_cephadm
start_ceph
}
function config_glance {
if [[ "$GLANCE_RGW_BACKEND" = "True" && "$ENABLE_CEPH_RGW" = "True" ]]; then
# common glance accounts for swift
create_service_user "glance-swift" "ResellerAdmin"
iniset $GLANCE_SWIFT_STORE_CONF ref1 user $SERVICE_PROJECT_NAME:glance-swift
AUTH_URL=$KEYSTONE_SERVICE_URI/v$CEPH_RGW_IDENTITY_API_VERSION
iniset $GLANCE_SWIFT_STORE_CONF ref1 key $SERVICE_PASSWORD
iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_address $AUTH_URL
iniset $GLANCE_SWIFT_STORE_CONF ref1 user_domain_name $SERVICE_DOMAIN_NAME
iniset $GLANCE_SWIFT_STORE_CONF ref1 project_domain_name $SERVICE_DOMAIN_NAME
iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_version $CEPH_RGW_IDENTITY_API_VERSION
if [[ "$GLANCE_ENABLE_MULTIPLE_STORES" == "True" ]]; then
# NOTE(abhishekk): As this is an all-in-one setup, there will be only
# one swift instance available even if glance multiple stores are enabled.
# We assume the store name is `swift_store`.
iniset $GLANCE_API_CONF glance_store default_backend "swift_store"
iniset $GLANCE_API_CONF "swift_store" swift_store_create_container_on_put True
iniset $GLANCE_API_CONF "swift_store" swift_store_config_file $GLANCE_SWIFT_STORE_CONF
iniset $GLANCE_API_CONF "swift_store" default_swift_reference ref1
else
iniset $GLANCE_API_CONF glance_store default_store swift
iniset $GLANCE_API_CONF glance_store swift_store_create_container_on_put True
iniset $GLANCE_API_CONF glance_store swift_store_config_file $GLANCE_SWIFT_STORE_CONF
iniset $GLANCE_API_CONF glance_store default_swift_reference ref1
iniset $GLANCE_API_CONF glance_store stores "file, http, swift"
fi
else
iniset $GLANCE_API_CONF DEFAULT show_multiple_locations True
if [[ "$GLANCE_ENABLE_MULTIPLE_STORES" == "True" ]]; then
# NOTE(abhishekk): As this is an all-in-one setup, there will be only
# one rbd instance available even if glance multiple stores are enabled.
# We assume the store name is `robust_rbd`. We also set a second,
# file-based store 'fast' along with it, as the old setup also used to
# configure a file store when rbd is enabled.
iniset $GLANCE_API_CONF DEFAULT enabled_backends "robust_rbd:rbd,fast:file"
iniset $GLANCE_API_CONF glance_store default_backend robust_rbd
iniset $GLANCE_API_CONF "robust_rbd" rbd_store_ceph_conf $CEPH_CONF_FILE
iniset $GLANCE_API_CONF "robust_rbd" rbd_store_user $GLANCE_CEPH_USER
iniset $GLANCE_API_CONF "robust_rbd" rbd_store_pool $GLANCE_CEPH_POOL
else
iniset $GLANCE_API_CONF glance_store default_store rbd
iniset $GLANCE_API_CONF glance_store stores "file, http, rbd"
iniset $GLANCE_API_CONF glance_store rbd_store_ceph_conf $CEPH_CONF_FILE
iniset $GLANCE_API_CONF glance_store rbd_store_user $GLANCE_CEPH_USER
iniset $GLANCE_API_CONF glance_store rbd_store_pool $GLANCE_CEPH_POOL
fi
fi
}
function config_nova {
iniset $NOVA_CONF libvirt rbd_user ${CINDER_CEPH_USER}
iniset $NOVA_CONF libvirt rbd_secret_uuid ${CINDER_CEPH_UUID}
iniset $NOVA_CONF libvirt inject_key false
iniset $NOVA_CONF libvirt inject_partition -2
iniset $NOVA_CONF libvirt disk_cachemodes "network=writeback"
iniset $NOVA_CONF libvirt images_type rbd
iniset $NOVA_CONF libvirt images_rbd_pool ${NOVA_CEPH_POOL}
iniset $NOVA_CONF libvirt images_rbd_ceph_conf ${CEPH_CONF_FILE}
}
function set_min_client_version {
if [ -n "$CEPH_MIN_CLIENT_VERSION" ]; then
$SUDO "$CEPHADM" shell --fsid $FSID --config $CEPH_CONFIG \
--keyring $CEPH_KEYRING -- ceph osd set-require-min-compat-client ${CEPH_MIN_CLIENT_VERSION}
fi
}
function set_memory_config {
$SUDO "$CEPHADM" shell --fsid $FSID --config $CEPH_CONFIG \
--keyring $CEPH_KEYRING -- ceph config get osd osd_memory_target
$SUDO "$CEPHADM" shell --fsid $FSID --config $CEPH_CONFIG \
--keyring $CEPH_KEYRING -- ceph config set osd osd_memory_target \
2147483648
}
# import_libvirt_secret_ceph() - Imports Cinder user key into libvirt
# so it can connect to the Ceph cluster while attaching a Cinder block device
function import_libvirt_secret_ceph {
[ "$ENABLE_CEPH_NOVA" == "False" ] && return;
NOVA_VIRSH_SECRET=$($SUDO cat ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring | awk '/key/ {print $3}')
cat <<EOF | sudo tee secret.xml>/dev/null
<secret ephemeral='no' private='no'>
<uuid>${CINDER_CEPH_UUID}</uuid>
<usage type='ceph'>
<name>client.${CINDER_CEPH_USER} secret</name>
</usage>
</secret>
EOF
$SUDO virsh secret-define --file secret.xml # 2>/dev/null
$SUDO virsh secret-set-value --secret ${CINDER_CEPH_UUID} \
--base64 ${NOVA_VIRSH_SECRET} # 2>/dev/null
$SUDO rm -f secret.xml
}
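# The import can be verified by hand, e.g. (illustrative):
#   sudo virsh secret-list                          # lists $CINDER_CEPH_UUID
#   sudo virsh secret-get-value $CINDER_CEPH_UUID   # prints the base64 key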
# _undefine_virsh_secret() - Undefine Cinder key secret from libvirt
function _undefine_virsh_secret {
local virsh_uuid
virsh_uuid=$($SUDO virsh secret-list | awk '/^ ?[0-9a-z]/ { print $1 }')
echo $virsh_uuid
$SUDO virsh secret-undefine ${virsh_uuid} &>/dev/null
}
function configure_ceph {
if is_ceph_enabled_for_service manila; then
SERVICES+=('cephfs')
KEYS+=("client.$MANILA_CEPH_USER")
fi
[ "$MANILA_CEPH_DRIVER" == "cephfsnfs" ] && SERVICES+=('nfs')
if is_ceph_enabled_for_service glance; then
POOLS+=($GLANCE_CEPH_POOL)
KEYS+=("client.$GLANCE_CEPH_USER")
config_glance
fi
if is_ceph_enabled_for_service cinder; then
POOLS+=($CINDER_CEPH_POOL)
KEYS+=("client.$CINDER_CEPH_USER")
fi
if is_ceph_enabled_for_service c-bak; then
POOLS+=($CINDER_BAK_CEPH_POOL)
KEYS+=("client.$CINDER_BAK_CEPH_USER")
fi
if is_ceph_enabled_for_service nova; then
POOLS+=($NOVA_CEPH_POOL)
KEYS+=("client.$CINDER_CEPH_USER")
config_nova
fi
[ "$ENABLE_CEPH_RGW" == "True" ] && SERVICES+=('rgw')
[ "$ENABLE_CEPH_RBD_MIRROR" == "True" ] && SERVICES+=('rbdmirror')
enable_services
if [[ "$REMOTE_CEPH" = "False" ]]; then
add_pools
create_keys
fi
client_config
import_libvirt_secret_ceph
if [[ "$DISABLE_CEPHADM_POST_DEPLOY" == "True" ]]; then
disable_cephadm
fi
}
# Hack: this noop overrides the configure_ceph_manila defined earlier in this
# file (the manila setup now happens inside configure_ceph); remove it at some
# point
function configure_ceph_manila {
# noop
:
}
function cleanup_ceph {
# Cleanup the service.
if [[ "$REMOTE_CEPH" == "True" ]]; then
echo "Remote Ceph cluster, skipping stop_ceph and delete_osd_dev"
else
stop_ceph
delete_osd_dev
fi
# purge ceph config file and keys
$SUDO rm -f ${CEPH_CONF_DIR}/*
if is_ceph_enabled_for_service nova; then
_undefine_virsh_secret
fi
if [[ "$CEPHADM_DEPLOY_NFS" != "True" ]]; then
stop_nfs_ganesha
cleanup_nfs_ganesha
cleanup_repo_nfs_ganesha
fi
}
function disable_cephadm {
$SUDO "$CEPHADM" shell --fsid $FSID --config $CEPH_CONFIG \
--keyring $CEPH_KEYRING -- ceph orch set backend
$SUDO "$CEPHADM" shell --fsid $FSID --config $CEPH_CONFIG \
--keyring $CEPH_KEYRING -- ceph mgr module disable cephadm
}
# is_ceph_enabled_for_service() - checks whether the OpenStack service
# specified as an argument is enabled with Ceph as its storage backend.
function is_ceph_enabled_for_service {
local config config_name enabled service
enabled=1
service=$1
# Construct the global variable ENABLE_CEPH_.* corresponding to a
# $service.
config_name=ENABLE_CEPH_$(echo $service | \
tr '[:lower:]' '[:upper:]' | tr '-' '_')
config=$(eval echo "\$$config_name")
if (is_service_enabled $service) && [[ $config == 'True' ]]; then
enabled=0
fi
return $enabled
}
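# Usage example: with ENABLE_CEPH_GLANCE=True and the glance service enabled,
#   is_ceph_enabled_for_service glance   # returns 0 ("true" for the shell)
# Note that enabled=0 means success here, matching shell exit-status semantics.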
# Restore xtrace
$XTRACE
# Tell emacs to use shell-script-mode
## Local variables:
## mode: shell-script
## End: