# NOTE(review): every line of this file carries a fused numeric prefix (the
# original script's line numbers — an extraction artifact), and many original
# lines are missing, so these fragments are not executable shell as-is.
# Comments describe intent; control-flow closers (fi/done/esac) are often
# not visible in this chunk.
#
# Honor VSTART_DEST: when set, the cluster's conf file and dev/out trees are
# placed under it instead of the current directory.  ('fi' not visible.)
6 if [ -n "$VSTART_DEST" ]; then
8 SRC_PATH=`(cd $SRC_PATH; pwd)`
14 CEPH_CONF_PATH=$VSTART_DEST
15 CEPH_DEV_DIR=$VSTART_DEST/dev
16 CEPH_OUT_DIR=$VSTART_DEST/out
# Out-of-tree CMake build detection: a CMakeCache.txt in the CWD lets us
# recover the source-tree path from its ceph_SOURCE_DIR entry and derive the
# mgr python module path from it.  ('fi' not visible in this fragment.)
19 # for running out of the CMake build directory
20 if [ -e CMakeCache.txt ]; then
21 # Out of tree build, learn source location from CMakeCache.txt
22 CEPH_ROOT=`grep ceph_SOURCE_DIR CMakeCache.txt | cut -d "=" -f 2`
24 [ -z "$MGR_PYTHON_PATH" ] && MGR_PYTHON_PATH=$CEPH_ROOT/src/pybind/mgr
# Resolve binary/library locations.  Two layouts are supported:
#  - CEPH_BUILD_ROOT (a 'make install' tree): bin/ and lib/, with plugins in
#    lib/erasure-code and lib/rados-classes subdirectories;
#  - CEPH_ROOT (a source tree): binaries/libs come from CEPH_BUILD_DIR and
#    plugins load straight from the flat lib directory.
# Every assignment is guarded with [ -z ... ] so caller-provided values win.
# ('fi' closing the elif chain is not visible in this fragment.)
27 # use CEPH_BUILD_ROOT to vstart from a 'make install'
28 if [ -n "$CEPH_BUILD_ROOT" ]; then
29 [ -z "$CEPH_BIN" ] && CEPH_BIN=$CEPH_BUILD_ROOT/bin
30 [ -z "$CEPH_LIB" ] && CEPH_LIB=$CEPH_BUILD_ROOT/lib
31 [ -z "$EC_PATH" ] && EC_PATH=$CEPH_LIB/erasure-code
32 [ -z "$OBJCLASS_PATH" ] && OBJCLASS_PATH=$CEPH_LIB/rados-classes
33 elif [ -n "$CEPH_ROOT" ]; then
34 [ -z "$PYBIND" ] && PYBIND=$CEPH_ROOT/src/pybind
35 [ -z "$CEPH_BIN" ] && CEPH_BIN=$CEPH_BUILD_DIR/bin
36 [ -z "$CEPH_ADM" ] && CEPH_ADM=$CEPH_BIN/ceph
37 [ -z "$INIT_CEPH" ] && INIT_CEPH=$CEPH_BUILD_DIR/bin/init-ceph
38 [ -z "$CEPH_LIB" ] && CEPH_LIB=$CEPH_BUILD_DIR/lib
39 [ -z "$OBJCLASS_PATH" ] && OBJCLASS_PATH=$CEPH_LIB
40 [ -z "$EC_PATH" ] && EC_PATH=$CEPH_LIB
# Export interpreter/loader paths so child daemons and the python bindings
# resolve against the build tree; DYLD_LIBRARY_PATH covers macOS.  The
# CEPH_VSTART_WRAPPER guard's body/'fi' is not visible in this fragment.
43 if [ -z "${CEPH_VSTART_WRAPPER}" ]; then
47 [ -z "$PYBIND" ] && PYBIND=./pybind
49 export PYTHONPATH=$PYBIND:$CEPH_LIB/cython_modules/lib.2:$PYTHONPATH
50 export LD_LIBRARY_PATH=$CEPH_LIB:$LD_LIBRARY_PATH
51 export DYLD_LIBRARY_PATH=$CEPH_LIB:$DYLD_LIBRARY_PATH
52 # Suppress logging for regular use that indicated that we are using a
53 # development version. vstart.sh is only used during testing and
# Map the short convenience env vars (MON, OSD, MDS, MGR, FS, RGW) onto the
# canonical CEPH_NUM_* daemon counts without overriding explicit settings.
# (Fix: stripped the fused line-number prefixes that made these lines
# unexecutable in the extracted copy.)
[ -z "$CEPH_NUM_MON" ] && CEPH_NUM_MON="$MON"
[ -z "$CEPH_NUM_OSD" ] && CEPH_NUM_OSD="$OSD"
[ -z "$CEPH_NUM_MDS" ] && CEPH_NUM_MDS="$MDS"
[ -z "$CEPH_NUM_MGR" ] && CEPH_NUM_MGR="$MGR"
[ -z "$CEPH_NUM_FS" ] && CEPH_NUM_FS="$FS"
[ -z "$CEPH_NUM_RGW" ] && CEPH_NUM_RGW="$RGW"
# If the user specified no daemon counts at all, the (hidden) 'then' body
# kills the existing cluster rather than starting a new one.
# NOTE(review): '-a' inside '[ ]' is obsolescent POSIX; left untouched since
# the matching body and 'fi' are not visible in this fragment.
64 # if none of the CEPH_NUM_* number is specified, kill the existing
66 if [ -z "$CEPH_NUM_MON" -a \
67 -z "$CEPH_NUM_OSD" -a \
68 -z "$CEPH_NUM_MDS" -a \
69 -z "$CEPH_NUM_MGR" ]; then
# Stock vstart defaults for any daemon count or path not already chosen:
# 3 mons/osds/mds, 1 mgr, 1 fs (single active MDS), no rgw; all cluster
# state lives under the current directory unless overridden.
# (Fix: stripped the fused line-number prefixes that made these lines
# unexecutable in the extracted copy.)
[ -z "$CEPH_NUM_MON" ] && CEPH_NUM_MON=3
[ -z "$CEPH_NUM_OSD" ] && CEPH_NUM_OSD=3
[ -z "$CEPH_NUM_MDS" ] && CEPH_NUM_MDS=3
[ -z "$CEPH_NUM_MGR" ] && CEPH_NUM_MGR=1
[ -z "$CEPH_NUM_FS" ] && CEPH_NUM_FS=1
[ -z "$CEPH_MAX_MDS" ] && CEPH_MAX_MDS=1
[ -z "$CEPH_NUM_RGW" ] && CEPH_NUM_RGW=0

[ -z "$CEPH_DIR" ] && CEPH_DIR="$PWD"
[ -z "$CEPH_DEV_DIR" ] && CEPH_DEV_DIR="$CEPH_DIR/dev"
[ -z "$CEPH_OUT_DIR" ] && CEPH_OUT_DIR="$CEPH_DIR/out"
[ -z "$CEPH_RGW_PORT" ] && CEPH_RGW_PORT=8000
[ -z "$CEPH_CONF_PATH" ] && CEPH_CONF_PATH=$CEPH_DIR
# Pool replication default: cap at 3 when there are more than 3 OSDs,
# otherwise replicate across every OSD.  (The 'else'/'fi' between these
# branches is not visible in this fragment.)
89 if [ $CEPH_NUM_OSD -gt 3 ]; then
90 OSD_POOL_DEFAULT_SIZE=3
92 OSD_POOL_DEFAULT_SIZE=$CEPH_NUM_OSD
# Feature toggles and derived file locations.
# (Fix: stripped the fused line-number prefixes that made these lines
# unexecutable in the extracted copy.)
cephx=1                 # turn cephx on by default (-X disables, -x re-enables)
rgw_frontend="civetweb" # default rgw frontend; --rgw_frontend overrides
lockdep=${LOCKDEP:-1}   # lockdep checking on unless LOCKDEP=0 / --nolockdep

# conf section under which a previous vstart run recorded its daemon counts
VSTART_SEC="client.vstart.sh"

# generated cluster files; the /tmp maps are private to this run via $$
conf_fn="$CEPH_CONF_PATH/ceph.conf"
keyring_fn="$CEPH_CONF_PATH/keyring"
osdmap_fn="/tmp/ceph_osdmap.$$"
monmap_fn="/tmp/ceph_monmap.$$"
# Assemble the multi-line help text.  \n and \t are stored as literal escape
# sequences; the printer (usage_exit, not visible here) presumably expands
# them — TODO confirm it uses printf '%b' or echo -e.
# (Fix: stripped the fused line-number prefixes that made these lines
# unexecutable in the extracted copy.)
usage="usage: $0 [option]... \nex: $0 -n -d --mon_num 3 --osd_num 3 --mds_num 1 --rgw_num 1\n"
usage=$usage"options:\n"
usage=$usage"\t-d, --debug\n"
usage=$usage"\t-s, --standby_mds: Generate standby-replay MDS for each active\n"
usage=$usage"\t-l, --localhost: use localhost instead of hostname\n"
usage=$usage"\t-i <ip>: bind to specific ip\n"
usage=$usage"\t-n, --new\n"
usage=$usage"\t-N, --not-new: reuse existing cluster config (default)\n"
usage=$usage"\t--valgrind[_{osd,mds,mon,rgw}] 'toolname args...'\n"
usage=$usage"\t--nodaemon: use ceph-run as wrapper for mon/osd/mds\n"
usage=$usage"\t--smallmds: limit mds cache size\n"
usage=$usage"\t-m ip:port\t\tspecify monitor address\n"
usage=$usage"\t-k keep old configuration files\n"
usage=$usage"\t-x enable cephx (on by default)\n"
usage=$usage"\t-X disable cephx\n"
usage=$usage"\t--hitset <pool> <hit_set_type>: enable hitset tracking\n"
usage=$usage"\t-e : create an erasure pool\n"
usage=$usage"\t-o config\t\t add extra config parameters to all sections\n"
usage=$usage"\t--mon_num specify ceph monitor count\n"
usage=$usage"\t--osd_num specify ceph osd count\n"
usage=$usage"\t--mds_num specify ceph mds count\n"
usage=$usage"\t--rgw_num specify ceph rgw count\n"
usage=$usage"\t--mgr_num specify ceph mgr count\n"
usage=$usage"\t--rgw_port specify ceph rgw http listen port\n"
usage=$usage"\t--rgw_frontend specify the rgw frontend configuration\n"
usage=$usage"\t--rgw_compression specify the rgw compression plugin\n"
usage=$usage"\t-b, --bluestore use bluestore as the osd objectstore backend\n"
usage=$usage"\t--memstore use memstore as the osd objectstore backend\n"
usage=$usage"\t--cache <pool>: enable cache tiering on pool\n"
usage=$usage"\t--short: short object names only; necessary for ext4 dev\n"
usage=$usage"\t--nolockdep disable lockdep\n"
usage=$usage"\t--multimds <count> allow multimds with maximum active count\n"
# Command-line parsing loop.  The case statement and its arms are not
# visible here; each surviving '[ -z "$2" ] && usage_exit' guard belongs to
# an option that requires a value argument.
165 while [ $# -ge 1 ]; do
177 [ -z "$2" ] && usage_exit
194 [ -z "$2" ] && usage_exit
203 [ -z "$2" ] && usage_exit
208 [ -z "$2" ] && usage_exit
213 [ -z "$2" ] && usage_exit
218 [ -z "$2" ] && usage_exit
223 [ -z "$2" ] && usage_exit
271 [ -z "$2" ] && usage_exit
276 cephx=1 # this is on be default, flag exists for historical consistency
# More option-arm fragments: refuse to reuse an unreadable old conf, and
# accumulate multi-valued options (hitset pool/type pairs, extra conf text,
# cache-tier pool names).  Surrounding case arms are not visible.
282 if [ ! -r $conf_fn ]; then
283 echo "cannot use old configuration: $conf_fn not readable." >&2
295 hitset="$hitset $2 $3"
300 extra_conf="$extra_conf $2
305 if [ -z "$cache" ]; then
# Stop any running cluster when requested; when reusing an existing conf
# (overwrite_conf == 0), recover the daemon counts that the previous vstart
# run recorded under the $VSTART_SEC section.  Each ceph-conf lookup chains
# with '&& \' so a missing key is tolerated (continuations not visible).
325 if [ $kill_all -eq 1 ]; then
326 $SUDO $INIT_CEPH stop
329 if [ "$overwrite_conf" -eq 0 ]; then
330 CEPH_ASOK_DIR=`dirname $($CEPH_BIN/ceph-conf --show-config-value admin_socket)`
331 mkdir -p $CEPH_ASOK_DIR
332 MON=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC num_mon 2>/dev/null` && \
334 OSD=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC num_osd 2>/dev/null` && \
336 MDS=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC num_mds 2>/dev/null` && \
338 MGR=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC num_mgr 2>/dev/null` && \
340 RGW=`$CEPH_BIN/ceph-conf -c $conf_fn --name $VSTART_SEC num_rgw 2>/dev/null` && \
# Fresh-cluster path: clear out the old admin-socket directory (but never
# the system-wide /var/run/ceph), pick a private per-run asok dir via
# mktemp -u when none was chosen, and remove the stale conf file.
343 if [ "$new" -ne 0 ]; then
345 asok_dir=`dirname $($CEPH_BIN/ceph-conf --show-config-value admin_socket)`
346 if [ $asok_dir != /var/run/ceph ]; then
347 [ -d $asok_dir ] && rm -f $asok_dir/* && rmdir $asok_dir
349 if [ -z "$CEPH_ASOK_DIR" ]; then
350 CEPH_ASOK_DIR=`mktemp -u -d "${TMPDIR:-/tmp}/ceph-asok.XXXXXX"`
352 [ -e "$conf_fn" ] && rm -- "$conf_fn"
354 CEPH_ASOK_DIR=`dirname $($CEPH_BIN/ceph-conf --show-config-value admin_socket)`
355 # -k is implied... (doesn't make sense otherwise)
# Daemon launcher fragment (body of the 'run' helper, definition not
# visible): pick a per-daemon-type valgrind override (valgrind_$type) or
# fall back to the global one, then start the daemon in the foreground (-f)
# under valgrind, directly, or wrapped by ceph-run when not daemonizing.
375 eval "valg=\$valgrind_$type"
376 [ -z "$valg" ] && valg="$valgrind"
378 if [ -n "$valg" ]; then
379 prunb valgrind --tool="$valg" $valgrind_args "$@" -f
382 if [ "$nodaemon" -eq 0 ]; then
385 prunb ./ceph-run "$@" -f
# ceph.conf generation, [global]-ish content.  These lines are heredoc
# content (the 'cat <<EOF > conf' framing is not visible); \$name is escaped
# so ceph expands it per-daemon at runtime.  The rgw KMS entries are fixed
# test keys for the generated dev cluster.
391 if [ "$overwrite_conf" -eq 1 ]; then
398 log file = $CEPH_OUT_DIR/\$name.log
399 admin socket = $CEPH_ASOK_DIR/\$name.asok
401 pid file = $CEPH_OUT_DIR/\$name.pid
402 heartbeat file = $CEPH_OUT_DIR/\$name.heartbeat
406 ; generated by vstart.sh on `date`
408 num mon = $CEPH_NUM_MON
409 num osd = $CEPH_NUM_OSD
410 num mds = $CEPH_NUM_MDS
411 num mgr = $CEPH_NUM_MGR
412 num rgw = $CEPH_NUM_RGW
417 osd pgp bits = 5 ; (invalid, but ceph should cope!)
418 osd pool default size = $OSD_POOL_DEFAULT_SIZE
419 osd crush chooseleaf type = 0
420 osd pool default min size = 1
421 osd failsafe full ratio = .99
422 mon osd nearfull ratio = .99
423 mon osd backfillfull ratio = .99
424 mon osd reporter subtree level = osd
425 mon osd full ratio = .99
426 mon data avail warn = 2
427 mon data avail crit = 1
428 erasure code dir = $EC_PATH
429 plugin dir = $CEPH_LIB
430 osd pool default erasure code profile = plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd
431 rgw frontends = $rgw_frontend port=$CEPH_RGW_PORT
433 rgw crypt s3 kms encryption keys = testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo=
434 rgw crypt require ssl = false
435 rgw lc debug interval = 10
436 filestore fd cache size = 32
437 run dir = $CEPH_OUT_DIR
438 enable experimental unrecoverable data corrupting features = *
# Conf fragments: optional lockdep, the cephx-vs-none auth stanzas, short
# object-name limits for ext4 backing stores (--short), and the [client]
# keyring/log/asok locations (\$pid included so parallel clients don't
# collide).
440 if [ "$lockdep" -eq 1 ] ; then
445 if [ "$cephx" -eq 1 ] ; then
447 auth cluster required = cephx
448 auth service required = cephx
449 auth client required = cephx
453 auth cluster required = none
454 auth service required = none
455 auth client required = none
458 if [ "$short" -eq 1 ]; then
459 COSDSHORT=" osd max object name len = 460
460 osd max object namespace len = 64"
464 keyring = $keyring_fn
465 log file = $CEPH_OUT_DIR/\$name.\$pid.log
466 admin socket = $CEPH_ASOK_DIR/\$name.\$pid.asok
# Conf fragments for the per-daemon sections ([mds], [mgr], [osd], [mon]):
# aggressive MDS debug checks, per-daemon data dirs under CEPH_DEV_DIR,
# filestore writeback throttles, bluestore block/db/wal file layout, and
# permissive/test-friendly mon settings (pool delete allowed, low pg warn
# thresholds).  Heredoc framing is not visible in this fragment.
473 mds debug frag = true
474 mds debug auth pins = true
475 mds debug subtrees = true
476 mds data = $CEPH_DEV_DIR/mds.\$id
477 mds root ino uid = `id -u`
478 mds root ino gid = `id -g`
481 mgr data = $CEPH_DEV_DIR/mgr.\$id
482 mgr module path = $MGR_PYTHON_PATH
483 mon reweight min pgs per osd = 4
484 mon pg warn min per osd = 3
490 osd_check_max_object_name_len_on_startup = false
491 osd data = $CEPH_DEV_DIR/osd\$id
492 osd journal = $CEPH_DEV_DIR/osd\$id/journal
493 osd journal size = 100
495 osd class dir = $OBJCLASS_PATH
496 osd class load list = *
497 osd class default list = *
498 osd scrub load threshold = 2000.0
499 osd debug op order = true
500 osd debug misdirected ops = true
501 filestore wbthrottle xfs ios start flusher = 10
502 filestore wbthrottle xfs ios hard limit = 20
503 filestore wbthrottle xfs inodes hard limit = 30
504 filestore wbthrottle btrfs ios start flusher = 10
505 filestore wbthrottle btrfs ios hard limit = 20
506 filestore wbthrottle btrfs inodes hard limit = 30
507 osd copyfrom max chunk = 524288
508 bluestore fsck on mount = true
509 bluestore block create = true
510 bluestore block db path = $CEPH_DEV_DIR/osd\$id/block.db.file
511 bluestore block db size = 67108864
512 bluestore block db create = true
513 bluestore block wal path = $CEPH_DEV_DIR/osd\$id/block.wal.file
514 bluestore block wal size = 1048576000
515 bluestore block wal create = true
521 mgr initial modules = restful status dashboard balancer
522 mon pg warn min per osd = 3
523 mon osd allow primary affinity = true
524 mon reweight min pgs per osd = 4
525 mon osd prime pg temp = true
526 crushtool = $CEPH_BIN/crushtool
527 mon allow pool delete = true
531 mon cluster log file = $CEPH_OUT_DIR/cluster.mon.\$id.log
# Monitor bootstrap: assign single-letter mon names a..z up to CEPH_NUM_MON,
# warn when the chosen IP is loopback, create the mon. and client.admin /
# client.rgw keys, build a fresh monmap with each mon at IP:(CEPH_PORT+count),
# mkfs each mon data dir, then start ceph-mon.  Loop 'do'/'done' and several
# closing lines are not visible in this fragment.
540 for f in a b c d e f g h i j k l m n o p q r s t u v w x y z
542 [ $count -eq $CEPH_NUM_MON ] && break;
543 count=$(($count + 1))
552 if [ "$new" -eq 1 ]; then
553 if [ `echo $IP | grep '^127\\.'` ]
556 echo "NOTE: hostname resolves to loopback; remote hosts will not be able to"
557 echo " connect. either adjust /etc/hosts, or edit this script to use your"
558 echo " machine's real IP."
562 prun $SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name=mon. "$keyring_fn" --cap mon 'allow *'
563 prun $SUDO "$CEPH_BIN/ceph-authtool" --gen-key --name=client.admin --set-uid=0 \
564 --cap mon 'allow *' \
565 --cap osd 'allow *' \
566 --cap mds 'allow *' \
567 --cap mgr 'allow *' \
570 prun $SUDO "$CEPH_BIN/ceph-authtool" --gen-key --name=client.rgw \
571 --cap mon 'allow rw' \
572 --cap osd 'allow rwx' \
573 --cap mgr 'allow rw' \
576 # build a fresh fs monmap, mon fs
581 str="$str --add $f $IP:$(($CEPH_PORT+$count))"
585 mon data = $CEPH_DEV_DIR/mon.$f
586 mon addr = $IP:$(($CEPH_PORT+$count))
588 count=$(($count + 1))
590 prun "$CEPH_BIN/monmaptool" --create --clobber $str --print "$monmap_fn"
594 prun rm -rf -- "$CEPH_DEV_DIR/mon.$f"
595 prun mkdir -p "$CEPH_DEV_DIR/mon.$f"
596 prun "$CEPH_BIN/ceph-mon" --mkfs -c "$conf_fn" -i "$f" --monmap="$monmap_fn" --keyring="$keyring_fn"
599 prun rm -- "$monmap_fn"
605 run 'mon' $CEPH_BIN/ceph-mon -i $f $ARGS $CMON_ARGS
605 run 'mon' $CEPH_BIN/ceph-mon -i $f $ARGS $CMON_ARGS
610 for osd in `seq 0 $((CEPH_NUM_OSD-1))`
612 if [ "$new" -eq 1 ]; then
618 rm -rf $CEPH_DEV_DIR/osd$osd || true
619 if command -v btrfs > /dev/null; then
620 for f in $CEPH_DEV_DIR/osd$osd/*; do btrfs sub delete $f &> /dev/null || true; done
622 if [ -n "$filestore_path" ]; then
623 ln -s $filestore_path $CEPH_DEV_DIR/osd$osd
625 mkdir -p $CEPH_DEV_DIR/osd$osd
629 echo "add osd$osd $uuid"
630 ceph_adm osd create $uuid
631 ceph_adm osd crush add osd.$osd 1.0 host=$HOSTNAME root=default
632 OSD_SECRET=$($CEPH_BIN/ceph-authtool --gen-print-key)
633 $SUDO $CEPH_BIN/ceph-osd -i $osd $ARGS --mkfs --key $OSD_SECRET --osd-uuid $uuid
635 local key_fn=$CEPH_DEV_DIR/osd$osd/keyring
640 echo adding osd$osd key to auth repository
641 ceph_adm -i "$key_fn" auth add osd.$osd osd "allow *" mon "allow profile osd" mgr "allow profile osd"
644 run 'osd' $SUDO $CEPH_BIN/ceph-osd -i $osd $ARGS $COSD_ARGS
# MGR bootstrap: one mgr per name up to CEPH_NUM_MGR.  Dashboard/restful
# ports are spaced 1000 apart starting at CEPH_PORT+1000 so they stay clear
# of the monitor port range used by the test scripts.  After starting the
# mgrs, a self-signed cert and an admin API key are created for the restful
# module via 'tell mgr' (the first mgr may not have registered its module
# commands yet).
650 # avoid monitors on nearby ports (which test/*.sh use extensively)
651 MGR_PORT=$(($CEPH_PORT + 1000))
652 for name in x y z a b c d e f g h i j k l m n o p
654 [ $mgr -eq $CEPH_NUM_MGR ] && break
656 if [ "$new" -eq 1 ]; then
657 mkdir -p $CEPH_DEV_DIR/mgr.$name
658 key_fn=$CEPH_DEV_DIR/mgr.$name/keyring
659 $SUDO $CEPH_BIN/ceph-authtool --create-keyring --gen-key --name=mgr.$name $key_fn
660 ceph_adm -i $key_fn auth add mgr.$name mon 'allow profile mgr' mds 'allow *' osd 'allow *'
668 ceph_adm config-key set mgr/dashboard/$name/server_port $MGR_PORT
669 DASH_URLS+="http://$IP:$MGR_PORT/"
670 MGR_PORT=$(($MGR_PORT + 1000))
672 ceph_adm config-key set mgr/restful/$name/server_port $MGR_PORT
674 RESTFUL_URLS+="https://$IP:$MGR_PORT"
675 MGR_PORT=$(($MGR_PORT + 1000))
677 echo "Starting mgr.${name}"
678 run 'mgr' $CEPH_BIN/ceph-mgr -i $name $ARGS
681 # use tell mgr here because the first mgr might not have activated yet
682 # to register the python module commands.
683 if ceph_adm tell mgr restful create-self-signed-cert; then
685 ceph_adm restful create-key admin -o $SF
686 RESTFUL_SECRET=`cat $SF`
689 echo MGR Restful is not working, perhaps the package is not installed?
# CephFS/MDS bootstrap: create a data+metadata pool pair and one filesystem
# per CEPH_NUM_FS (enabling the multiple-filesystems flag when more than
# one), then one MDS per name up to CEPH_NUM_MDS.  With -s/--standby_mds,
# each active MDS "<name>" gets a standby-replay twin "<name>s" whose conf
# pins it to the active's rank/name.  Trailing commented lines are leftover
# thrash/debug invocations.  Loop/if closers not visible in this fragment.
694 if [ $new -eq 1 ]; then
695 if [ "$CEPH_NUM_FS" -gt "0" ] ; then
696 if [ "$CEPH_NUM_FS" -gt "1" ] ; then
697 ceph_adm fs flag set enable_multiple true --yes-i-really-mean-it
701 for name in a b c d e f g h i j k l m n o p
703 ceph_adm osd pool create "cephfs_data_${name}" 8
704 ceph_adm osd pool create "cephfs_metadata_${name}" 8
705 ceph_adm fs new "cephfs_${name}" "cephfs_metadata_${name}" "cephfs_data_${name}"
707 [ $fs -eq $CEPH_NUM_FS ] && break
713 for name in a b c d e f g h i j k l m n o p
715 [ $mds -eq $CEPH_NUM_MDS ] && break
718 if [ "$new" -eq 1 ]; then
719 prun mkdir -p "$CEPH_DEV_DIR/mds.$name"
720 key_fn=$CEPH_DEV_DIR/mds.$name/keyring
725 if [ "$standby" -eq 1 ]; then
726 mkdir -p $CEPH_DEV_DIR/mds.${name}s
728 mds standby for rank = $mds
730 mds standby replay = true
731 mds standby for name = ${name}
734 prun $SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name="mds.$name" "$key_fn"
735 ceph_adm -i "$key_fn" auth add "mds.$name" mon 'allow profile mds' osd 'allow *' mds 'allow' mgr 'allow profile mds'
736 if [ "$standby" -eq 1 ]; then
737 prun $SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name="mds.${name}s" \
738 "$CEPH_DEV_DIR/mds.${name}s/keyring"
739 ceph_adm -i "$CEPH_DEV_DIR/mds.${name}s/keyring" auth add "mds.${name}s" \
740 mon 'allow profile mds' osd 'allow *' mds 'allow' mgr 'allow profile mds'
744 run 'mds' $CEPH_BIN/ceph-mds -i $name $ARGS $CMDS_ARGS
745 if [ "$standby" -eq 1 ]; then
746 run 'mds' $CEPH_BIN/ceph-mds -i ${name}s $ARGS $CMDS_ARGS
749 #valgrind --tool=massif $CEPH_BIN/ceph-mds $ARGS --mds_log_max_segments 2 --mds_thrash_fragments 0 --mds_thrash_exports 0 > m #--debug_ms 20
750 #$CEPH_BIN/ceph-mds -d $ARGS --mds_thrash_fragments 0 --mds_thrash_exports 0 #--debug_ms 20
751 #ceph_adm mds set max_mds 2
804 if [ -n "$MON_ADDR" ]; then
805 CMON_ARGS=" -m "$MON_ADDR
806 COSD_ARGS=" -m "$MON_ADDR
807 CMDS_ARGS=" -m "$MON_ADDR
810 if [ "$memstore" -eq 1 ]; then
812 osd objectstore = memstore'
814 if [ "$bluestore" -eq 1 ]; then
816 osd objectstore = bluestore'
819 if [ -z "$CEPH_PORT" ]; then
821 [ -e ".ceph_port" ] && CEPH_PORT=`cat .ceph_port`
824 [ -z "$INIT_CEPH" ] && INIT_CEPH=$CEPH_BIN/init-ceph
827 test -d $CEPH_DEV_DIR/osd0/. && test -e $CEPH_DEV_DIR/sudo && SUDO="sudo"
829 prun $SUDO rm -f core*
831 test -d $CEPH_ASOK_DIR || mkdir $CEPH_ASOK_DIR
832 test -d $CEPH_OUT_DIR || mkdir $CEPH_OUT_DIR
833 test -d $CEPH_DEV_DIR || mkdir $CEPH_DEV_DIR
834 $SUDO rm -rf $CEPH_OUT_DIR/*
835 test -d gmon && $SUDO rm -rf gmon/*
837 [ "$cephx" -eq 1 ] && [ "$new" -eq 1 ] && test -e $keyring_fn && rm $keyring_fn
# Determine the machine's IPv4 address — prefer an explicit -i value, else
# parse `ip`/ifconfig output, filtering loopback and IPv6, falling back to
# 127.0.0.1.  The ceph_adm fragment shows how admin commands are invoked:
# with -k keyring when cephx is on, without it otherwise.  (Function
# definition and several branches are not visible here.)
840 # figure machine's ip
841 HOSTNAME=`hostname -s`
842 if [ -n "$ip" ]; then
845 echo hostname $HOSTNAME
846 if [ -x "$(which ip 2>/dev/null)" ]; then
851 # filter out IPv6 and localhost addresses
852 IP="$($IP_CMD | sed -En 's/127.0.0.1//;s/.*inet (addr:)?(([0-9]*\.){3}[0-9]*).*/\2/p' | head -n1)"
853 # if nothing left, try using localhost address, it might work
854 if [ -z "$IP" ]; then IP="127.0.0.1"; fi
857 echo "port $CEPH_PORT"
860 [ -z $CEPH_ADM ] && CEPH_ADM=$CEPH_BIN/ceph
863 if [ "$cephx" -eq 1 ]; then
864 prun $SUDO "$CEPH_ADM" -c "$conf_fn" -k "$keyring_fn" "$@"
866 prun $SUDO "$CEPH_ADM" -c "$conf_fn" "$@"
874 if [ $CEPH_NUM_MON -gt 0 ]; then
878 if [ $CEPH_NUM_MGR -gt 0 ]; then
883 if [ $CEPH_NUM_OSD -gt 0 ]; then
888 if [ "$smallmds" -eq 1 ]; then
891 mds log max segments = 2
892 mds cache size = 10000
896 if [ $CEPH_NUM_MDS -gt 0 ]; then
900 # Don't set max_mds until all the daemons are started, otherwise
901 # the intended standbys might end up in active roles.
902 if [ "$CEPH_MAX_MDS" -gt 1 ]; then
903 sleep 5 # wait for daemons to make it into FSMap before increasing max_mds
906 for name in a b c d e f g h i j k l m n o p
908 [ $fs -eq $CEPH_NUM_FS ] && break
910 if [ "$CEPH_MAX_MDS" -gt 1 ]; then
911 ceph_adm fs set "cephfs_${name}" allow_multimds true --yes-i-really-mean-it
912 ceph_adm fs set "cephfs_${name}" max_mds "$CEPH_MAX_MDS"
918 if [ "$ec" -eq 1 ]; then
920 osd erasure-code-profile set ec-profile m=2 k=2
921 osd pool create ec 8 8 erasure ec-profile
926 while [ -n "$*" ]; do
929 echo "creating cache for pool $p ..."
931 osd pool create ${p}-cache 8
932 osd tier add $p ${p}-cache
933 osd tier cache-mode ${p}-cache writeback
934 osd tier set-overlay $p ${p}-cache
941 while [ -n "$*" ]; do
946 echo "setting hit_set on pool $pool type $type ..."
948 osd pool set $pool hit_set_type $type
949 osd pool set $pool hit_set_count 8
950 osd pool set $pool hit_set_period 30
# do_rgw_create_users: seed the well-known RGW test accounts — 'testid' for
# basic S3, the s3-tests suite users (see https://github.com/ceph/s3-tests),
# a tenanted user, and the swift 'test:tester' subuser.  All keys are fixed,
# publicly-known test credentials for the local dev cluster only.
# (The function's opening/closing braces are not visible in this fragment.)
956 do_rgw_create_users()
959 local akey='0555b35654ad1656d804'
960 local skey='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=='
961 echo "setting up user testid"
962 $CEPH_BIN/radosgw-admin user create --uid testid --access-key $akey --secret $skey --display-name 'M. Tester' --email tester@ceph.com -c $conf_fn > /dev/null
964 # Create S3-test users
965 # See: https://github.com/ceph/s3-tests
966 echo "setting up s3-test users"
967 $CEPH_BIN/radosgw-admin user create \
968 --uid 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef \
969 --access-key ABCDEFGHIJKLMNOPQRST \
970 --secret abcdefghijklmnopqrstuvwxyzabcdefghijklmn \
971 --display-name youruseridhere \
972 --email s3@example.com -c $conf_fn > /dev/null
973 $CEPH_BIN/radosgw-admin user create \
974 --uid 56789abcdef0123456789abcdef0123456789abcdef0123456789abcdef01234 \
975 --access-key NOPQRSTUVWXYZABCDEFG \
976 --secret nopqrstuvwxyzabcdefghijklmnabcdefghijklm \
977 --display-name john.doe \
978 --email john.doe@example.com -c $conf_fn > /dev/null
979 $CEPH_BIN/radosgw-admin user create \
981 --uid 9876543210abcdef0123456789abcdef0123456789abcdef0123456789abcdef \
982 --access-key HIJKLMNOPQRSTUVWXYZA \
983 --secret opqrstuvwxyzabcdefghijklmnopqrstuvwxyzab \
984 --display-name tenanteduser \
985 --email tenanteduser@example.com -c $conf_fn > /dev/null
988 echo "setting up user tester"
989 $CEPH_BIN/radosgw-admin user create -c $conf_fn --subuser=test:tester --display-name=Tester-Subuser --key-type=swift --secret=testing --access=full > /dev/null
993 echo " access key: $akey"
994 echo " secret key: $skey"
996 echo "Swift User Info:"
997 echo " account : test"
998 echo " user : tester"
999 echo " password : testing"
# RGW startup: on a new cluster optionally set the zone's compression
# plugin, then launch one radosgw per CEPH_NUM_RGW on successive ports
# (sudo when binding below 1024).  Finally print the user-facing summary:
# how to stop, dashboard/restful URLs, and env exports for talking to the
# cluster from outside $CEPH_DIR.  Several closers not visible here.
1005 if [ "$new" -eq 1 ]; then
1007 if [ -n "$rgw_compression" ]; then
1008 echo "setting compression type=$rgw_compression"
1009 $CEPH_BIN/radosgw-admin zone placement modify -c $conf_fn --rgw-zone=default --placement-id=default-placement --compression=$rgw_compression > /dev/null
1014 if [ "$debug" -ne 0 ]; then
1015 RGWDEBUG="--debug-rgw=20"
1019 [ $CEPH_RGW_PORT -lt 1024 ] && RGWSUDO=sudo
1020 n=$(($CEPH_NUM_RGW - 1))
1022 for rgw in j k l m n o p q r s t u v; do
1023 echo start rgw on http://localhost:$((CEPH_RGW_PORT + i))
1024 run 'rgw' $RGWSUDO $CEPH_BIN/radosgw -c $conf_fn --log-file=${CEPH_OUT_DIR}/rgw.$rgw.log ${RGWDEBUG} --debug-ms=1 -n client.rgw "--rgw_frontends=${rgw_frontend} port=$((CEPH_RGW_PORT + i))"
1026 [ $i -eq $CEPH_NUM_RGW ] && break
1029 if [ "$CEPH_NUM_RGW" -gt 0 ]; then
1033 echo "started. stop.sh to stop. see out/* (e.g. 'tail -f out/????') for debug output."
1036 echo "dashboard urls: $DASH_URLS"
1037 echo " restful urls: $RESTFUL_URLS"
1038 echo " w/ user/pass: admin / $RESTFUL_SECRET"
1040 echo "export PYTHONPATH=./pybind:$PYTHONPATH"
1041 echo "export LD_LIBRARY_PATH=$CEPH_LIB"
1043 if [ "$CEPH_DIR" != "$PWD" ]; then
1044 echo "export CEPH_CONF=$conf_fn"
1045 echo "export CEPH_KEYRING=$keyring_fn"