[mcp] Bring in deployment scripts 67/35267/3
author    Michael Polenchuk <mpolenchuk@mirantis.com>
          Wed, 24 May 2017 09:52:02 +0000 (13:52 +0400)
committer Michael Polenchuk <mpolenchuk@mirantis.com>
          Thu, 25 May 2017 10:38:56 +0000 (14:38 +0400)
Change-Id: I7d2af958e447a5892f7cd1f6c6fb8616951e2ff3
Signed-off-by: Michael Polenchuk <mpolenchuk@mirantis.com>
ci/deploy.sh
mcp/reclass/scripts/create-config-drive.sh [new file with mode: 0755]
mcp/reclass/scripts/infra.sh [new file with mode: 0755]
mcp/reclass/scripts/net_internal.xml [new file with mode: 0644]
mcp/reclass/scripts/net_mgmt.xml [new file with mode: 0644]
mcp/reclass/scripts/net_public.xml [new file with mode: 0644]
mcp/reclass/scripts/net_pxe.xml [new file with mode: 0644]
mcp/reclass/scripts/openstack.sh [new file with mode: 0755]
mcp/reclass/scripts/salt.sh [new file with mode: 0755]
mcp/reclass/scripts/user-data.sh [new file with mode: 0755]

diff --git a/ci/deploy.sh b/ci/deploy.sh
index 8411714..bdcd15d 100755 (executable)
@@ -111,7 +111,7 @@ clean() {
 # BEGIN of shorthand variables for internal use
 #
 SCRIPT_PATH=$(readlink -f $(dirname ${BASH_SOURCE[0]}))
-DEPLOY_DIR=$(cd ${SCRIPT_PATH}/../deploy; pwd)
+DEPLOY_DIR=$(cd ${SCRIPT_PATH}/../mcp/reclass/scripts; pwd)
 PXE_BRIDGE=''
 NO_HEALTH_CHECK=''
 USE_EXISTING_FUEL=''
@@ -240,23 +240,29 @@ pushd ${DEPLOY_DIR} > /dev/null
 # Prepare the deploy config files based on lab/pod information, deployment
 # scenario, etc.
 
-echo "python deploy-config.py -dha ${BASE_CONFIG_URI}/labs/${TARGET_LAB}/${TARGET_POD}/fuel/config/dha.yaml -deab file://${DEPLOY_DIR}/config/dea_base.yaml -deao ${BASE_CONFIG_URI}/labs/${TARGET_LAB}/${TARGET_POD}/fuel/config/dea-pod-override.yaml -scenario-base-uri file://${DEPLOY_DIR}/scenario -scenario ${DEPLOY_SCENARIO} -plugins file://${DEPLOY_DIR}/config/plugins -output ${SCRIPT_PATH}/config"
+./infra.sh
+./salt.sh
+./openstack.sh
 
-python deploy-config.py -dha ${BASE_CONFIG_URI}/labs/${TARGET_LAB}/${TARGET_POD}/fuel/config/dha.yaml -deab file://${DEPLOY_DIR}/config/dea_base.yaml -deao ${BASE_CONFIG_URI}/labs/${TARGET_LAB}/${TARGET_POD}/fuel/config/dea-pod-override.yaml -scenario-base-uri file://${DEPLOY_DIR}/scenario -scenario ${DEPLOY_SCENARIO} -plugins file://${DEPLOY_DIR}/config/plugins -output ${SCRIPT_PATH}/config
-
-if [ $DRY_RUN -eq 0 ]; then
-    # Download iso if it doesn't already exists locally
-    if [[ $ISO == file://* ]]; then
-        ISO=${ISO#file://}
-    else
-        mkdir -p ${SCRIPT_PATH}/ISO
-        curl -o ${SCRIPT_PATH}/ISO/image.iso $ISO
-        ISO=${SCRIPT_PATH}/ISO/image.iso
-    fi
-    # Start deployment
-    echo "python deploy.py $DEPLOY_LOG $STORAGE_DIR $PXE_BRIDGE $USE_EXISTING_FUEL $FUEL_CREATION_ONLY $NO_HEALTH_CHECK $NO_DEPLOY_ENVIRONMENT -dea ${SCRIPT_PATH}/config/dea.yaml -dha ${SCRIPT_PATH}/config/dha.yaml -iso $ISO $DEPLOY_TIMEOUT"
-    python deploy.py $DEPLOY_LOG $STORAGE_DIR $PXE_BRIDGE $USE_EXISTING_FUEL $FUEL_CREATION_ONLY $NO_HEALTH_CHECK $NO_DEPLOY_ENVIRONMENT -dea ${SCRIPT_PATH}/config/dea.yaml -dha ${SCRIPT_PATH}/config/dha.yaml -iso $ISO $DEPLOY_TIMEOUT
-fi
+## Fuel deployment engine disabled (replaced by the MCP scripts above)
+#
+# echo "python deploy-config.py -dha ${BASE_CONFIG_URI}/labs/${TARGET_LAB}/${TARGET_POD}/fuel/config/dha.yaml -deab file://${DEPLOY_DIR}/config/dea_base.yaml -deao ${BASE_CONFIG_URI}/labs/${TARGET_LAB}/${TARGET_POD}/fuel/config/dea-pod-override.yaml -scenario-base-uri file://${DEPLOY_DIR}/scenario -scenario ${DEPLOY_SCENARIO} -plugins file://${DEPLOY_DIR}/config/plugins -output ${SCRIPT_PATH}/config"
+#
+# python deploy-config.py -dha ${BASE_CONFIG_URI}/labs/${TARGET_LAB}/${TARGET_POD}/fuel/config/dha.yaml -deab file://${DEPLOY_DIR}/config/dea_base.yaml -deao ${BASE_CONFIG_URI}/labs/${TARGET_LAB}/${TARGET_POD}/fuel/config/dea-pod-override.yaml -scenario-base-uri file://${DEPLOY_DIR}/scenario -scenario ${DEPLOY_SCENARIO} -plugins file://${DEPLOY_DIR}/config/plugins -output ${SCRIPT_PATH}/config
+#
+# if [ $DRY_RUN -eq 0 ]; then
+#     # Download the ISO if it doesn't already exist locally
+#     if [[ $ISO == file://* ]]; then
+#         ISO=${ISO#file://}
+#     else
+#         mkdir -p ${SCRIPT_PATH}/ISO
+#         curl -o ${SCRIPT_PATH}/ISO/image.iso $ISO
+#         ISO=${SCRIPT_PATH}/ISO/image.iso
+#     fi
+#     # Start deployment
+#     echo "python deploy.py $DEPLOY_LOG $STORAGE_DIR $PXE_BRIDGE $USE_EXISTING_FUEL $FUEL_CREATION_ONLY $NO_HEALTH_CHECK $NO_DEPLOY_ENVIRONMENT -dea ${SCRIPT_PATH}/config/dea.yaml -dha ${SCRIPT_PATH}/config/dha.yaml -iso $ISO $DEPLOY_TIMEOUT"
+#     python deploy.py $DEPLOY_LOG $STORAGE_DIR $PXE_BRIDGE $USE_EXISTING_FUEL $FUEL_CREATION_ONLY $NO_HEALTH_CHECK $NO_DEPLOY_ENVIRONMENT -dea ${SCRIPT_PATH}/config/dea.yaml -dha ${SCRIPT_PATH}/config/dha.yaml -iso $ISO $DEPLOY_TIMEOUT
+# fi
 popd > /dev/null
 
 #
diff --git a/mcp/reclass/scripts/create-config-drive.sh b/mcp/reclass/scripts/create-config-drive.sh
new file mode 100755 (executable)
index 0000000..cf87150
--- /dev/null
@@ -0,0 +1,102 @@
+#!/bin/bash
+
+# This will generate a config drive image suitable for use with
+# cloud-init (NoCloud datasource, volume label "cidata"). You may
+# optionally pass in an SSH public key (-k/--ssh-key), a user-data
+# blob (-u/--user-data), a vendor-data blob (-v/--vendor-data) and
+# a hostname (-h/--hostname).
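+#
+# Example (as invoked by infra.sh):
+#   ./create-config-drive.sh -k mcp.rsa.pub -u user-data.sh -h cfg01 images/mcp_cfg01.iso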
+
+usage () {
+       echo "usage: ${0##*/}: [--ssh-key <pubkey>] [--vendor-data <file>] [--user-data <file>] [--hostname <hostname>] <imagename>"
+}
+
+ARGS=$(getopt \
+       -o k:u:v:h: \
+       --long help,hostname:,ssh-key:,user-data:,vendor-data: -n ${0##*/} \
+       -- "$@")
+
+if [ $? -ne 0 ]; then
+       usage >&2
+       exit 2
+fi
+
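+# replace the positional parameters with getopt's normalized output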
+eval set -- "$ARGS"
+
+while :; do
+       case "$1" in
+               --help)
+                       usage
+                       exit 0
+                       ;;
+               -k|--ssh-key)
+                       ssh_key="$2"
+                       shift 2
+                       ;;
+               -u|--user-data)
+                       user_data="$2"
+                       shift 2
+                       ;;
+               -v|--vendor-data)
+                       vendor_data="$2"
+                       shift 2
+                       ;;
+               -h|--hostname)
+                       hostname="$2"
+                       shift 2
+                       ;;
+               --)     shift
+                       break
+                       ;;
+       esac
+done
+
+config_image=$1
+shift
+
+if [ "$ssh_key" ] && [ -f "$ssh_key" ]; then
+       echo "adding pubkey from $ssh_key"
+       ssh_key_data=$(cat "$ssh_key")
+fi
+
+uuid=$(uuidgen)
+if ! [ "$hostname" ]; then
+       hostname="$uuid"
+fi
+
+config_dir=$(mktemp -t -d configXXXXXX)
+trap 'rm -rf $config_dir' EXIT
+
+if [ "$user_data" ] && [ -f "$user_data" ]; then
+       echo "adding user data from $user_data"
+       cp $user_data $config_dir/user-data
+else
+       touch $config_dir/user-data
+fi
+
+if [ "$vendor_data" ] && [ -f "$vendor_data" ]; then
+       echo "adding vendor data from $vendor_data"
+       cp $vendor_data $config_dir/vendor-data
+fi
+
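+# cloud-init's NoCloud datasource finds this meta-data via the "cidata" volume label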
+cat > $config_dir/meta-data <<-EOF
+instance-id: $uuid
+hostname: $hostname
+local-hostname: $hostname
+EOF
+
+if [ "$ssh_key_data" ]; then
+       cat >> $config_dir/meta-data <<-EOF
+       public-keys:
+         - |
+           $ssh_key_data
+       EOF
+fi
+
+
+echo "generating configuration image at $config_image"
+if ! mkisofs -o $config_image -V cidata -r -J --quiet $config_dir; then
+       echo "ERROR: failed to create $config_image" >&2
+       exit 1
+fi
+chmod a+r $config_image
+
diff --git a/mcp/reclass/scripts/infra.sh b/mcp/reclass/scripts/infra.sh
new file mode 100755 (executable)
index 0000000..182d906
--- /dev/null
@@ -0,0 +1,75 @@
+#!/bin/bash
+
+SSH_KEY=mcp.rsa
+SALT_MASTER=192.168.10.100
+BASE_IMAGE=https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
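+# node name => RAM in MiB; adjust to the memory available on the virtualization host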
+declare -A NODES=( [cfg01]=4096 [ctl01]=6144 [ctl02]=6144 [ctl03]=6144 [gtw01]=2048 [cmp01]=2048 )
+
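+# passwordless keypair for SSH access to the VMs (public key is injected via the config drive)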
+[ -f $SSH_KEY ] || ssh-keygen -f $SSH_KEY -N ''
+
+# fetch the base cloud image; wget -nc skips the download if it is already present
+mkdir -p images
+wget -nc $BASE_IMAGE
+
+for node in "${!NODES[@]}"; do
+  # clean up a pre-existing node: destroy it if running, then undefine it
+  if virsh domstate $node >/dev/null 2>&1; then
+    [ "$(virsh domstate $node)" == 'running' ] && virsh destroy $node
+    virsh undefine $node
+  fi
+
+  # create/prepare images
+  [ -f images/mcp_${node}.iso ] || ./create-config-drive.sh -k ${SSH_KEY}.pub -u user-data.sh -h ${node} images/mcp_${node}.iso
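+  # ${BASE_IMAGE/*\/} strips everything up to the last '/', leaving the downloaded image's basename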
+  cp ${BASE_IMAGE/*\/} images/mcp_${node}.qcow2
+  qemu-img resize images/mcp_${node}.qcow2 100G
+done
+
+# create required networks
+for net in pxe mgmt internal public; do
+  if virsh net-info $net >/dev/null 2>&1; then
+    virsh net-destroy ${net}
+    virsh net-undefine ${net}
+  fi
+  virsh net-define net_${net}.xml
+  virsh net-autostart ${net}
+  virsh net-start ${net}
+done
+
+# create vms with specified options
+for node in "${!NODES[@]}"; do
+  virt-install --name ${node} --ram ${NODES[$node]} --vcpus=2 --cpu host --accelerate \
+  --network network:pxe,model=virtio \
+  --network network:mgmt,model=virtio \
+  --network network:internal,model=virtio \
+  --network network:public,model=virtio \
+  --disk path=$(pwd)/images/mcp_${node}.qcow2,format=qcow2,bus=virtio,cache=none,io=native \
+  --boot hd --vnc --console pty --autostart --noreboot \
+  --disk path=$(pwd)/images/mcp_${node}.iso,device=cdrom
+done
+
+# set static ip address for salt master node
+virsh net-update pxe add ip-dhcp-host \
+"<host mac='$(virsh domiflist cfg01 | awk '/pxe/ {print $5}')' name='cfg01' ip='$SALT_MASTER'/>" --live
+
+# start vms
+for node in "${!NODES[@]}"; do
+  virsh start ${node}
+  sleep $((RANDOM % 5 + 1))
+done
+
+CONNECTION_ATTEMPTS=20
+SLEEP=15
+
+# wait until ssh on Salt master is available
+echo "Attempting to ssh to Salt master ..."
+ATTEMPT=1
+
+while (($ATTEMPT <= $CONNECTION_ATTEMPTS)); do
+  # StrictHostKeyChecking=no auto-accepts the host key (and pre-seeds it for salt.sh/openstack.sh)
+  ssh -i ${SSH_KEY} -o StrictHostKeyChecking=no -o ConnectTimeout=5 ubuntu@$SALT_MASTER uptime
+  case $? in
+    (0) echo "${ATTEMPT}> Success"; break ;;
+    (*) echo "${ATTEMPT}/${CONNECTION_ATTEMPTS}> SSH server not ready yet, waiting ${SLEEP} seconds ..." ;;
+  esac
+  sleep $SLEEP
+  ((ATTEMPT+=1))
+done
diff --git a/mcp/reclass/scripts/net_internal.xml b/mcp/reclass/scripts/net_internal.xml
new file mode 100644 (file)
index 0000000..a9abece
--- /dev/null
@@ -0,0 +1,4 @@
+<network>
+  <name>internal</name>
+  <bridge name="internal"/>
+</network>
diff --git a/mcp/reclass/scripts/net_mgmt.xml b/mcp/reclass/scripts/net_mgmt.xml
new file mode 100644 (file)
index 0000000..fde2a23
--- /dev/null
@@ -0,0 +1,4 @@
+<network>
+  <name>mgmt</name>
+  <bridge name="mgmt"/>
+</network>
diff --git a/mcp/reclass/scripts/net_public.xml b/mcp/reclass/scripts/net_public.xml
new file mode 100644 (file)
index 0000000..61650d5
--- /dev/null
@@ -0,0 +1,6 @@
+<network>
+  <name>public</name>
+  <bridge name="public"/>
+  <forward mode="nat"/>
+  <ip address="10.16.0.1" netmask="255.255.255.0" />
+</network>
diff --git a/mcp/reclass/scripts/net_pxe.xml b/mcp/reclass/scripts/net_pxe.xml
new file mode 100644 (file)
index 0000000..92eaa6b
--- /dev/null
@@ -0,0 +1,10 @@
+<network>
+  <name>pxe</name>
+  <bridge name="pxe"/>
+  <forward mode="nat"/>
+  <ip address="192.168.10.1" netmask="255.255.255.0">
+    <dhcp>
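+      <!-- dynamic pool for the nodes; infra.sh pins cfg01 to 192.168.10.100 via a static host entry -->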
+      <range start="192.168.10.100" end="192.168.10.254"/>
+    </dhcp>
+  </ip>
+</network>
diff --git a/mcp/reclass/scripts/openstack.sh b/mcp/reclass/scripts/openstack.sh
new file mode 100755 (executable)
index 0000000..b757e8e
--- /dev/null
@@ -0,0 +1,53 @@
+#!/bin/bash
+#
+# Deploy OpenStack
+#
+
+ssh -i mcp.rsa ubuntu@192.168.10.100 bash -s << OPENSTACK_INSTALL_END
+  sudo -i
+
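+  # -C enables compound matching: 'I@key:value' selects minions by pillar data,
+  # and '-b 1' (batch mode) applies a state to one minion at a time.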
+  salt-call state.apply salt
+  salt '*' state.apply salt
+
+  salt -C 'I@salt:master' state.sls linux
+  salt -C '* and not cfg01*' state.sls linux
+
+  salt '*' state.sls ntp
+
+  salt -C 'I@keepalived:cluster' state.sls keepalived -b 1
+
+  salt -C 'I@rabbitmq:server' state.sls rabbitmq
+  salt -C 'I@rabbitmq:server' cmd.run "rabbitmqctl cluster_status"
+
+  salt -C 'I@glusterfs:server' state.sls glusterfs.server.service
+  salt -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
+  salt -C 'I@glusterfs:server' cmd.run "gluster peer status; gluster volume status" -b 1
+
+  salt -C 'I@galera:master' state.sls galera
+  salt -C 'I@galera:slave' state.sls galera
+  salt -C 'I@galera:master' mysql.status | grep -A1 wsrep_cluster_size
+
+  salt -C 'I@haproxy:proxy' state.sls haproxy
+  salt -C 'I@memcached:server' state.sls memcached
+
+  salt -C 'I@keystone:server' state.sls keystone.server -b 1
+  salt -C 'I@keystone:server' cmd.run "service apache2 restart"
+  salt -C 'I@keystone:client' state.sls keystone.client
+  salt -C 'I@keystone:server' cmd.run ". /root/keystonercv3; openstack user list"
+
+  salt -C 'I@glance:server' state.sls glance -b 1
+  salt -C 'I@nova:controller' state.sls nova -b 1
+  salt -C 'I@heat:server' state.sls heat -b 1
+  salt -C 'I@cinder:controller' state.sls cinder -b 1
+
+  salt -C 'I@neutron:server' state.sls neutron -b 1
+  salt -C 'I@neutron:gateway' state.sls neutron
+
+  salt -C 'I@nova:compute' state.sls nova
+  salt -C 'I@neutron:compute' state.sls neutron
+
+  salt -C 'I@keystone:server' cmd.run ". /root/keystonercv3; nova service-list"
+  salt -C 'I@keystone:server' cmd.run ". /root/keystonercv3; neutron agent-list"
+  salt -C 'I@keystone:server' cmd.run ". /root/keystonercv3; heat stack-list"
+  salt -C 'I@keystone:server' cmd.run ". /root/keystonercv3; cinder list"
+OPENSTACK_INSTALL_END
diff --git a/mcp/reclass/scripts/salt.sh b/mcp/reclass/scripts/salt.sh
new file mode 100755 (executable)
index 0000000..c202ab5
--- /dev/null
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Deploy Salt Master
+#
+
+# ssh to cfg01
+ssh -i mcp.rsa ubuntu@192.168.10.100 bash -s << SALT_INSTALL_END
+  sudo -i
+
+  apt-get update
+  apt-get install -y git curl subversion
+
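+  # GitHub's subversion bridge lets svn export fetch just the deploy/scripts subdirectory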
+  svn export --force https://github.com/salt-formulas/salt-formulas/trunk/deploy/scripts /srv/salt/scripts
+  git clone --depth=1 https://git.opnfv.org/fuel
+  ln -s fuel/mcp/reclass /srv/salt/reclass
+
+  cd /srv/salt/scripts
+  MASTER_HOSTNAME=cfg01.virtual-mcp-ocata-ovs.local ./salt-master-init.sh
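+  # accept all pending minion keys without confirmation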
+  salt-key -Ay
+SALT_INSTALL_END
diff --git a/mcp/reclass/scripts/user-data.sh b/mcp/reclass/scripts/user-data.sh
new file mode 100755 (executable)
index 0000000..2b9b684
--- /dev/null
@@ -0,0 +1,10 @@
+#!/bin/bash
+wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/latest/SALTSTACK-GPG-KEY.pub | sudo apt-key add -
+echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/latest xenial main" > /etc/apt/sources.list.d/salt.list
+apt-get update
+apt-get install -y salt-minion
+rm -f /etc/salt/minion_id
+rm -f /etc/salt/pki/minion/minion_master.pub
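+# minion id must match the reclass node name, i.e. <hostname>.virtual-mcp-ocata-ovs.local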
+echo "id: $(hostname).virtual-mcp-ocata-ovs.local" > /etc/salt/minion
+echo "master: 192.168.10.100" >> /etc/salt/minion
+service salt-minion restart