Rewrite infra deployment scripts 19/36719/3
author Michael Polenchuk <mpolenchuk@mirantis.com>
Thu, 29 Jun 2017 11:52:28 +0000 (15:52 +0400)
committer Michael Polenchuk <mpolenchuk@mirantis.com>
Mon, 3 Jul 2017 07:18:25 +0000 (11:18 +0400)
* bring in scenario files
* shift infra code into functions

Change-Id: I650a26d03d842c3afcc7fcb97b84ef4826827a38
Signed-off-by: Michael Polenchuk <mpolenchuk@mirantis.com>
16 files changed:
ci/deploy.sh
mcp/config/defaults.yaml [new file with mode: 0644]
mcp/config/os-nosdn-nofeature-noha.yaml [new file with mode: 0644]
mcp/config/os-nosdn-ovs-noha.yaml [new file with mode: 0644]
mcp/config/os-odl_l2-nofeature-noha.yaml [new file with mode: 0644]
mcp/reclass/scripts/infra.sh [deleted file]
mcp/scripts/create-config-drive.sh [moved from mcp/reclass/scripts/create-config-drive.sh with 95% similarity]
mcp/scripts/dpdk.sh [moved from mcp/reclass/scripts/dpdk.sh with 84% similarity]
mcp/scripts/lib.sh [new file with mode: 0644]
mcp/scripts/net_internal.xml [moved from mcp/reclass/scripts/net_internal.xml with 100% similarity]
mcp/scripts/net_mgmt.xml [moved from mcp/reclass/scripts/net_mgmt.xml with 100% similarity]
mcp/scripts/net_public.xml [moved from mcp/reclass/scripts/net_public.xml with 100% similarity]
mcp/scripts/net_pxe.xml [moved from mcp/reclass/scripts/net_pxe.xml with 100% similarity]
mcp/scripts/openstack.sh [moved from mcp/reclass/scripts/openstack.sh with 95% similarity]
mcp/scripts/salt.sh [moved from mcp/reclass/scripts/salt.sh with 90% similarity]
mcp/scripts/user-data.template [moved from mcp/reclass/scripts/user-data.template with 100% similarity]

index 8389bc0..77030a4 100755 (executable)
@@ -1,7 +1,7 @@
 #!/bin/bash
 set -ex
 ##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
+# Copyright (c) 2017 Ericsson AB, Mirantis Inc. and others.
 # jonas.bjurel@ericsson.com
 # All rights reserved. This program and the accompanying materials
 # are made available under the terms of the Apache License, Version 2.0
@@ -111,7 +111,7 @@ clean() {
 # BEGIN of shorthand variables for internal use
 #
 SCRIPT_PATH=$(readlink -f $(dirname ${BASH_SOURCE[0]}))
-DEPLOY_DIR=$(cd ${SCRIPT_PATH}/../mcp/reclass/scripts; pwd)
+DEPLOY_DIR=$(cd ${SCRIPT_PATH}/../mcp/scripts; pwd)
 PXE_BRIDGE=''
 NO_HEALTH_CHECK=''
 USE_EXISTING_FUEL=''
@@ -217,17 +217,6 @@ if [[ $EUID -ne 0 ]]; then
     exit 1
 fi
 
-if [ -z $BASE_CONFIG_URI ] || [ -z $TARGET_LAB ] || \
-   [ -z $TARGET_POD ] || [ -z $DEPLOY_SCENARIO ] || \
-   [ -z $ISO ]; then
-    echo "Arguments not according to new argument style"
-    echo "Trying old-style compatibility mode"
-    pushd ${DEPLOY_DIR} > /dev/null
-    python deploy.py "$@"
-    popd > /dev/null
-    exit 0
-fi
-
 # Enable the automatic exit trap
 trap do_exit SIGINT SIGTERM EXIT
 
@@ -240,23 +229,47 @@ pushd ${DEPLOY_DIR} > /dev/null
 # Prepare the deploy config files based on lab/pod information, deployment
 # scenario, etc.
 
-# Set cluster domain
-case $DEPLOY_SCENARIO in
-    *dpdk*) CLUSTER_DOMAIN=virtual-mcp-ocata-ovs-dpdk.local ;;
-    *) CLUSTER_DOMAIN=virtual-mcp-ocata-ovs.local ;;
-esac
+# Install required packages
+[ -n "$(command -v apt-get)" ] && apt-get install -y mkisofs curl virtinst cpu-checker qemu-kvm
+[ -n "$(command -v yum)" ] && yum install -y genisoimage curl virt-install qemu-kvm
+
+# Check scenario file existence
+if [[ ! -f  ../config/${DEPLOY_SCENARIO}.yaml ]]; then
+    echo "[WARN] ${DEPLOY_SCENARIO}.yaml not found, setting simplest scenario"
+    DEPLOY_SCENARIO='os-nosdn-nofeature-noha'
+fi
+
+# Get required infra deployment data
+source lib.sh
+eval $(parse_yaml ../config/defaults.yaml)
+eval $(parse_yaml ../config/${DEPLOY_SCENARIO}.yaml)
+
+declare -A virtual_nodes_ram
+for node in "${virtual_nodes[@]}"; do
+    virtual_custom_ram="virtual_${node}_ram"
+    virtual_nodes_ram[$node]=${!virtual_custom_ram:-$virtual_default_ram}
+done
 
-export CLUSTER_DOMAIN
+export CLUSTER_DOMAIN=$cluster_domain
 export SSH_KEY=${SSH_KEY:-mcp.rsa}
 export SALT_MASTER=${SALT_MASTER_IP:-192.168.10.100}
 export SSH_OPTS="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i ${SSH_KEY}"
 
-./infra.sh
+# Infra setup
+generate_ssh_key
+prepare_vms virtual_nodes $base_image
+create_networks
+create_vms virtual_nodes virtual_nodes_ram
+update_pxe_network
+start_vms virtual_nodes
+check_connection
+
+# Openstack cluster setup
 ./salt.sh
 ./openstack.sh
 
-# enable dpdk on computes
-[[ "$DEPLOY_SCENARIO" =~ dpdk ]] && ./dpdk.sh
+# Enable dpdk on computes
+[[ "$DEPLOY_SCENARIO" =~ (ovs|dpdk) ]] && ./dpdk.sh
 
 ## Disable Fuel deployment engine
 #
diff --git a/mcp/config/defaults.yaml b/mcp/config/defaults.yaml
new file mode 100644 (file)
index 0000000..b841e88
--- /dev/null
@@ -0,0 +1,6 @@
+base_image: https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+virtual:
+  default:
+    vcpus: 2
+    ram: 4096
+
diff --git a/mcp/config/os-nosdn-nofeature-noha.yaml b/mcp/config/os-nosdn-nofeature-noha.yaml
new file mode 100644 (file)
index 0000000..526ea57
--- /dev/null
@@ -0,0 +1,14 @@
+cluster:
+  domain: virtual-mcp-ocata-ovs.local
+virtual:
+  nodes:
+    - cfg01
+    - ctl01
+    - cmp01
+    - cmp02
+    - gtw01
+  ctl01:
+    vcpus: 4
+    ram: 14336
+  gtw01:
+    ram: 2048
diff --git a/mcp/config/os-nosdn-ovs-noha.yaml b/mcp/config/os-nosdn-ovs-noha.yaml
new file mode 100644 (file)
index 0000000..ef35d72
--- /dev/null
@@ -0,0 +1,18 @@
+cluster:
+  domain: virtual-mcp-ocata-ovs-dpdk.local
+virtual:
+  nodes:
+    - cfg01
+    - ctl01
+    - cmp01
+    - cmp02
+    - gtw01
+  ctl01:
+    vcpus: 4
+    ram: 14336
+  gtw01:
+    ram: 2048
+  cmp01:
+    ram: 6144
+  cmp02:
+    ram: 6144
diff --git a/mcp/config/os-odl_l2-nofeature-noha.yaml b/mcp/config/os-odl_l2-nofeature-noha.yaml
new file mode 100644 (file)
index 0000000..d981079
--- /dev/null
@@ -0,0 +1,17 @@
+cluster:
+  domain: virtual-mcp-ocata-odl.local
+virtual:
+  nodes:
+    - cfg01
+    - ctl01
+    - cmp01
+    - gtw01
+    - odl01
+  ctl01:
+    vcpus: 4
+    ram: 14336
+  gtw01:
+    ram: 2048
+  odl01:
+    vcpus: 4
+    ram: 5120
diff --git a/mcp/reclass/scripts/infra.sh b/mcp/reclass/scripts/infra.sh
deleted file mode 100755 (executable)
index 72ad5aa..0000000
+++ /dev/null
@@ -1,82 +0,0 @@
-#!/bin/bash
-
-BASE_IMAGE=https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
-declare -A NODES=( [cfg01]=4096 [ctl01]=14336 [gtw01]=2048 [cmp01]=6144 [cmp02]=6144 )
-
-# get required packages
-apt-get install -y mkisofs curl virtinst cpu-checker qemu-kvm
-
-# generate ssh key
-[ -f $SSH_KEY ] || ssh-keygen -f $SSH_KEY -N ''
-install -o jenkins -m 0600 ${SSH_KEY} /tmp/
-
-# get base image
-mkdir -p images
-wget -P /tmp -nc $BASE_IMAGE
-
-# generate cloud-init user data
-envsubst < user-data.template > user-data.sh
-
-for node in "${!NODES[@]}"; do
-  # clean up existing nodes
-  if [ "$(virsh domstate $node 2>/dev/null)" == 'running' ]; then
-    virsh destroy $node
-    virsh undefine $node
-  fi
-
-  # create/prepare images
-  [ -f images/mcp_${node}.iso ] || ./create-config-drive.sh -k ${SSH_KEY}.pub -u user-data.sh -h ${node} images/mcp_${node}.iso
-  cp /tmp/${BASE_IMAGE/*\/} images/mcp_${node}.qcow2
-  qemu-img resize images/mcp_${node}.qcow2 100G
-done
-
-# create required networks
-for net in pxe mgmt internal public; do
-  if virsh net-info $net >/dev/null 2>&1; then
-    virsh net-destroy ${net}
-    virsh net-undefine ${net}
-  fi
-  virsh net-define net_${net}.xml
-  virsh net-autostart ${net}
-  virsh net-start ${net}
-done
-
-# create vms with specified options
-for node in "${!NODES[@]}"; do
-  virt-install --name ${node} --ram ${NODES[$node]} --vcpus=2 --cpu host --accelerate \
-  --network network:pxe,model=virtio \
-  --network network:mgmt,model=virtio \
-  --network network:internal,model=virtio \
-  --network network:public,model=virtio \
-  --disk path=$(pwd)/images/mcp_${node}.qcow2,format=qcow2,bus=virtio,cache=none,io=native \
-  --os-type linux --os-variant none \
-  --boot hd --vnc --console pty --autostart --noreboot \
-  --disk path=$(pwd)/images/mcp_${node}.iso,device=cdrom
-done
-
-# set static ip address for salt master node
-virsh net-update pxe add ip-dhcp-host \
-"<host mac='$(virsh domiflist cfg01 | awk '/pxe/ {print $5}')' name='cfg01' ip='$SALT_MASTER'/>" --live
-
-# start vms
-for node in "${!NODES[@]}"; do
-  virsh start ${node}
-  sleep $[RANDOM%5+1]
-done
-
-CONNECTION_ATTEMPTS=60
-SLEEP=5
-
-# wait until ssh on Salt master is available
-echo "Attempting to ssh to Salt master ..."
-ATTEMPT=1
-
-while (($ATTEMPT <= $CONNECTION_ATTEMPTS)); do
-  ssh $SSH_OPTS ubuntu@$SALT_MASTER uptime
-  case $? in
-    (0) echo "${ATTEMPT}> Success"; break ;;
-    (*) echo "${ATTEMPT}/${CONNECTION_ATTEMPTS}> ssh server ain't ready yet, waiting for ${SLEEP} seconds ..." ;;
-  esac
-  sleep $SLEEP
-  ((ATTEMPT+=1))
-done
similarity index 95%
rename from mcp/reclass/scripts/create-config-drive.sh
rename to mcp/scripts/create-config-drive.sh
index cf87150..df3f72f 100755 (executable)
@@ -67,14 +67,14 @@ config_dir=$(mktemp -t -d configXXXXXX)
 
 if [ "$user_data" ] && [ -f "$user_data" ]; then
        echo "adding user data from $user_data"
-       cp $user_data $config_dir/user-data
+       cp ${user_data} ${config_dir}/user-data
 else
        touch $config_dir/user-data
 fi
 
 if [ "$vendor_data" ] && [ -f "$vendor_data" ]; then
        echo "adding vendor data from $vendor_data"
-       cp $vendor_data $config_dir/vendor-data
+       cp ${vendor_data} ${config_dir}/vendor-data
 fi
 
 cat > $config_dir/meta-data <<-EOF
similarity index 84%
rename from mcp/reclass/scripts/dpdk.sh
rename to mcp/scripts/dpdk.sh
index 4e4cd0b..faa4390 100755 (executable)
@@ -3,7 +3,7 @@
 # Enable DPDK on compute nodes
 #
 
-ssh $SSH_OPTS ubuntu@$SALT_MASTER bash -s << DPDK_INSTALL_END
+ssh ${SSH_OPTS} ubuntu@${SALT_MASTER} bash -s << DPDK_INSTALL_END
   sudo -i
 
   salt -C 'I@nova:compute' system.reboot
diff --git a/mcp/scripts/lib.sh b/mcp/scripts/lib.sh
new file mode 100644 (file)
index 0000000..50f441a
--- /dev/null
@@ -0,0 +1,128 @@
+#
+# Library of shell functions
+#
+
+# Create the cluster SSH keypair at $SSH_KEY (only if it does not exist
+# yet) and stage a jenkins-owned private copy in /tmp for CI jobs.
+generate_ssh_key() {
+  [ -f "$SSH_KEY" ] || ssh-keygen -f ${SSH_KEY} -N ''
+  install -o jenkins -m 0600 ${SSH_KEY} /tmp/
+}
+
+# Download the VM base cloud image into /tmp (no-clobber, so re-runs
+# reuse the cached copy) and ensure the local images/ work dir exists.
+# $1 - URL of the base image
+get_base_image() {
+  local base_image=$1
+
+  mkdir -p images
+  wget -P /tmp -nc $base_image
+}
+
+# Destroy and undefine every libvirt domain whose name matches the node
+# naming scheme (three word-chars followed by two digits, e.g. ctl01).
+# NOTE(review): the grep pattern is unanchored — any domain merely
+# containing such a substring is torn down; confirm this is intended.
+cleanup_vms() {
+  # clean up existing nodes
+  for node in $(virsh list --name | grep -P '\w{3}\d{2}'); do
+    virsh destroy $node
+    virsh undefine $node
+  done
+}
+
+# Prepare boot media for every virtual node: wipe stale domains, fetch
+# the base image, render cloud-init user data from the template, then
+# build a per-node config-drive ISO and a qcow2 disk resized to 100G.
+# $1 - NAME of an array variable holding the node names (bash nameref)
+# $2 - URL of the base cloud image
+prepare_vms() {
+  local -n vnodes=$1
+  local base_image=$2
+
+  cleanup_vms
+  get_base_image $base_image
+  # envsubst expands the current environment (e.g. $SALT_MASTER) into
+  # the cloud-init user-data script consumed by create-config-drive.sh
+  envsubst < user-data.template > user-data.sh
+
+  for node in "${vnodes[@]}"; do
+    # create/prepare images
+    ./create-config-drive.sh -k ${SSH_KEY}.pub -u user-data.sh -h ${node} images/mcp_${node}.iso
+    cp /tmp/${base_image/*\/} images/mcp_${node}.qcow2
+    qemu-img resize images/mcp_${node}.qcow2 100G
+  done
+}
+
+# (Re)create the four libvirt networks used by the deployment from the
+# net_<name>.xml definitions in the current directory; any pre-existing
+# network of the same name is destroyed and undefined first.
+create_networks() {
+  # create required networks
+  for net in pxe mgmt internal public; do
+    if virsh net-info $net >/dev/null 2>&1; then
+      virsh net-destroy ${net}
+      virsh net-undefine ${net}
+    fi
+    virsh net-define net_${net}.xml
+    virsh net-autostart ${net}
+    virsh net-start ${net}
+  done
+}
+
+# Define (but do not start) one libvirt domain per node, attached to all
+# four networks, booting from its qcow2 disk with the config-drive ISO
+# as cdrom. RAM is taken per node from the associative array $2; vCPUs
+# are fixed at 2 here regardless of any per-node vcpus scenario setting.
+# $1 - NAME of an array variable holding the node names (bash nameref)
+# $2 - NAME of an associative array mapping node -> RAM in MiB
+create_vms() {
+  local -n vnodes=$1
+  local -n vnodes_ram=$2
+
+  # create vms with specified options
+  for node in "${vnodes[@]}"; do
+    virt-install --name ${node} --ram ${vnodes_ram[$node]} --vcpus=2 --cpu host --accelerate \
+    --network network:pxe,model=virtio \
+    --network network:mgmt,model=virtio \
+    --network network:internal,model=virtio \
+    --network network:public,model=virtio \
+    --disk path=$(pwd)/images/mcp_${node}.qcow2,format=qcow2,bus=virtio,cache=none,io=native \
+    --os-type linux --os-variant none \
+    --boot hd --vnc --console pty --autostart --noreboot \
+    --disk path=$(pwd)/images/mcp_${node}.iso,device=cdrom
+  done
+}
+
+# Pin the Salt master (cfg01) to the fixed $SALT_MASTER address by adding
+# a DHCP host entry for its pxe-network MAC to the live pxe network.
+update_pxe_network() {
+  # set static ip address for salt master node
+  virsh net-update pxe add ip-dhcp-host \
+  "<host mac='$(virsh domiflist cfg01 | awk '/pxe/ {print $5}')' name='cfg01' ip='$SALT_MASTER'/>" --live
+}
+
+# Boot all defined node domains, sleeping a random 1-5 seconds between
+# starts to stagger the boot load on the host.
+# $1 - NAME of an array variable holding the node names (bash nameref)
+start_vms() {
+  local -n vnodes=$1
+
+  # start vms
+  for node in "${vnodes[@]}"; do
+    virsh start ${node}
+    sleep $[RANDOM%5+1]
+  done
+}
+
+# Poll SSH on the Salt master until it answers or 60 attempts (5 s apart)
+# are exhausted. set +e/-e brackets the loop because the caller runs
+# under `set -e` and the probe is expected to fail while the VM boots.
+# NOTE(review): this ssh uses only -i ${SSH_KEY}, not $SSH_OPTS, so
+# strict host-key checking is in effect here — confirm that is intended.
+check_connection() {
+  local total_attempts=60
+  local sleep_time=5
+  local attempt=1
+
+  set +e
+  echo '[INFO] Attempting to get into Salt master ...'
+
+  # wait until ssh on Salt master is available
+  while (($attempt <= $total_attempts)); do
+    ssh -i ${SSH_KEY} ubuntu@${SALT_MASTER} uptime
+    case $? in
+      0) echo "${attempt}> Success"; break ;;
+      *) echo "${attempt}/${total_attempts}> ssh server ain't ready yet, waiting for ${sleep_time} seconds ..." ;;
+    esac
+    sleep $sleep_time
+    ((attempt+=1))
+  done
+  set -e
+}
+
+# Flatten a simple YAML file into eval-able shell assignments: nested
+# keys are joined with "_" (2-space indentation assumed per level), each
+# value is emitted as a one-element array assignment, and the trailing
+# sed turns assignments produced by list items ("name_=...") into "+="
+# appends so YAML sequences accumulate into shell arrays. Handles only
+# plain scalar/sequence YAML — no anchors, flow style or multi-line.
+# $1 - path to the YAML file
+# $2 - optional prefix prepended to every generated variable name
+parse_yaml() {
+  local prefix=$2
+  local s
+  local w
+  local fs
+  s='[[:space:]]*'
+  w='[a-zA-Z0-9_]*'
+  # use the ASCII FS control char (0x1c) as an unambiguous field separator
+  fs="$(echo @|tr @ '\034')"
+  sed -ne "s|^\($s\)\($w\)$s:$s\"\(.*\)\"$s\$|\1$fs\2$fs\3|p" \
+      -e "s|^\($s\)\($w\)$s[:-]$s\(.*\)$s\$|\1$fs\2$fs\3|p" "$1" |
+  awk -F"$fs" '{
+  indent = length($1)/2;
+  vname[indent] = $2;
+  for (i in vname) {if (i > indent) {delete vname[i]}}
+      if (length($3) > 0) {
+          vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
+          printf("%s%s%s=(\"%s\")\n", "'"$prefix"'",vn, $2, $3);
+      }
+  }' | sed 's/_=/+=/g'
+}
similarity index 95%
rename from mcp/reclass/scripts/openstack.sh
rename to mcp/scripts/openstack.sh
index 9e636dd..88db83d 100755 (executable)
@@ -3,7 +3,7 @@
 # Deploy Openstack
 #
 
-ssh $SSH_OPTS ubuntu@$SALT_MASTER bash -s << OPENSTACK_INSTALL_END
+ssh ${SSH_OPTS} ubuntu@${SALT_MASTER} bash -s << OPENSTACK_INSTALL_END
   sudo -i
 
   salt-call state.apply salt
similarity index 90%
rename from mcp/reclass/scripts/salt.sh
rename to mcp/scripts/salt.sh
index 3b6fa99..56a6fb3 100755 (executable)
@@ -4,7 +4,7 @@
 #
 
 # ssh to cfg01
-ssh $SSH_OPTS ubuntu@$SALT_MASTER bash -s << SALT_INSTALL_END
+ssh ${SSH_OPTS} ubuntu@${SALT_MASTER} bash -s << SALT_INSTALL_END
   sudo -i
 
   echo -n 'Checking out cloud-init has finished running ...'