Print undercloud IP after deployment
[apex.git] / ci / deploy.sh
index d43960e..7de3c7f 100755 (executable)
@@ -22,7 +22,6 @@ blue=$(tput setaf 4 || echo "")
 red=$(tput setaf 1 || echo "")
 green=$(tput setaf 2 || echo "")
 
-vm_index=4
 interactive="FALSE"
 ping_site="8.8.8.8"
 ntp_server="pool.ntp.org"
@@ -38,11 +37,15 @@ declare -A NET_MAP
 
 SSH_OPTIONS=(-o StrictHostKeyChecking=no -o GlobalKnownHostsFile=/dev/null -o UserKnownHostsFile=/dev/null -o LogLevel=error)
 DEPLOY_OPTIONS=""
-RESOURCES=${RESOURCES:-'/var/opt/opnfv/images'}
 CONFIG=${CONFIG:-'/var/opt/opnfv'}
+RESOURCES=${RESOURCES:-"$CONFIG/images"}
+LIB=${LIB:-"$CONFIG/lib"}
 OPNFV_NETWORK_TYPES="admin_network private_network public_network storage_network"
+
 VM_CPUS=4
 VM_RAM=8
+VM_COMPUTES=2
+
 # Netmap used to map networks to OVS bridge names
 NET_MAP['admin_network']="br-admin"
 NET_MAP['private_network']="br-private"
@@ -51,6 +54,19 @@ NET_MAP['storage_network']="br-storage"
 ext_net_type="interface"
 ip_address_family=4
 
+# Libraries
+lib_files=(
+$LIB/common-functions.sh
+$LIB/utility-functions.sh
+$LIB/installer/onos/onos_gw_mac_update.sh
+)
+for lib_file in ${lib_files[@]}; do
+  if ! source $lib_file; then
+    echo -e "${red}ERROR: Failed to source $lib_file${reset}"
+    exit 1
+  fi
+done
+
 ##FUNCTIONS
 ##translates yaml into variables
 ##params: filename, prefix (ex. "config_")
@@ -106,7 +122,8 @@ parse_setting_value() {
 
 ##parses network settings yaml into globals
 parse_network_settings() {
-  if local output=$(python3.4 -B $CONFIG/lib/python/apex-python-utils.py parse_net_settings -n $NETSETS -i $net_isolation_enabled); then
+  local output
+  if output=$(python3.4 -B $LIB/python/apex-python-utils.py parse-net-settings -s $NETSETS -i $net_isolation_enabled -e $CONFIG/network-environment.yaml); then
       echo -e "${blue}${output}${reset}"
       eval "$output"
   else
@@ -117,7 +134,8 @@ parse_network_settings() {
 
 ##parses deploy settings yaml into globals
 parse_deploy_settings() {
-  if local output=$(python3.4 -B $CONFIG/lib/python/apex-python-utils.py parse-deploy-settings -f $DEPLOY_SETTINGS_FILE); then
+  local output
+  if output=$(python3.4 -B $LIB/python/apex-python-utils.py parse-deploy-settings -f $DEPLOY_SETTINGS_FILE); then
       echo -e "${blue}${output}${reset}"
       eval "$output"
   else
@@ -156,7 +174,7 @@ parse_inventory_file() {
 
   node_total=$node_count
 
-  if [[ "$node_total" -lt 5 && ( ha_enabled == "TRUE" || "$ha_enabled" == "true" ) ]]; then
+  if [[ "$node_total" -lt 5 && "$ha_enabled" == "True" ]]; then
     echo -e "${red}ERROR: You must provide at least 5 nodes for HA baremetal deployment${reset}"
     exit 1
   elif [[ "$node_total" -lt 2 ]]; then
@@ -273,7 +291,7 @@ function configure_deps {
   virsh net-list --all | grep -E "default\s+active\s+yes" > /dev/null || virsh net-autostart --network default
 
   if [[ -z "$virtual" || "$virtual" == "FALSE" ]]; then
-    for network in ${OPNFV_NETWORK_TYPES}; do
+    for network in ${enabled_network_list}; do
       echo "${blue}INFO: Creating Virsh Network: $network & OVS Bridge: ${NET_MAP[$network]}${reset}"
       ovs-vsctl list-br | grep "^${NET_MAP[$network]}$" > /dev/null || ovs-vsctl add-br ${NET_MAP[$network]}
       virsh net-list --all | grep $network > /dev/null || (cat > ${libvirt_dir}/apex-virsh-net.xml && virsh net-define ${libvirt_dir}/apex-virsh-net.xml) << EOF
@@ -492,10 +510,20 @@ function setup_virtual_baremetal {
 EOF
 
   # next create the virtual machines and add their definitions to the file
+  if [ "$ha_enabled" == "False" ]; then
+      # 1 controller + computes
+      # zero based so just pass compute count
+      vm_index=$VM_COMPUTES
+  else
+      # 3 controllers + computes
+      # zero based so add 2 to compute count
+      vm_index=$((2+$VM_COMPUTES))
+  fi
+
   for i in $(seq 0 $vm_index); do
     if ! virsh list --all | grep baremetal${i} > /dev/null; then
       define_vm baremetal${i} network 41 'admin_network' $vcpus $ramsize
-      for n in private_network public_network storage_network; do
+      for n in private_network public_network storage_network api_network; do
         if [[ $enabled_network_list =~ $n ]]; then
           echo -n "$n "
           virsh attach-interface --domain baremetal${i} --type network --source $n --model rtl8139 --config
@@ -507,6 +535,13 @@ EOF
     #virsh vol-list default | grep baremetal${i} 2>&1> /dev/null || virsh vol-create-as default baremetal${i}.qcow2 41G --format qcow2
     mac=$(virsh domiflist baremetal${i} | grep admin_network | awk '{ print $5 }')
 
+    if [ "$VM_COMPUTES" -gt 0 ]; then
+      capability="profile:compute"
+      VM_COMPUTES=$((VM_COMPUTES - 1))
+    else
+      capability="profile:control"
+    fi
+
     cat >> $CONFIG/instackenv-virt.json << EOF
     {
       "pm_addr": "192.168.122.1",
@@ -519,7 +554,8 @@ EOF
       "cpu": "$vcpus",
       "memory": "$ramsize",
       "disk": "41",
-      "arch": "x86_64"
+      "arch": "x86_64",
+      "capabilities": "$capability"
     },
 EOF
   done
@@ -539,6 +575,12 @@ EOF
   "ssh-user": "root"
 }
 EOF
+  #Overwrite the tripleo-incubator domain.xml with our own, keeping a backup.
+  if [ ! -f /usr/share/tripleo/templates/domain.xml.bak ]; then
+    /usr/bin/mv -f /usr/share/tripleo/templates/domain.xml /usr/share/tripleo/templates/domain.xml.bak
+  fi
+
+  /usr/bin/cp -f $LIB/installer/domain.xml /usr/share/tripleo/templates/domain.xml
 }
 
 ##Create virtual nodes in virsh
@@ -589,71 +631,38 @@ function define_vm () {
                                               --baremetal-interface $4
 }
 
-##Set network-environment settings
-##params: network-environment file to edit
-function configure_network_environment {
-  local tht_dir
-  tht_dir=/usr/share/openstack-tripleo-heat-templates/network
-
-  sed -i '/ControlPlaneSubnetCidr/c\\  ControlPlaneSubnetCidr: "'${admin_network_cidr##*/}'"' $1
-  sed -i '/ControlPlaneDefaultRoute/c\\  ControlPlaneDefaultRoute: '${admin_network_provisioner_ip}'' $1
-  sed -i '/ExternalNetCidr/c\\  ExternalNetCidr: '${public_network_cidr}'' $1
-  sed -i "/ExternalAllocationPools/c\\  ExternalAllocationPools: [{'start': '${public_network_usable_ip_range%%,*}', 'end': '${public_network_usable_ip_range##*,}'}]" $1
-  sed -i '/ExternalInterfaceDefaultRoute/c\\  ExternalInterfaceDefaultRoute: '${public_network_gateway}'' $1
-  sed -i '/EC2MetadataIp/c\\  EC2MetadataIp: '${admin_network_provisioner_ip}'' $1
-
-  # check for private network
-  if [[ ! -z "$private_network_enabled" && "$private_network_enabled" == "True" ]]; then
-      sed -i 's#^.*Network::Tenant.*$#  OS::TripleO::Network::Tenant: '${tht_dir}'/tenant.yaml#' $1
-      sed -i 's#^.*Controller::Ports::TenantPort:.*$#  OS::TripleO::Controller::Ports::TenantPort: '${tht_dir}'/ports/tenant.yaml#' $1
-      sed -i 's#^.*Compute::Ports::TenantPort:.*$#  OS::TripleO::Compute::Ports::TenantPort: '${tht_dir}'/ports/tenant.yaml#' $1
-      sed -i "/TenantAllocationPools/c\\  TenantAllocationPools: [{'start': '${private_network_usable_ip_range%%,*}', 'end': '${private_network_usable_ip_range##*,}'}]" $1
-      sed -i '/TenantNetCidr/c\\  TenantNetCidr: '${private_network_cidr}'' $1
-  else
-      sed -i 's#^.*Network::Tenant.*$#  OS::TripleO::Network::Tenant: '${tht_dir}'/noop.yaml#' $1
-      sed -i 's#^.*Controller::Ports::TenantPort:.*$#  OS::TripleO::Controller::Ports::TenantPort: '${tht_dir}'/ports/noop.yaml#' $1
-      sed -i 's#^.*Compute::Ports::TenantPort:.*$#  OS::TripleO::Compute::Ports::TenantPort: '${tht_dir}'/ports/noop.yaml#' $1
-  fi
-
-  # check for storage network
-  if [[ ! -z "$storage_network_enabled" && "$storage_network_enabled" == "True" ]]; then
-      sed -i 's#^.*Network::Storage:.*$#  OS::TripleO::Network::Storage: '${tht_dir}'/storage.yaml#' $1
-      sed -i 's#^.*Network::Ports::StorageVipPort:.*$#  OS::TripleO::Network::Ports::StorageVipPort: '${tht_dir}'/ports/storage.yaml#' $1
-      sed -i 's#^.*Controller::Ports::StoragePort:.*$#  OS::TripleO::Controller::Ports::StoragePort: '${tht_dir}'/ports/storage.yaml#' $1
-      sed -i 's#^.*Compute::Ports::StoragePort:.*$#  OS::TripleO::Compute::Ports::StoragePort: '${tht_dir}'/ports/storage.yaml#' $1
-      sed -i "/StorageAllocationPools/c\\  StorageAllocationPools: [{'start': '${storage_network_usable_ip_range%%,*}', 'end': '${storage_network_usable_ip_range##*,}'}]" $1
-      sed -i '/StorageNetCidr/c\\  StorageNetCidr: '${storage_network_cidr}'' $1
-  else
-      sed -i 's#^.*Network::Storage:.*$#  OS::TripleO::Network::Storage: '${tht_dir}'/noop.yaml#' $1
-      sed -i 's#^.*Network::Ports::StorageVipPort:.*$#  OS::TripleO::Network::Ports::StorageVipPort: '${tht_dir}'/ports/noop.yaml#' $1
-      sed -i 's#^.*Controller::Ports::StoragePort:.*$#  OS::TripleO::Controller::Ports::StoragePort: '${tht_dir}'/ports/noop.yaml#' $1
-      sed -i 's#^.*Compute::Ports::StoragePort:.*$#  OS::TripleO::Compute::Ports::StoragePort: '${tht_dir}'/ports/noop.yaml#' $1
-  fi
-
-  # check for ODL L3
-  if [ "${deploy_options_array['sdn_l3']}" == 'true' ]; then
-      ext_net_type=br-ex
-  fi
-
-}
 ##Copy over the glance images and instackenv json file
 ##params: none
 function configure_undercloud {
-
+  local controller_nic_template compute_nic_template
   echo
   echo "Copying configuration files to Undercloud"
   if [[ "$net_isolation_enabled" == "TRUE" ]]; then
-    configure_network_environment $CONFIG/network-environment.yaml
     echo -e "${blue}Network Environment set for Deployment: ${reset}"
-    cat $CONFIG/network-environment.yaml
-    scp ${SSH_OPTIONS[@]} $CONFIG/network-environment.yaml "stack@$UNDERCLOUD":
+    cat /tmp/network-environment.yaml
+    scp ${SSH_OPTIONS[@]} /tmp/network-environment.yaml "stack@$UNDERCLOUD":
+
+    # check for ODL L3/ONOS
+    if [ "${deploy_options_array['sdn_l3']}" == 'True' ]; then
+      ext_net_type=br-ex
+    fi
+
+    if ! controller_nic_template=$(python3.4 -B $LIB/python/apex-python-utils.py nic-template -t $CONFIG/nics-controller.yaml.jinja2 -n "$enabled_network_list" -e $ext_net_type -af $ip_addr_family); then
+      echo -e "${red}ERROR: Failed to generate controller NIC heat template ${reset}"
+      exit 1
+    fi
+
+    if ! compute_nic_template=$(python3.4 -B $LIB/python/apex-python-utils.py nic-template -t $CONFIG/nics-compute.yaml.jinja2 -n "$enabled_network_list" -e $ext_net_type -af $ip_addr_family); then
+      echo -e "${red}ERROR: Failed to generate compute NIC heat template ${reset}"
+      exit 1
+    fi
     ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" << EOI
 mkdir nics/
 cat > nics/controller.yaml << EOF
-$(python3.4 -B $CONFIG/lib/python/apex-python-utils.py nic_template -d $CONFIG -f nics-controller.yaml.jinja2 -n "$enabled_network_list" -e $ext_net_type -af $ip_addr_family)
+$controller_nic_template
 EOF
 cat > nics/compute.yaml << EOF
-$(python3.4 -B $CONFIG/lib/python/apex-python-utils.py nic_template -d $CONFIG -f nics-compute.yaml.jinja2 -n "$enabled_network_list" -e $ext_net_type -af $ip_addr_family)
+$compute_nic_template
 EOF
 EOI
   fi
@@ -758,17 +767,17 @@ sleep 15
 ##params: none
 function undercloud_prep_overcloud_deploy {
   if [[ "${#deploy_options_array[@]}" -eq 0 || "${deploy_options_array['sdn_controller']}" == 'opendaylight' ]]; then
-    if [ "${deploy_options_array['sdn_l3']}" == 'true' ]; then
+    if [ "${deploy_options_array['sdn_l3']}" == 'True' ]; then
       DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/opendaylight_l3.yaml"
-    elif [ "${deploy_options_array['sfc']}" == 'true' ]; then
+    elif [ "${deploy_options_array['sfc']}" == 'True' ]; then
       DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/opendaylight_sfc.yaml"
-    elif [ "${deploy_options_array['vpn']}" == 'true' ]; then
+    elif [ "${deploy_options_array['vpn']}" == 'True' ]; then
       DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/opendaylight_sdnvpn.yaml"
     else
       DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/opendaylight.yaml"
     fi
     SDN_IMAGE=opendaylight
-    if [ "${deploy_options_array['sfc']}" == 'true' ]; then
+    if [ "${deploy_options_array['sfc']}" == 'True' ]; then
       SDN_IMAGE+=-sfc
       if [ ! -f $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 ]; then
           echo "${red} $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 is required to execute an SFC deployment."
@@ -794,6 +803,8 @@ function undercloud_prep_overcloud_deploy {
     exit 1
   fi
 
+
+
   # Make sure the correct overcloud image is available
   if [ ! -f $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 ]; then
       echo "${red} $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 is required to execute your deployment."
@@ -806,14 +817,86 @@ function undercloud_prep_overcloud_deploy {
   ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "rm -f overcloud-full.qcow2"
   scp ${SSH_OPTIONS[@]} $RESOURCES/overcloud-full-${SDN_IMAGE}.qcow2 "stack@$UNDERCLOUD":overcloud-full.qcow2
 
-  # Push performance options to subscript to modify per-role images as needed
-  for option in "${performance_options[@]}" ; do
-    echo -e "${blue}Setting performance option $option${reset}"
-    ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "bash build_perf_image.sh $option"
-  done
+  # Install ovs-dpdk inside the overcloud image if it is enabled.
+  if [ "${deploy_options_array['dataplane']}" == 'ovs_dpdk' ]; then
+    # install dpdk packages before ovs
+    echo -e "${blue}INFO: Enabling kernel modules for dpdk inside overcloud image${reset}"
+
+    ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
+      cat << EOF > vfio_pci.modules
+#!/bin/bash
+exec /sbin/modprobe vfio_pci >/dev/null 2>&1
+EOF
+
+      cat << EOF > uio_pci_generic.modules
+#!/bin/bash
+exec /sbin/modprobe uio_pci_generic >/dev/null 2>&1
+EOF
+
+      LIBGUESTFS_BACKEND=direct virt-customize --upload vfio_pci.modules:/etc/sysconfig/modules/ \
+                                               --upload uio_pci_generic.modules:/etc/sysconfig/modules/ \
+                                               --run-command "chmod 0755 /etc/sysconfig/modules/vfio_pci.modules" \
+                                               --run-command "chmod 0755 /etc/sysconfig/modules/uio_pci_generic.modules" \
+                                               --run-command "yum install -y /root/dpdk_rpms/*" \
+                                               -a overcloud-full.qcow2
+EOI
+  elif [ "${deploy_options_array['dataplane']}" != 'ovs' ]; then
+    echo "${red}${deploy_options_array['dataplane']} not supported${reset}"
+    exit 1
+  fi
+
+  # Set ODL version accordingly
+  if [[ "${deploy_options_array['sdn_controller']}" == 'opendaylight' && "${deploy_options_array['odl_version']}" == 'boron' ]]; then
+    ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
+      LIBGUESTFS_BACKEND=direct virt-customize --run-command "yum -y remove opendaylight" \
+                                               --run-command "yum -y install /root/boron/*" \
+                                               -a overcloud-full.qcow2
+EOI
+  fi
 
   # Add performance deploy options if they have been set
   if [ ! -z "${deploy_options_array['performance']}" ]; then
+
+    # Remove previous kernel args files per role
+    ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "rm -f Compute-kernel_params.txt"
+    ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "rm -f Controller-kernel_params.txt"
+
+    # Push performance options to subscript to modify per-role images as needed
+    for option in "${performance_options[@]}" ; do
+      echo -e "${blue}Setting performance option $option${reset}"
+      ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" "bash build_perf_image.sh $option"
+    done
+
+    # Build IPA kernel option ramdisks
+    ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" <<EOI
+/bin/cp -f /home/stack/ironic-python-agent.initramfs /root/
+mkdir -p ipa/
+pushd ipa
+gunzip -c ../ironic-python-agent.initramfs | cpio -i
+if [ ! -f /home/stack/Compute-kernel_params.txt ]; then
+  touch /home/stack/Compute-kernel_params.txt
+  chown stack /home/stack/Compute-kernel_params.txt
+fi
+/bin/cp -f /home/stack/Compute-kernel_params.txt tmp/kernel_params.txt
+echo "Compute params set: "
+cat tmp/kernel_params.txt
+/bin/cp -f /root/image.py usr/lib/python2.7/site-packages/ironic_python_agent/extensions/image.py
+/bin/cp -f /root/image.pyc usr/lib/python2.7/site-packages/ironic_python_agent/extensions/image.pyc
+find . | cpio -o -H newc | gzip > /home/stack/Compute-ironic-python-agent.initramfs
+chown stack /home/stack/Compute-ironic-python-agent.initramfs
+if [ ! -f /home/stack/Controller-kernel_params.txt ]; then
+  touch /home/stack/Controller-kernel_params.txt
+  chown stack /home/stack/Controller-kernel_params.txt
+fi
+/bin/cp -f /home/stack/Controller-kernel_params.txt tmp/kernel_params.txt
+echo "Controller params set: "
+cat tmp/kernel_params.txt
+find . | cpio -o -H newc | gzip > /home/stack/Controller-ironic-python-agent.initramfs
+chown stack /home/stack/Controller-ironic-python-agent.initramfs
+popd
+/bin/rm -rf ipa/
+EOI
+
     DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/numa.yaml"
   fi
 
@@ -824,7 +907,7 @@ function undercloud_prep_overcloud_deploy {
   total_nodes=$(ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "cat /home/stack/instackenv.json | grep -c memory")
 
   # check if HA is enabled
-  if [[ "$ha_enabled" == "TRUE" || "$ha_enabled" == "true" ]]; then
+  if [[ "$ha_enabled" == "True" ]]; then
      DEPLOY_OPTIONS+=" --control-scale 3"
      compute_nodes=$((total_nodes - 3))
      DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/puppet-pacemaker.yaml"
@@ -845,7 +928,7 @@ function undercloud_prep_overcloud_deploy {
      DEPLOY_OPTIONS+=" -e network-environment.yaml"
   fi
 
-  if [[ "$ha_enabled" == "TRUE" || "$ha_enabled" == "true"  ]] || [[ "$net_isolation_enabled" == "TRUE" ]]; then
+  if [[ "$ha_enabled" == "True" ]] || [[ "$net_isolation_enabled" == "TRUE" ]]; then
      DEPLOY_OPTIONS+=" --ntp-server $ntp_server"
   fi
 
@@ -869,11 +952,10 @@ set -o errexit
 echo "Uploading overcloud glance images"
 openstack overcloud image upload
 
-bash -x set_perf_images.sh ${performance_roles}
-
 echo "Configuring undercloud and discovering nodes"
 openstack baremetal import --json instackenv.json
 openstack baremetal configure boot
+bash -x set_perf_images.sh ${performance_roles[@]}
 #if [[ -z "$virtual" ]]; then
 #  openstack baremetal introspection bulk start
 #fi
@@ -944,6 +1026,12 @@ set -o errexit
 echo "Configuring Neutron external network"
 neutron net-create external --router:external=True --tenant-id \$(keystone tenant-get service | grep id | awk '{ print \$4 }')
 neutron subnet-create --name external-net --tenant-id \$(keystone tenant-get service | grep id | awk '{ print \$4 }') --disable-dhcp external --gateway ${public_network_gateway} --allocation-pool start=${public_network_floating_ip_range%%,*},end=${public_network_floating_ip_range##*,} ${public_network_cidr}
+
+echo "Removing swift endpoint and service"
+swift_service_id=\$(keystone service-list | grep swift | cut -d ' ' -f 2)
+swift_endpoint_id=\$(keystone endpoint-list | grep \$swift_service_id | cut -d ' ' -f 2)
+keystone endpoint-delete \$swift_endpoint_id
+keystone service-delete \$swift_service_id
 EOI
 
   echo -e "${blue}INFO: Checking if OVS bridges have IP addresses...${reset}"
@@ -982,7 +1070,7 @@ EOI
   fi
 
   # for sfc deployments we need the vxlan workaround
-  if [ "${deploy_options_array['sfc']}" == 'true' ]; then
+  if [ "${deploy_options_array['sfc']}" == 'True' ]; then
       ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
 source stackrc
 set -o errexit
@@ -1022,8 +1110,9 @@ fi
 EOF
 done
 
-# Print out the dashboard URL
+# Print out the undercloud IP and dashboard URL
 source stackrc
+echo "Undercloud IP: $UNDERCLOUD, please connect by doing 'opnfv-util undercloud'"
 echo "Overcloud dashboard available at http://\$(heat output-show overcloud PublicVip | sed 's/"//g')/dashboard"
 EOI
 
@@ -1031,15 +1120,12 @@ EOI
 
 display_usage() {
   echo -e "Usage:\n$0 [arguments] \n"
-  echo -e "   -c|--config : Directory to configuration files. Optional.  Defaults to /var/opt/opnfv/ \n"
-  echo -e "   -d|--deploy-settings : Full path to deploy settings yaml file. Optional.  Defaults to null \n"
-  echo -e "   -i|--inventory : Full path to inventory yaml file. Required only for baremetal \n"
-  echo -e "   -n|--net-settings : Full path to network settings file. Optional. \n"
-  echo -e "   -p|--ping-site : site to use to verify IP connectivity. Optional. Defaults to 8.8.8.8 \n"
-  echo -e "   -r|--resources : Directory to deployment resources. Optional.  Defaults to /var/opt/opnfv/stack \n"
-  echo -e "   -v|--virtual : Virtualize overcloud nodes instead of using baremetal. \n"
-  echo -e "   --no-ha : disable High Availability deployment scheme, this assumes a single controller and single compute node \n"
-  echo -e "   --flat : disable Network Isolation and use a single flat network for the underlay network.\n"
+  echo -e "   -d|--deploy-settings : Full path to deploy settings yaml file. Optional.  Defaults to null"
+  echo -e "   -i|--inventory : Full path to inventory yaml file. Required only for baremetal"
+  echo -e "   -n|--net-settings : Full path to network settings file. Optional."
+  echo -e "   -p|--ping-site : site to use to verify IP connectivity. Optional. Defaults to 8.8.8.8"
+  echo -e "   -v|--virtual : Virtualize overcloud nodes instead of using baremetal."
+  echo -e "   --flat : disable Network Isolation and use a single flat network for the underlay network."
   echo -e "   --no-post-config : disable Post Install configuration."
   echo -e "   --debug : enable debug output."
   echo -e "   --interactive : enable interactive deployment mode which requires user to confirm steps of deployment."
@@ -1062,11 +1148,6 @@ parse_cmdline() {
                 display_usage
                 exit 0
             ;;
-        -c|--config)
-                CONFIG=$2
-                echo "Deployment Configuration Directory Overridden to: $2"
-                shift 2
-            ;;
         -d|--deploy-settings)
                 DEPLOY_SETTINGS_FILE=$2
                 echo "Deployment Configuration file: $2"
@@ -1086,22 +1167,11 @@ parse_cmdline() {
                 echo "Using $2 as the ping site"
                 shift 2
             ;;
-        -r|--resources)
-                RESOURCES=$2
-                echo "Deployment Resources Directory Overridden to: $2"
-                shift 2
-            ;;
         -v|--virtual)
                 virtual="TRUE"
                 echo "Executing a Virtual Deployment"
                 shift 1
             ;;
-        --no-ha )
-                ha_enabled="FALSE"
-                vm_index=1
-                echo "HA Deployment Disabled"
-                shift 1
-            ;;
         --flat )
                 net_isolation_enabled="FALSE"
                 echo "Underlay Network Isolation Disabled: using flat configuration"
@@ -1132,6 +1202,11 @@ parse_cmdline() {
                 echo "Amount of RAM per VM set to $VM_RAM"
                 shift 2
             ;;
+        --virtual-computes )
+                VM_COMPUTES=$2
+                echo "Virtual Compute nodes set to $VM_COMPUTES"
+                shift 2
+            ;;
         *)
                 display_usage
                 exit 1
@@ -1176,19 +1251,11 @@ parse_cmdline() {
     post_config="FALSE"
   fi
 
-  ##LIBRARIES
-  # Do this after cli parse so that $CONFIG is set properly
-  source $CONFIG/lib/common-functions.sh
-  source $CONFIG/lib/utility-functions.sh
-  source $CONFIG/lib/installer/onos/onos_gw_mac_update.sh
-
 }
 
 ##END FUNCTIONS
 
 main() {
-  # Make sure jinja2 is installed
-  easy_install-3.4 jinja2 > /dev/null
   parse_cmdline "$@"
   echo -e "${blue}INFO: Parsing network settings file...${reset}"
   parse_network_settings