Identify jump host bridges based on IDF / PDF nets 79/44279/1
author    Alexandru Avadanii <Alexandru.Avadanii@enea.com>
Sat, 16 Sep 2017 01:22:38 +0000 (03:22 +0200)
committer Alexandru Avadanii <Alexandru.Avadanii@enea.com>
Wed, 4 Oct 2017 20:27:53 +0000 (20:27 +0000)
- minor refactor of runtime template parsing to allow variable expansion;
- parse <pod_config.yml> into shell variables and dynamically match
  networks from the PDF against IP addresses on the current jump host's
  bridges (see the sketch after this list);
- keep the old '-B' parameter in <ci/deploy.sh> and use it to provide
  fallback values when no bridge name is specified via IDF and the
  jump host has no IP on one or more of the PDF networks;
- re-enable dry-run to ease testing of the above;
- add sample 'idf-pod1.yaml' to <mcp/config/labs/local>;
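
A minimal sketch of the idea behind parsing <pod_config.yml> into shell
variables (the actual helper is 'parse_yaml' in <mcp/scripts/lib.sh>; the
key names below follow the pharos installer adapter output shown in the
diff, while the sample values are illustrative only):

    # Given a generated pod_config.yml along the lines of:
    #   parameters:
    #     _param:
    #       opnfv_maas_pxe_address: 192.168.11.3
    #       opnfv_jump_bridge_admin: pxebr
    # nested keys are flattened into shell assignments, e.g.:
    #   parameters__param_opnfv_maas_pxe_address="192.168.11.3"
    #   parameters__param_opnfv_jump_bridge_admin="pxebr"
    # which ci/deploy.sh then evaluates into its environment:
    eval "$(parse_yaml "${LOCAL_PDF_RECLASS}")"
    echo "${parameters__param_opnfv_maas_pxe_address}"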

The new behavior tries to determine the jump host bridge names as
follows (a condensed sketch of steps 1 and 2 follows the list):
1. Based on the IDF mapping, if available;
2. Based on matching PDF networks against IP addresses on the jump host;
3. Falling back to values passed via '-B';
4. Falling back to default values hardcoded in the deploy script.
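
A hypothetical, condensed sketch of steps 1 and 2 for a single network
('admin'); the full loop over all four PDF networks lives in
<ci/deploy.sh> below, and the variable names assume the pod_config.yml
naming generated by the pharos adapter:

    # 1) Prefer the bridge name coming from the IDF mapping, if any
    br_jump="${parameters__param_opnfv_jump_bridge_admin}"
    if [ -n "${br_jump}" ] && [ "${br_jump}" != 'None' ] && \
       [ -d "/sys/class/net/${br_jump}/bridge" ]; then
        bridge="${br_jump}"
    else
        # 2) Otherwise match the PDF network prefix against local addresses,
        #    keeping the match only if it is an actual Linux bridge
        net_prefix="${parameters__param_opnfv_maas_pxe_address%.*}"
        candidate=$(ip addr | awk "/${net_prefix}\./ {print \$NF; exit}")
        if [ -d "/sys/class/net/${candidate}/bridge" ]; then
            bridge="${candidate}"
        fi
    fi
    # 3) and 4): fall back to '-B' values, then to the hardcoded defaults
    OPNFV_BRIDGES[0]=${bridge:-${OPNFV_BRIDGES[0]}}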

Later, we will drop the MaaS network env vars in favor of PDF vars,
once the PDF template generates them.

Change-Id: If9cd65d310c02965b2e2bfa06a0d7e0f97f1dd48
Signed-off-by: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
(cherry picked from commit 8ec927497b7ee0fd3b7346e957878173b080ef6a)

ci/deploy.sh
mcp/config/labs/local/idf-pod1.yaml [new file with mode: 0644]
mcp/patches/pharos/0001-Add-IDF-mappings-to-installer-adapter.patch [new file with mode: 0644]
mcp/reclass/classes/cluster/all-mcp-ocata-common/opnfv/runtime.yml.template
mcp/reclass/classes/cluster/baremetal-mcp-ocata-odl-ha/infra/maas.yml
mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-dpdk-ha/infra/maas.yml
mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/infra/maas.yml
mcp/salt-formulas/maas/pxe_route.sls
mcp/scripts/lib.sh
mcp/scripts/net_mcpcontrol.xml.template
mcp/scripts/pharos

index 418ce26..3c4d9e4 100755 (executable)
@@ -33,11 +33,12 @@ $(notify "$(basename "$0"): Deploy the Fuel@OPNFV MCP stack" 3)
 $(notify "USAGE:" 2)
   $(basename "$0") -b base-uri -l lab-name -p pod-name -s deploy-scenario \\
     [-B PXE Bridge [-B Mgmt Bridge [-B Internal Bridge [-B Public Bridge]]]] \\
-    [-S storage-dir] [-L /path/to/log/file.tar.gz]
+    [-S storage-dir] [-L /path/to/log/file.tar.gz] [-f] [-F] [-e] [-d]
 
 $(notify "OPTIONS:" 2)
   -b  Base-uri for the stack-configuration structure
   -B  Bridge(s): 1st usage = PXE, 2nd = Mgmt, 3rd = Internal, 4th = Public
+  -d  Dry-run
   -e  Do not launch environment deployment
   -f  Deploy on existing Salt master
   -F  Do only create a Salt master
@@ -49,7 +50,6 @@ $(notify "OPTIONS:" 2)
   -L  Deployment log path and file name
 
 $(notify "DISABLED OPTIONS (not yet supported with MCP):" 3)
-  -d  (disabled) Dry-run
   -i  (disabled) iso url
   -T  (disabled) Timeout, in minutes, for the deploy.
 
@@ -77,6 +77,7 @@ $(notify "Input parameters to the build script are:" 2)
    For baremetal deploys, PXE bridge is used for baremetal node provisioning,
    while "mcpcontrol" is used to provision the infrastructure VMs only.
    The default is 'pxebr'.
+-d Dry-run - Produce deploy config files, but do not execute deploy
 -e Do not launch environment deployment
 -f Deploy on existing Salt master
 -F Do only create a Salt master
@@ -89,7 +90,6 @@ $(notify "Input parameters to the build script are:" 2)
 -S Storage dir for VM images, default is mcp/deploy/images
 
 $(notify "Disabled input parameters (not yet supported with MCP):" 3)
--d (disabled) Dry-run - Produce deploy config files, but do not execute deploy
 -T (disabled) Timeout, in minutes, for the deploy.
    It defaults to using the DEPLOY_TIMEOUT environment variable when defined.
 -i (disabled) .iso image to be deployed (needs to be provided in a URI
@@ -144,26 +144,26 @@ OPNFV_BRIDGES=('pxebr' 'mgmt' 'internal' 'public')
 URI_REGEXP='(file|https?|ftp)://.*'
 BASE_CONFIG_URI="file://${REPO_ROOT_PATH}/mcp/config"
 
+# Customize deploy workflow
+DRY_RUN=${DRY_RUN:-0}
+USE_EXISTING_INFRA=${USE_EXISTING_INFRA:-0}
+INFRA_CREATION_ONLY=${INFRA_CREATION_ONLY:-0}
+NO_DEPLOY_ENVIRONMENT=${NO_DEPLOY_ENVIRONMENT:-0}
+
 export SSH_KEY=${SSH_KEY:-"/var/lib/opnfv/mcp.rsa"}
 export SALT_MASTER=${INSTALLER_IP:-10.20.0.2}
 export SALT_MASTER_USER=${SALT_MASTER_USER:-ubuntu}
 export MAAS_IP=${MAAS_IP:-${SALT_MASTER%.*}.3}
+
+# These should be determined from PDF later
 export MAAS_PXE_NETWORK=${MAAS_PXE_NETWORK:-192.168.11.0}
 
 # Derivated from above global vars
-export MCP_CTRL_NETWORK_ROOTSTR=${SALT_MASTER%.*}
-export MAAS_PXE_NETWORK_ROOTSTR=${MAAS_PXE_NETWORK%.*}
 export SSH_OPTS="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i ${SSH_KEY}"
 export SSH_SALT="${SALT_MASTER_USER}@${SALT_MASTER}"
 
-# Customize deploy workflow
-export USE_EXISTING_INFRA=${USE_EXISTING_INFRA:-0}
-export INFRA_CREATION_ONLY=${INFRA_CREATION_ONLY:-0}
-export NO_DEPLOY_ENVIRONMENT=${NO_DEPLOY_ENVIRONMENT:-0}
-
 # Variables below are disabled for now, to be re-introduced or removed later
 set +x
-DRY_RUN=0
 if ! [ -z "${DEPLOY_TIMEOUT}" ]; then
     DEPLOY_TIMEOUT="-dt ${DEPLOY_TIMEOUT}"
 else
@@ -203,7 +203,6 @@ do
             IFS=${OIFS}
             ;;
         d)
-            notify '' 3 "${OPTION}"; continue
             DRY_RUN=1
             ;;
         f)
@@ -310,7 +309,9 @@ make -C "${REPO_ROOT_PATH}/mcp/patches" deepclean patches-import
 PHAROS_GEN_CONFIG_SCRIPT="./pharos/config/utils/generate_config.py"
 PHAROS_INSTALLER_ADAPTER="./pharos/config/installers/fuel/pod_config.yml.j2"
 BASE_CONFIG_PDF="${BASE_CONFIG_URI}/labs/${TARGET_LAB}/${TARGET_POD}.yaml"
+BASE_CONFIG_IDF="${BASE_CONFIG_URI}/labs/${TARGET_LAB}/idf-${TARGET_POD}.yaml"
 LOCAL_PDF="${STORAGE_DIR}/$(basename "${BASE_CONFIG_PDF}")"
+LOCAL_IDF="${STORAGE_DIR}/$(basename "${BASE_CONFIG_IDF}")"
 LOCAL_PDF_RECLASS="${STORAGE_DIR}/pod_config.yml"
 if ! curl --create-dirs -o "${LOCAL_PDF}" "${BASE_CONFIG_PDF}"; then
     if [ "${DEPLOY_TYPE}" = 'baremetal' ]; then
@@ -319,6 +320,8 @@ if ! curl --create-dirs -o "${LOCAL_PDF}" "${BASE_CONFIG_PDF}"; then
     else
         notify "[WARN] Could not retrieve PDF (Pod Descriptor File)!\n" 3
     fi
+elif ! curl -o "${LOCAL_IDF}" "${BASE_CONFIG_IDF}"; then
+    notify "[WARN] POD has no IDF (Installer Descriptor File)!\n" 3
 elif ! "${PHAROS_GEN_CONFIG_SCRIPT}" -y "${LOCAL_PDF}" \
     -j "${PHAROS_INSTALLER_ADAPTER}" > "${LOCAL_PDF_RECLASS}"; then
     notify "[ERROR] Could not convert PDF to reclass model input!\n" 1>&2
@@ -347,6 +350,7 @@ fi
 source lib.sh
 eval "$(parse_yaml "${SCENARIO_DIR}/defaults-$(uname -i).yaml")"
 eval "$(parse_yaml "${SCENARIO_DIR}/${DEPLOY_TYPE}/${DEPLOY_SCENARIO}.yaml")"
+eval "$(parse_yaml "${LOCAL_PDF_RECLASS}")"
 
 export CLUSTER_DOMAIN=${cluster_domain}
 
@@ -360,10 +364,41 @@ done
 
 # Expand reclass and virsh network templates
 for tp in "${RECLASS_CLUSTER_DIR}/all-mcp-ocata-common/opnfv/"*.template \
-    net_*.template; do envsubst < "${tp}" > "${tp%.template}"; done
+    net_*.template; do
+        eval "cat <<-EOF
+               $(<"${tp}")
+               EOF" 2> /dev/null > "${tp%.template}"
+done
+
+# Map PDF networks 'admin', 'mgmt', 'private' and 'public' to bridge names
+BR_NAMES=('admin' 'mgmt' 'private' 'public')
+BR_NETS=( \
+    "${parameters__param_opnfv_maas_pxe_address}" \
+    "${parameters__param_opnfv_infra_config_address}" \
+    "${parameters__param_opnfv_openstack_compute_node01_tenant_address}" \
+    "${parameters__param_opnfv_openstack_compute_node01_external_address}" \
+)
+for ((i = 0; i < ${#BR_NETS[@]}; i++)); do
+    br_jump=$(eval echo "\$parameters__param_opnfv_jump_bridge_${BR_NAMES[i]}")
+    if [ -n "${br_jump}" ] && [ "${br_jump}" != 'None' ] && \
+       [ -d "/sys/class/net/${br_jump}/bridge" ]; then
+            notify "[OK] Bridge found for '${BR_NAMES[i]}': ${br_jump}\n" 2
+            OPNFV_BRIDGES[${i}]="${br_jump}"
+    elif [ -n "${BR_NETS[i]}" ]; then
+        bridge=$(ip addr | awk "/${BR_NETS[i]%.*}./ {print \$NF; exit}")
+        if [ -n "${bridge}" ] && [ -d "/sys/class/net/${bridge}/bridge" ]; then
+            notify "[OK] Bridge found for net ${BR_NETS[i]%.*}.0: ${bridge}\n" 2
+            OPNFV_BRIDGES[${i}]="${bridge}"
+        fi
+    fi
+done
+notify "[NOTE] Using bridges: ${OPNFV_BRIDGES[*]}\n" 2
 
 # Infra setup
-if [ ${USE_EXISTING_INFRA} -eq 1 ]; then
+if [ ${DRY_RUN} -eq 1 ]; then
+    notify "Dry run, skipping all deployment tasks\n" 2 1>&2
+    exit 0
+elif [ ${USE_EXISTING_INFRA} -eq 1 ]; then
     notify "Use existing infra\n" 2 1>&2
     check_connection
 else
@@ -379,7 +414,7 @@ else
 fi
 
 if [ ${INFRA_CREATION_ONLY} -eq 1 ] || [ ${NO_DEPLOY_ENVIRONMENT} -eq 1 ]; then
-    echo "Skip openstack cluster setup\n" 2
+    notify "Skip openstack cluster setup\n" 2
 else
     # Openstack cluster setup
     for state in "${cluster_states[@]}"; do
diff --git a/mcp/config/labs/local/idf-pod1.yaml b/mcp/config/labs/local/idf-pod1.yaml
new file mode 100644 (file)
index 0000000..7932f88
--- /dev/null
@@ -0,0 +1,12 @@
+---
+### LF POD 2 installer descriptor file ###
+
+idf:
+  version: 0.1
+  fuel:
+    jumphost:
+      bridges:
+        admin: 'pxebr'
+        mgmt: 'br-ctl'
+        private: ''
+        public: ''
diff --git a/mcp/patches/pharos/0001-Add-IDF-mappings-to-installer-adapter.patch b/mcp/patches/pharos/0001-Add-IDF-mappings-to-installer-adapter.patch
new file mode 100644 (file)
index 0000000..59a0e97
--- /dev/null
@@ -0,0 +1,27 @@
+From: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
+Date: Tue, 3 Oct 2017 02:41:41 +0200
+Subject: [PATCH] Add IDF mappings to installer adapter
+
+Signed-off-by: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
+---
+ config/installers/fuel/pod_config.yml.j2 | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/config/installers/fuel/pod_config.yml.j2 b/config/installers/fuel/pod_config.yml.j2
+index 99434f9..676ead3 100644
+--- a/config/installers/fuel/pod_config.yml.j2
++++ b/config/installers/fuel/pod_config.yml.j2
+@@ -26,6 +26,13 @@
+ parameters:
+   _param:
+
++{%- if conf.idf is defined %}
++    opnfv_jump_bridge_admin: {{ conf['idf']['fuel']['jumphost']['bridges']['admin'] }}
++    opnfv_jump_bridge_mgmt: {{ conf['idf']['fuel']['jumphost']['bridges']['mgmt'] }}
++    opnfv_jump_bridge_private: {{ conf['idf']['fuel']['jumphost']['bridges']['private'] }}
++    opnfv_jump_bridge_public: {{ conf['idf']['fuel']['jumphost']['bridges']['public'] }}
++{%- endif %}
++
+     opnfv_infra_config_address: {{ net_mgmt | ipaddr_index('100') }}
+     opnfv_infra_maas_node01_address: {{ net_mgmt | ipaddr_index('3') }}
+     opnfv_infra_maas_node01_deploy_address: {{ net_admin | ipaddr_index('3') }}
index b07ae13..1488da2 100644 (file)
@@ -1,9 +1,10 @@
 parameters:
   _param:
     reclass_config_master: ${SALT_MASTER}
-
     opnfv_maas_mcp_address: ${MAAS_IP}
-    opnfv_maas_pxe_network: ${MAAS_PXE_NETWORK}
-    opnfv_maas_pxe_address: ${MAAS_PXE_NETWORK_ROOTSTR}.3
-    opnfv_maas_pxe_iprange_start: ${MAAS_PXE_NETWORK_ROOTSTR}.5
-    opnfv_maas_pxe_iprange_end: ${MAAS_PXE_NETWORK_ROOTSTR}.250
+
+    # These should be moved to pod_config.yml and read based on PDF admin net
+    opnfv_maas_pxe_network_address: ${MAAS_PXE_NETWORK}
+    opnfv_maas_pxe_address: ${MAAS_PXE_NETWORK%.*}.3
+    opnfv_maas_pxe_start_address: ${MAAS_PXE_NETWORK%.*}.5
+    opnfv_maas_pxe_end_address: ${MAAS_PXE_NETWORK%.*}.250
index 737d6cb..2e39588 100644 (file)
@@ -33,12 +33,12 @@ parameters:
         default_min_hwe_kernel: 'hwe-16.04'
       subnets:
         opnfv_maas_pxe:
-          name: ${_param:opnfv_maas_pxe_network}/24
-          cidr: ${_param:opnfv_maas_pxe_network}/24
+          name: ${_param:opnfv_maas_pxe_network_address}/24
+          cidr: ${_param:opnfv_maas_pxe_network_address}/24
           gateway_ip: ${_param:single_address}
           iprange:
-            start: ${_param:opnfv_maas_pxe_iprange_start}
-            end: ${_param:opnfv_maas_pxe_iprange_end}
+            start: ${_param:opnfv_maas_pxe_start_address}
+            end: ${_param:opnfv_maas_pxe_end_address}
             type: dynamic
           vlans:
             untagged:
index 00ee552..db48750 100644 (file)
@@ -33,12 +33,12 @@ parameters:
         default_min_hwe_kernel: 'hwe-16.04'
       subnets:
         opnfv_maas_pxe:
-          name: ${_param:opnfv_maas_pxe_network}/24
-          cidr: ${_param:opnfv_maas_pxe_network}/24
+          name: ${_param:opnfv_maas_pxe_network_address}/24
+          cidr: ${_param:opnfv_maas_pxe_network_address}/24
           gateway_ip: ${_param:single_address}
           iprange:
-            start: ${_param:opnfv_maas_pxe_iprange_start}
-            end: ${_param:opnfv_maas_pxe_iprange_end}
+            start: ${_param:opnfv_maas_pxe_start_address}
+            end: ${_param:opnfv_maas_pxe_end_address}
             type: dynamic
           vlans:
             untagged:
index cebc779..c0a4874 100644 (file)
@@ -33,12 +33,12 @@ parameters:
         default_min_hwe_kernel: 'hwe-16.04'
       subnets:
         opnfv_maas_pxe:
-          name: ${_param:opnfv_maas_pxe_network}/24
-          cidr: ${_param:opnfv_maas_pxe_network}/24
+          name: ${_param:opnfv_maas_pxe_network_address}/24
+          cidr: ${_param:opnfv_maas_pxe_network_address}/24
           gateway_ip: ${_param:single_address}
           iprange:
-            start: ${_param:opnfv_maas_pxe_iprange_start}
-            end: ${_param:opnfv_maas_pxe_iprange_end}
+            start: ${_param:opnfv_maas_pxe_start_address}
+            end: ${_param:opnfv_maas_pxe_end_address}
             type: dynamic
           vlans:
             untagged:
index 9c22b2a..e6e9a78 100644 (file)
@@ -3,6 +3,6 @@ routes:
     - name: {{ salt['pillar.get']('_param:opnfv_fn_vm_primary_interface') }}
     - routes:
       - name: maas_mcp_to_pxe_network
-        ipaddr: {{ salt['pillar.get']('_param:opnfv_maas_pxe_network') }}
+        ipaddr: {{ salt['pillar.get']('_param:opnfv_maas_pxe_network_address') }}
         netmask: 255.255.255.0
         gateway: {{ salt['pillar.get']('_param:opnfv_maas_mcp_address') }}
index dc4d9dc..fcc5d76 100644 (file)
@@ -103,13 +103,7 @@ create_vms() {
       vnode_networks[2]="${vnode_networks[0]}"
     fi
     for net in "${vnode_networks[@]:1}"; do
-      net_type="bridge"
-      # in case of custom network, host should already have the bridge in place
-      if [ -f "net_${net}.xml" ] && \
-         [ ! -d "/sys/class/net/${net}/bridge" ]; then
-        net_type="network"
-      fi
-      net_args="${net_args} --network ${net_type}=${net},model=virtio"
+      net_args="${net_args} --network bridge=${net},model=virtio"
     done
 
     # shellcheck disable=SC2086
index 722a66a..ab58851 100644 (file)
@@ -2,9 +2,9 @@
   <name>mcpcontrol</name>
   <bridge name="mcpcontrol"/>
   <forward mode="nat"/>
-  <ip address="${MCP_CTRL_NETWORK_ROOTSTR}.1" netmask="255.255.255.0">
+  <ip address="${SALT_MASTER%.*}.1" netmask="255.255.255.0">
     <dhcp>
-      <range start="${MCP_CTRL_NETWORK_ROOTSTR}.2" end="${MCP_CTRL_NETWORK_ROOTSTR}.254"/>
+      <range start="${SALT_MASTER%.*}.2" end="${SALT_MASTER%.*}.254"/>
     </dhcp>
   </ip>
 </network>
index 908dab5..2c4fac2 160000 (submodule)
@@ -1 +1 @@
-Subproject commit 908dab58edbbe8f22db14a9261693a54e9b2d8fd
+Subproject commit 2c4fac2e41aaca9dd679b200ffc968eeb448b395