Adds some network parsing for baremetal deployments and other fixes 11/4811/32
authorTim Rozet <trozet@redhat.com>
Thu, 17 Dec 2015 21:59:05 +0000 (16:59 -0500)
committerDan Radez <dradez@redhat.com>
Wed, 6 Jan 2016 18:22:07 +0000 (13:22 -0500)
Changes include:
 - Fixes Intel Pod 2 inventory file
 - Check for DHCP server on the host and disable
 - Adds realistic+common network-settings file
 - Modifies baremetal deployments to bridge to correct interface
 - Adds private/storage network OVS bridges
 - Parses network-settings into valid network-environment variables
 - If certain network-settings are missing they will be auto-detected

Note: The actual settings set forth for deployment only include
admin/external networks at the moment.  Private/storage networks will be
handled in an upcoming patch.

JIRA: APEX-50

Change-Id: I0a1a86f37c08702a93fe167688c3149ba5573db4
Signed-off-by: Tim Rozet <trozet@redhat.com>
12 files changed:
build/Makefile
build/instack.sh
build/network-environment.yaml
build/nics/controller.yaml
build/opnfv-apex.spec
ci/clean.sh
ci/deploy.sh
config/deploy/deploy_settings.yaml
config/deploy/network/network-environment-example.yaml [deleted file]
config/deploy/network/network_settings.yaml [new file with mode: 0644]
config/inventory/intel_pod2_settings.yaml
lib/common-functions.sh [new file with mode: 0644]

index 87e8db0..f1341fb 100644 (file)
@@ -101,13 +101,15 @@ rpm:
        pushd ../ && git archive --format=tar --prefix=opnfv-apex-$(RPMVERS)/ HEAD > build/opnfv-apex.tar
        tar -u --xform="s:stack/instack.qcow2:opnfv-apex-$(RPMVERS)/build/instack.qcow2:" --file=opnfv-apex.tar stack/instack.qcow2
        tar -u --xform="s:instack.xml:opnfv-apex-$(RPMVERS)/build/instack.xml:" --file=opnfv-apex.tar instack.xml
-       tar -u --xform="s:baremetalbrbm_brbm1_0.xml:opnfv-apex-$(RPMVERS)/build/baremetalbrbm_brbm1_0.xml:" --file=opnfv-apex.tar baremetalbrbm_brbm1_0.xml
-       tar -u --xform="s:baremetalbrbm_brbm1_1.xml:opnfv-apex-$(RPMVERS)/build/baremetalbrbm_brbm1_1.xml:" --file=opnfv-apex.tar baremetalbrbm_brbm1_1.xml
-       tar -u --xform="s:baremetalbrbm_brbm1_2.xml:opnfv-apex-$(RPMVERS)/build/baremetalbrbm_brbm1_2.xml:" --file=opnfv-apex.tar baremetalbrbm_brbm1_2.xml
-       tar -u --xform="s:baremetalbrbm_brbm1_3.xml:opnfv-apex-$(RPMVERS)/build/baremetalbrbm_brbm1_3.xml:" --file=opnfv-apex.tar baremetalbrbm_brbm1_3.xml
-       tar -u --xform="s:baremetalbrbm_brbm1_4.xml:opnfv-apex-$(RPMVERS)/build/baremetalbrbm_brbm1_4.xml:" --file=opnfv-apex.tar baremetalbrbm_brbm1_4.xml
+       tar -u --xform="s:baremetalbrbm_brbm1_brbm2_brbm3_0.xml:opnfv-apex-$(RPMVERS)/build/baremetalbrbm_brbm1_brbm2_brbm3_0.xml:" --file=opnfv-apex.tar baremetalbrbm_brbm1_brbm2_brbm3_0.xml
+       tar -u --xform="s:baremetalbrbm_brbm1_brbm2_brbm3_1.xml:opnfv-apex-$(RPMVERS)/build/baremetalbrbm_brbm1_brbm2_brbm3_1.xml:" --file=opnfv-apex.tar baremetalbrbm_brbm1_brbm2_brbm3_1.xml
+       tar -u --xform="s:baremetalbrbm_brbm1_brbm2_brbm3_2.xml:opnfv-apex-$(RPMVERS)/build/baremetalbrbm_brbm1_brbm2_brbm3_2.xml:" --file=opnfv-apex.tar baremetalbrbm_brbm1_brbm2_brbm3_2.xml
+       tar -u --xform="s:baremetalbrbm_brbm1_brbm2_brbm3_3.xml:opnfv-apex-$(RPMVERS)/build/baremetalbrbm_brbm1_brbm2_brbm3_3.xml:" --file=opnfv-apex.tar baremetalbrbm_brbm1_brbm2_brbm3_3.xml
+       tar -u --xform="s:baremetalbrbm_brbm1_brbm2_brbm3_4.xml:opnfv-apex-$(RPMVERS)/build/baremetalbrbm_brbm1_brbm2_brbm3_4.xml:" --file=opnfv-apex.tar baremetalbrbm_brbm1_brbm2_brbm3_4.xml
        tar -u --xform="s:brbm-net.xml:opnfv-apex-$(RPMVERS)/build/brbm-net.xml:" --file=opnfv-apex.tar brbm-net.xml
        tar -u --xform="s:brbm1-net.xml:opnfv-apex-$(RPMVERS)/build/brbm1-net.xml:" --file=opnfv-apex.tar brbm1-net.xml
+       tar -u --xform="s:brbm2-net.xml:opnfv-apex-$(RPMVERS)/build/brbm2-net.xml:" --file=opnfv-apex.tar brbm2-net.xml
+       tar -u --xform="s:brbm3-net.xml:opnfv-apex-$(RPMVERS)/build/brbm3-net.xml:" --file=opnfv-apex.tar brbm3-net.xml
        tar -u --xform="s:default-pool.xml:opnfv-apex-$(RPMVERS)/build/default-pool.xml:" --file=opnfv-apex.tar default-pool.xml
        tar -u --xform="s:instackenv-virt.json:opnfv-apex-$(RPMVERS)/build/instackenv-virt.json:" --file=opnfv-apex.tar instackenv-virt.json
        tar -u --xform="s:stack/overcloud-full-odl.qcow2:opnfv-apex-$(RPMVERS)/build/stack/overcloud-full.qcow2:" --file=opnfv-apex.tar stack/overcloud-full-odl.qcow2
@@ -121,11 +123,11 @@ instack:
 .PHONY: instack-clean
 instack-clean:
        rm -f instackenv-virt.json
-       rm -f baremetalbrbm_brbm1_0.xml
-       rm -f baremetalbrbm_brbm1_1.xml
-       rm -f baremetalbrbm_brbm1_2.xml
-       rm -f baremetalbrbm_brbm1_3.xml
-       rm -f baremetalbrbm_brbm1_4.xml
+       rm -f baremetalbrbm_brbm1_brbm2_brbm3_0.xml
+       rm -f baremetalbrbm_brbm1_brbm2_brbm3_1.xml
+       rm -f baremetalbrbm_brbm1_brbm2_brbm3_2.xml
+       rm -f baremetalbrbm_brbm1_brbm2_brbm3_3.xml
+       rm -f baremetalbrbm_brbm1_brbm2_brbm3_4.xml
        rm -f instack.xml
 
 .PHONY: iso
index e52e3e5..39ab9b0 100755 (executable)
@@ -7,6 +7,7 @@ rdo_images_uri=https://ci.centos.org/artifacts/rdo/images/liberty/delorean/stabl
 vm_index=4
 RDO_RELEASE=liberty
 SSH_OPTIONS=(-o StrictHostKeyChecking=no -o GlobalKnownHostsFile=/dev/null -o UserKnownHostsFile=/dev/null)
+OPNFV_NETWORK_TYPES="admin_network private_network public_network storage_network"
 
 # check for dependancy packages
 for i in rpm-build createrepo libguestfs-tools python-docutils bsdtar; do
@@ -88,7 +89,7 @@ sudo ../ci/clean.sh
 # and rebuild the bare undercloud VMs
 ssh -T ${SSH_OPTIONS[@]} stack@localhost <<EOI
 set -e
-NODE_COUNT=5 NODE_CPU=2 NODE_MEM=8192 TESTENV_ARGS="--baremetal-bridge-names 'brbm brbm1'" instack-virt-setup
+NODE_COUNT=5 NODE_CPU=2 NODE_MEM=8192 TESTENV_ARGS="--baremetal-bridge-names 'brbm brbm1 brbm2 brbm3'" instack-virt-setup
 EOI
 
 # let dhcp happen so we can get the ip
@@ -163,24 +164,28 @@ fi
 
 echo $'\nGenerating libvirt configuration'
 for i in \$(seq 0 $vm_index); do
-  virsh dumpxml baremetalbrbm_brbm1_\$i | awk '/model type='\''virtio'\''/{c++;if(c==2){sub("model type='\''virtio'\''","model type='\''rtl8139'\''");c=0}}1' > baremetalbrbm_brbm1_\$i.xml
+  virsh dumpxml baremetalbrbm_brbm1_brbm2_brbm3_\$i | awk '/model type='\''virtio'\''/{c++;if(c==2){sub("model type='\''virtio'\''","model type='\''rtl8139'\''");c=0}}1' > baremetalbrbm_brbm1_brbm2_brbm3_\$i.xml
 done
 
 virsh dumpxml instack > instack.xml
 virsh net-dumpxml brbm > brbm-net.xml
 virsh net-dumpxml brbm1 > brbm1-net.xml
+virsh net-dumpxml brbm2 > brbm2-net.xml
+virsh net-dumpxml brbm3 > brbm3-net.xml
 virsh pool-dumpxml default > default-pool.xml
 EOI
 
 # copy off the instack artifacts
 echo "Copying instack files to build directory"
 for i in $(seq 0 $vm_index); do
-  scp ${SSH_OPTIONS[@]} stack@localhost:baremetalbrbm_brbm1_${i}.xml .
+  scp ${SSH_OPTIONS[@]} stack@localhost:baremetalbrbm_brbm1_brbm2_brbm3_${i}.xml .
 done
 
 scp ${SSH_OPTIONS[@]} stack@localhost:instack.xml .
 scp ${SSH_OPTIONS[@]} stack@localhost:brbm-net.xml .
 scp ${SSH_OPTIONS[@]} stack@localhost:brbm1-net.xml .
+scp ${SSH_OPTIONS[@]} stack@localhost:brbm2-net.xml .
+scp ${SSH_OPTIONS[@]} stack@localhost:brbm3-net.xml .
 scp ${SSH_OPTIONS[@]} stack@localhost:default-pool.xml .
 
 # pull down the the built images
@@ -254,22 +259,6 @@ LIBGUESTFS_BACKEND=direct virt-customize --upload ../opendaylight-puppet-neutron
 ## END WORK AROUND
 popd
 
-# resize instack machine
-echo "Checking if instack needs to be resized..."
-instack_size=$(LIBGUESTFS_BACKEND=direct virt-filesystems --long -h --all -a stack/instack.qcow2 |grep device | grep -Eo "[0-9\.]+G" | sed -n 's/\([0-9][0-9]*\).*/\1/p')
-if [ "$instack_size" -lt 30 ]; then
-  qemu-img create -f qcow2 -o preallocation=metadata newinstack.qcow2 30G
-  LIBGUESTFS_BACKEND=direct virt-resize --expand /dev/sda1 stack/instack.qcow2 newinstack.qcow2;
-  LIBGUESTFS_BACKEND=direct virt-customize -a newinstack.qcow2 --run-command 'xfs_growfs -d /dev/sda1 || true'
-  LIBGUESTFS_BACKEND=direct virt-sparsify newinstack.qcow2 stack/instack.qcow2
-  new_size=$(LIBGUESTFS_BACKEND=direct virt-filesystems --long -h --all -a stack/instack.qcow2 |grep device | grep -Eo "[0-9\.]+G" | sed -n 's/\([0-9][0-9]*\).*/\1/p')
-  if [ "$new_size" -lt 30 ]; then
-    echo "Error resizing instack machine, disk size is ${new_size}"
-    exit 1
-  else
-    echo "instack successfully resized"
-  fi
-fi
 # move and Sanitize private keys from instack.json file
 mv stack/instackenv.json instackenv-virt.json
 sed -i '/pm_password/c\      "pm_password": "INSERT_STACK_USER_PRIV_KEY",' instackenv-virt.json
@@ -281,8 +270,8 @@ set -e
 virsh destroy instack 2> /dev/null || echo -n ''
 virsh undefine instack --remove-all-storage 2> /dev/null || echo -n ''
 for i in \$(seq 0 $vm_index); do
-  virsh destroy baremetalbrbm_brbm1_\$i 2> /dev/null || echo -n ''
-  virsh undefine baremetalbrbm_brbm1_\$i --remove-all-storage 2> /dev/null || echo -n ''
+  virsh destroy baremetalbrbm_brbm1_brbm2_brbm3_\$i 2> /dev/null || echo -n ''
+  virsh undefine baremetalbrbm_brbm1_brbm2_brbm3_\$i --remove-all-storage 2> /dev/null || echo -n ''
 done
 EOI
 
index a3d5602..a42a54b 100644 (file)
@@ -37,3 +37,34 @@ parameter_defaults:
   ExternalInterfaceDefaultRoute: 192.168.37.1
   EC2MetadataIp: 192.0.2.1
   DnsServers: ["8.8.8.8","8.8.4.4"]
+
+#  ServiceNetMap:
+#    NeutronTenantNetwork: tenant
+#    CeilometerApiNetwork: internal_api
+#    MongoDbNetwork: internal_api
+#    CinderApiNetwork: internal_api
+#    CinderIscsiNetwork: storage
+#    GlanceApiNetwork: storage
+#    GlanceRegistryNetwork: internal_api
+#    KeystoneAdminApiNetwork: internal_api
+#    KeystonePublicApiNetwork: internal_api
+#    NeutronApiNetwork: internal_api
+#    HeatApiNetwork: internal_api
+#    NovaApiNetwork: internal_api
+#    NovaMetadataNetwork: internal_api
+#    NovaVncProxyNetwork: internal_api
+#    SwiftMgmtNetwork: storage_mgmt
+#    SwiftProxyNetwork: storage
+#    HorizonNetwork: internal_api
+#    MemcachedNetwork: internal_api
+#    RabbitMqNetwork: internal_api
+#    RedisNetwork: internal_api
+#    MysqlNetwork: internal_api
+#    CephClusterNetwork: storage_mgmt
+#    CephPublicNetwork: storage
+#    # Define which network will be used for hostname resolution
+#    ControllerHostnameResolveNetwork: internal_api
+#    ComputeHostnameResolveNetwork: internal_api
+#    BlockStorageHostnameResolveNetwork: internal_api
+#    ObjectStorageHostnameResolveNetwork: internal_api
+#    CephStorageHostnameResolveNetwork: storage
index 1b421ac..336b34a 100644 (file)
@@ -106,7 +106,7 @@ resources:
               members:
                 -
                   type: interface
-                  name: nic2
+                  name: nic3
                   # force the MAC address of the bridge to this interface
                   primary: true
 
index 9b6a6f8..cb9f770 100644 (file)
@@ -1,5 +1,5 @@
 Name:          opnfv-apex
-Version:       2.6
+Version:       2.7
 Release:       %{release}
 Summary:       Scripts and Disk images for deployment
 
@@ -33,12 +33,16 @@ install ci/clean.sh %{buildroot}%{_bindir}/opnfv-clean
 
 mkdir -p %{buildroot}%{_var}/opt/opnfv/stack/
 mkdir -p %{buildroot}%{_var}/opt/opnfv/nics/
+mkdir -p %{buildroot}%{_var}/opt/opnfv/lib/
 
+install lib/common-functions.sh %{buildroot}%{_var}/opt/opnfv/lib/
 install build/instack.qcow2 %{buildroot}%{_var}/opt/opnfv/stack/
 install build/instack.xml %{buildroot}%{_var}/opt/opnfv/
-install build/baremetalbrbm_brbm1_*.xml %{buildroot}%{_var}/opt/opnfv/
+install build/baremetalbrbm_brbm1_brbm2_brbm3_*.xml %{buildroot}%{_var}/opt/opnfv/
 install build/brbm-net.xml %{buildroot}%{_var}/opt/opnfv/
 install build/brbm1-net.xml %{buildroot}%{_var}/opt/opnfv/
+install build/brbm2-net.xml %{buildroot}%{_var}/opt/opnfv/
+install build/brbm3-net.xml %{buildroot}%{_var}/opt/opnfv/
 install build/default-pool.xml %{buildroot}%{_var}/opt/opnfv/
 install build/network-environment.yaml %{buildroot}%{_var}/opt/opnfv/
 install build/nics/controller.yaml %{buildroot}%{_var}/opt/opnfv/nics/
@@ -55,15 +59,19 @@ install docs/installation-instructions.html %{buildroot}%{_docdir}/opnfv/
 install docs/release-notes/index.rst %{buildroot}%{_docdir}/opnfv/release-notes.rst
 install docs/release-notes.html %{buildroot}%{_docdir}/opnfv/
 install config/deploy/deploy_settings.yaml %{buildroot}%{_docdir}/opnfv/deploy_settings.yaml.example
+install config/deploy/network/network_settings.yaml %{buildroot}%{_docdir}/opnfv/network_settings.yaml.example
 
 %files
 %{_bindir}/opnfv-deploy
 %{_bindir}/opnfv-clean
+%{_var}/opt/opnfv/lib/common-functions.sh
 %{_var}/opt/opnfv/stack/instack.qcow2
 %{_var}/opt/opnfv/instack.xml
-%{_var}/opt/opnfv/baremetalbrbm_brbm1_*.xml
+%{_var}/opt/opnfv/baremetalbrbm_brbm1_brbm2_brbm3_*.xml
 %{_var}/opt/opnfv/brbm-net.xml
 %{_var}/opt/opnfv/brbm1-net.xml
+%{_var}/opt/opnfv/brbm2-net.xml
+%{_var}/opt/opnfv/brbm3-net.xml
 %{_var}/opt/opnfv/default-pool.xml
 %{_var}/opt/opnfv/network-environment.yaml
 %{_var}/opt/opnfv/nics/controller.yaml
@@ -77,9 +85,11 @@ install config/deploy/deploy_settings.yaml %{buildroot}%{_docdir}/opnfv/deploy_s
 %doc %{_docdir}/opnfv/release-notes.rst
 %doc %{_docdir}/opnfv/release-notes.html
 %doc %{_docdir}/opnfv/deploy_settings.yaml.example
-
+%doc %{_docdir}/opnfv/network_settings.yaml.example
 
 %changelog
+* Sun Dec 20 2015 Tim Rozet <trozet@redhat.com> - 2.7-1
+- Modifies networks to include OPNFV private/storage networks
 * Tue Dec 15 2015 Dan Radez <dradez@redhat.com> - 2.6-1
 - Added deploy settings for flat network config
 - cleaned up files that don't need to be in the rpm
index 2b48880..bc193a3 100755 (executable)
@@ -4,7 +4,7 @@
 #author: Dan Radez (dradez@redhat.com)
 #
 vm_index=4
-
+ovs_bridges="brbm brbm1 brbm2 brbm3"
 # Clean off instack VM
 virsh destroy instack 2> /dev/null || echo -n ''
 virsh undefine instack --remove-all-storage 2> /dev/null || echo -n ''
@@ -13,20 +13,18 @@ rm -f /var/lib/libvirt/images/instack.qcow2 2> /dev/null
 
 # Clean off baremetal VMs in case they exist
 for i in $(seq 0 $vm_index); do
-  virsh destroy baremetalbrbm_brbm1_$i 2> /dev/null || echo -n ''
-  virsh undefine baremetalbrbm_brbm1_$i --remove-all-storage 2> /dev/null || echo -n ''
-  virsh vol-delete baremetalbrbm_brbm1_${i}.qcow2 --pool default 2> /dev/null
-  rm -f /var/lib/libvirt/images/baremetalbrbm_brbm1_${i}.qcow2 2> /dev/null
+  virsh destroy baremetalbrbm_brbm1_brbm2_brbm3_$i 2> /dev/null || echo -n ''
+  virsh undefine baremetalbrbm_brbm1_brbm2_brbm3_$i --remove-all-storage 2> /dev/null || echo -n ''
+  virsh vol-delete baremetalbrbm_brbm1_brbm2_brbm3_${i}.qcow2 --pool default 2> /dev/null
+  rm -f /var/lib/libvirt/images/baremetalbrbm_brbm1_brbm2_brbm3_${i}.qcow2 2> /dev/null
 done
 
-# Clean off brbm bridges
-virsh net-destroy brbm 2> /dev/null
-virsh net-undefine brbm 2> /dev/null
-vs-vsctl del-br brbm 2> /dev/null
-
-virsh net-destroy brbm1 2> /dev/null
-virsh net-undefine brbm1 2> /dev/null
-vs-vsctl del-br brbm1 2> /dev/null
+# Clean off created bridges
+for bridge in ${ovs_bridges}; do
+  virsh net-destroy ${bridge} 2> /dev/null
+  virsh net-undefine ${bridge} 2> /dev/null
+  ovs-vsctl del-br ${bridge} 2> /dev/null
+done
 
 # clean pub keys from root's auth keys
 sed -i '/stack@instack.localdomain/d' /root/.ssh/authorized_keys
index 8b5fa53..118c763 100755 (executable)
@@ -30,13 +30,22 @@ net_isolation_enabled="TRUE"
 declare -i CNT
 declare UNDERCLOUD
 declare -A deploy_options_array
+declare -A NET_MAP
 
 SSH_OPTIONS=(-o StrictHostKeyChecking=no -o GlobalKnownHostsFile=/dev/null -o UserKnownHostsFile=/dev/null -o LogLevel=error)
 DEPLOY_OPTIONS=""
 RESOURCES=/var/opt/opnfv/stack
 CONFIG=/var/opt/opnfv
 INSTACKENV=$CONFIG/instackenv.json
-NETENV=$CONFIG/network-environment.yaml
+OPNFV_NETWORK_TYPES="admin_network private_network public_network storage_network"
+# Netmap used to map networks to OVS bridge names
+NET_MAP['admin_network']="brbm"
+NET_MAP['private_network']="brbm1"
+NET_MAP['public_network']="brbm2"
+NET_MAP['storage_network']="brbm3"
+
+##LIBRARIES
+source $CONFIG/lib/common-functions.sh
 
 ##FUNCTIONS
 ##translates yaml into variables
@@ -90,6 +99,111 @@ parse_setting_value() {
   local mystr=$1
   echo $(echo $mystr | grep -Eo "\=.*$" | tr -d '=')
 }
+##parses network settings yaml into globals
+parse_network_settings() {
+  local required_network_settings="cidr"
+  local common_optional_network_settings="usable_ip_range"
+  local admin_network_optional_settings="provisioner_ip dhcp_range introspection_range"
+  local public_network_optional_settings="floating_ip_range gateway provisioner_ip"
+  local nic_value cidr
+
+  eval $(parse_yaml ${NETSETS})
+  for network in ${OPNFV_NETWORK_TYPES}; do
+    if [[ $(eval echo \${${network}_enabled}) == 'true' ]]; then
+      enabled_network_list+="${network} "
+    elif [ "${network}" == 'admin_network' ]; then
+      echo -e "${red}ERROR: You must enable admin_network and configure it explicitly or use auto-detection${reset}"
+      exit 1
+    elif [ "${network}" == 'public_network' ]; then
+      echo -e "${red}ERROR: You must enable public_network and configure it explicitly or use auto-detection${reset}"
+      exit 1
+    else
+      echo -e "${blue}INFO: Network: ${network} is disabled, will collapse into admin_network"
+    fi
+  done
+
+  # check for enabled network values
+  for enabled_network in ${enabled_network_list}; do
+    # detect required settings first to continue
+    echo -e "${blue}INFO: Detecting Required settings for: ${enabled_network}${reset}"
+    for setting in ${required_network_settings}; do
+      eval "setting_value=\${${enabled_network}_${setting}}"
+      eval "nic_value=\${${enabled_network}_bridged_interface}"
+      if [ -z "${setting_value}" ]; then
+        # if setting is missing we try to autodetect
+        if [ -n "$nic_value" ]; then
+          setting_value=$(eval find_${setting} ${nic_value})
+          if [ -n "$setting_value" ]; then
+            eval "${enabled_network}_${setting}=${setting_value}"
+            echo -e "${blue}INFO: Auto-detection: ${enabled_network}_${setting}: ${setting_value}${reset}"
+          else
+            echo -e "${red}ERROR: Auto-detection failed: ${setting} not found using interface: ${nic_value}${reset}"
+            exit 1
+          fi
+        else
+          echo -e "${red}ERROR: Required setting: ${setting} not found, and bridge interface not provided\
+for Auto-detection${reset}"
+          exit 1
+        fi
+      else
+        echo -e "${blue}INFO: ${enabled_network}_${setting}: ${setting_value}${reset}"
+      fi
+    done
+    echo -e "${blue}INFO: Detecting Common settings for: ${enabled_network}${reset}"
+    # detect optional common settings
+    # these settings can be auto-generated if missing
+    for setting in ${common_optional_network_settings}; do
+      eval "setting_value=\${${enabled_network}_${setting}}"
+      if [ -z "${setting_value}" ]; then
+        setting_value=$(eval find_${setting} ${nic_value})
+        if [ -n "$setting_value" ]; then
+          eval "${enabled_network}_${setting}=${setting_value}"
+          echo -e "${blue}INFO: Auto-detection: ${enabled_network}_${setting}: ${setting_value}${reset}"
+        else
+          # if Auto-detection fails we can auto-generate with CIDR
+          eval "cidr=\${${enabled_network}_cidr}"
+          setting_value=$(eval generate_${setting} ${cidr})
+          if [ -n "$setting_value" ]; then
+            eval "${enabled_network}_${setting}=${setting_value}"
+            echo -e "${blue}INFO: Auto-generated: ${enabled_network}_${setting}: ${setting_value}${reset}"
+          else
+            echo -e "${red}ERROR: Auto-generation failed: ${setting} not found${reset}"
+            exit 1
+          fi
+        fi
+      else
+        echo -e "${blue}INFO: ${enabled_network}_${setting}: ${setting_value}${reset}"
+      fi
+    done
+    echo -e "${blue}INFO: Detecting Network Specific settings for: ${enabled_network}${reset}"
+    # detect network specific settings
+    if [ -n "$(eval echo \${${enabled_network}_optional_settings})" ]; then
+      eval "network_specific_settings=\${${enabled_network}_optional_settings}"
+      for setting in ${network_specific_settings}; do
+        eval "setting_value=\${${enabled_network}_${setting}}"
+        if [ -z "${setting_value}" ]; then
+          setting_value=$(eval find_${setting} ${nic_value})
+          if [ -n "$setting_value" ]; then
+            eval "${enabled_network}_${setting}=${setting_value}"
+            echo -e "${blue}INFO: Auto-detection: ${enabled_network}_${setting}: ${setting_value}${reset}"
+          else
+            eval "cidr=\${${enabled_network}_cidr}"
+            setting_value=$(eval generate_${setting} ${cidr})
+            if [ -n "$setting_value" ]; then
+              eval "${enabled_network}_${setting}=${setting_value}"
+              echo -e "${blue}INFO: Auto-generated: ${enabled_network}_${setting}: ${setting_value}${reset}"
+            else
+              echo -e "${red}ERROR: Auto-generation failed: ${setting} not found${reset}"
+              exit 1
+            fi
+          fi
+        else
+          echo -e "${blue}INFO: ${enabled_network}_${setting}: ${setting_value}${reset}"
+        fi
+      done
+    fi
+  done
+}
 ##parses deploy settings yaml into globals and options array
 ##params: none
 ##usage:  parse_deploy_settings
@@ -239,14 +353,50 @@ function configure_deps {
     sudo sh -c "echo 'net.ipv4.ip_forward = 1' >> /etc/sysctl.conf"
   fi
 
-  # ensure brbm networks are configured
+  # ensure no dhcp server is running on jumphost
+  if ! sudo systemctl status dhcpd | grep dead; then
+    echo "${red}WARN: DHCP Server detected on jumphost, disabling...${reset}"
+    sudo systemctl stop dhcpd
+    sudo systemctl disable dhcpd
+  fi
+
+  # ensure networks are configured
   systemctl start openvswitch
-  ovs-vsctl list-br | grep brbm > /dev/null || ovs-vsctl add-br brbm
-  virsh net-list --all | grep brbm > /dev/null || virsh net-create $CONFIG/brbm-net.xml
-  virsh net-list | grep -E "brbm\s+active" > /dev/null || virsh net-start brbm
-  ovs-vsctl list-br | grep brbm1 > /dev/null || ovs-vsctl add-br brbm1
-  virsh net-list --all | grep brbm1 > /dev/null || virsh net-create $CONFIG/brbm1-net.xml
-  virsh net-list | grep -E "brbm1\s+active" > /dev/null || virsh net-start brbm1
+
+  # If flat we only use admin network
+  if [[ "$net_isolation_enabled" == "FALSE" ]]; then
+    virsh_enabled_networks="admin_network"
+  # For baremetal we only need to create/attach instack to admin and public
+  elif [ "$virtual" == "FALSE" ]; then
+    virsh_enabled_networks="admin_network public_network"
+  else
+    virsh_enabled_networks=$enabled_network_list
+  fi
+
+  for network in ${OPNFV_NETWORK_TYPES}; do
+    ovs-vsctl list-br | grep ${NET_MAP[$network]} > /dev/null || ovs-vsctl add-br ${NET_MAP[$network]}
+    virsh net-list --all | grep ${NET_MAP[$network]} > /dev/null || virsh net-create $CONFIG/${NET_MAP[$network]}-net.xml
+    virsh net-list | grep -E "${NET_MAP[$network]}\s+active" > /dev/null || virsh net-start ${NET_MAP[$network]}
+  done
+
+  echo -e "${blue}INFO: Bridges set: ${reset}"
+  ovs-vsctl list-br
+  echo -e "${blue}INFO: virsh networks set: ${reset}"
+  virsh net-list
+
+  if [[ -z "$virtual" || "$virtual" == "FALSE" ]]; then
+    # bridge interfaces to correct OVS instances for baremetal deployment
+    for network in ${enabled_network_list}; do
+      this_interface=$(eval echo \${${network}_bridged_interface})
+      # check if this a bridged interface for this network
+      if [[ -n "$this_interface" && "$this_interface" != "none" ]]; then
+        ovs-vsctl list-ports ${NET_MAP[$network]} | grep ${this_interface} || ovs-vsctl add-port ${NET_MAP[$network]} ${this_interface}
+      else
+        echo "${red}ERROR: Unable to determine interface to bridge to for enabled network: ${network}${reset}"
+        exit 1
+      fi
+    done
+  fi
 
   # ensure storage pool exists and is started
   virsh pool-list --all | grep default > /dev/null || virsh pool-create $CONFIG/default-pool.xml
@@ -306,7 +456,26 @@ function setup_instack_vm {
       #error: internal error: received hangup / error event on socket
       #error: Reconnected to the hypervisor
 
-      cp -f $RESOURCES/instack.qcow2 /var/lib/libvirt/images/instack.qcow2
+      instack_dst=/var/lib/libvirt/images/instack.qcow2
+      cp -f $RESOURCES/instack.qcow2 $instack_dst
+
+      # resize instack machine
+      echo "Checking if instack needs to be resized..."
+      instack_size=$(LIBGUESTFS_BACKEND=direct virt-filesystems --long -h --all -a $instack_dst |grep device | grep -Eo "[0-9\.]+G" | sed -n 's/\([0-9][0-9]*\).*/\1/p')
+      if [ "$instack_size" -lt 30 ]; then
+        qemu-img resize /var/lib/libvirt/images/instack.qcow2 +25G
+       LIBGUESTFS_BACKEND=direct virt-resize --expand /dev/sda1 $RESOURCES/instack.qcow2 $instack_dst
+       LIBGUESTFS_BACKEND=direct virt-customize -a $instack_dst --run-command 'xfs_growfs -d /dev/sda1 || true'
+        new_size=$(LIBGUESTFS_BACKEND=direct virt-filesystems --long -h --all -a $instack_dst |grep filesystem | grep -Eo "[0-9\.]+G" | sed -n 's/\([0-9][0-9]*\).*/\1/p')
+        if [ "$new_size" -lt 30 ]; then
+          echo "Error resizing instack machine, disk size is ${new_size}"
+          exit 1
+        else
+          echo "instack successfully resized"
+        fi
+      else
+        echo "skipped instack resize, upstream is large enough"
+      fi
 
   else
       echo "Found Instack VM, using existing VM"
@@ -374,11 +543,12 @@ function setup_instack_vm {
   # extra space to overwrite the previous connectivity output
   echo -e "${blue}\r                                                                 ${reset}"
 
-  #add the instack brbm1 interface
-  virsh attach-interface --domain instack --type network --source brbm1 --model rtl8139 --config --live
-  sleep 1
-  ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "if ! ip a s eth2 | grep 192.168.37.1 > /dev/null; then ip a a 192.168.37.1/24 dev eth2; ip link set up dev eth2; fi"
-
+  #add the instack public interface if net isolation is enabled (more than just admin network)
+  if [[ "$net_isolation_enabled" == "TRUE" ]]; then
+    virsh attach-interface --domain instack --type network --source ${NET_MAP['public_network']} --model rtl8139 --config --live
+    sleep 1
+    ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "if ! ip a s eth2 | grep ${public_network_provisioner_ip} > /dev/null; then ip a a ${public_network_provisioner_ip}/${public_network_cidr##*/} dev eth2; ip link set up dev eth2; fi"
+  fi
   # ssh key fix for stack user
   ssh -T ${SSH_OPTIONS[@]} "root@$UNDERCLOUD" "restorecon -r /home/stack"
 }
@@ -387,19 +557,34 @@ function setup_instack_vm {
 ##params: none
 function setup_virtual_baremetal {
   for i in $(seq 0 $vm_index); do
-    if ! virsh list --all | grep baremetalbrbm_brbm1_${i} > /dev/null; then
-      if [ ! -e $CONFIG/baremetalbrbm_brbm1_${i}.xml ]; then
-        define_virtual_node baremetalbrbm_brbm1_${i}
+    if ! virsh list --all | grep baremetalbrbm_brbm1_brbm2_brbm3_${i} > /dev/null; then
+      if [ ! -e $CONFIG/baremetalbrbm_brbm1_brbm2_brbm3_${i}.xml ]; then
+        define_virtual_node baremetalbrbm_brbm1_brbm2_brbm3_${i}
       fi
-      virsh define $CONFIG/baremetalbrbm_brbm1_${i}.xml
+      # Fix for ramdisk using wrong pxeboot interface
+      # TODO: revisit this and see if there's a more proper fix
+      sed -i "/^\s*<source network='brbm2'\/>/{
+        N
+        s/^\(.*\)virtio\(.*\)$/\1rtl8139\2/
+        }" $CONFIG/baremetalbrbm_brbm1_brbm2_brbm3_${i}.xml
+      virsh define $CONFIG/baremetalbrbm_brbm1_brbm2_brbm3_${i}.xml
     else
       echo "Found Baremetal ${i} VM, using existing VM"
     fi
-    virsh vol-list default | grep baremetalbrbm_brbm1_${i} 2>&1> /dev/null || virsh vol-create-as default baremetalbrbm_brbm1_${i}.qcow2 40G --format qcow2
+    virsh vol-list default | grep baremetalbrbm_brbm1_brbm2_brbm3_${i} 2>&1> /dev/null || virsh vol-create-as default baremetalbrbm_brbm1_brbm2_brbm3_${i}.qcow2 40G --format qcow2
   done
 
 }
 
+##Set network-environment settings
+##params: network-environment file to edit
+function configure_network_environment {
+  sed -i '/ControlPlaneSubnetCidr/c\\  ControlPlaneSubnetCidr: "'${admin_network_cidr##*/}'"' $1
+  sed -i '/ControlPlaneDefaultRoute/c\\  ControlPlaneDefaultRoute: '${admin_network_provisioner_ip}'' $1
+  sed -i '/ExternalNetCidr/c\\  ExternalNetCidr: '${public_network_cidr}'' $1
+  sed -i "/ExternalAllocationPools/c\\  ExternalAllocationPools: [{'start': '${public_network_usable_ip_range%%,*}', 'end': '${public_network_usable_ip_range##*,}'}]" $1
+  sed -i '/ExternalInterfaceDefaultRoute/c\\  ExternalInterfaceDefaultRoute: '${public_network_gateway}'' $1
+}
 ##Copy over the glance images and instack json file
 ##params: none
 function configure_undercloud {
@@ -407,7 +592,12 @@ function configure_undercloud {
   echo
   echo "Copying configuration file and disk images to instack"
   scp ${SSH_OPTIONS[@]} $RESOURCES/overcloud-full.qcow2 "stack@$UNDERCLOUD":
-  scp ${SSH_OPTIONS[@]} $NETENV "stack@$UNDERCLOUD":
+  if [[ "$net_isolation_enabled" == "TRUE" ]]; then
+    configure_network_environment $CONFIG/network-environment.yaml
+    echo -e "${blue}Network Environment set for Deployment: ${reset}"
+    cat $CONFIG/network-environment.yaml
+    scp ${SSH_OPTIONS[@]} $CONFIG/network-environment.yaml "stack@$UNDERCLOUD":
+  fi
   scp ${SSH_OPTIONS[@]} -r $CONFIG/nics/ "stack@$UNDERCLOUD":
 
   # ensure stack user on instack machine has an ssh key
@@ -427,7 +617,7 @@ data = json.load(open('$CONFIG/instackenv-virt.json'))
 print data['nodes'][$i]['mac'][0]"
 
         old_mac=$(python -c "$pyscript")
-        new_mac=$(virsh dumpxml baremetalbrbm_brbm1_$i | grep "mac address" | cut -d = -f2 | grep -Eo "[0-9a-f:]+")
+        new_mac=$(virsh dumpxml baremetalbrbm_brbm1_brbm2_brbm3_$i | grep "mac address" | cut -d = -f2 | grep -Eo "[0-9a-f:]+")
         # this doesn't work with multiple vnics on the vms
         #if [ "$old_mac" != "$new_mac" ]; then
         #  echo "${blue}Modifying MAC for node from $old_mac to ${new_mac}${reset}"
@@ -437,7 +627,6 @@ print data['nodes'][$i]['mac'][0]"
 
       DEPLOY_OPTIONS+=" --libvirt-type qemu"
       INSTACKENV=$CONFIG/instackenv-virt.json
-      NETENV=$CONFIG/network-environment.yaml
 
       # upload instackenv file to Instack for virtual deployment
       scp ${SSH_OPTIONS[@]} $INSTACKENV "stack@$UNDERCLOUD":instackenv.json
@@ -465,7 +654,7 @@ EOI
   echo "Running undercloud configuration."
   echo "Logging undercloud configuration to instack:/home/stack/apex-undercloud-install.log"
   ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" << EOI
-if [ -n "$DEPLOY_SETTINGS_FILE" ]; then
+if [[ "$net_isolation_enabled" == "TRUE" ]]; then
   sed -i 's/#local_ip/local_ip/' undercloud.conf
   sed -i 's/#network_gateway/network_gateway/' undercloud.conf
   sed -i 's/#network_cidr/network_cidr/' undercloud.conf
@@ -474,25 +663,28 @@ if [ -n "$DEPLOY_SETTINGS_FILE" ]; then
   sed -i 's/#inspection_iprange/inspection_iprange/' undercloud.conf
   sed -i 's/#undercloud_debug/undercloud_debug/' undercloud.conf
 
-  openstack-config --set undercloud.conf DEFAULT local_ip ${deploy_options_array['instack_ip']}/${deploy_options_array['provisioning_cidr']##*/}
-  openstack-config --set undercloud.conf DEFAULT network_gateway ${deploy_options_array['provisioning_gateway']}
-  openstack-config --set undercloud.conf DEFAULT network_cidr ${deploy_options_array['provisioning_cidr']}
-  openstack-config --set undercloud.conf DEFAULT dhcp_start ${deploy_options_array['provisioning_dhcp_start']}
-  openstack-config --set undercloud.conf DEFAULT dhcp_end ${deploy_options_array['provisioning_dhcp_end']}
-  openstack-config --set undercloud.conf DEFAULT inspection_iprange ${deploy_options_array['provisioning_inspection_iprange']}
+  openstack-config --set undercloud.conf DEFAULT local_ip ${admin_network_provisioner_ip}/${admin_network_cidr##*/}
+  openstack-config --set undercloud.conf DEFAULT network_gateway ${admin_network_provisioner_ip}
+  openstack-config --set undercloud.conf DEFAULT network_cidr ${admin_network_cidr}
+  openstack-config --set undercloud.conf DEFAULT dhcp_start ${admin_network_dhcp_range%%,*}
+  openstack-config --set undercloud.conf DEFAULT dhcp_end ${admin_network_dhcp_range##*,}
+  openstack-config --set undercloud.conf DEFAULT inspection_iprange ${admin_network_introspection_range}
   openstack-config --set undercloud.conf DEFAULT undercloud_debug false
-
-  if [ -n "$net_isolation_enabled" ]; then
-    sed -i '/ControlPlaneSubnetCidr/c\\  ControlPlaneSubnetCidr: "${deploy_options_array['provisioning_cidr']##*/}"' network-environment.yaml
-    sed -i '/ControlPlaneDefaultRoute/c\\  ControlPlaneDefaultRoute: ${deploy_options_array['provisioning_gateway']}' network-environment.yaml
-    sed -i '/ExternalNetCidr/c\\  ExternalNetCidr: ${deploy_options_array['ext_net_cidr']}' network-environment.yaml
-    sed -i '/ExternalAllocationPools/c\\  ExternalAllocationPools: [{'start': '${deploy_options_array['ext_allocation_pool_start']}', 'end': '${deploy_options_array['ext_allocation_pool_end']}'}]' network-environment.yaml
-    sed -i '/ExternalInterfaceDefaultRoute/c\\  ExternalInterfaceDefaultRoute: ${deploy_options_array['ext_gateway']}' network-environment.yaml
-  fi
 fi
 
 openstack undercloud install &> apex-undercloud-install.log
+sleep 30
+sudo systemctl restart openstack-glance-api
+sudo systemctl restart openstack-nova-conductor
+sudo systemctl restart openstack-nova-compute
 EOI
+# WORKAROUND: must restart the above services to fix sync problem with nova compute manager
+# TODO: revisit and file a bug if necessary. This should eventually be removed
+# as well as glance api problem
+echo -e "${blue}INFO: Sleeping 15 seconds while services come back from restart${reset}"
+sleep 15
+#TODO Fill in the rest of the network-environment values for other networks
+
 }
 
 ##preping it for deployment and launch the deploy
@@ -518,11 +710,11 @@ function undercloud_prep_overcloud_deploy {
   fi
 
   if [[ "$net_isolation_enabled" == "TRUE" ]]; then
-     DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/network-isolation.yaml"
+     #DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/network-isolation.yaml"
      DEPLOY_OPTIONS+=" -e network-environment.yaml"
   fi
 
-  if [[ "$ha_enabled" == "TRUE" ]] || [[ $net_isolation_enabled == "TRUE" ]]; then
+  if [[ "$ha_enabled" == "TRUE" ]] || [[ "$net_isolation_enabled" == "TRUE" ]]; then
      DEPLOY_OPTIONS+=" --ntp-server $ntp_server"
   fi
 
@@ -567,7 +759,7 @@ display_usage() {
   echo -e "   -c|--config : Directory to configuration files. Optional.  Defaults to /var/opt/opnfv/ \n"
   echo -e "   -d|--deploy-settings : Full path to deploy settings yaml file. Optional.  Defaults to null \n"
   echo -e "   -i|--inventory : Full path to inventory yaml file. Required only for baremetal \n"
-  echo -e "   -n|--netenv : Full path to network environment file. Optional. Defaults to \$CONFIG/network-environment.yaml \n"
+  echo -e "   -n|--net-settings : Full path to network settings file. Optional. \n"
   echo -e "   -p|--ping-site : site to use to verify IP connectivity. Optional. Defaults to 8.8.8.8 \n"
   echo -e "   -r|--resources : Directory to deployment resources. Optional.  Defaults to /var/opt/opnfv/stack \n"
   echo -e "   -v|--virtual : Virtualize overcloud nodes instead of using baremetal. \n"
@@ -604,8 +796,9 @@ parse_cmdline() {
                 INVENTORY_FILE=$2
                 shift 2
             ;;
-        -n|--netenv)
-                NETENV=$2
+        -n|--net-settings)
+                NETSETS=$2
+                echo "Network Settings Configuration file: $2"
                 shift 2
             ;;
         -p|--ping-site)
@@ -640,11 +833,10 @@ parse_cmdline() {
     esac
   done
 
-  if [[ ! -z "$NETENV" && "$net_isolation_enabled" == "FALSE" ]]; then
-    echo -e "${red}INFO: Single flat network requested. Ignoring any netenv settings!${reset}"
-  elif [[ ! -z "$NETENV" && ! -z "$DEPLOY_SETTINGS_FILE" ]]; then
-    echo -e "${red}WARN: deploy_settings and netenv specified.  Ignoring netenv settings! deploy_settings will contain \
-netenv${reset}"
+  if [[ ! -z "$NETSETS" && "$net_isolation_enabled" == "FALSE" ]]; then
+    echo -e "${red}INFO: Single flat network requested. Ignoring any network settings!${reset}"
+  elif [[ -z "$NETSETS" && "$net_isolation_enabled" == "TRUE" ]]; then
+    echo -e "${red}ERROR: You must provide a network_settings file with -n or use --flat to force a single flat network${reset}"
   fi
 
   if [[ -n "$virtual" && -n "$INVENTORY_FILE" ]]; then
@@ -657,8 +849,8 @@ netenv${reset}"
     exit 1
   fi
 
-  if [[ ! -z "$NETENV" && ! -f "$NETENV" ]]; then
-    echo -e "${red}ERROR: ${NETENV} does not exist! Exiting...${reset}"
+  if [[ ! -z "$NETSETS" && ! -f "$NETSETS" ]]; then
+    echo -e "${red}ERROR: ${NETSETS} does not exist! Exiting...${reset}"
     exit 1
   fi
 
@@ -677,8 +869,13 @@ netenv${reset}"
 
 main() {
   parse_cmdline "$@"
+  if [[ "$net_isolation_enabled" == "TRUE" ]]; then
+    echo -e "${blue}INFO: Parsing network settings file...${reset}"
+    parse_network_settings
+  fi
   if ! configure_deps; then
-    echo "Dependency Validation Failed, Exiting."
+    echo -e "${red}Dependency Validation Failed, Exiting.${reset}"
+    exit 1
   fi
   if [ -n "$DEPLOY_SETTINGS_FILE" ]; then
     parse_deploy_settings
index 52d3913..15157f1 100644 (file)
@@ -1,28 +1,7 @@
 global_params:
   ha_enabled: true
-  network_isolation_settings: network/network-environment-example.yaml
 
 deploy_options:
-  # instack_ip
-  # IP address given to instack on the provisioning network
-  instack_ip: 192.0.2.1
-  # CIDR of provisioning network
-  provisioning_cidr: 192.0.2.0/24
-  # gateway IP of provisioning network
-  provisioning_gateway: 192.0.2.1
-  # IP pool start used for provisioning overcloud nodes
-  provisioning_dhcp_start: 192.0.2.5
-  # IP pool end used for inspecting overcloud nodes
-  provisioning_dhcp_end: 192.0.2.24
-  # IP pool used for inspecting overcloud nodes on the provisioning network
-  provisioning_inspection_iprange: 192.0.2.100,192.0.2.124
   sdn_controller: opendaylight
   tacker: false
   congress: false
-  # CIDR used to for the external network
-  ext_net_cidr: 192.168.37.0/24
-  # Allocation pools for floating ip addresses on the ext net
-  ext_allocation_pool_start: 192.168.37.50
-  ext_allocation_pool_end: 192.168.37.99
-  # Default Gateway for External Network
-  ext_gateway: 192.168.37.1
diff --git a/config/deploy/network/network-environment-example.yaml b/config/deploy/network/network-environment-example.yaml
deleted file mode 100644 (file)
index f6c101f..0000000
+++ /dev/null
@@ -1,71 +0,0 @@
-resource_registry:
-  OS::TripleO::BlockStorage::Net::SoftwareConfig: /home/stack/nic-configs/cinder-storage.yaml
-  OS::TripleO::Compute::Net::SoftwareConfig: /home/stack/nic-configs/compute.yaml
-  OS::TripleO::Controller::Net::SoftwareConfig: /home/stack/nic-configs/controller.yaml
-  OS::TripleO::ObjectStorage::Net::SoftwareConfig: /home/stack/nic-configs/swift-storage.yaml
-  OS::TripleO::CephStorage::Net::SoftwareConfig: /home/stack/nic-configs/ceph-storage.yaml
-
-parameter_defaults:
-  # Customize all these values to match the local environment
-  InternalApiNetCidr: 172.17.0.0/24
-  StorageNetCidr: 172.18.0.0/24
-  StorageMgmtNetCidr: 172.19.0.0/24
-  TenantNetCidr: 172.16.0.0/24
-  ExternalNetCidr: 10.1.2.0/24
-  # CIDR subnet mask length for provisioning network
-  ControlPlaneSubnetCidr: 24
-  InternalApiAllocationPools: [{'start': '172.17.0.10', 'end': '172.17.0.200'}]
-  StorageAllocationPools: [{'start': '172.18.0.10', 'end': '172.18.0.200'}]
-  StorageMgmtAllocationPools: [{'start': '172.19.0.10', 'end': '172.19.0.200'}]
-  TenantAllocationPools: [{'start': '172.16.0.10', 'end': '172.16.0.200'}]
-  # Use an External allocation pool which will leave room for floating IPs
-  ExternalAllocationPools: [{'start': '10.1.2.10', 'end': '10.1.2.50'}]
-  # Set to the router gateway on the external network
-  ExternalInterfaceDefaultRoute: 10.1.2.1
-  # Gateway router for the provisioning network (or Undercloud IP)
-  ControlPlaneDefaultRoute: 192.0.2.254
-  # Generally the IP of the Undercloud
-  EC2MetadataIp: 192.0.2.1
-  # Define the DNS servers (maximum 2) for the overcloud nodes
-  DnsServers: ["8.8.8.8","8.8.4.4"]
-  InternalApiNetworkVlanID: 201
-  StorageNetworkVlanID: 202
-  StorageMgmtNetworkVlanID: 203
-  TenantNetworkVlanID: 204
-  ExternalNetworkVlanID: 100
-  # May set to br-ex if using floating IPs only on native VLAN on bridge br-ex
-  NeutronExternalNetworkBridge: "''"
-  # Customize bonding options if required (ignored if bonds are not used)
-  BondInterfaceOvsOptions:
-      "bond_mode=balance-tcp lacp=active other-config:lacp-fallback-ab=true"
-
-  ServiceNetMap:
-    NeutronTenantNetwork: tenant
-    CeilometerApiNetwork: internal_api
-    MongoDbNetwork: internal_api
-    CinderApiNetwork: internal_api
-    CinderIscsiNetwork: storage
-    GlanceApiNetwork: storage
-    GlanceRegistryNetwork: internal_api
-    KeystoneAdminApiNetwork: internal_api
-    KeystonePublicApiNetwork: internal_api
-    NeutronApiNetwork: internal_api
-    HeatApiNetwork: internal_api
-    NovaApiNetwork: internal_api
-    NovaMetadataNetwork: internal_api
-    NovaVncProxyNetwork: internal_api
-    SwiftMgmtNetwork: storage_mgmt
-    SwiftProxyNetwork: storage
-    HorizonNetwork: internal_api
-    MemcachedNetwork: internal_api
-    RabbitMqNetwork: internal_api
-    RedisNetwork: internal_api
-    MysqlNetwork: internal_api
-    CephClusterNetwork: storage_mgmt
-    CephPublicNetwork: storage
-    # Define which network will be used for hostname resolution
-    ControllerHostnameResolveNetwork: internal_api
-    ComputeHostnameResolveNetwork: internal_api
-    BlockStorageHostnameResolveNetwork: internal_api
-    ObjectStorageHostnameResolveNetwork: internal_api
-    CephStorageHostnameResolveNetwork: storage
diff --git a/config/deploy/network/network_settings.yaml b/config/deploy/network/network_settings.yaml
new file mode 100644 (file)
index 0000000..2560cc5
--- /dev/null
@@ -0,0 +1,53 @@
+# Defines Network Environment for a Baremetal Deployment
+# Any values missing will be auto-detected on the jumphost
+admin_network:
+  enabled: true
+  network_type: bridged
+  bridged_interface: ''
+  bond_interfaces: ''
+  vlan: native
+  usable_ip_range: 192.0.2.11,192.0.2.99
+  gateway: 192.0.2.1
+  provisioner_ip: 192.0.2.1
+  cidr: 192.0.2.0/24
+  dhcp_range: 192.0.2.2,192.0.2.10
+  introspection_range: 192.0.2.100,192.0.2.120
+private_network:
+  enabled: false
+public_network:
+  enabled: true
+  network_type: ''
+  bridged_interface: ''
+  cidr: 192.168.37.0/24
+  gateway: 192.168.37.1
+  floating_ip_range: 192.168.37.200,192.168.37.220
+  usable_ip_range: 192.168.37.10,192.168.37.199
+  provisioner_ip: 192.168.37.1
+storage_network:
+  enabled: false
+
+#admin_network:  #Required network, other networks can collapse into this network if not enabled
+#  enabled: true
+#  network_type: bridged                             #Indicates if this network will be bridged to an interface, or to a bond
+#  bridged_interface: ''                             #Interface to bridge to for installer VM
+#  bond_interfaces: ''                               #Interfaces to create bond with for installer VM
+#  vlan: native                                      #VLAN tag to use, native means none
+#  usable_ip_range: 192.0.2.11,192.0.2.99            #Usable ip range, if empty entire range is usable, ex. 192.168.1.10,192.168.1.20
+#  gateway: 192.0.2.1                                #Gateway (only needed when public_network is disabled), if empty it is auto-detected
+#  provisioner_ip: 192.0.2.1                         #installer VM IP, if empty it is the next available IP in the admin subnet
+#  cidr: 192.0.2.0/24                                #subnet in CIDR format 192.168.1.0/24, if empty it will be auto-detected
+#  dhcp_range: 192.0.2.2,192.0.2.10                  #dhcp range for the admin network, if empty it will be automatically provisioned
+#  introspection_range: 192.0.2.100,192.0.2.120      #Range used for introspection phase (examining nodes)
+#private_network:                                    #Network for internal API traffic for O/S services and internal tenant traffic
+#  enabled: false                                    #If disabled, internal api traffic will collapse to admin_network
+#public_network:                                     #Network for external API traffic and external tenant traffic
+#  enabled: true                                     #If disabled, public_network traffic will collapse to admin network
+#  network_type: ''
+#  bridged_interface: ''
+#  cidr: 192.168.37.0/24
+#  gateway: 192.168.37.1
+#  floating_ip_range: 192.168.37.200,192.168.37.220  #Range to allocate to floating IPs for the public network with Neutron
+#  usable_ip_range: 192.168.37.10,192.168.37.199     #Usable IP range on the public network, usually this is a shared subnet
+#  provisioner_ip: 192.168.37.1
+#storage_network:                                    #Network for Ceph storage traffic
+#  enabled: false                                    #If disabled, storage_network traffic will collapse to admin network
index 4545839..6058209 100644 (file)
@@ -5,7 +5,7 @@ nodes:
     ipmi_user: root
     ipmi_pass: root
     cpus: 2
-    memory: 2048
+    memory: 8192
     disk: 40
     arch: "x86_64"
     capabilities: "profile:control"
@@ -15,27 +15,27 @@ nodes:
     ipmi_user: root
     ipmi_pass: root
     cpus: 2
-    memory: 2048
+    memory: 8192
     disk: 40
     arch: "x86_64"
     capabilities: "profile:control"
   node3:
-    mac_address: "00:1e:67:4f:cc:f1"
+    mac_address: "00:1e:67:4f:cc:0b"
     ipmi_ip: 10.4.7.4
     ipmi_user: root
     ipmi_pass: root
     cpus: 2
-    memory: 2048
+    memory: 8192
     disk: 40
     arch: "x86_64"
     capabilities: "profile:control"
   node4:
-    mac_address: "00:1e:67:4f:cc:0b"
+    mac_address: "00:1e:67:4f:cc:f1"
     ipmi_ip: 10.4.7.5
     ipmi_user: root
     ipmi_pass: root
     cpus: 2
-    memory: 2048
+    memory: 8192
     disk: 40
     arch: "x86_64"
     capabilities: "profile:compute"
@@ -45,7 +45,7 @@ nodes:
     ipmi_user: root
     ipmi_pass: root
     cpus: 2
-    memory: 2048
+    memory: 8192
     disk: 40
     arch: "x86_64"
     capabilities: "profile:compute"
diff --git a/lib/common-functions.sh b/lib/common-functions.sh
new file mode 100644 (file)
index 0000000..9aa97e8
--- /dev/null
@@ -0,0 +1,314 @@
+#!/usr/bin/env bash
+# Common Functions used by OPNFV Apex
+# author: Tim Rozet (trozet@redhat.com)
+
+##find ip of interface
+##params: interface name
+function find_ip {
+  ip addr show $1 | grep -Eo '^\s+inet\s+[\.0-9]+' | awk '{print $2}'
+}
+
+##finds subnet of ip and netmask
+##params: ip, netmask
+function find_subnet {
+  IFS=. read -r i1 i2 i3 i4 <<< "$1"
+  IFS=. read -r m1 m2 m3 m4 <<< "$2"
+  printf "%d.%d.%d.%d\n" "$((i1 & m1))" "$((i2 & m2))" "$((i3 & m3))" "$((i4 & m4))"
+}
+
+##verify subnet has at least n IPs
+##params: subnet mask, n IPs
+function verify_subnet_size {
+  IFS=. read -r i1 i2 i3 i4 <<< "$1"
+  num_ips_required=$2
+
+  ##this function assumes you would never need more than 254
+  ##we check here to make sure
+  if [ "$num_ips_required" -ge 254 ]; then
+    echo -e "\n\n${red}ERROR: allocating more than 254 IPs is unsupported...Exiting${reset}\n\n"
+    return 1
+  fi
+
+  ##we just return if 3rd octet is not 255
+  ##because we know the subnet is big enough
+  if [ "$i3" -ne 255 ]; then
+    return 0
+  elif [ $((254-$i4)) -ge "$num_ips_required" ]; then
+    return 0
+  else
+    echo -e "\n\n${red}ERROR: Subnet is too small${reset}\n\n"
+    return 1
+  fi
+}
+
+##finds last usable ip (broadcast minus 1) of a subnet from an IP and netmask
+## Warning: This function only works for IPv4 at the moment.
+##params: ip, netmask
+function find_last_ip_subnet {
+  IFS=. read -r i1 i2 i3 i4 <<< "$1"
+  IFS=. read -r m1 m2 m3 m4 <<< "$2"
+  IFS=. read -r s1 s2 s3 s4 <<< "$((i1 & m1)).$((i2 & m2)).$((i3 & m3)).$((i4 & m4))"
+  printf "%d.%d.%d.%d\n" "$((255 - $m1 + $s1))" "$((255 - $m2 + $s2))" "$((255 - $m3 + $s3))" "$((255 - $m4 + $s4 - 1))"
+}
+
+##increments subnet by a value
+##params: ip, value
+##assumes low value
+function increment_subnet {
+  IFS=. read -r i1 i2 i3 i4 <<< "$1"
+  printf "%d.%d.%d.%d\n" "$i1" "$i2" "$i3" "$((i4 | $2))"
+}
+
+##finds netmask of interface
+##params: interface
+##returns long format 255.255.x.x
+function find_netmask {
+  ifconfig $1 | grep -Eo 'netmask\s+[\.0-9]+' | awk '{print $2}'
+}
+
+##finds short netmask of interface
+##params: interface
+##returns short format, ex: /21
+function find_short_netmask {
+  echo "/$(ip addr show $1 | grep -Eo '^\s+inet\s+[\/\.0-9]+' | awk '{print $2}' | cut -d / -f2)"
+}
+
+##increments next IP
+##params: ip
+##assumes a /24 subnet
+function next_ip {
+  baseaddr="$(echo $1 | cut -d. -f1-3)"
+  lsv="$(echo $1 | cut -d. -f4)"
+  if [ "$lsv" -ge 254 ]; then
+    return 1
+  fi
+  ((lsv++))
+  echo $baseaddr.$lsv
+}
+
+##subtracts a value from an IP address
+##params: last ip, ip_count
+##assumes ip_count is less than the last octect of the address
+function subtract_ip {
+  IFS=. read -r i1 i2 i3 i4 <<< "$1"
+  ip_count=$2
+  if [ $i4 -lt $ip_count ]; then
+    echo -e "\n\n${red}ERROR: Can't subtract $ip_count from IP address $1. Exiting${reset}\n\n"
+    exit 1
+  fi
+  printf "%d.%d.%d.%d\n" "$i1" "$i2" "$i3" "$((i4 - $ip_count ))"
+}
+
+##check if IP is in use
+##params: ip
+##ping ip to get arp entry, then check arp
+function is_ip_used {
+  ping -c 5 $1 > /dev/null 2>&1
+  arp -n | grep "$1 " | grep -iv incomplete > /dev/null 2>&1
+}
+
+##find next usable IP
+##params: ip
+function next_usable_ip {
+  new_ip=$(next_ip $1)
+  while [ "$new_ip" ]; do
+    if ! is_ip_used $new_ip; then
+      echo $new_ip
+      return 0
+    fi
+    new_ip=$(next_ip $new_ip)
+  done
+  return 1
+}
+
+##increment ip by value
+##params: ip, amount to increment by
+##increment_ip $next_private_ip 10
+function increment_ip {
+  baseaddr="$(echo $1 | cut -d. -f1-3)"
+  lsv="$(echo $1 | cut -d. -f4)"
+  incrval=$2
+  lsv=$((lsv+incrval))
+  if [ "$lsv" -ge 254 ]; then
+    return 1
+  fi
+  echo $baseaddr.$lsv
+}
+
+##finds gateway on system
+##params: interface to validate gateway on (optional)
+##find_gateway em1
+function find_gateway {
+  local gw gw_interface
+  gw=$(ip route | grep default | awk '{print $3}')
+  gw_interface=$(ip route get $gw | awk '{print $3}')
+  if [ -z "$1" ]; then
+    echo ${gw}
+  elif [ "$gw_interface" == "$1" ]; then
+    echo ${gw}
+  fi
+}
+
+##finds subnet in CIDR notation for interface
+##params: interface to find CIDR
+function find_cidr {
+  local cidr network ip netmask short_mask
+  ip=$(find_ip $1)
+  netmask=$(find_netmask $1)
+  if [[ -z "$ip" || -z "$netmask" ]]; then
+    return 1
+  fi
+  network=$(find_subnet ${ip} ${netmask})
+  short_mask=$(find_short_netmask $1)
+  if [[ -z "$network" || -z "$short_mask" ]]; then
+    return 1
+  fi
+  cidr="${network}${short_mask}"
+  echo ${cidr}
+}
+
+##finds block of usable IP addresses for an interface
+##returns the range in "start,end" format
+##skips the first 20 IPs and leaves 20 IPs free at the end of the subnet (for floating ips, etc)
+##params: interface to find IP
+function find_usable_ip_range {
+  local interface_ip subnet_mask first_block_ip last_block_ip
+  interface_ip=$(find_ip $1)
+  subnet_mask=$(find_netmask $1)
+  if [[ -z "$interface_ip" || -z "$subnet_mask" ]]; then
+    return 1
+  fi
+  interface_ip=$(increment_ip ${interface_ip} 20)
+  first_block_ip=$(next_usable_ip ${interface_ip})
+  if [ -z "$first_block_ip" ]; then
+    return 1
+  fi
+  last_block_ip=$(find_last_ip_subnet ${interface_ip} ${subnet_mask})
+  if [ -z "$last_block_ip" ]; then
+    return 1
+  else
+    last_block_ip=$(subtract_ip ${last_block_ip} 20)
+    echo "${first_block_ip},${last_block_ip}"
+  fi
+
+}
+
+##generates usable IP range in correct format based on CIDR
+##assumes the first 20 IPs are used (by instack or otherwise)
+##params: cidr
+function generate_usable_ip_range {
+  local first_ip first_block_ip last_block_ip
+  first_ip=$(ipcalc  -nb $1 | grep HostMin: | grep -Eo "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+")
+  first_block_ip=$(increment_ip ${first_ip} 20)
+  last_block_ip=$(ipcalc  -nb $1 | grep HostMax: | grep -Eo "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+")
+  if [[ -z "$first_block_ip" || -z "$last_block_ip" ]]; then
+    return 1
+  else
+    last_block_ip=$(subtract_ip ${last_block_ip} 20)
+    echo "${first_block_ip},${last_block_ip}"
+  fi
+}
+
+##find the instack IP address
+##finds first usable IP on subnet
+##params: interface
+function find_provisioner_ip {
+  local interface_ip
+  interface_ip=$(find_ip $1)
+  if [ -z "$interface_ip" ]; then
+    return 1
+  fi
+  echo $(increment_ip ${interface_ip} 1)
+}
+
+##generates instack IP address based on CIDR
+##params: cidr
+function generate_provisioner_ip {
+  # echo the first host IP so callers can capture it via command substitution
+  echo $(ipcalc  -nb $1 | grep HostMin: | grep -Eo "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+")
+}
+
+##finds the dhcp range available via interface
+##uses first 8 IPs, after 2nd IP
+##params: interface
+function find_dhcp_range {
+  local dhcp_range_start dhcp_range_end interface_ip
+  interface_ip=$(find_ip $1)
+  if [ -z "$interface_ip" ]; then
+    return 1
+  fi
+  dhcp_range_start=$(increment_ip ${interface_ip} 2)
+  dhcp_range_end=$(increment_ip ${dhcp_range_start} 8)
+  echo "${dhcp_range_start},${dhcp_range_end}"
+}
+
+##generates the dhcp range available via CIDR
+##uses first 8 IPs, after 1st IP
+##params: cidr
+function generate_dhcp_range {
+  local dhcp_range_start dhcp_range_end first_ip
+  first_ip=$(ipcalc  -nb $1 | grep HostMin: | grep -Eo "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+")
+  if [ -z "$first_ip" ]; then
+    return 1
+  fi
+  dhcp_range_start=$(increment_ip ${first_ip} 1)
+  dhcp_range_end=$(increment_ip ${dhcp_range_start} 8)
+  echo "${dhcp_range_start},${dhcp_range_end}"
+}
+
+##finds the introspection range available via interface
+##uses 8 IPs, after the first 10 IPs
+##params: interface
+function find_introspection_range {
+  local inspect_range_start inspect_range_end interface_ip
+  interface_ip=$(find_ip $1)
+  if [ -z "$interface_ip" ]; then
+    return 1
+  fi
+  inspect_range_start=$(increment_ip ${interface_ip} 10)
+  inspect_range_end=$(increment_ip ${inspect_range_start} 8)
+  echo "${inspect_range_start},${inspect_range_end}"
+}
+
+##generate the introspection range available via CIDR
+##uses 8 IPs, after the first 10 IPs
+##params: cidr
+function generate_introspection_range {
+  local inspect_range_start inspect_range_end first_ip
+  first_ip=$(ipcalc  -nb $1 | grep HostMin: | grep -Eo "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+")
+  if [ -z "$first_ip" ]; then
+    return 1
+  fi
+  inspect_range_start=$(increment_ip ${first_ip} 10)
+  inspect_range_end=$(increment_ip ${inspect_range_start} 8)
+  echo "${inspect_range_start},${inspect_range_end}"
+}
+
+##finds the floating ip range available via interface
+##uses last 20 IPs of a subnet
+##params: interface
+function find_floating_ip_range {
+  local float_range_start float_range_end interface_ip subnet_mask
+  interface_ip=$(find_ip $1)
+  subnet_mask=$(find_netmask $1)
+  if [[ -z "$interface_ip" || -z "$subnet_mask" ]]; then
+    return 1
+  fi
+  float_range_end=$(find_last_ip_subnet ${interface_ip} ${subnet_mask})
+  float_range_start=$(subtract_ip ${float_range_end} 19)
+  echo "${float_range_start},${float_range_end}"
+}
+
+##generate the floating range available via CIDR
+##uses last 20 IPs of subnet
+##params: cidr
+function generate_floating_ip_range {
+  local float_range_start float_range_end last_ip
+  last_ip=$(ipcalc  -nb $1 | grep HostMax: | grep -Eo "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+")
+  if [ -z "$last_ip" ]; then
+    return 1
+  fi
+  float_range_start=$(subtract_ip ${last_ip} 19)
+  float_range_end=${last_ip}
+  echo "${float_range_start},${float_range_end}"
+}