Merge "Adds installation instructions for Foreman/QuickStack"
author Daniel Smith <daniel.smith@ericsson.com>
Mon, 11 May 2015 14:54:50 +0000 (14:54 +0000)
committer Gerrit Code Review <gerrit@172.30.200.206>
Mon, 11 May 2015 14:54:50 +0000 (14:54 +0000)
foreman/ci/clean.sh [new file with mode: 0755]
foreman/ci/deploy.sh
foreman/ci/inventory/lf_pod2_ksgen_settings.yml [new file with mode: 0644]
fuel/ci/build.sh

diff --git a/foreman/ci/clean.sh b/foreman/ci/clean.sh
new file mode 100755 (executable)
index 0000000..25352a8
--- /dev/null
@@ -0,0 +1,151 @@
+#!/usr/bin/env bash
+
+#Clean script to uninstall provisioning server for Foreman/QuickStack
+#author: Tim Rozet (trozet@redhat.com)
+#
+#Uses Vagrant and VirtualBox
+#
+#Destroys Vagrant VM running in /tmp/bgs_vagrant
+#Shuts down all nodes found in Khaleesi settings
+#Removes hypervisor kernel modules (VirtualBox)
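+#
+#Example invocation (path is illustrative, see -base_config below):
+#  ./clean.sh -base_config /opt/myinventory.yml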
+
+##VARS
+reset=`tput sgr0`
+blue=`tput setaf 4`
+red=`tput setaf 1`
+green=`tput setaf 2`
+##END VARS
+
+##FUNCTIONS
+display_usage() {
+  echo -e "\n\n${blue}This script is used to uninstall Foreman/QuickStack Installer and Clean OPNFV Target System${reset}\n\n"
+  echo -e "\nUsage:\n$0 [arguments] \n"
+  echo -e "\n   -no_parse : No variable parsing into config. Flag. \n"
+  echo -e "\n   -base_config : Full path of ksgen settings file to parse. Required.  Will provide BMC info to shutdown hosts.  Example:  -base_config /opt/myinventory.yml \n"
+}
+
+##END FUNCTIONS
+
+if [[ "$1" == "--help" || "$1" == "-h" ]]; then
+    display_usage
+    exit 0
+fi
+
+echo -e "\n\n${blue}This script is used to uninstall Foreman/QuickStack Installer and Clean OPNFV Target System${reset}\n\n"
+echo "Use -h to display help"
+sleep 2
+
+while [ "`echo $1 | cut -c1`" = "-" ]
+do
+    echo $1
+    case "$1" in
+        -base_config)
+                base_config=$2
+                shift 2
+            ;;
+        -no_parse)
+                shift 1
+            ;;
+        *)
+                display_usage
+                exit 1
+            ;;
+    esac
+done
+
+
+##install ipmitool
+if ! yum list installed | grep -i ipmitool; then
+  if ! yum -y install ipmitool; then
+    echo "${red}Unable to install ipmitool!${reset}"
+    exit 1
+  fi
+else
+  echo "${blue}Skipping ipmitool as it is already installed!${reset}"
+fi
+
+###find all the bmc IPs and number of nodes
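+#Assumes ksgen settings entries like those in inventory/lf_pod2_ksgen_settings.yml, e.g.:
+#    bmc_ip: 172.30.8.74
+#    bmc_user: admin
+#    bmc_pass: octopus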
+node_counter=0
+output=`grep bmc_ip "$base_config" | grep -Eo '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+'`
+for line in ${output} ; do
+  bmc_ip[$node_counter]=$line
+  ((node_counter++))
+done
+
+max_nodes=$((node_counter-1))
+
+###find bmc_users per node
+node_counter=0
+output=`grep bmc_user $base_config | sed 's/\s*bmc_user:\s*//'`
+for line in ${output} ; do
+  bmc_user[$node_counter]=$line
+  ((node_counter++))
+done
+
+###find bmc_pass per node
+node_counter=0
+output=`grep bmc_pass $base_config | sed 's/\s*bmc_pass:\s*//'`
+for line in ${output} ; do
+  bmc_pass[$node_counter]=$line
+  ((node_counter++))
+done
+
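+#power off each node via its BMC; per node this is equivalent to running:
+#  ipmitool -I lanplus -H <bmc_ip> -U <bmc_user> -P <bmc_pass> chassis power off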
+for mynode in `seq 0 $max_nodes`; do
+  echo "${blue}Node: ${bmc_ip[$mynode]} ${bmc_user[$mynode]} ${bmc_pass[$mynode]} ${reset}"
+  if ipmitool -I lanplus -P ${bmc_pass[$mynode]} -U ${bmc_user[$mynode]} -H ${bmc_ip[$mynode]} chassis power off; then
+    echo "${blue}Node: $mynode, ${bmc_ip[$mynode]} powered off!${reset}"
+  else
+    echo "${red}Error: Unable to power off $mynode, ${bmc_ip[$mynode]} ${reset}"
+    exit 1
+  fi
+done
+
+###check to see if vbox is installed
+vboxpkg=`rpm -qa | grep VirtualBox`
+if [ $? -eq 0 ]; then
+  skip_vagrant=0
+else
+  skip_vagrant=1
+fi
+
+###destroy vagrant
+if [ $skip_vagrant -eq 0 ]; then
+  cd /tmp/bgs_vagrant
+  if vagrant destroy -f; then
+    echo "${blue}Successfully destroyed Foreman VM ${reset}"
+  else
+    echo "${red}Unable to destroy Foreman VM ${reset}"
+    echo "${blue}Checking if vagrant was already destroyed and no process is active...${reset}"
+    if ps axf | grep "[v]agrant"; then
+      echo "${red}Vagrant VM still exists...exiting ${reset}"
+      exit 1
+    else
+      echo "${blue}Vagrant process doesn't exist.  Moving on... ${reset}"
+    fi
+  fi
+
+  ###kill virtualbox
+  echo "${blue}Killing VirtualBox ${reset}"
+  killall virtualbox
+killall VBoxHeadless
+
+  ###remove virtualbox
+  echo "${blue}Removing VirtualBox ${reset}"
+  yum -y remove $vboxpkg
+
+else
+  echo "${blue}Skipping Vagrant destroy + Vbox Removal as VirtualBox package is already removed ${reset}"
+fi
+
+
+###remove kernel modules
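+#vboxdrv is unloaded last since vboxnetadp, vboxnetflt and vboxpci depend on it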
+echo "${blue}Removing kernel modules ${reset}"
+for kernel_mod in vboxnetadp vboxnetflt vboxpci vboxdrv; do
+  if ! rmmod $kernel_mod; then
+    if rmmod $kernel_mod 2>&1 | grep -i 'not currently loaded'; then
+      echo "${blue} $kernel_mod is not currently loaded! ${reset}"
+    else
+      echo "${red}Error trying to remove Kernel Module: $kernel_mod ${reset}"
+      exit 1
+    fi
+  else
+    echo "${blue}Removed Kernel Module: $kernel_mod ${reset}"
+  fi
+done
diff --git a/foreman/ci/deploy.sh b/foreman/ci/deploy.sh
index 49e1590..ae585b0 100755 (executable)
@@ -24,6 +24,7 @@ blue=`tput setaf 4`
 red=`tput setaf 1`
 green=`tput setaf 2`
 
+declare -A interface_arr
 ##END VARS
 
 ##FUNCTIONS
@@ -206,6 +207,14 @@ else
   printf '%s\n' 'deploy.sh: Skipping kernel module for virtualbox.  Already Installed'
 fi
 
+##install Ansible
+if ! yum list installed | grep -i ansible; then
+  if ! yum -y install ansible; then
+    printf '%s\n' 'deploy.sh: Unable to install Ansible package' >&2
+    exit 1
+  fi
+fi
+
 ##install Vagrant
 if ! rpm -qa | grep vagrant; then
   if ! rpm -Uvh https://dl.bintray.com/mitchellh/vagrant/vagrant_1.7.2_x86_64.rpm; then
@@ -252,7 +261,7 @@ cd bgs_vagrant
 echo "${blue}Detecting network configuration...${reset}"
 ##detect host 1 or 3 interface configuration
 #output=`ip link show | grep -E "^[0-9]" | grep -Ev ": lo|tun|virbr|vboxnet" | awk '{print $2}' | sed 's/://'`
-output=`ifconfig | grep -E "^[a-Z0-9]+:"| grep -Ev "lo|tun|virbr|vboxnet:" | awk '{print $1}' | sed 's/://'`
+output=`ifconfig | grep -E "^[a-zA-Z0-9]+:"| grep -Ev "lo|tun|virbr|vboxnet" | awk '{print $1}' | sed 's/://'`
 
 if [ ! "$output" ]; then
   printf '%s\n' 'deploy.sh: Unable to detect interfaces to bridge to' >&2
@@ -274,6 +283,7 @@ for interface in ${output}; do
   if [ ! "$new_ip" ]; then
     continue
   fi
+  interface_arr[$interface]=$if_counter
   interface_ip_arr[$if_counter]=$new_ip
   subnet_mask=$(find_netmask $interface)
   if [ "$if_counter" -eq 1 ]; then
@@ -310,15 +320,47 @@ fi
 echo "${blue}Network detected: ${deployment_type}! ${reset}"
 
 if route | grep default; then
-  defaultgw=$(route | grep default | awk '{print $2}')
-  echo "${blue}Default gateway detected: $defaultgw ${reset}"
-  sed -i 's/^.*default_gw =.*$/  default_gw = '\""$defaultgw"\"'/' Vagrantfile
+  echo "${blue}Default Gateway Detected ${reset}"
+  host_default_gw=$(ip route | grep default | awk '{print $3}')
+  echo "${blue}Default Gateway: $host_default_gw ${reset}"
+  default_gw_interface=$(ip route get $host_default_gw | awk '{print $3}')
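+  #interface_arr maps each detected interface to the order it was found in:
+  #0=Admin, 1=Private, 2=Public, 3=Storage (handled by the cases below)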
+  case "${interface_arr[$default_gw_interface]}" in
+           0)
+             echo "${blue}Default Gateway Detected on Admin Interface!${reset}"
+             sed -i 's/^.*default_gw =.*$/  default_gw = '\""$host_default_gw"\"'/' Vagrantfile
+             node_default_gw=$host_default_gw
+             ;;
+           1)
+             echo "${red}Default Gateway Detected on Private Interface!${reset}"
+             echo "${red}Private subnet should be private and not have Internet access!${reset}"
+             exit 1
+             ;;
+           2)
+             echo "${blue}Default Gateway Detected on Public Interface!${reset}"
+             sed -i 's/^.*default_gw =.*$/  default_gw = '\""$host_default_gw"\"'/' Vagrantfile
+             echo "${blue}Will setup NAT from Admin -> Public Network on VM!${reset}"
+             sed -i 's/^.*nat_flag =.*$/  nat_flag = true/' Vagrantfile
+             echo "${blue}Setting node gateway to be VM Admin IP${reset}"
+             node_default_gw=${interface_ip_arr[0]}
+             ;;
+           3)
+             echo "${red}Default Gateway Detected on Storage Interface!${reset}"
+             echo "${red}Storage subnet should be private and not have Internet access!${reset}"
+             exit 1
+             ;;
+           *)
+             echo "${red}Unable to determine which interface default gateway is on..Exiting!${reset}"
+             exit 1
+             ;;
+  esac
 else
-  defaultgw=`echo ${interface_arr_ip[0]} | cut -d. -f1-3`
+  #assumes 24 bit mask
+  defaultgw=`echo ${interface_ip_arr[0]} | cut -d. -f1-3`
   firstip=.1
   defaultgw=$defaultgw$firstip
   echo "${blue}Unable to find default gateway.  Assuming it is $defaultgw ${reset}"
   sed -i 's/^.*default_gw =.*$/  default_gw = '\""$defaultgw"\"'/' Vagrantfile
+  node_default_gw=$defaultgw
 fi
 
 if [ $base_config ]; then
@@ -339,7 +381,7 @@ echo "${blue}Gathering network parameters for Target System...this may take a fe
 ##if single node deployment all the variables will have the same ip
 ##interface names will be enp0s3, enp0s8, enp0s9 in chef/centos7
 
-sed -i 's/^.*default_gw:.*$/default_gw:'" $defaultgw"'/' opnfv_ksgen_settings.yml
+sed -i 's/^.*default_gw:.*$/default_gw:'" $node_default_gw"'/' opnfv_ksgen_settings.yml
 
 ##replace private interface parameter
 ##private interface will be of hosts, so we need to know the provisioned host interface name
diff --git a/foreman/ci/inventory/lf_pod2_ksgen_settings.yml b/foreman/ci/inventory/lf_pod2_ksgen_settings.yml
new file mode 100644 (file)
index 0000000..ff6e3e0
--- /dev/null
@@ -0,0 +1,349 @@
+global_params:
+  admin_email: opnfv@opnfv.com
+  ha_flag: "true"
+  odl_flag: "true"
+  private_network:
+  storage_network:
+  controllers_hostnames_array: oscontroller1,oscontroller2,oscontroller3
+  controllers_ip_array:
+  amqp_vip:
+  private_subnet:
+  cinder_admin_vip:
+  cinder_private_vip:
+  cinder_public_vip:
+  db_vip:
+  glance_admin_vip:
+  glance_private_vip:
+  glance_public_vip:
+  heat_admin_vip:
+  heat_private_vip:
+  heat_public_vip:
+  heat_cfn_admin_vip:
+  heat_cfn_private_vip:
+  heat_cfn_public_vip:
+  horizon_admin_vip:
+  horizon_private_vip:
+  horizon_public_vip:
+  keystone_admin_vip:
+  keystone_private_vip:
+  keystone_public_vip:
+  loadbalancer_vip:
+  neutron_admin_vip:
+  neutron_private_vip:
+  neutron_public_vip:
+  nova_admin_vip:
+  nova_private_vip:
+  nova_public_vip:
+network_type: multi_network
+default_gw:
+foreman:
+  seed_values:
+    - { name: heat_cfn, oldvalue: true, newvalue: false }
+workaround_puppet_version_lock: false
+opm_branch: master
+installer:
+  name: puppet
+  short_name: pupt
+  network:
+    auto_assign_floating_ip: false
+    variant:
+      short_name: m2vx
+    plugin:
+      name: neutron
+workaround_openstack_packstack_rpm: false
+tempest:
+  repo:
+    Fedora:
+      '19': http://REPLACE_ME/~REPLACE_ME/openstack-tempest-icehouse/fedora-19/
+      '20': http://REPLACE_ME/~REPLACE_ME/openstack-tempest-icehouse/fedora-20/
+    RedHat:
+       '7.0': https://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-7/
+  use_virtual_env: false
+  public_allocation_end: 10.2.84.71
+  skip:
+    files: null
+    tests: null
+  public_allocation_start: 10.2.84.51
+  physnet: physnet1
+  use_custom_repo: false
+  public_subnet_cidr: 10.2.84.0/24
+  public_subnet_gateway: 10.2.84.1
+  additional_default_settings:
+  - section: compute
+    option: flavor_ref
+    value: 1
+  cirros_image_file: cirros-0.3.1-x86_64-disk.img
+  setup_method: tempest/rpm
+  test_name: all
+  rdo:
+     version: juno
+     rpm: http://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm
+  rpm:
+    version: 20141201
+  dir: ~{{ nodes.tempest.remote_user }}/tempest-dir
+tmp:
+  node_prefix: '{{ node.prefix | reject("none") | join("-") }}-'
+  anchors:
+  - https://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm
+  - http://repos.fedorapeople.org/repos/openstack/openstack-juno/
+opm_repo: https://github.com/redhat-openstack/openstack-puppet-modules.git
+workaround_vif_plugging: false
+openstack_packstack_rpm: http://REPLACE_ME/brewroot/packages/openstack-puppet-modules/2013.2/9.el6ost/noarch/openstack-puppet-modules-2013.2-9.el6ost.noarch.rpm
+nodes:
+  compute1:
+    name: oscompute11.opnfv.com
+    hostname: oscompute11.opnfv.com
+    short_name: oscompute11
+    type: compute
+    host_type: baremetal
+    hostgroup: Compute
+    mac_address: "00:25:b5:a0:00:5e"
+    bmc_ip: 172.30.8.74
+    bmc_mac: "74:a2:e6:a4:14:9c"
+    bmc_user: admin
+    bmc_pass: octopus
+    ansible_ssh_pass: "Op3nStack"
+    admin_password: ""
+    groups:
+    - compute
+    - foreman_nodes
+    - puppet
+    - rdo
+    - neutron
+  compute2:
+    name: oscompute12.opnfv.com
+    hostname: oscompute12.opnfv.com
+    short_name: oscompute12
+    type: compute
+    host_type: baremetal
+    hostgroup: Compute
+    mac_address: "00:25:b5:a0:00:3e"
+    bmc_ip: 172.30.8.73
+    bmc_mac: "a8:9d:21:a0:15:9c"
+    bmc_user: admin
+    bmc_pass: octopus
+    ansible_ssh_pass: "Op3nStack"
+    admin_password: ""
+    groups:
+    - compute
+    - foreman_nodes
+    - puppet
+    - rdo
+    - neutron
+  controller1:
+    name: oscontroller1.opnfv.com
+    hostname: oscontroller1.opnfv.com
+    short_name: oscontroller1
+    type: controller
+    host_type: baremetal
+    hostgroup: Controller_Network_ODL
+    mac_address: "00:25:b5:a0:00:af"
+    bmc_ip: 172.30.8.66
+    bmc_mac: "a8:9d:21:c9:8b:56"
+    bmc_user: admin
+    bmc_pass: octopus
+    private_ip: controller1_private
+    private_mac: "00:25:b5:b0:00:1f"
+    ansible_ssh_pass: "Op3nStack"
+    admin_password: "octopus"
+    groups:
+    - controller
+    - foreman_nodes
+    - puppet
+    - rdo
+    - neutron
+  controller2:
+    name: oscontroller2.opnfv.com
+    hostname: oscontroller2.opnfv.com
+    short_name: oscontroller2
+    type: controller
+    host_type: baremetal
+    hostgroup: Controller_Network
+    mac_address: "00:25:b5:a0:00:9e"
+    bmc_ip: 172.30.8.75
+    bmc_mac: "a8:9d:21:c9:4d:26"
+    bmc_user: admin
+    bmc_pass: octopus
+    private_ip: controller2_private
+    private_mac: "00:25:b5:b0:00:de"
+    ansible_ssh_pass: "Op3nStack"
+    admin_password: "octopus"
+    groups:
+    - controller
+    - foreman_nodes
+    - puppet
+    - rdo
+    - neutron
+  controller3:
+    name: oscontroller3.opnfv.com
+    hostname: oscontroller3.opnfv.com
+    short_name: oscontroller3
+    type: controller
+    host_type: baremetal
+    hostgroup: Controller_Network
+    mac_address: "00:25:b5:a0:00:7e"
+    bmc_ip: 172.30.8.65
+    bmc_mac: "a8:9d:21:c9:3a:92"
+    bmc_user: admin
+    bmc_pass: octopus
+    private_ip: controller3_private
+    private_mac: "00:25:b5:b0:00:be"
+    ansible_ssh_pass: "Op3nStack"
+    admin_password: "octopus"
+    groups:
+    - controller
+    - foreman_nodes
+    - puppet
+    - rdo
+    - neutron
+workaround_mysql_centos7: true
+distro:
+  name: centos
+  centos:
+    '7.0':
+      repos: []
+  short_name: c
+  short_version: 70
+  version: '7.0'
+  rhel:
+    '7.0':
+      kickstart_url: http://REPLACE_ME/released/RHEL-7/7.0/Server/x86_64/os/
+      repos:
+      - section: rhel7-server-rpms
+        name: Packages for RHEL 7 - $basearch
+        baseurl: http://REPLACE_ME/rel-eng/repos/rhel-7.0/x86_64/
+        gpgcheck: 0
+      - section: rhel-7-server-update-rpms
+        name: Update Packages for Enterprise Linux 7 - $basearch
+        baseurl: http://REPLACE_ME/rel-eng/repos/rhel-7.0-z/x86_64/
+        gpgcheck: 0
+      - section: rhel-7-server-optional-rpms
+        name: Optional Packages for Enterprise Linux 7 - $basearch
+        baseurl: http://REPLACE_ME/released/RHEL-7/7.0/Server-optional/x86_64/os/
+        gpgcheck: 0
+      - section: rhel-7-server-extras-rpms
+        name: Extras Packages for Enterprise Linux 7 - $basearch
+        baseurl: http://REPLACE_ME/rel-eng/EXTRAS-7.0-RHEL-7-20140610.0/compose/Server/x86_64/os/
+        gpgcheck: 0
+    '6.5':
+      kickstart_url: http://REPLACE_ME/released/RHEL-6/6.5/Server/x86_64/os/
+      repos:
+      - section: rhel6.5-server-rpms
+        name: Packages for RHEL 6.5 - $basearch
+        baseurl: http://REPLACE_ME.REPLACE_ME/released/RHEL-6/6.5/Server/$basearch/os/Server
+        gpgcheck: 0
+      - section: rhel-6.5-server-update-rpms
+        name: Update Packages for Enterprise Linux 6.5 - $basearch
+        baseurl: http://REPLACE_ME.REPLACE_ME/rel-eng/repos/RHEL-6.5-Z/$basearch/
+        gpgcheck: 0
+      - section: rhel-6.5-server-optional-rpms
+        name: Optional Packages for Enterprise Linux 6.5 - $basearch
+        baseurl: http://REPLACE_ME.REPLACE_ME/released/RHEL-6/6.5/Server/optional/$basearch/os
+        gpgcheck: 0
+      - section: rhel6.5-server-rpms-32bit
+        name: Packages for RHEL 6.5 - i386
+        baseurl: http://REPLACE_ME.REPLACE_ME/released/RHEL-6/6.5/Server/i386/os/Server
+        gpgcheck: 0
+        enabled: 1
+      - section: rhel-6.5-server-update-rpms-32bit
+        name: Update Packages for Enterprise Linux 6.5 - i686
+        baseurl: http://REPLACE_ME.REPLACE_ME/rel-eng/repos/RHEL-6.5-Z/i686/
+        gpgcheck: 0
+        enabled: 1
+      - section: rhel-6.5-server-optional-rpms-32bit
+        name: Optional Packages for Enterprise Linux 6.5 - i386
+        baseurl: http://REPLACE_ME.REPLACE_ME/released/RHEL-6/6.5/Server/optional/i386/os
+        gpgcheck: 0
+        enabled: 1
+    subscription:
+      username: REPLACE_ME
+      password: HWj8TE28Qi0eP2c
+      pool: 8a85f9823e3d5e43013e3ddd4e2a0977
+  config:
+    selinux: permissive
+    ntp_server: 0.pool.ntp.org
+    dns_servers:
+    - 10.4.1.1
+    - 10.4.0.2
+    reboot_delay: 1
+    initial_boot_timeout: 180
+node:
+  prefix:
+  - rdo
+  - pupt
+  - ffqiotcxz1
+  - null
+product:
+  repo_type: production
+  name: rdo
+  short_name: rdo
+  rpm:
+    CentOS: https://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm
+    Fedora: https://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm
+    RedHat: https://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm
+  short_version: ju
+  repo:
+    production:
+      CentOS:
+        7.0.1406: http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-7
+        '6.5': http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-6
+        '7.0': http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-7
+      Fedora:
+        '20': http://repos.fedorapeople.org/repos/openstack/openstack-juno/fedora-20
+        '21': http://repos.fedorapeople.org/repos/openstack/openstack-juno/fedora-21
+      RedHat:
+        '6.6': http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-6
+        '6.5': http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-6
+        '7.0': http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-7
+  version: juno
+  config:
+    enable_epel: y
+  short_repo: prod
+tester:
+  name: tempest
+distro_reboot_options: '--no-wall '' Reboot is triggered by Ansible'' '
+job:
+  verbosity: 1
+  archive:
+  - '{{ tempest.dir }}/etc/tempest.conf'
+  - '{{ tempest.dir }}/etc/tempest.conf.sample'
+  - '{{ tempest.dir }}/*.log'
+  - '{{ tempest.dir }}/*.xml'
+  - /root/
+  - /var/log/
+  - /etc/nova
+  - /etc/ceilometer
+  - /etc/cinder
+  - /etc/glance
+  - /etc/keystone
+  - /etc/neutron
+  - /etc/ntp
+  - /etc/puppet
+  - /etc/qpid
+  - /etc/qpidd.conf
+  - /root
+  - /etc/yum.repos.d
+topology:
+  name: multinode
+  short_name: mt
+workaround_neutron_ovs_udev_loop: true
+workaround_glance_table_utf8: false
+verbosity:
+  debug: 0
+  info: 1
+  warning: 2
+  warn: 2
+  errors: 3
+provisioner:
+  username: admin
+  network:
+    type: nova
+    name: external
+  skip: skip_provision
+  foreman_url: https://10.2.84.2/api/v2/
+  password: octopus
+  type: foreman
+workaround_nova_compute_fix: false
+workarounds:
+  enabled: true
diff --git a/fuel/ci/build.sh b/fuel/ci/build.sh
index afdcab5..51ccdae 100755 (executable)
@@ -105,7 +105,7 @@ TEST_FAIL=0
 UNIT_TEST=0
 UPDATE_CACHE=0
 POPULATE_CACHE=0
-RECURSIV=0
+RECURSIVE=0
 DETACH=0
 DEBUG=0
 INTEGRATION_TEST=0