xci: Updates to releng/bifrost to make it work on Jenkins 85/31285/3
author Fatih Degirmenci <fatih.degirmenci@ericsson.com>
Tue, 21 Mar 2017 21:16:18 +0000 (22:16 +0100)
committer Fatih Degirmenci <fatih.degirmenci@ericsson.com>
Wed, 22 Mar 2017 09:20:02 +0000 (10:20 +0100)
- get rid of BAREMETAL_DATA_FILE and use BIFROST_INVENTORY_SOURCE so
  osa-bifrost works for both master and ocata.
- set the BIFROST_INVENTORY_SOURCE file for osa-bifrost according to
  the branch bifrost is executed from.
- explicitly set which SSH public key file to use, since ansible
  copies the wrong public key to the nodes when bifrost is executed
  on Jenkins using sudo.
- set the branches if they are not already set so the scripts can
  also be used manually, as sketched below.
- rename jumphost to xcimaster so people do not confuse it with the
  actual jumphost located in Pharos PODs.
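
For manual runs, a minimal sketch of the intended usage (all paths and
variables appear in the scripts below; the branch values shown are only
the defaults):

    # both default to master when unset; export explicitly to pin a branch
    export OPENSTACK_BRANCH=${OPENSTACK_BRANCH:-master}
    export OPNFV_BRANCH=${OPNFV_BRANCH:-master}
    cd /opt/bifrost
    sudo -E ./scripts/osa-bifrost-deployment.sh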

Change-Id: Iff7631fa99816ad75316b62c5ac20714f67cd86a
Signed-off-by: Fatih Degirmenci <fatih.degirmenci@ericsson.com>
jjb/xci/bifrost-verify.sh
jjb/xci/xci-provision.sh
prototypes/bifrost/README.md
prototypes/bifrost/playbooks/inventory/group_vars/baremetal [new file with mode: 0644]
prototypes/bifrost/scripts/osa-bifrost-deployment.sh
prototypes/bifrost/scripts/test-bifrost-deployment.sh

index e0c5090..7624668 100755 (executable)
@@ -115,7 +115,7 @@ sudo /bin/cp -rf /opt/releng/prototypes/bifrost/* /opt/bifrost/
 cd /opt/bifrost
 sudo -E ./scripts/destroy-env.sh
 
-# provision 3 VMs; jumphost, controller, and compute
+# provision 3 VMs; xcimaster, controller, and compute
 cd /opt/bifrost
 sudo -E ./scripts/test-bifrost-deployment.sh
 
index 4308c7e..e474093 100755 (executable)
@@ -43,9 +43,9 @@ sudo /bin/rm -rf /opt/bifrost /opt/openstack-ansible /opt/stack /opt/releng /opt
 # Fix up permissions
 fix_ownership
 
-# clone all the repos first and checkout the patch afterwards
-OPENSTACK_BRANCH=${OPENSTACK_BRANCH:-master}
-OPNFV_BRANCH=${OPNFV_BRANCH:-master}
+# ensure the branches to use are set
+export OPENSTACK_BRANCH=${OPENSTACK_BRANCH:-master}
+export OPNFV_BRANCH=${OPNFV_BRANCH:-master}
 sudo git clone -b $OPENSTACK_BRANCH https://git.openstack.org/openstack/bifrost /opt/bifrost
 sudo git clone -b $OPNFV_BRANCH https://gerrit.opnfv.org/gerrit/releng /opt/releng
 
@@ -63,7 +63,7 @@ sudo /bin/cp -rf /opt/releng/prototypes/bifrost/* /opt/bifrost/
 cd /opt/bifrost
 sudo -E ./scripts/destroy-env.sh
 
-# provision 6 VMs; jumphost, controller00, controller01, controller02, compute00, and compute01
+# provision 6 VMs; xcimaster, controller00, controller01, controller02, compute00, and compute01
 cd /opt/bifrost
 sudo -E ./scripts/osa-bifrost-deployment.sh
 
index 0ba49d4..dc1417a 100644 (file)
@@ -31,7 +31,7 @@ Please follow these steps:
     cd /opt/bifrost
     sudo ./scripts/destroy-env.sh
 
-8. Run deployment script to spin up 3 vms with bifrost: jumphost, controller and compute::
+8. Run deployment script to spin up 3 vms with bifrost: xcimaster, controller and compute::
 
     cd /opt/bifrost
     sudo ./scripts/test-bifrost-deployment.sh
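
As a quick sanity check (a sketch, not one of the documented steps), the
freshly created libvirt domains can be listed to confirm the rename took
effect:

    # the 3-VM test deployment should now show xcimaster instead of jumphost
    sudo virsh list --all
    # expected domains: xcimaster, controller00, compute00
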
diff --git a/prototypes/bifrost/playbooks/inventory/group_vars/baremetal b/prototypes/bifrost/playbooks/inventory/group_vars/baremetal
new file mode 100644 (file)
index 0000000..008b04d
--- /dev/null
@@ -0,0 +1,53 @@
+---
+# The ironic API URL for bifrost operations.  Defaults to localhost.
+# ironic_url: "http://localhost:6385/"
+
+# The network interface that bifrost will be operating on.  Defaults
+# to virbr0 in roles, can be overridden here.
+# network_interface: "virbr0"
+
+# The path to the SSH key to be utilized for testing and burn-in
+# to configuration drives. When set, it should be set in both the baremetal
+# and localhost groups; however, this is only an override to the default.
+
+# workaround for opnfv ci until we can fix non-root use
+ssh_public_key_path: "/root/.ssh/id_rsa.pub"
+
+# Normally this user should be root; however, if cirros is used,
+# a user may wish to define a specific user for testing VM
+# connectivity during a test sequence.
+testing_user: root
+
+# The default port to download files via.  Required for IPA URL generation.
+# Presently the defaults are located in the roles, however if changed both
+# the localhost and baremetal group files must be updated.
+# file_url_port: 8080
+
+# IPA image parameters.  If these are changed, they must be changed in
+# both the localhost and baremetal groups.  Presently the defaults
+# in each role should be sufficient for proper operation.
+# ipa_kernel: "{{http_boot_folder}}/coreos_production_pxe.vmlinuz"
+# ipa_ramdisk: "{{http_boot_folder}}/coreos_production_pxe_image-oem.cpio.gz"
+# ipa_kernel_url: "http://{{ hostvars[inventory_hostname]['ansible_' + network_interface]['ipv4']['address'] }}:{{file_url_port}}/coreos_production_pxe.vmlinuz"
+# ipa_ramdisk_url: "http://{{ hostvars[inventory_hostname]['ansible_' + network_interface]['ipv4']['address'] }}:{{file_url_port}}/coreos_production_pxe_image-oem.cpio.gz"
+
+# The http_boot_folder defines the root folder for the webserver.
+# If this setting is changed, it must be applied to both the baremetal
+# and localhost groups. Presently the role defaults are set to the value
+# below.
+# http_boot_folder: /httpboot
+
+# The settings for the name of the image to be deployed along with the
+# on disk location are below.  If changed, these settings must be applied
+# to both the baremetal and localhost groups.  If the file is already on
+# disk, then the image generation will not take place, otherwise an image
+# will be generated using diskimage-builder.
+# deploy_image_filename: "deployment_image.qcow2"
+# deploy_image: "{{http_boot_folder}}/{{deploy_image_filename}}"
+
+# Under normal circumstances, the os_ironic_node module does not wait for
+# the node to reach active state before continuing with the deployment
+# process.  This means we may have to rely on a timeout to figure out that
+# a deployment failed.  Change wait_for_node_deploy to true to cause bifrost
+# to wait for Ironic to show the instance in the Active state.
+wait_for_node_deploy: false
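
Since ssh_public_key_path is an ordinary group variable, the CI workaround
above can also be overridden at runtime without editing this file; a hedged
sketch (ansible extra-vars take precedence over group_vars; the playbook and
inventory paths are assumptions based on bifrost's repo layout):

    # -e wins over group_vars, so a non-root key can be tried without
    # touching the committed workaround (key path shown is an example)
    sudo -E ansible-playbook -i inventory/bifrost_inventory.py \
        -e ssh_public_key_path=$HOME/.ssh/id_rsa.pub \
        deploy-dynamic.yaml
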
index c92bd9d..33ad108 100755 (executable)
@@ -18,10 +18,18 @@ ENABLE_VENV="false"
 USE_DHCP="false"
 USE_VENV="false"
 BUILD_IMAGE=true
-export BAREMETAL_DATA_FILE=${BAREMETAL_DATA_FILE:-'/tmp/baremetal.json'}
-export BIFROST_INVENTORY_SOURCE=${BIFROST_INVENTORY_SOURCE:-'/tmp/baremetal.csv'}
 PROVISION_WAIT_TIMEOUT=${PROVISION_WAIT_TIMEOUT:-3600}
 
+# ensure the branch is set
+export OPENSTACK_BRANCH=${OPENSTACK_BRANCH:-master}
+
+# ensure the right inventory file is used based on the branch
+if [ "$OPENSTACK_BRANCH" = "master" ]; then
+    export BIFROST_INVENTORY_SOURCE=${BIFROST_INVENTORY_SOURCE:-'/tmp/baremetal.json'}
+else
+    export BIFROST_INVENTORY_SOURCE=${BIFROST_INVENTORY_SOURCE:-'/tmp/baremetal.csv'}
+fi
+
 # Set defaults for ansible command-line options to drive the different
 # tests.
 
@@ -34,7 +42,7 @@ PROVISION_WAIT_TIMEOUT=${PROVISION_WAIT_TIMEOUT:-3600}
 # use cirros.
 
 TEST_VM_NUM_NODES=6
-export TEST_VM_NODE_NAMES="jumphost controller00 controller01 controller02 compute00 compute01"
+export TEST_VM_NODE_NAMES="xcimaster controller00 controller01 controller02 compute00 compute01"
 export VM_DOMAIN_TYPE="kvm"
 # 8 vCPU, 60 GB HDD are minimum equipment
 export VM_CPU=${VM_CPU:-8}
@@ -107,8 +115,7 @@ ${ANSIBLE} \
        -e test_vm_num_nodes=${TEST_VM_NUM_NODES} \
        -e test_vm_memory_size=${VM_MEMORY_SIZE} \
        -e enable_venv=${ENABLE_VENV} \
-       -e test_vm_domain_type=${VM_DOMAIN_TYPE} \
-       -e baremetal_json_file=${BAREMETAL_DATA_FILE}
+       -e test_vm_domain_type=${VM_DOMAIN_TYPE}
 
 # Execute the installation and VM startup test.
 ${ANSIBLE} \
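
The branch check added above only supplies a default, so a caller can still
force either inventory format; a sketch:

    # pre-seeding the variable bypasses the branch-based default entirely
    export BIFROST_INVENTORY_SOURCE=/tmp/baremetal.csv
    sudo -E ./scripts/osa-bifrost-deployment.sh
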
index 2e33bc1..83cf1cc 100755 (executable)
@@ -18,9 +18,8 @@ ENABLE_VENV="false"
 USE_DHCP="false"
 USE_VENV="false"
 BUILD_IMAGE=true
-export BAREMETAL_DATA_FILE=${BAREMETAL_DATA_FILE:-'/tmp/baremetal.json'}
-export BIFROST_INVENTORY_SOURCE=${BIFROST_INVENTORY_SOURCE:-'/tmp/baremetal.csv'}
 PROVISION_WAIT_TIMEOUT=${PROVISION_WAIT_TIMEOUT:-3600}
+BAREMETAL_DATA_FILE=${BAREMETAL_DATA_FILE:-'/tmp/baremetal.json'}
 
 # Set defaults for ansible command-line options to drive the different
 # tests.
@@ -34,7 +33,7 @@ PROVISION_WAIT_TIMEOUT=${PROVISION_WAIT_TIMEOUT:-3600}
 # use cirros.
 
 TEST_VM_NUM_NODES=3
-export TEST_VM_NODE_NAMES="jumphost.opnfvlocal controller00.opnfvlocal compute00.opnfvlocal"
+export TEST_VM_NODE_NAMES="xcimaster controller00 compute00"
 export VM_DOMAIN_TYPE="kvm"
 export VM_CPU=${VM_CPU:-4}
 export VM_DISK=${VM_DISK:-100}
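
Note the asymmetry above: BIFROST_INVENTORY_SOURCE is exported because
bifrost's dynamic inventory reads it from the environment, while
BAREMETAL_DATA_FILE can stay shell-local. A sketch of inspecting what the
inventory resolves to (the script path is an assumption based on bifrost's
repo layout):

    export BIFROST_INVENTORY_SOURCE=/tmp/baremetal.json
    # ansible dynamic inventory scripts emit their host list as JSON via --list
    python /opt/bifrost/playbooks/inventory/bifrost_inventory.py --list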