lib64/
parts/
sdist/
-var/
wheels/
*.egg-info/
.installed.cfg
testapi_venv/
.cache
.tox
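+# Ansible drops "<playbook>.retry" files next to playbooks on failed runs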
+*.retry
- 'os-nosdn-nofeature-ha'
- 'os-nosdn-nofeature-ha-ipv6'
- 'os-nosdn-ovs-noha'
+ - 'os-nosdn-ovs-ha'
- 'os-nosdn-fdio-noha'
- 'os-nosdn-fdio-ha'
- 'os-nosdn-kvm-ha'
- 'os-odl_l2-fdio-ha'
- 'os-odl_l2-netvirt_gbp_fdio-noha'
- 'os-odl_l2-sfc-noha'
+ - 'os-odl_l3-nofeature-noha'
- 'os-odl_l3-nofeature-ha'
+ - 'os-odl_l3-ovs-noha'
+ - 'os-odl_l3-ovs-ha'
- 'os-odl-bgpvpn-ha'
- 'os-odl-gluon-noha'
- 'os-odl_l3-fdio-noha'
- 'os-odl_l3-fdio_dvr-noha'
- 'os-odl_l3-fdio_dvr-ha'
- 'os-odl_l3-csit-noha'
- - 'os-odl_l3-nofeature-noha'
- 'os-onos-nofeature-ha'
- 'gate'
build-step-failure-threshold: 'never'
failure-threshold: 'never'
unstable-threshold: 'FAILURE'
+ - trigger-builds:
+ - project: 'apex-deploy-baremetal-os-nosdn-ovs-ha-{stream}'
+ predefined-parameters: |
+ BUILD_DIRECTORY=apex-build-{stream}/.build
+ OPNFV_CLEAN=yes
+ git-revision: true
+ same-node: true
+ block-thresholds:
+ build-step-failure-threshold: 'never'
+ block: true
+ - trigger-builds:
+ - project: 'functest-apex-{daily-slave}-daily-{stream}'
+ predefined-parameters:
+ DEPLOY_SCENARIO=os-nosdn-ovs-ha
+ block: true
+ same-node: true
+ block-thresholds:
+ build-step-failure-threshold: 'never'
+ failure-threshold: 'never'
+ unstable-threshold: 'FAILURE'
+ - trigger-builds:
+ - project: 'yardstick-apex-{slave}-daily-{stream}'
+ predefined-parameters:
+ DEPLOY_SCENARIO=os-nosdn-ovs-ha
+ block: true
+ same-node: true
+ block-thresholds:
+ build-step-failure-threshold: 'never'
+ failure-threshold: 'never'
+ unstable-threshold: 'FAILURE'
+ - trigger-builds:
+ - project: 'apex-deploy-baremetal-os-odl_l3-ovs-ha-{stream}'
+ predefined-parameters: |
+ BUILD_DIRECTORY=apex-build-{stream}/.build
+ OPNFV_CLEAN=yes
+ git-revision: true
+ same-node: true
+ block-thresholds:
+ build-step-failure-threshold: 'never'
+ block: true
+ - trigger-builds:
+ - project: 'functest-apex-{daily-slave}-daily-{stream}'
+ predefined-parameters:
+ DEPLOY_SCENARIO=os-odl_l3-ovs-ha
+ block: true
+ same-node: true
+ block-thresholds:
+ build-step-failure-threshold: 'never'
+ failure-threshold: 'never'
+ unstable-threshold: 'FAILURE'
+ - trigger-builds:
+ - project: 'yardstick-apex-{slave}-daily-{stream}'
+ predefined-parameters:
+ DEPLOY_SCENARIO=os-odl_l3-ovs-ha
+ block: true
+ same-node: true
+ block-thresholds:
+ build-step-failure-threshold: 'never'
+ failure-threshold: 'never'
+ unstable-threshold: 'FAILURE'
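+# Note: with build-step-failure-threshold and failure-threshold set to 'never',
+# a failed functest/yardstick run cannot fail the deploy chain; the
+# unstable-threshold of 'FAILURE' downgrades it to UNSTABLE on this job instead.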
# CSIT promote
- job-template:
- build-blocker:
use-build-blocker: true
blocking-jobs:
- - 'daisy.*-deploy-({pod})?-daily-.*'
+ - 'daisy-daily-.*'
+ - 'daisy4nfv-(merge|verify)-.*'
block-level: 'NODE'
wrappers:
- build-blocker:
use-build-blocker: true
blocking-jobs:
- - '{installer}-.*deploy-.*'
+ - '{installer}-daily-.*'
+ - 'daisy4nfv-(merge|verify)-.*'
block-level: 'NODE'
scm:
- danube:
branch: 'stable/{stream}'
gs-pathname: '/{stream}'
- disabled: false
+ disabled: true
#####################################
# patch merge phases
#####################################
- build-blocker:
use-build-blocker: true
blocking-jobs:
- - '{alias}-merge-deploy-.*'
+ - '{alias}-(merge|verify)-.*'
+ - '{project}-daily-.*'
block-level: 'NODE'
scm:
- build-blocker:
use-build-blocker: true
blocking-jobs:
- - '{alias}-verify-deploy-.*'
+ - '{alias}-(merge|verify)-.*'
+ - '{installer}-daily-.*'
block-level: 'NODE'
scm:
docker exec $container_id ${run_cmd}
sudo cp -r ${DOVETAIL_REPO_DIR}/results ./
-#To make sure the file owner is jenkins, for the copied results files in the above line
+#Make sure the result files copied in the line above are owned by the current user;
#if not, the next workspace wipe will fail with a permission error
-sudo chown -R jenkins:jenkins ${WORKSPACE}/results
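+# ${SUDO_USER:-$USER} resolves to the user who invoked sudo (falling back to
+# $USER), so the results end up owned by the CI account rather than root.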
+CURRENT_USER=${SUDO_USER:-$USER}
+PRIMARY_GROUP=$(id -gn $CURRENT_USER)
+sudo chown -R ${CURRENT_USER}:${PRIMARY_GROUP} ${WORKSPACE}/results
echo "Dovetail: done!"
disable-strict-forbidden-file-verification: 'true'
forbidden-file-paths:
- compare-type: ANT
- pattern: 'docs/**|.gitignore'
+ pattern: 'docs/**'
builders:
- description-setter:
#!/bin/bash
[[ $CI_DEBUG == true ]] && redirect="/dev/stdout" || redirect="/dev/null"
+# Remove dangling opnfv/yardstick images (tagged <none>) together with their containers
+dangling_images=($(docker images -f "dangling=true" | grep opnfv/yardstick | awk '{print $3}'))
+if [[ -n ${dangling_images} ]]; then
+ echo "Removing opnfv/yardstick:<none> images and their containers..."
+ for image_id in "${dangling_images[@]}"; do
+ echo " Removing image_id: $image_id and its containers"
+ containers=$(docker ps -a | grep $image_id | awk '{print $1}')
+    if [[ -n "$containers" ]]; then
+ docker rm -f $containers >${redirect}
+ fi
+ docker rmi $image_id >${redirect}
+ done
+fi
+
echo "Cleaning up docker containers/images..."
# Remove previous running containers if exist
if [[ ! -z $(docker ps -a | grep opnfv/yardstick) ]]; then
for tag in "${image_tags[@]}"; do
echo "Removing docker image opnfv/yardstick:$tag..."
docker rmi opnfv/yardstick:$tag >$redirect
-
done
fi
+
logger = logger.Logger("SSH utils").getLogger()
SSH_TIMEOUT = 60
+# --- Monkey patch paramiko Transport.start_client ---
+# We are using paramiko 2.1.1, and in CI the SFC test
+# hits this issue:
+# https://github.com/robotframework/SSHLibrary/issues/158
+# The fix was merged in paramiko 2.1.3; the corresponding
+# SSHLibrary change is here:
+# https://github.com/robotframework/SSHLibrary/pull/159/files
+# Until we upgrade, this monkey patch works around the issue.
+
+
+def _custom_start_client(self, *args, **kwargs):
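+    # Raise the SSH banner timeout (paramiko's default is 15 seconds) so that
+    # slow-to-respond hosts do not fail the handshake.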
+ self.banner_timeout = 45
+ self._orig_start_client(*args, **kwargs)
+
+
+paramiko.transport.Transport._orig_start_client = \
+ paramiko.transport.Transport.start_client
+paramiko.transport.Transport.start_client = _custom_start_client
+# --- End monkey patch ---
+
def get_ssh_client(hostname,
username,
--- /dev/null
+{#
+# Note(TheJulia): This file is based upon the file format provided by the git
+# committed example located at:
+# http://git.openstack.org/cgit/openstack/ironic-inspector/tree/example.conf
+#}
+[DEFAULT]
+{% if enable_keystone is defined and enable_keystone | bool == true %}
+auth_strategy = keystone
+{% else %}
+auth_strategy = {{ inspector_auth | default('noauth') }}
+{% endif %}
+debug = {{ inspector_debug | bool }}
+
+[database]
+connection = mysql+pymysql://inspector:{{ ironic_db_password }}@localhost/inspector?charset=utf8
+min_pool_size = 1
+max_pool_size = 5
+
+[firewall]
+manage_firewall = {{ inspector_manage_firewall | default('false') | bool }}
+
+[ironic]
+{% if enable_keystone is defined and enable_keystone | bool == true %}
+os_region = {{ keystone.bootstrap.region_name | default('RegionOne') }}
+project_name = baremetal
+username = {{ ironic_inspector.keystone.default_username }}
+password = {{ ironic_inspector.keystone.default_password }}
+auth_url = {{ ironic_inspector.service_catalog.auth_url }}
+auth_type = password
+auth_strategy = keystone
+user_domain_id = default
+project_domain_id = default
+
+{% else %}
+auth_strategy = {{ ironic_auth_strategy | default('noauth') }}
+{% endif %}
+
+{% if enable_keystone is defined and enable_keystone | bool == true %}
+[keystone_authtoken]
+auth_plugin = password
+auth_url = {{ ironic_inspector.service_catalog.auth_url }}
+username = {{ ironic_inspector.service_catalog.username }}
+password = {{ ironic_inspector.service_catalog.password }}
+user_domain_id = default
+project_name = service
+project_domain_id = default
+
+{% endif %}
+{#
+# Note(TheJulia) preserving ironic_url in the configuration
+# in case future changes allow breaking of the deployment across
+# multiple nodes.
+#ironic_url = http://localhost:6385/
+#}
+
+[processing]
+add_ports = {{ inspector_port_addition | default('pxe') }}
+keep_ports = {{ inspector_keep_ports | default('present') }}
+ramdisk_logs_dir = {{ inspector_data_dir }}/log
+always_store_ramdisk_logs = {{ inspector_store_ramdisk_logs | default('true') | bool }}
+{% if inspector.discovery.enabled == true %}
+node_not_found_hook = enroll
+
+[discovery]
+enroll_node_driver = {{ inspector.discovery.default_node_driver }}
+{% endif %}
--- /dev/null
+# {{ ansible_managed }}
+# For additional details on configuring ironic, you may wish to reference
+# the sample configuration file which can be located at
+# http://git.openstack.org/cgit/openstack/ironic/tree/etc/ironic/ironic.conf.sample
+
+
+[DEFAULT]
+# NOTE(TheJulia): Until Bifrost supports neutron or some other network
+# configuration besides a flat network where bifrost orchestrates the
+# control instead of ironic, noop is the only available network driver.
+enabled_network_interfaces = noop
+{% if testing | bool == true %}
+enabled_drivers = agent_ssh,pxe_ssh
+debug = true
+{% else %}
+enabled_drivers = {{ enabled_drivers }}
+debug = false
+{% endif %}
+
+rabbit_userid = ironic
+rabbit_password = {{ ironic_db_password }}
+
+{% if enable_keystone is defined and enable_keystone | bool == true %}
+auth_strategy = keystone
+{% else %}
+auth_strategy = noauth
+{% endif %}
+
+[pxe]
+pxe_append_params = systemd.journald.forward_to_console=yes {{ extra_kernel_options | default('') }}
+pxe_config_template = $pybasedir/drivers/modules/ipxe_config.template
+tftp_server = {{ hostvars[inventory_hostname]['ansible_' + ans_network_interface]['ipv4']['address'] }}
+tftp_root = /tftpboot
+pxe_bootfile_name = undionly.kpxe
+ipxe_enabled = true
+ipxe_boot_script = /etc/ironic/boot.ipxe
+
+[deploy]
+http_url = http://{{ hostvars[inventory_hostname]['ansible_' + ans_network_interface]['ipv4']['address'] }}:{{ file_url_port }}/
+http_root = {{ http_boot_folder }}
+
+[conductor]
+api_url = http://{{ hostvars[inventory_hostname]['ansible_' + ans_network_interface]['ipv4']['address'] }}:6385/
+clean_nodes = {{ cleaning | lower }}
+automated_clean = {{ cleaning | lower }}
+
+[database]
+connection = mysql+pymysql://ironic:{{ ironic_db_password }}@localhost/ironic?charset=utf8
+min_pool_size = 1
+max_pool_size = 5
+
+[dhcp]
+dhcp_provider = none
+
+{% if testing | bool == true %}
+[ssh]
+libvirt_uri = qemu:///system
+{% endif %}
+
+{% if enable_cors | bool == true %}
+[cors]
+allowed_origin = {{ cors_allowed_origin | default('http://localhost:8000') }}
+allow_credentials = {{ enable_cors_credential_support | default('true') }}
+{% endif %}
+
+[ilo]
+use_web_server_for_images = true
+
+{% if enable_inspector | bool == true %}
+[inspector]
+enabled = true
+{% endif %}
+
+{% if enable_keystone is defined and enable_keystone | bool == true %}
+[keystone]
+region_name = {{ keystone.bootstrap.region_name | default('RegionOne') }}
+[keystone_authtoken]
+auth_plugin = password
+auth_url = {{ ironic.service_catalog.auth_url }}
+username = {{ ironic.service_catalog.username }}
+password = {{ ironic.service_catalog.password }}
+user_domain_id = default
+project_name = {{ ironic.service_catalog.project_name }}
+project_domain_id = default
+
+[service_catalog]
+auth_url = {{ ironic.service_catalog.auth_url }}
+auth_type = password
+tenant_name = {{ ironic.service_catalog.project_name }}
+username = {{ ironic.service_catalog.username }}
+password = {{ ironic.service_catalog.password }}
+{% endif %}
#-------------------------------------------------------------------------------
# XCI Flavor Configuration
#-------------------------------------------------------------------------------
-# This is the configuration for xci-aio.
-#
# You are free to modify parts of the configuration to fit your environment.
# But before doing that, please check the other flavors to see if one of
# them can be used instead, saving you some time.
# Configure VM Nodes
#-------------------------------------------------------------------------------
export TEST_VM_NUM_NODES=1
-export TEST_VM_NODE_NAMES=xciaio
+export TEST_VM_NODE_NAMES=xci
export VM_DOMAIN_TYPE=kvm
export VM_CPU=8
export VM_DISK=80
-export VM_MEMORY_SIZE=12288
+export VM_MEMORY_SIZE=8192
export VM_DISK_CACHE=unsafe
-
-#-------------------------------------------------------------------------------
-# Ansible Files for the Flavor
-#-------------------------------------------------------------------------------
-XCI_ANSIBLE_PLAYBOOK=$OPNFV_RELENG_PATH/prototypes/xci/file/configure-xci-aio.yml
-XCI_ANSIBLE_INVENTORY=$OPNFV_RELENG_PATH/prototypes/xci/file/xci-aio-inventory
-XCI_ANSIBLE_VARS=$OPNFV_RELENG_PATH/prototypes/xci/file/xci-aio-vars.yml
--- /dev/null
+#-------------------------------------------------------------------------------
+# Do not change these settings if you are not developing for XCI Sandbox!
+#-------------------------------------------------------------------------------
+export OPNFV_RELENG_GIT_URL=https://gerrit.opnfv.org/gerrit/releng.git
+export OPENSTACK_BIFROST_GIT_URL=https://git.openstack.org/openstack/bifrost
+export OPENSTACK_OSA_GIT_URL=https://git.openstack.org/openstack/openstack-ansible
+export OPENSTACK_OSA_ETC_PATH=/etc/openstack_deploy
+export CLEAN_DIB_IMAGES=false
+export XCI_IP=192.168.122.2
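+# assumption: this is the XCI VM's address on libvirt's default network (192.168.122.0/24)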
+export XCI_ANSIBLE_PLAYBOOKS_PATH=$OPNFV_RELENG_PATH/prototypes/xci/file/$XCI_FLAVOR/playbooks
+export XCI_ANSIBLE_VARS_PATH=$OPNFV_RELENG_PATH/prototypes/xci/file/$XCI_FLAVOR/var
+export JOB_NAME=${JOB_NAME:-false}
#-------------------------------------------------------------------------------
# XCI Flavor Configuration
#-------------------------------------------------------------------------------
-# This is the configuration for xci-aio.
-#
# You are free to modify parts of the configuration to fit your environment.
# But before doing that, please check the other flavors to see if one of
# them can be used instead, saving you some time.
# Configure VM Nodes
#-------------------------------------------------------------------------------
export TEST_VM_NUM_NODES=6
-export TEST_VM_NODE_NAMES="xcimaster controller00 controller01 controller02 compute00 compute01"
+export TEST_VM_NODE_NAMES="xci controller00 controller01 controller02 compute00 compute01"
export VM_DOMAIN_TYPE=kvm
export VM_CPU=8
-export VM_DISK=100
+export VM_DISK=80
export VM_MEMORY_SIZE=16384
export VM_DISK_CACHE=unsafe
-
-#-------------------------------------------------------------------------------
-# Ansible Files for the Flavor
-#-------------------------------------------------------------------------------
-XCI_ANSIBLE_PLAYBOOK=$OPNFV_RELENG_PATH/prototypes/xci/file/configure-xci-ha.yml
-XCI_ANSIBLE_INVENTORY=$OPNFV_RELENG_PATH/prototypes/xci/file/xci-ha-inventory
-XCI_ANSIBLE_VARS=$OPNFV_RELENG_PATH/prototypes/xci/file/xci-ha-vars.yml
#-------------------------------------------------------------------------------
# XCI Flavor Configuration
#-------------------------------------------------------------------------------
-# This is the configuration for xci-aio.
-#
# You are free to modify parts of the configuration to fit your environment.
# But before doing that, please check the other flavors to see if one of
# them can be used instead, saving you some time.
# Configure VM Nodes
#-------------------------------------------------------------------------------
export TEST_VM_NUM_NODES=3
-export TEST_VM_NODE_NAMES="xcimaster controller00 compute00"
+export TEST_VM_NODE_NAMES="xci controller00 compute00"
export VM_DOMAIN_TYPE=kvm
export VM_CPU=8
-export VM_DISK=100
-export VM_MEMORY_SIZE=12288
+export VM_DISK=80
+export VM_MEMORY_SIZE=8192
export VM_DISK_CACHE=unsafe
-
-#-------------------------------------------------------------------------------
-# Ansible Files for the Flavor
-#-------------------------------------------------------------------------------
-XCI_ANSIBLE_PLAYBOOK=$OPNFV_RELENG_PATH/prototypes/xci/file/configure-xci-mini.yml
-XCI_ANSIBLE_INVENTORY=$OPNFV_RELENG_PATH/prototypes/xci/file/xci-mini-inventory
-XCI_ANSIBLE_VARS=$OPNFV_RELENG_PATH/prototypes/xci/file/xci-mini-vars.yml
#-------------------------------------------------------------------------------
# XCI Flavor Configuration
#-------------------------------------------------------------------------------
-# This is the configuration for xci-aio.
-#
# You are free to modify parts of the configuration to fit your environment.
# But before doing that, please check the other flavors to see if one of
# them can be used instead, saving you some time.
# Configure VM Nodes
#-------------------------------------------------------------------------------
export TEST_VM_NUM_NODES=4
-export TEST_VM_NODE_NAMES="xcimaster controller00 compute00 compute01"
+export TEST_VM_NODE_NAMES="xci controller00 compute00 compute01"
export VM_DOMAIN_TYPE=kvm
export VM_CPU=8
-export VM_DISK=100
-export VM_MEMORY_SIZE=12288
+export VM_DISK=80
+export VM_MEMORY_SIZE=8192
export VM_DISK_CACHE=unsafe
-
-#-------------------------------------------------------------------------------
-# Ansible Files for the Flavor
-#-------------------------------------------------------------------------------
-XCI_ANSIBLE_PLAYBOOK=$OPNFV_RELENG_PATH/prototypes/xci/file/configure-xci-mini.yml
-XCI_ANSIBLE_INVENTORY=$OPNFV_RELENG_PATH/prototypes/xci/file/xci-mini-inventory
-XCI_ANSIBLE_VARS=$OPNFV_RELENG_PATH/prototypes/xci/file/xci-mini-vars.yml
#-------------------------------------------------------------------------------
# use releng from master until the development work with the sandbox is complete
export OPNFV_RELENG_VERSION="master"
-# HEAD of "stable/ocata" as of 24.03.2017 - verified by OPNFV CI
-export OPENSTACK_BIFROST_VERSION="a87f7ce6c8725b3bbffec7b2efa1e466796848a9"
-# HEAD of "stable/ocata" as of 24.03.2017 - verified by OPNFV CI
-export OPENSTACK_OSA_VERSION="4713cf45e11b4ebca9fbed25d1389854602213d8"
+# HEAD of "master" as of 27.03.2017 - verified by OPNFV CI
+export OPENSTACK_BIFROST_VERSION="7417ff36e4b5fc4e2a6ee7d9dddb7287be20c37d"
+# HEAD of "master" as of 27.03.2017 - verified by OPNFV CI
+export OPENSTACK_OSA_VERSION="baba7b317a5898cd73b4a11c4ce364c7e2d3d77f"
-#-------------------------------------------------------------------------------
-# Set Paths to where git repositories of XCI Components will be cloned
-#-------------------------------------------------------------------------------
-# OPNFV XCI Sandbox is not verified to be used as non-root user as of yet so
-# changing these paths might break things.
-#-------------------------------------------------------------------------------
-export OPNFV_RELENG_PATH=/opt/releng
-export OPENSTACK_BIFROST_PATH=/opt/bifrost
-export OPENSTACK_OSA_PATH=/opt/openstack-ansible
-
#-------------------------------------------------------------------------------
# Set Deployment Flavor
#-------------------------------------------------------------------------------
# OPNFV XCI currently supports 4 different types of flavors:
-# - all in one (aio) - xci-aio.sh: 1 VM which acts as controller and compute node
-# - mini opnfv: 3 VMs, 1 xcimaster, 1 controller, and 1 compute nodes
-# - noha: 4 VMs, 1 xcimaster, 1 controller, and 2 compute nodes
-# - ha: 6 VMs, 1 xcimaster, 3 controllers, and 2 compute nodes
+# - all in one (aio): 1 xci VM which acts as controller and compute node
+# - mini: 3 VMs, 1 xci VM, 1 controller, and 1 compute node
+# - noha: 4 VMs, 1 xci VM, 1 controller, and 2 compute nodes
+# - ha: 6 VMs, 1 xci VM, 3 controllers, and 2 compute nodes
#
# Apart from having different numbers of nodes, the CPU, RAM, and disk allocations
# also differ from each other. Please take a look at the env-vars files for
# each of these flavors.
#
# Examples:
-# export XCI_FLAVOR="xci-aio"
+# export XCI_FLAVOR="aio"
# or
-# export XCI_FLAVOR="xci-mini"
+# export XCI_FLAVOR="mini"
# or
-# export XCI_FLAVOR="xci-noha"
+# export XCI_FLAVOR="noha"
# or
-# export XCI_FLAVOR="xci-ha"
+# export XCI_FLAVOR="ha"
#-------------------------------------------------------------------------------
-export XCI_FLAVOR=${XCI_FLAVOR:-xci-mini}
+export XCI_FLAVOR=${XCI_FLAVOR:-aio}
+
+#-------------------------------------------------------------------------------
+# Set Paths to where git repositories of XCI Components will be cloned
+#-------------------------------------------------------------------------------
+# The OPNFV XCI Sandbox has not yet been verified to run as a non-root user, so
+# changing these paths might break things.
+#-------------------------------------------------------------------------------
+export OPNFV_RELENG_PATH=/opt/releng
+export OPENSTACK_BIFROST_PATH=/opt/bifrost
+export OPENSTACK_OSA_PATH=/opt/openstack-ansible
#-------------------------------------------------------------------------------
# Configure some other stuff
# ANSIBLE_VERBOSITY="-v"
# or
# ANSIBLE_VERBOSITY="-vvvv"
-export ANSIBLE_VERBOSITY=${ANSIBLE_VERBOSITY:-""}
+export ANSIBLE_VERBOSITY=${ANSIBLE_VERBOSITY-""}
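+# the hyphen form (no colon) substitutes the default only when the variable is
+# unset; an explicitly exported empty ANSIBLE_VERBOSITY is left untouched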
set -o pipefail
set -o xtrace
+# This script must run as root
+if [[ $(whoami) != "root" ]]; then
+ echo "Error: This script must be run as root!"
+ exit 1
+fi
+
# find out where we are
XCI_PATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
source $XCI_PATH/config/user-vars
# source flavor configuration
-source $XCI_PATH/flavors/$XCI_FLAVOR.sh
+source "$XCI_PATH/flavors/${XCI_FLAVOR}-vars"
+
+# source xci configuration
+source $XCI_PATH/config/env-vars
+
+# log info to console
+echo "Info: Starting XCI Deployment"
+echo "Info: Deployment parameters"
+echo "-------------------------------------------------------------------------"
+echo "xci flavor: $XCI_FLAVOR"
+echo "opnfv/releng version: $OPNFV_RELENG_VERSION"
+echo "openstack/bifrost version: $OPENSTACK_BIFROST_VERSION"
+echo "openstack/openstack-ansible version: $OPENSTACK_OSA_VERSION"
+echo "-------------------------------------------------------------------------"
+
+#-------------------------------------------------------------------------------
+# Cleanup the leftovers from the previous deployment
+#-------------------------------------------------------------------------------
+echo "Info: Cleaning up the previous deployment"
+$XCI_PATH/../bifrost/scripts/destroy-env.sh > /dev/null 2>&1
+/bin/rm -rf /opt/releng /opt/bifrost /opt/openstack-ansible /opt/stack
+
+#-------------------------------------------------------------------------------
+# Clone the repositories and checkout the versions
+#-------------------------------------------------------------------------------
+echo "Info: Cloning repositories and checking out versions"
+git clone --quiet $OPNFV_RELENG_GIT_URL $OPNFV_RELENG_PATH && \
+ cd $OPNFV_RELENG_PATH
+echo "Info: Cloned opnfv/releng. HEAD currently points at"
+echo " $(git show --oneline -s --pretty=format:'%h - %s (%cr) <%an>')"
+git clone --quiet $OPENSTACK_BIFROST_GIT_URL $OPENSTACK_BIFROST_PATH && \
+ cd $OPENSTACK_BIFROST_PATH
+echo "Info: Cloned openstack/bifrost. HEAD currently points at"
+echo " $(git show --oneline -s --pretty=format:'%h - %s (%cr) <%an>')"
+
+#-------------------------------------------------------------------------------
+# Combine opnfv and upstream scripts/playbooks
+#-------------------------------------------------------------------------------
+echo "Info: Combining opnfv/releng and opestack/bifrost scripts/playbooks"
+/bin/cp -rf $OPNFV_RELENG_PATH/prototypes/bifrost/* $OPENSTACK_BIFROST_PATH/
+
+#-------------------------------------------------------------------------------
+# Start provisioning VM nodes
+#-------------------------------------------------------------------------------
+echo "Info: Starting provisining VM nodes using openstack/bifrost"
+echo " This might take between 10 to 20 minutes depending on the flavor and the host"
+echo "-------------------------------------------------------------------------"
+cd $OPENSTACK_BIFROST_PATH
+STARTTIME=$(date +%s)
+./scripts/bifrost-provision.sh
+ENDTIME=$(date +%s)
+echo "-----------------------------------------------------------------------"
+echo "Info: VM nodes are provisioned!"
+echo "Info: It took $(($ENDTIME - $STARTTIME)) seconds to provising the VM nodes"
<section class="tiles">\r
<article class="style3">\r
<span class="image">\r
- <img src="img/functest.jpg" alt="" />\r
+ <img src="img/projectIcon_functest_250x250.png" alt="" />\r
</span>\r
<a href="functest-colorado.html">\r
<h2>Functest</h2>\r
</article>\r
<article class="style2">\r
<span class="image">\r
- <img src="img/yardstick.jpg" alt="" />\r
+ <img src="img/projectIcon_yardstick_250x250.png" alt="" />\r
</span>\r
<a href="colorado/yardstick/status-apex.html">\r
<h2>Yardstick</h2>\r
<section class="tiles">
<article class="style3">
<span class="image">
- <img src="img/functest.jpg" alt="" />
+ <img src="img/projectIcon_functest_250x250.png" alt="" />
</span>
<a href="functest-danube.html">
<h2>Functest</h2>
</article>
<article class="style2">
<span class="image">
- <img src="img/yardstick.jpg" alt="" />
+ <img src="img/projectIcon_yardstick_250x250.png" alt="" />
</span>
<a href="danube/yardstick/status-apex.html">
<h2>Yardstick</h2>
</article>
<article class="style4">
<span class="image">
- <img src="img/storperf.jpg" alt="" />
+ <img src="img/projectIcon_storperf_250x250.png" alt="" />
</span>
<a href="danube/storperf/status-apex.html">
<h2>Storperf</h2>
</div>
</a>
</article>
+ <article class="style5">
+ <span class="image">
+ <img src="img/projectIcon_vsperf_250x250.png" alt="" />
+ </span>
+ <a href="danube/vsperf/status-apex.html">
+ <h2>Vsperf</h2>
+ <div class="content">
+ <p>Virtual switch testing</p>
+ </div>
+ </a>
+ </article>
</section>
</div>
</div>
<section class="tiles">
<article class="style3">
<span class="image">
- <img src="img/functest.jpg" alt="" />
+ <img src="img/projectIcon_functest_250x250.png" alt="" />
</span>
<a href="functest-master.html">
<h2>Functest</h2>
</article>
<article class="style2">
<span class="image">
- <img src="img/yardstick.jpg" alt="" />
+ <img src="img/projectIcon_yardstick_250x250.png" alt="" />
</span>
<a href="master/yardstick/status-apex.html">
<h2>Yardstick</h2>
</article>
<article class="style4">
<span class="image">
- <img src="img/storperf.jpg" alt="" />
+ <img src="img/projectIcon_storperf_250x250.png" alt="" />
</span>
<a href="master/storperf/status-apex.html">
<h2>Storperf</h2>
</div>
</a>
</article>
+ <article class="style5">
+ <span class="image">
+ <img src="img/projectIcon_vsperf_250x250.png" alt="" />
+ </span>
+ <a href="master/vsperf/status-apex.html">
+ <h2>Vsperf</h2>
+ <div class="content">
+ <p>Virtual switch testing</p>
+ </div>
+ </a>
+ </article>
+ <article class="style1">
+ <span class="image">
+ <img src="img/projectIcon_qtip_250x250.png" alt="" />
+ </span>
+ <a href="master/qtip/status-apex.html">
+ <h2>Qtip</h2>
+ <div class="content">
+ <p>Benchmark as a service</p>
+ </div>
+ </a>
+ </article>
+ <article class="style6">
+ <span class="image">
+ <img src="img/projectIcon_bottlenecks_250x250.png" alt="" />
+ </span>
+ <a href="master/bottlenecks/status-apex.html">
+ <h2>Bottlenecks</h2>
+ <div class="content">
+ <p>Bottleneck finder</p>
+ </div>
+ </a>
+ </article>
+
</section>
</div>
</div>