Merge "Add and insert new project icons"
author    Morgan Richomme <morgan.richomme@orange.com>
          Tue, 28 Mar 2017 07:47:07 +0000 (07:47 +0000)
committer Gerrit Code Review <gerrit@opnfv.org>
          Tue, 28 Mar 2017 07:47:07 +0000 (07:47 +0000)
46 files changed:
docs/jenkins-job-builder/index.rst
jjb/apex/apex.yml
jjb/compass4nfv/compass-project-jobs.yml
jjb/compass4nfv/compass-verify-jobs.yml
jjb/daisy4nfv/daisy-daily-jobs.yml
jjb/daisy4nfv/daisy-project-jobs.yml
jjb/daisy4nfv/daisy4nfv-merge-jobs.yml
jjb/daisy4nfv/daisy4nfv-verify-jobs.yml
jjb/dovetail/dovetail-run.sh
jjb/fuel/fuel-daily-jobs.yml
jjb/global/releng-macros.yml
jjb/joid/joid-daily-jobs.yml
jjb/qtip/helpers/cleanup-deploy.sh
jjb/qtip/helpers/validate-deploy.sh
jjb/qtip/qtip-validate-jobs.yml
jjb/xci/bifrost-verify-jobs.yml
jjb/xci/bifrost-verify.sh
jjb/xci/xci-daily-jobs.yml
jjb/xci/xci-provision.sh
jjb/yardstick/yardstick-cleanup.sh
modules/opnfv/utils/ssh_utils.py
prototypes/bifrost/scripts/bifrost-provision.sh [moved from prototypes/bifrost/scripts/test-bifrost-deployment.sh with 67% similarity]
prototypes/bifrost/scripts/osa-bifrost-deployment.sh [deleted file]
prototypes/xci/config/env-vars [new file with mode: 0755]
prototypes/xci/config/pinned-versions [new file with mode: 0755]
prototypes/xci/config/user-vars [new file with mode: 0755]
prototypes/xci/file/aio/playbooks/configure-xcihost.yml [new file with mode: 0644]
prototypes/xci/file/aio/playbooks/inventory [new file with mode: 0644]
prototypes/xci/file/aio/var/ubuntu.yml [new file with mode: 0644]
prototypes/xci/file/ha/configure-targethosts.yml [new file with mode: 0644]
prototypes/xci/file/ha/playbooks/configure-xcihost.yml [new file with mode: 0644]
prototypes/xci/file/ha/playbooks/inventory [new file with mode: 0644]
prototypes/xci/file/ha/var/ubuntu.yml [new file with mode: 0644]
prototypes/xci/file/mini/configure-targethosts.yml [new file with mode: 0644]
prototypes/xci/file/mini/playbooks/configure-xcihost.yml [new file with mode: 0644]
prototypes/xci/file/mini/playbooks/inventory [new file with mode: 0644]
prototypes/xci/file/mini/var/ubuntu.yml [new file with mode: 0644]
prototypes/xci/file/noha/configure-targethosts.yml [new file with mode: 0644]
prototypes/xci/file/noha/playbooks/configure-xcihost.yml [new file with mode: 0644]
prototypes/xci/file/noha/playbooks/inventory [new file with mode: 0644]
prototypes/xci/file/noha/var/ubuntu.yml [new file with mode: 0644]
prototypes/xci/flavors/aio [new file with mode: 0644]
prototypes/xci/flavors/ha [new file with mode: 0644]
prototypes/xci/flavors/mini [new file with mode: 0644]
prototypes/xci/flavors/noha [new file with mode: 0644]
prototypes/xci/xci-deploy.sh [new file with mode: 0755]

diff --git a/docs/jenkins-job-builder/index.rst b/docs/jenkins-job-builder/index.rst
index b85b132..4d23ade 100644 (file)
@@ -1,9 +1,9 @@
-***************************
+===========================
 Release Engineering Project
-***************************
+===========================
 
 .. toctree::
    :numbered:
    :maxdepth: 2
 
-   opnfv-jjb-usage.rst
+   opnfv-jjb-usage
diff --git a/jjb/apex/apex.yml b/jjb/apex/apex.yml
index 93eaa6c..bb6e234 100644 (file)
@@ -36,6 +36,7 @@
          - 'os-nosdn-nofeature-ha'
          - 'os-nosdn-nofeature-ha-ipv6'
          - 'os-nosdn-ovs-noha'
+         - 'os-nosdn-ovs-ha'
          - 'os-nosdn-fdio-noha'
          - 'os-nosdn-fdio-ha'
          - 'os-nosdn-kvm-ha'
          - 'os-odl_l2-fdio-ha'
          - 'os-odl_l2-netvirt_gbp_fdio-noha'
          - 'os-odl_l2-sfc-noha'
+         - 'os-odl_l3-nofeature-noha'
          - 'os-odl_l3-nofeature-ha'
+         - 'os-odl_l3-ovs-noha'
+         - 'os-odl_l3-ovs-ha'
          - 'os-odl-bgpvpn-ha'
          - 'os-odl-gluon-noha'
          - 'os-odl_l3-fdio-noha'
@@ -52,7 +56,6 @@
          - 'os-odl_l3-fdio_dvr-noha'
          - 'os-odl_l3-fdio_dvr-ha'
          - 'os-odl_l3-csit-noha'
-         - 'os-odl_l3-nofeature-noha'
          - 'os-onos-nofeature-ha'
          - 'gate'
 
             block: true
             same-node: true
         - trigger-builds:
-          - project: 'cperf-apex-intel-pod2-daily-{stream}'
+          - project: 'cperf-apex-intel-pod2-daily-master'
             predefined-parameters:
               DEPLOY_SCENARIO=os-odl_l3-nofeature-noha
             block: true
                 build-step-failure-threshold: 'never'
                 failure-threshold: 'never'
                 unstable-threshold: 'FAILURE'
+        - trigger-builds:
+          - project: 'apex-deploy-baremetal-os-nosdn-ovs-ha-{stream}'
+            predefined-parameters: |
+              BUILD_DIRECTORY=apex-build-{stream}/.build
+              OPNFV_CLEAN=yes
+            git-revision: true
+            same-node: true
+            block-thresholds:
+                build-step-failure-threshold: 'never'
+            block: true
+        - trigger-builds:
+          - project: 'functest-apex-{daily-slave}-daily-{stream}'
+            predefined-parameters:
+              DEPLOY_SCENARIO=os-nosdn-ovs-ha
+            block: true
+            same-node: true
+            block-thresholds:
+                build-step-failure-threshold: 'never'
+                failure-threshold: 'never'
+                unstable-threshold: 'FAILURE'
+        - trigger-builds:
+          - project: 'yardstick-apex-{slave}-daily-{stream}'
+            predefined-parameters:
+              DEPLOY_SCENARIO=os-nosdn-ovs-ha
+            block: true
+            same-node: true
+            block-thresholds:
+                build-step-failure-threshold: 'never'
+                failure-threshold: 'never'
+                unstable-threshold: 'FAILURE'
+        - trigger-builds:
+          - project: 'apex-deploy-baremetal-os-odl_l3-ovs-ha-{stream}'
+            predefined-parameters: |
+              BUILD_DIRECTORY=apex-build-{stream}/.build
+              OPNFV_CLEAN=yes
+            git-revision: true
+            same-node: true
+            block-thresholds:
+                build-step-failure-threshold: 'never'
+            block: true
+        - trigger-builds:
+          - project: 'functest-apex-{daily-slave}-daily-{stream}'
+            predefined-parameters:
+              DEPLOY_SCENARIO=os-odl_l3-ovs-ha
+            block: true
+            same-node: true
+            block-thresholds:
+                build-step-failure-threshold: 'never'
+                failure-threshold: 'never'
+                unstable-threshold: 'FAILURE'
+        - trigger-builds:
+          - project: 'yardstick-apex-{slave}-daily-{stream}'
+            predefined-parameters:
+              DEPLOY_SCENARIO=os-odl_l3-ovs-ha
+            block: true
+            same-node: true
+            block-thresholds:
+                build-step-failure-threshold: 'never'
+                failure-threshold: 'never'
+                unstable-threshold: 'FAILURE'
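The two scenario chains added above (deploy, then functest, then yardstick for os-nosdn-ovs-ha and os-odl_l3-ovs-ha) can be sanity-checked before review: jenkins-job-builder's test mode renders the expanded job XML without contacting Jenkins. A minimal sketch, assuming jenkins-job-builder is installed and the command runs from the repository root (the whole jjb/ tree is passed so shared macros resolve):

    # render the job definitions to /tmp/jjb-out for inspection
    jenkins-jobs test -o /tmp/jjb-out jjb/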
 
 # CSIT promote
 - job-template:
diff --git a/jjb/compass4nfv/compass-project-jobs.yml b/jjb/compass4nfv/compass-project-jobs.yml
index 9b13e69..f962518 100644 (file)
         - master:
             branch: '{stream}'
             gs-pathname: ''
+            ppa-pathname: '/{stream}'
             disabled: false
         - danube:
             branch: 'stable/{stream}'
             gs-pathname: '/{stream}'
+            ppa-pathname: '/{stream}'
             disabled: false
 
     jobs:
@@ -47,6 +49,7 @@
         - compass-project-parameter:
             installer: '{installer}'
             gs-pathname: '{gs-pathname}'
+            ppa-pathname: '{ppa-pathname}'
         - 'opnfv-build-ubuntu-defaults'
         - '{installer}-defaults'
 
@@ -90,6 +93,7 @@
         - compass-project-parameter:
             installer: '{installer}'
             gs-pathname: '{gs-pathname}'
+            ppa-pathname: '{ppa-pathname}'
         - '{node}-defaults'
         - '{installer}-defaults'
     scm:
             description: "URL to Google Storage."
         - string:
             name: PPA_REPO
-            default: "http://205.177.226.237:9999{gs-pathname}"
+            default: "http://205.177.226.237:9999{ppa-pathname}"
         - string:
             name: PPA_CACHE
             default: "$WORKSPACE/work/repo/"
diff --git a/jjb/compass4nfv/compass-verify-jobs.yml b/jjb/compass4nfv/compass-verify-jobs.yml
index e625c68..14279e6 100644 (file)
         - master:
             branch: '{stream}'
             gs-pathname: ''
+            ppa-pathname: '/{stream}'
             disabled: false
         - danube:
             branch: 'stable/{stream}'
             gs-pathname: '/{stream}'
+            ppa-pathname: '/{stream}'
             disabled: false
 
     distro:
         - 'compass-verify-defaults':
             installer: '{installer}'
             gs-pathname: '{gs-pathname}'
+            ppa-pathname: '{ppa-pathname}'
         - string:
             name: DEPLOY_SCENARIO
             default: 'os-nosdn-nofeature-ha'
         - 'compass-verify-defaults':
             installer: '{installer}'
             gs-pathname: '{gs-pathname}'
+            ppa-pathname: '{ppa-pathname}'
         - string:
             name: DEPLOY_SCENARIO
             default: 'k8-nosdn-nofeature-ha'
             description: "URL to Google Storage."
         - string:
             name: PPA_REPO
-            default: "http://205.177.226.237:9999{gs-pathname}"
+            default: "http://205.177.226.237:9999{ppa-pathname}"
         - string:
             name: PPA_CACHE
             default: "$WORKSPACE/work/repo/"
diff --git a/jjb/daisy4nfv/daisy-daily-jobs.yml b/jjb/daisy4nfv/daisy-daily-jobs.yml
index ffae70f..c5d8e7e 100644 (file)
@@ -70,7 +70,8 @@
         - build-blocker:
             use-build-blocker: true
             blocking-jobs:
-                - 'daisy.*-deploy-({pod})?-daily-.*'
+                - 'daisy-daily-.*'
+                - 'daisy4nfv-(merge|verify)-.*'
             block-level: 'NODE'
 
     wrappers:
diff --git a/jjb/daisy4nfv/daisy-project-jobs.yml b/jjb/daisy4nfv/daisy-project-jobs.yml
index 9a57e17..52769ca 100644 (file)
         - build-blocker:
             use-build-blocker: true
             blocking-jobs:
-                - '{installer}-.*deploy-.*'
+                - '{installer}-daily-.*'
+                - 'daisy4nfv-(merge|verify)-.*'
             block-level: 'NODE'
 
     scm:
diff --git a/jjb/daisy4nfv/daisy4nfv-merge-jobs.yml b/jjb/daisy4nfv/daisy4nfv-merge-jobs.yml
index 11531f4..95e72e5 100644 (file)
@@ -21,7 +21,7 @@
         - danube:
             branch: 'stable/{stream}'
             gs-pathname: '/{stream}'
-            disabled: false
+            disabled: true
 #####################################
 # patch merge phases
 #####################################
         - build-blocker:
             use-build-blocker: true
             blocking-jobs:
-                - '{alias}-merge-deploy-.*'
+                - '{alias}-(merge|verify)-.*'
+                - '{project}-daily-.*'
             block-level: 'NODE'
 
     scm:
diff --git a/jjb/daisy4nfv/daisy4nfv-verify-jobs.yml b/jjb/daisy4nfv/daisy4nfv-verify-jobs.yml
index ee78ab5..9f44d99 100644 (file)
         - build-blocker:
             use-build-blocker: true
             blocking-jobs:
-                - '{alias}-verify-deploy-.*'
+                - '{alias}-(merge|verify)-.*'
+                - '{installer}-daily-.*'
             block-level: 'NODE'
 
     scm:
diff --git a/jjb/dovetail/dovetail-run.sh b/jjb/dovetail/dovetail-run.sh
index 0a2f156..f9a3df6 100755 (executable)
@@ -6,9 +6,9 @@
 set -e
 [[ $CI_DEBUG == true ]] && redirect="/dev/stdout" || redirect="/dev/null"
 
-# labconfig is used only for joid
-labconfig=""
 sshkey=""
+# The path of openrc.sh is defined in fetch_os_creds.sh
+OPENRC=$WORKSPACE/opnfv-openrc.sh
 if [[ ${INSTALLER_TYPE} == 'apex' ]]; then
     instack_mac=$(sudo virsh domiflist undercloud | grep default | \
                   grep -Eo "[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+")
@@ -22,7 +22,7 @@ if [[ ${INSTALLER_TYPE} == 'apex' ]]; then
 elif [[ ${INSTALLER_TYPE} == 'joid' ]]; then
     # If production lab then creds may be retrieved dynamically
     # creds are on the jumphost, always in the same folder
-    labconfig="-v $LAB_CONFIG/admin-openrc:/home/opnfv/functest/conf/openstack.creds"
+    sudo cp $LAB_CONFIG/admin-openrc $OPENRC
     # If dev lab, credentials may not be the default ones, just provide a path to put them into docker
     # replace the default one by the customized one provided by jenkins config
 fi
@@ -32,21 +32,32 @@ if ! sudo iptables -C FORWARD -j RETURN 2> ${redirect} || ! sudo iptables -L FOR
     sudo iptables -I FORWARD -j RETURN
 fi
 
+if [[ ${INSTALLER_TYPE} != 'joid' ]]; then
+    releng_repo=${WORKSPACE}/releng
+    [ -d ${releng_repo} ] && sudo rm -rf ${releng_repo}
+    git clone https://gerrit.opnfv.org/gerrit/releng ${releng_repo} >/dev/null
+    ${releng_repo}/utils/fetch_os_creds.sh -d ${OPENRC} -i ${INSTALLER_TYPE} -a ${INSTALLER_IP} >${redirect}
+fi
+
+if [[ -f $OPENRC ]]; then
+    echo "INFO: openstack credentials path is $OPENRC"
+    cat $OPENRC
+else
+    echo "ERROR: file $OPENRC does not exist."
+    exit 1
+fi
+
 opts="--privileged=true -id"
-envs="-e CI_DEBUG=${CI_DEBUG} \
-      -e INSTALLER_TYPE=${INSTALLER_TYPE} \
-      -e INSTALLER_IP=${INSTALLER_IP} \
-      -e DEPLOY_SCENARIO=${DEPLOY_SCENARIO} \
-      -e DEPLOY_TYPE=${DEPLOY_TYPE}"
 results_envs="-v /var/run/docker.sock:/var/run/docker.sock \
               -v /home/opnfv/dovetail/results:/home/opnfv/dovetail/results"
+openrc_volume="-v ${OPENRC}:${OPENRC}"
 
 # Pull the image with correct tag
 echo "Dovetail: Pulling image opnfv/dovetail:${DOCKER_TAG}"
 docker pull opnfv/dovetail:$DOCKER_TAG >$redirect
 
-cmd="sudo docker run ${opts} ${envs} ${results_envs} ${labconfig} ${sshkey} \
-     opnfv/dovetail:${DOCKER_TAG} /bin/bash"
+cmd="docker run ${opts} ${results_envs} ${openrc_volume} \
+     ${sshkey} opnfv/dovetail:${DOCKER_TAG} /bin/bash"
 echo "Dovetail: running docker run command: ${cmd}"
 ${cmd} >${redirect}
 sleep 5
@@ -67,7 +78,7 @@ if [ $(docker ps | grep "opnfv/dovetail:${DOCKER_TAG}" | wc -l) == 0 ]; then
 fi
 
 list_cmd="dovetail list ${TESTSUITE}"
-run_cmd="dovetail run --testsuite ${TESTSUITE} -d true"
+run_cmd="dovetail run --openrc ${OPENRC} --testsuite ${TESTSUITE} -d"
 echo "Container exec command: ${list_cmd}"
 docker exec $container_id ${list_cmd}
 echo "Container exec command: ${run_cmd}"
@@ -79,3 +90,4 @@ sudo cp -r ${DOVETAIL_REPO_DIR}/results ./
 sudo chown -R jenkins:jenkins ${WORKSPACE}/results
 
 echo "Dovetail: done!"
+
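With the labconfig volume gone, the hunk above makes all non-joid installers obtain credentials through releng's fetch_os_creds.sh, and afterwards only checks that $OPENRC exists. A slightly stricter sanity check is sketched below (not part of the patch; OS_AUTH_URL is assumed to be among the variables the file exports):

    # sketch: source the fetched credentials and fail early if they look unusable
    source $OPENRC
    if [[ -z "${OS_AUTH_URL:-}" ]]; then
        echo "ERROR: $OPENRC does not export OS_AUTH_URL"
        exit 1
    fi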
diff --git a/jjb/fuel/fuel-daily-jobs.yml b/jjb/fuel/fuel-daily-jobs.yml
index 12456dd..32abad6 100644 (file)
 - trigger:
     name: 'fuel-os-odl_l2-nofeature-ha-zte-pod2-daily-master-trigger'
     triggers:
-        - timed: '0 18 * * *'
+        - timed: ''
 - trigger:
     name: 'fuel-os-odl_l3-nofeature-ha-zte-pod2-daily-master-trigger'
     triggers:
diff --git a/jjb/global/releng-macros.yml b/jjb/global/releng-macros.yml
index 63613f8..6fdb4ca 100644 (file)
@@ -75,7 +75,6 @@
             submodule:
                 recursive: true
                 timeout: 20
-                shallow-clone: true
 - trigger:
     name: 'daily-trigger-disabled'
     triggers:
diff --git a/jjb/joid/joid-daily-jobs.yml b/jjb/joid/joid-daily-jobs.yml
index ec6c868..7dc7189 100644 (file)
@@ -78,7 +78,7 @@
         - 'k8-nosdn-nofeature-noha':
             auto-trigger-name: 'joid-{scenario}-{pod}-{stream}-trigger'
         - 'k8-nosdn-lb-noha':
-            auto-trigger-name: 'daily-trigger-disabled'
+            auto-trigger-name: 'joid-{scenario}-{pod}-{stream}-trigger'
 
     jobs:
         - 'joid-{scenario}-{pod}-daily-{stream}'
diff --git a/jjb/qtip/helpers/cleanup-deploy.sh b/jjb/qtip/helpers/cleanup-deploy.sh
index 95babb3..9cb19a5 100644 (file)
@@ -7,20 +7,15 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 # Remove previous running containers if exist
-if [[ ! -z $(docker ps -a | grep opnfv/qtip) ]]; then
+if [[ ! -z $(docker ps -a | grep "opnfv/qtip:$DOCKER_TAG") ]]; then
     echo "Removing existing opnfv/qtip containers..."
     # workaround: sometimes it throws an error when stopping qtip container.
     # To make sure ci job unblocked, remove qtip container by force without stopping it.
-    docker rm -f $(docker ps -a | grep opnfv/qtip | awk '{print $1}')
+    docker rm -f $(docker ps -a | grep "opnfv/qtip:$DOCKER_TAG" | awk '{print $1}')
 fi
 
 # Remove existing images if exist
-if [[ ! -z $(docker images | grep opnfv/qtip) ]]; then
-    echo "Docker images to remove:"
-    docker images | head -1 && docker images | grep opnfv/qtip
-    image_tags=($(docker images | grep opnfv/qtip | awk '{print $2}'))
-    for tag in "${image_tags[@]}"; do
-        echo "Removing docker image opnfv/qtip:$tag..."
-        docker rmi opnfv/qtip:$tag
-    done
+if [[ $(docker images opnfv/qtip:${DOCKER_TAG} | wc -l) -gt 1 ]]; then
+    echo "Removing docker image opnfv/qtip:$DOCKER_TAG..."
+    docker rmi opnfv/qtip:$DOCKER_TAG
 fi
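The rewritten guard above works because "docker images REPO:TAG" always prints a header line, so more than one line of output means the tagged image exists locally. An equivalent test (a sketch, not part of the patch) uses -q, which prints bare image IDs and avoids counting lines:

    # sketch: non-empty output from -q means the tagged image exists
    if [[ -n $(docker images -q opnfv/qtip:$DOCKER_TAG) ]]; then
        echo "Removing docker image opnfv/qtip:$DOCKER_TAG..."
        docker rmi opnfv/qtip:$DOCKER_TAG
    fi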
diff --git a/jjb/qtip/helpers/validate-deploy.sh b/jjb/qtip/helpers/validate-deploy.sh
index a73e33c..9f3dbe4 100644 (file)
@@ -10,7 +10,10 @@ set -e
 
 envs="INSTALLER_TYPE=${INSTALLER_TYPE} -e INSTALLER_IP=${INSTALLER_IP}
 -e NODE_NAME=${NODE_NAME} -e CI_DEBUG=${CI_DEBUG}"
+ramfs=/tmp/qtip/ramfs
+cfg_dir=$(dirname $ramfs)
 dir_imgstore="${HOME}/imgstore"
+ramfs_volume="$ramfs:/mnt/ramfs"
 
 echo "--------------------------------------------------------"
 echo "POD: $NODE_NAME"
@@ -21,7 +24,24 @@ echo "--------------------------------------------------------"
 echo "Qtip: Pulling docker image: opnfv/qtip:${DOCKER_TAG}"
 docker pull opnfv/qtip:$DOCKER_TAG
 
-cmd=" docker run -id -e $envs opnfv/qtip:${DOCKER_TAG} /bin/bash"
+# use ramfs to fix docker socket connection issue with overlay mode in centos
+if [ ! -d $ramfs ]; then
+    mkdir -p $ramfs
+fi
+
+if [ ! -z "$(df $ramfs | tail -n -1 | grep $ramfs)" ]; then
+    sudo mount -t tmpfs -o size=32M tmpfs $ramfs
+fi
+
+# enable control path in docker
+cat <<EOF > ${cfg_dir}/ansible.cfg
+[defaults]
+callback_whitelist = profile_tasks
+[ssh_connection]
+control_path=/mnt/ramfs/ansible-ssh-%%h-%%p-%%r
+EOF
+
+cmd=" docker run -id -e $envs -v ${ramfs_volume} opnfv/qtip:${DOCKER_TAG} /bin/bash"
 echo "Qtip: Running docker command: ${cmd}"
 ${cmd}
 
@@ -32,6 +52,7 @@ if [ $(docker ps | grep 'opnfv/qtip' | wc -l) == 0 ]; then
 else
     echo "The container ID is: ${container_id}"
     QTIP_REPO=/home/opnfv/repos/qtip
+    docker cp ${cfg_dir}/ansible.cfg ${container_id}:/home/opnfv/.ansible.cfg
 # TODO(zhihui_wu): use qtip cli to execute benchmark test in the future
     docker exec -t ${container_id} bash -c "cd ${QTIP_REPO}/qtip/runner/ &&
     python runner.py -d /home/opnfv/qtip/results/ -b all"
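The tmpfs guard earlier in this file's changes infers mount state by grepping df output. A more direct alternative, assuming util-linux's mountpoint(1) is available on the slave, is sketched below:

    # sketch: mountpoint -q exits 0 only if the path is already a mount point
    if ! mountpoint -q "$ramfs"; then
        sudo mount -t tmpfs -o size=32M tmpfs "$ramfs"
    fi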
diff --git a/jjb/qtip/qtip-validate-jobs.yml b/jjb/qtip/qtip-validate-jobs.yml
index e1b71f5..8dd97de 100644 (file)
 - builder:
     name: qtip-validate-deploy
     builders:
-        - shell:
-            !include-raw: ./helpers/validate-deploy.sh
         - shell:
             !include-raw: ./helpers/cleanup-deploy.sh
+        - shell:
+            !include-raw: ./helpers/validate-deploy.sh
+
 
 #-----------
 # parameter
diff --git a/jjb/xci/bifrost-verify-jobs.yml b/jjb/xci/bifrost-verify-jobs.yml
index 6aea1c4..2fa99b2 100644 (file)
 # VM defaults
 #--------------------------------
 - defaults:
-    name: vm_defaults
+    name: verify_vm_defaults
+    test-vm-num-nodes: '3'
+    test-vm-node-names: 'xcimaster controller00 compute00'
+    vm-domain-type: 'kvm'
+    vm-cpu: '2'
     vm-disk: '30'
+    vm-memory-size: '4096'
     vm-disk-cache: 'unsafe'
-    vm-memory: '4096'
-    vm-cpu: '2'
 
 #--------------------------------
 # job templates
@@ -67,7 +70,7 @@
 
     disabled: '{obj:disabled}'
 
-    defaults: vm_defaults
+    defaults: verify_vm_defaults
 
     concurrent: false
 
             name: DIB_OS_PACKAGES
             default: '{dib-os-packages}'
         - string:
-            name: VM_DISK
-            default: '{vm-disk}'
+            name: TEST_VM_NUM_NODES
+            default: '{test-vm-num-nodes}'
         - string:
-            name: VM_DISK_CACHE
-            default: '{vm-disk-cache}'
+            name: TEST_VM_NODE_NAMES
+            default: '{test-vm-node-names}'
         - string:
-            name: VM_MEMORY
-            default: '{vm-memory}'
+            name: VM_DOMAIN_TYPE
+            default: '{vm-domain-type}'
         - string:
             name: VM_CPU
             default: '{vm-cpu}'
+        - string:
+            name: VM_DISK
+            default: '{vm-disk}'
+        - string:
+            name: VM_MEMORY_SIZE
+            default: '{vm-memory-size}'
+        - string:
+            name: VM_DISK_CACHE
+            default: '{vm-disk-cache}'
         - string:
             name: CLEAN_DIB_IMAGES
             default: 'true'
         - string:
             name: BIFROST_LOG_URL
             default: 'http://artifacts.opnfv.org/cross-community-ci/openstack/bifrost/$GERRIT_NAME/$GERRIT_CHANGE_NUMBER/$GERRIT_PATCHSET_NUMBER/$JOB_NAME'
+        - string:
+            name: ANSIBLE_VERBOSITY
+            default: '-vvvv'
 
     scm:
         - git:
diff --git a/jjb/xci/bifrost-verify.sh b/jjb/xci/bifrost-verify.sh
index 7624668..782a234 100755 (executable)
@@ -117,7 +117,7 @@ sudo -E ./scripts/destroy-env.sh
 
 # provision 3 VMs; xcimaster, controller, and compute
 cd /opt/bifrost
-sudo -E ./scripts/test-bifrost-deployment.sh
+sudo -E ./scripts/bifrost-provision.sh
 
 # list the provisioned VMs
 cd /opt/bifrost
diff --git a/jjb/xci/xci-daily-jobs.yml b/jjb/xci/xci-daily-jobs.yml
index f42f862..dbe3b65 100644 (file)
@@ -60,6 +60,7 @@
             dib-os-element: 'opensuse-minimal'
             dib-os-packages: 'vim,less,bridge-utils,iputils,rsyslog,curl'
             extra-dib-elements: 'openssh-server'
+
 #--------------------------------
 #        Phases
 #--------------------------------
         - 'xci-{scenario}-{pod}-{distro}-daily-{stream}'
         - 'xci-{phase}-{pod}-{distro}-daily-{stream}'
 
+#--------------------------------
+# VM defaults
+#--------------------------------
+- defaults:
+    name: daily_vm_defaults
+    test-vm-num-nodes: '6'
+    test-vm-node-names: 'xcimaster controller00 controller01 controller02 compute00 compute01'
+    vm-domain-type: 'kvm'
+    vm-cpu: '8'
+    vm-disk: '100'
+    vm-memory-size: '16384'
+    vm-disk-cache: 'unsafe'
+
 #--------------------------------
 # job templates
 #--------------------------------
 
     disabled: '{obj:disabled}'
 
+    defaults: daily_vm_defaults
+
     concurrent: false
 
     properties:
         - string:
             name: DIB_OS_PACKAGES
             default: '{dib-os-packages}'
+        - string:
+            name: TEST_VM_NUM_NODES
+            default: '{test-vm-num-nodes}'
+        - string:
+            name: TEST_VM_NODE_NAMES
+            default: '{test-vm-node-names}'
+        - string:
+            name: VM_DOMAIN_TYPE
+            default: '{vm-domain-type}'
+        - string:
+            name: VM_CPU
+            default: '{vm-cpu}'
+        - string:
+            name: VM_DISK
+            default: '{vm-disk}'
+        - string:
+            name: VM_MEMORY_SIZE
+            default: '{vm-memory-size}'
+        - string:
+            name: VM_DISK_CACHE
+            default: '{vm-disk-cache}'
         - string:
             name: CLEAN_DIB_IMAGES
             default: 'true'
         - label:
             name: SLAVE_LABEL
             default: '{slave-label}'
+        - string:
+            name: ANSIBLE_VERBOSITY
+            default: ''
 
     wrappers:
         - xci-fix-perms-workspace
diff --git a/jjb/xci/xci-provision.sh b/jjb/xci/xci-provision.sh
index a1ba78b..ba447aa 100755 (executable)
@@ -86,7 +86,7 @@ sudo -E ./scripts/destroy-env.sh
 
 # provision 6 VMs; xcimaster, controller00, controller01, controller02, compute00, and compute01
 cd /opt/bifrost
-sudo -E ./scripts/osa-bifrost-deployment.sh
+sudo -E ./scripts/bifrost-provision.sh
 
 # list the provisioned VMs
 cd /opt/bifrost
diff --git a/jjb/yardstick/yardstick-cleanup.sh b/jjb/yardstick/yardstick-cleanup.sh
index 4e6f7d6..51455b5 100755 (executable)
@@ -1,6 +1,20 @@
 #!/bin/bash
 [[ $CI_DEBUG == true ]] && redirect="/dev/stdout" || redirect="/dev/null"
 
+# Remove containers along with image opnfv/yardstick*:<none>
+dangling_images=($(docker images -f "dangling=true" | grep opnfv/yardstick | awk '{print $3}'))
+if [[ -n ${dangling_images} ]]; then
+    echo "Removing opnfv/yardstick:<none> images and their containers..."
+    for image_id in "${dangling_images[@]}"; do
+        echo "      Removing image_id: $image_id and its containers"
+        containers=$(docker ps -a | grep $image_id | awk '{print $1}')
+        if [[ -n "$containers" ]];then
+            docker rm -f $containers >${redirect}
+        fi
+        docker rmi $image_id >${redirect}
+    done
+fi
+
 echo "Cleaning up docker containers/images..."
 # Remove previous running containers if exist
 if [[ ! -z $(docker ps -a | grep opnfv/yardstick) ]]; then
@@ -17,6 +31,6 @@ if [[ ! -z $(docker images | grep opnfv/yardstick) ]]; then
     for tag in "${image_tags[@]}"; do
         echo "Removing docker image opnfv/yardstick:$tag..."
         docker rmi opnfv/yardstick:$tag >$redirect
-
     done
 fi
+
diff --git a/modules/opnfv/utils/ssh_utils.py b/modules/opnfv/utils/ssh_utils.py
index d17f5ae..4c5ff5c 100644 (file)
@@ -15,6 +15,27 @@ import paramiko
 from opnfv.utils import opnfv_logger as logger
 
 logger = logger.Logger("SSH utils").getLogger()
+SSH_TIMEOUT = 60
+
+# --- Monkey patch paramiko Transport.start_client ---
+# We are using paramiko 2.1.1 and in CI the SFC
+# test is hitting this issue:
+# https://github.com/robotframework/SSHLibrary/issues/158
+# The fix was merged in paramiko 2.1.3 in this PR:
+# https://github.com/robotframework/SSHLibrary/pull/159/files
+# Until we upgrade we can use this monkey patch to work
+# around the issue.
+
+
+def _custom_start_client(self, *args, **kwargs):
+    self.banner_timeout = 45
+    self._orig_start_client(*args, **kwargs)
+
+
+paramiko.transport.Transport._orig_start_client = \
+    paramiko.transport.Transport.start_client
+paramiko.transport.Transport.start_client = _custom_start_client
+# --- End of paramiko monkey patch ---
 
 
 def get_ssh_client(hostname,
@@ -40,11 +61,13 @@ def get_ssh_client(hostname,
             client.load_system_host_keys()
             client.connect(hostname,
                            username=username,
-                           pkey=key)
+                           pkey=key,
+                           timeout=SSH_TIMEOUT)
         else:
             client.connect(hostname,
                            username=username,
-                           password=password)
+                           password=password,
+                           timeout=SSH_TIMEOUT)
 
         return client
     except Exception as e:
@@ -96,7 +119,8 @@ class ProxyHopClient(paramiko.SSHClient):
         self.proxy_ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
         self.proxy_ssh.connect(jh_ip,
                                username=jh_user,
-                               password=jh_pass)
+                               password=jh_pass,
+                               timeout=SSH_TIMEOUT)
         self.proxy_transport = self.proxy_ssh.get_transport()
 
     def connect(self, hostname, port=22, username='root', password=None,
@@ -126,7 +150,8 @@ class ProxyHopClient(paramiko.SSHClient):
             super(ProxyHopClient, self).connect(hostname,
                                                 username=username,
                                                 pkey=proxy_key,
-                                                sock=self.proxy_channel)
+                                                sock=self.proxy_channel,
+                                                timeout=timeout)
             os.remove(self.local_ssh_key)
         except Exception as e:
             logger.error(e)
diff --git a/prototypes/bifrost/scripts/test-bifrost-deployment.sh b/prototypes/bifrost/scripts/bifrost-provision.sh
similarity index 67%
rename from prototypes/bifrost/scripts/test-bifrost-deployment.sh
rename to prototypes/bifrost/scripts/bifrost-provision.sh
@@ -7,42 +7,45 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
-
 set -eux
 set -o pipefail
+
 export PYTHONUNBUFFERED=1
 SCRIPT_HOME="$(cd "$(dirname "$0")" && pwd)"
 BIFROST_HOME=$SCRIPT_HOME/..
 ANSIBLE_INSTALL_ROOT=${ANSIBLE_INSTALL_ROOT:-/opt/stack}
+ANSIBLE_VERBOSITY=${ANSIBLE_VERBOSITY-"-vvvv"}
 ENABLE_VENV="false"
 USE_DHCP="false"
 USE_VENV="false"
 BUILD_IMAGE=true
 PROVISION_WAIT_TIMEOUT=${PROVISION_WAIT_TIMEOUT:-3600}
-BAREMETAL_DATA_FILE=${BAREMETAL_DATA_FILE:-'/tmp/baremetal.json'}
-
-# Set defaults for ansible command-line options to drive the different
-# tests.
-
-# NOTE(TheJulia/cinerama): The variables defined on the command line
-# for the default and DHCP tests are to drive the use of Cirros as the
-# deployed operating system, and as such sets the test user to cirros,
-# and writes a debian style interfaces file out to the configuration
-# drive as cirros does not support the network_info.json format file
-# placed in the configuration drive. The "build image" test does not
-# use cirros.
-
-TEST_VM_NUM_NODES=3
-export TEST_VM_NODE_NAMES="xcimaster controller00 compute00"
-export VM_DOMAIN_TYPE="kvm"
+
+# Ensure the right inventory file is used based on the branch
+CURRENT_BIFROST_BRANCH=$(git rev-parse --abbrev-ref HEAD)
+if [ $CURRENT_BIFROST_BRANCH = "master" ]; then
+    BAREMETAL_DATA_FILE=${BAREMETAL_DATA_FILE:-'/tmp/baremetal.json'}
+    INVENTORY_FILE_FORMAT="baremetal_json_file"
+else
+    BAREMETAL_DATA_FILE=${BAREMETAL_DATA_FILE:-'/tmp/baremetal.csv'}
+    INVENTORY_FILE_FORMAT="baremetal_csv_file"
+fi
+export BIFROST_INVENTORY_SOURCE=$BAREMETAL_DATA_FILE
+
+# Default settings for VMs
+export TEST_VM_NUM_NODES=${TEST_VM_NUM_NODES:-3}
+export TEST_VM_NODE_NAMES=${TEST_VM_NODE_NAMES:-"xcimaster controller00 compute00"}
+export VM_DOMAIN_TYPE=${VM_DOMAIN_TYPE:-kvm}
 export VM_CPU=${VM_CPU:-4}
 export VM_DISK=${VM_DISK:-100}
+export VM_MEMORY_SIZE=${VM_MEMORY_SIZE:-8192}
 export VM_DISK_CACHE=${VM_DISK_CACHE:-unsafe}
+
+# Settings for bifrost
 TEST_PLAYBOOK="opnfv-virtual.yaml"
 USE_INSPECTOR=true
 USE_CIRROS=false
 TESTING_USER=root
-VM_MEMORY_SIZE=${VM_MEMORY_SIZE:-8192}
 DOWNLOAD_IPA=true
 CREATE_IPA_IMAGE=false
 INSPECT_NODES=true
@@ -50,27 +53,21 @@ INVENTORY_DHCP=false
 INVENTORY_DHCP_STATIC_IP=false
 WRITE_INTERFACES_FILE=true
 
-# Set BIFROST_INVENTORY_SOURCE
-export BIFROST_INVENTORY_SOURCE=/tmp/baremetal.json
-
-# settings for console access
+# Settings for console access
 export DIB_DEV_USER_PWDLESS_SUDO=yes
 export DIB_DEV_USER_PASSWORD=devuser
 
-# settings for distro: trusty/ubuntu-minimal, 7/centos7
+# Settings for distro: trusty/ubuntu-minimal, 7/centos7, 42.2/suse
 export DIB_OS_RELEASE=${DIB_OS_RELEASE:-trusty}
 export DIB_OS_ELEMENT=${DIB_OS_ELEMENT:-ubuntu-minimal}
 
-# for centos 7: "vim,less,bridge-utils,iputils,rsyslog,curl"
+# DIB OS packages
 export DIB_OS_PACKAGES=${DIB_OS_PACKAGES:-"vlan,vim,less,bridge-utils,language-pack-en,iputils-ping,rsyslog,curl"}
 
 # Additional dib elements
 export EXTRA_DIB_ELEMENTS=${EXTRA_DIB_ELEMENTS:-"openssh-server"}
 
 # Source Ansible
-# NOTE(TheJulia): Ansible stable-1.9 source method tosses an error deep
-# under the hood which -x will detect, so for this step, we need to suspend
-# and then re-enable the feature.
 set +x +o nounset
 $SCRIPT_HOME/env-setup.sh
 source ${ANSIBLE_INSTALL_ROOT}/ansible/hacking/env-setup
@@ -87,29 +84,29 @@ cd $BIFROST_HOME/playbooks
 
 # Syntax check of dynamic inventory test path
 for task in syntax-check list-tasks; do
-    ${ANSIBLE} -vvvv \
+    ${ANSIBLE} ${ANSIBLE_VERBOSITY} \
            -i inventory/localhost \
            test-bifrost-create-vm.yaml \
            --${task}
-    ${ANSIBLE} -vvvv \
+    ${ANSIBLE} ${ANSIBLE_VERBOSITY} \
            -i inventory/localhost \
            ${TEST_PLAYBOOK} \
            --${task} \
            -e testing_user=${TESTING_USER}
 done
 
-# Create the test VMS
-${ANSIBLE} -vvvv \
+# Create the VMs
+${ANSIBLE} ${ANSIBLE_VERBOSITY} \
        -i inventory/localhost \
        test-bifrost-create-vm.yaml \
        -e test_vm_num_nodes=${TEST_VM_NUM_NODES} \
        -e test_vm_memory_size=${VM_MEMORY_SIZE} \
        -e enable_venv=${ENABLE_VENV} \
        -e test_vm_domain_type=${VM_DOMAIN_TYPE} \
-       -e baremetal_json_file=${BAREMETAL_DATA_FILE}
+       -e ${INVENTORY_FILE_FORMAT}=${BAREMETAL_DATA_FILE}
 
-# Execute the installation and VM startup test.
-${ANSIBLE} -vvvv \
+# Execute the installation and VM startup test
+${ANSIBLE} ${ANSIBLE_VERBOSITY} \
     -i inventory/bifrost_inventory.py \
     ${TEST_PLAYBOOK} \
     -e use_cirros=${USE_CIRROS} \
@@ -128,9 +125,9 @@ ${ANSIBLE} -vvvv \
 EXITCODE=$?
 
 if [ $EXITCODE != 0 ]; then
-    echo "****************************"
-    echo "Test failed. See logs folder"
-    echo "****************************"
+    echo "************************************"
+    echo "Provisioning failed. See logs folder"
+    echo "************************************"
 fi
 
 exit $EXITCODE
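Because every VM setting above now falls back through ${VAR:-default}, the renamed bifrost-provision.sh serves both the 3-node verify layout and the 6-node daily layout from one file. A usage sketch with the daily values (matching the daily_vm_defaults introduced in xci-daily-jobs.yml above):

    # sketch: override the 3-node defaults to provision the 6-node HA layout
    export TEST_VM_NUM_NODES=6
    export TEST_VM_NODE_NAMES="xcimaster controller00 controller01 controller02 compute00 compute01"
    export VM_CPU=8 VM_DISK=100 VM_MEMORY_SIZE=16384
    sudo -E ./scripts/bifrost-provision.sh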
diff --git a/prototypes/bifrost/scripts/osa-bifrost-deployment.sh b/prototypes/bifrost/scripts/osa-bifrost-deployment.sh
deleted file mode 100755 (executable)
index fb66ae9..0000000
+++ /dev/null
@@ -1,143 +0,0 @@
-#!/bin/bash
-# SPDX-license-identifier: Apache-2.0
-##############################################################################
-# Copyright (c) 2016 Ericsson AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-set -eux
-set -o pipefail
-export PYTHONUNBUFFERED=1
-SCRIPT_HOME="$(cd "$(dirname "$0")" && pwd)"
-BIFROST_HOME=$SCRIPT_HOME/..
-ANSIBLE_INSTALL_ROOT=${ANSIBLE_INSTALL_ROOT:-/opt/stack}
-ENABLE_VENV="false"
-USE_DHCP="false"
-USE_VENV="false"
-BUILD_IMAGE=true
-PROVISION_WAIT_TIMEOUT=${PROVISION_WAIT_TIMEOUT:-3600}
-
-# ensure the right inventory files is used based on branch
-CURRENT_BIFROST_BRANCH=$(git rev-parse --abbrev-ref HEAD)
-if [ $CURRENT_BIFROST_BRANCH = "master" ]; then
-    export BIFROST_INVENTORY_SOURCE=${BIFROST_INVENTORY_SOURCE:-'/tmp/baremetal.json'}
-else
-    export BIFROST_INVENTORY_SOURCE=${BIFROST_INVENTORY_SOURCE:-'/tmp/baremetal.csv'}
-fi
-
-# Set defaults for ansible command-line options to drive the different
-# tests.
-
-# NOTE(TheJulia/cinerama): The variables defined on the command line
-# for the default and DHCP tests are to drive the use of Cirros as the
-# deployed operating system, and as such sets the test user to cirros,
-# and writes a debian style interfaces file out to the configuration
-# drive as cirros does not support the network_info.json format file
-# placed in the configuration drive. The "build image" test does not
-# use cirros.
-
-TEST_VM_NUM_NODES=6
-export TEST_VM_NODE_NAMES="xcimaster controller00 controller01 controller02 compute00 compute01"
-export VM_DOMAIN_TYPE="kvm"
-# 8 vCPU, 60 GB HDD are minimum equipment
-export VM_CPU=${VM_CPU:-8}
-export VM_DISK=${VM_DISK:-100}
-export VM_DISK_CACHE=${VM_DISK_CACHE:-unsafe}
-TEST_PLAYBOOK="opnfv-virtual.yaml"
-USE_INSPECTOR=true
-USE_CIRROS=false
-TESTING_USER=root
-# seting the memory to 16 GB to make more easily success
-# 8 GB RAM is minimum equipment, but it work with at least 12 GB.
-VM_MEMORY_SIZE=${VM_MEMORY_SIZE:-16384}
-DOWNLOAD_IPA=true
-CREATE_IPA_IMAGE=false
-INSPECT_NODES=true
-INVENTORY_DHCP=false
-INVENTORY_DHCP_STATIC_IP=false
-WRITE_INTERFACES_FILE=true
-
-
-# settings for console access
-export DIB_DEV_USER_PWDLESS_SUDO=yes
-export DIB_DEV_USER_PASSWORD=devuser
-
-# settings for distro: trusty/ubuntu-minimal, 7/centos7
-export DIB_OS_RELEASE=${DIB_OS_RELEASE:-xenial}
-export DIB_OS_ELEMENT=${DIB_OS_ELEMENT:-ubuntu-minimal}
-
-# for centos 7: "vim,less,bridge-utils,iputils,rsyslog,curl"
-export DIB_OS_PACKAGES=${DIB_OS_PACKAGES:-"vlan,vim,less,bridge-utils,sudo,language-pack-en,iputils-ping,rsyslog,curl,python,debootstrap,ifenslave,ifenslave-2.6,lsof,lvm2,tcpdump,nfs-kernel-server,chrony"}
-
-# Additional dib elements
-export EXTRA_DIB_ELEMENTS=${EXTRA_DIB_ELEMENTS:-"openssh-server"}
-
-# Source Ansible
-# NOTE(TheJulia): Ansible stable-1.9 source method tosses an error deep
-# under the hood which -x will detect, so for this step, we need to suspend
-# and then re-enable the feature.
-set +x +o nounset
-$SCRIPT_HOME/env-setup.sh
-source ${ANSIBLE_INSTALL_ROOT}/ansible/hacking/env-setup
-ANSIBLE=$(which ansible-playbook)
-set -x -o nounset
-
-logs_on_exit() {
-    $SCRIPT_HOME/collect-test-info.sh
-}
-trap logs_on_exit EXIT
-
-# Change working directory
-cd $BIFROST_HOME/playbooks
-
-# Syntax check of dynamic inventory test path
-for task in syntax-check list-tasks; do
-    ${ANSIBLE} \
-           -i inventory/localhost \
-           test-bifrost-create-vm.yaml \
-           --${task}
-    ${ANSIBLE} \
-           -i inventory/localhost \
-           ${TEST_PLAYBOOK} \
-           --${task} \
-           -e testing_user=${TESTING_USER}
-done
-
-# Create the test VMS
-${ANSIBLE} \
-       -i inventory/localhost \
-       test-bifrost-create-vm.yaml \
-       -e test_vm_num_nodes=${TEST_VM_NUM_NODES} \
-       -e test_vm_memory_size=${VM_MEMORY_SIZE} \
-       -e enable_venv=${ENABLE_VENV} \
-       -e test_vm_domain_type=${VM_DOMAIN_TYPE}
-
-# Execute the installation and VM startup test.
-${ANSIBLE} \
-    -i inventory/bifrost_inventory.py \
-    ${TEST_PLAYBOOK} \
-    -e use_cirros=${USE_CIRROS} \
-    -e testing_user=${TESTING_USER} \
-    -e test_vm_num_nodes=${TEST_VM_NUM_NODES} \
-    -e inventory_dhcp=${INVENTORY_DHCP} \
-    -e inventory_dhcp_static_ip=${INVENTORY_DHCP_STATIC_IP} \
-    -e enable_venv=${ENABLE_VENV} \
-    -e enable_inspector=${USE_INSPECTOR} \
-    -e inspect_nodes=${INSPECT_NODES} \
-    -e download_ipa=${DOWNLOAD_IPA} \
-    -e create_ipa_image=${CREATE_IPA_IMAGE} \
-    -e write_interfaces_file=${WRITE_INTERFACES_FILE} \
-    -e ipv4_gateway=192.168.122.1 \
-    -e wait_timeout=${PROVISION_WAIT_TIMEOUT}
-EXITCODE=$?
-
-if [ $EXITCODE != 0 ]; then
-    echo "****************************"
-    echo "Test failed. See logs folder"
-    echo "****************************"
-fi
-
-exit $EXITCODE
diff --git a/prototypes/xci/config/env-vars b/prototypes/xci/config/env-vars
new file mode 100755 (executable)
index 0000000..106a179
--- /dev/null
@@ -0,0 +1,12 @@
+#-------------------------------------------------------------------------------
+# Do not change these settings unless you are developing for the XCI Sandbox!
+#-------------------------------------------------------------------------------
+export OPNFV_RELENG_GIT_URL=https://gerrit.opnfv.org/gerrit/releng.git
+export OPENSTACK_BIFROST_GIT_URL=https://git.openstack.org/openstack/bifrost
+export OPENSTACK_OSA_GIT_URL=https://git.openstack.org/openstack/openstack-ansible
+export OPENSTACK_OSA_ETC_PATH=/etc/openstack_deploy
+export CLEAN_DIB_IMAGES=false
+export XCI_IP=192.168.122.2
+export XCI_ANSIBLE_PLAYBOOKS_PATH=$OPNFV_RELENG_PATH/prototypes/xci/file/$XCI_FLAVOR/playbooks
+export XCI_ANSIBLE_VARS_PATH=$OPNFV_RELENG_PATH/prototypes/xci/file/$XCI_FLAVOR/var
+export JOB_NAME=${JOB_NAME:-false}
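Note that env-vars derives XCI_ANSIBLE_PLAYBOOKS_PATH and XCI_ANSIBLE_VARS_PATH from OPNFV_RELENG_PATH and XCI_FLAVOR, so it must be sourced after user-vars and the flavor file. xci-deploy.sh at the end of this change follows exactly that order; a sketch:

    # sketch: the sourcing order the config files assume
    source config/pinned-versions   # pinned component versions
    source config/user-vars         # XCI_FLAVOR, clone paths
    source flavors/$XCI_FLAVOR      # per-flavor VM sizing
    source config/env-vars          # derived paths (needs the above)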
diff --git a/prototypes/xci/config/pinned-versions b/prototypes/xci/config/pinned-versions
new file mode 100755 (executable)
index 0000000..2fe9eee
--- /dev/null
@@ -0,0 +1,26 @@
+#-------------------------------------------------------------------------------
+# Pinned Component Versions
+#-------------------------------------------------------------------------------
+# You are free to override these versions in user-vars to experiment with
+# different branches or with different commits but be aware that things might
+# not work as expected.
+#
+# It is important to use consistent branches across the OpenStack projects
+# that OPNFV XCI uses.
+#
+# Examples:
+#   export OPENSTACK_BIFROST_VERSION="stable/ocata"
+#   export OPENSTACK_OSA_VERSION="stable/ocata"
+# or
+#   export OPENSTACK_BIFROST_VERSION="master"
+#   export OPENSTACK_OSA_VERSION="master"
+# or
+#   export OPENSTACK_BIFROST_VERSION="a87f7ce6c8725b3bbffec7b2efa1e466796848a9"
+#   export OPENSTACK_OSA_VERSION="4713cf45e11b4ebca9fbed25d1389854602213d8"
+#-------------------------------------------------------------------------------
+# use releng from master until the development work with the sandbox is complete
+export OPNFV_RELENG_VERSION="master"
+# HEAD of "master" as of 27.03.2017 - verified by OPNFV CI
+export OPENSTACK_BIFROST_VERSION="7417ff36e4b5fc4e2a6ee7d9dddb7287be20c37d"
+# HEAD of "master" as of 27.03.2017 - verified by OPNFV CI
+export OPENSTACK_OSA_VERSION="baba7b317a5898cd73b4a11c4ce364c7e2d3d77f"
diff --git a/prototypes/xci/config/user-vars b/prototypes/xci/config/user-vars
new file mode 100755 (executable)
index 0000000..f29dda6
--- /dev/null
@@ -0,0 +1,44 @@
+#-------------------------------------------------------------------------------
+# Set Deployment Flavor
+#-------------------------------------------------------------------------------
+# OPNFV XCI currently supports 4 different types of flavors:
+#   - all in one (aio): 1 xci VM which acts as controller and compute node
+#   - mini: 3 VMs, 1 xci VM, 1 controller, and 1 compute node
+#   - noha: 4 VMs, 1 xci VM, 1 controller, and 2 compute nodes
+#   - ha: 6 VMs, 1 xci VM, 3 controllers, and 2 compute nodes
+#
+# Apart from having different numbers of nodes, the CPU, RAM, and disk
+# allocations also differ from each other. Please take a look at the flavor
+# files for the exact settings of each of these flavors.
+#
+# Examples:
+#   export XCI_FLAVOR="aio"
+# or
+#   export XCI_FLAVOR="mini"
+# or
+#   export XCI_FLAVOR="noha"
+# or
+#   export XCI_FLAVOR="ha"
+#-------------------------------------------------------------------------------
+export XCI_FLAVOR=${XCI_FLAVOR:-aio}
+
+#-------------------------------------------------------------------------------
+# Set Paths to where git repositories of XCI Components will be cloned
+#-------------------------------------------------------------------------------
+# The OPNFV XCI Sandbox has not yet been verified to run as a non-root user,
+# so changing these paths might break things.
+#-------------------------------------------------------------------------------
+export OPNFV_RELENG_PATH=/opt/releng
+export OPENSTACK_BIFROST_PATH=/opt/bifrost
+export OPENSTACK_OSA_PATH=/opt/openstack-ansible
+
+#-------------------------------------------------------------------------------
+# Configure some other stuff
+#-------------------------------------------------------------------------------
+# Set the verbosity for ansible
+#
+# Examples:
+#   ANSIBLE_VERBOSITY="-v"
+# or
+#   ANSIBLE_VERBOSITY="-vvvv"
+export ANSIBLE_VERBOSITY=${ANSIBLE_VERBOSITY-""}
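Since each setting here has an environment fallback, a run can be customized entirely from the command line without editing the file. A hedged example, assuming xci-deploy.sh is invoked from prototypes/xci:

    # sketch: deploy the ha flavor with maximum ansible verbosity
    XCI_FLAVOR=ha ANSIBLE_VERBOSITY=-vvvv ./xci-deploy.sh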
diff --git a/prototypes/xci/file/aio/playbooks/configure-xcihost.yml b/prototypes/xci/file/aio/playbooks/configure-xcihost.yml
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/prototypes/xci/file/aio/playbooks/inventory b/prototypes/xci/file/aio/playbooks/inventory
new file mode 100644 (file)
index 0000000..9283e51
--- /dev/null
@@ -0,0 +1,2 @@
+[xciaio]
+xciaio ansible_ssh_host=192.168.122.2
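Once the aio VM is up, the static inventory above can be exercised directly (a sketch; assumes ansible is installed and the SSH key for 192.168.122.2 is loaded):

    # sketch: ping the aio node through the static inventory
    ansible -i prototypes/xci/file/aio/playbooks/inventory xciaio -m ping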
diff --git a/prototypes/xci/file/aio/var/ubuntu.yml b/prototypes/xci/file/aio/var/ubuntu.yml
new file mode 100644 (file)
index 0000000..3a041b1
--- /dev/null
@@ -0,0 +1,7 @@
+---
+OPENSTACK_OSA_GIT_URL: "{{ lookup('env','OPENSTACK_OSA_GIT_URL') }}"
+OPENSTACK_OSA_PATH: "{{ lookup('env','OPENSTACK_OSA_PATH') }}"
+OPENSTACK_OSA_VERSION: "{{ lookup('env','OPENSTACK_OSA_VERSION') }}"
+OPENSTACK_OSA_ETC_PATH: "{{ lookup('env','OPENSTACK_OSA_ETC_PATH') }}"
+XCI_IP: "{{ lookup('env','XCI_IP') }}"
+multi_host: "False"
diff --git a/prototypes/xci/file/ha/configure-targethosts.yml b/prototypes/xci/file/ha/configure-targethosts.yml
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/prototypes/xci/file/ha/playbooks/configure-xcihost.yml b/prototypes/xci/file/ha/playbooks/configure-xcihost.yml
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/prototypes/xci/file/ha/playbooks/inventory b/prototypes/xci/file/ha/playbooks/inventory
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/prototypes/xci/file/ha/var/ubuntu.yml b/prototypes/xci/file/ha/var/ubuntu.yml
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/prototypes/xci/file/mini/configure-targethosts.yml b/prototypes/xci/file/mini/configure-targethosts.yml
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/prototypes/xci/file/mini/playbooks/configure-xcihost.yml b/prototypes/xci/file/mini/playbooks/configure-xcihost.yml
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/prototypes/xci/file/mini/playbooks/inventory b/prototypes/xci/file/mini/playbooks/inventory
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/prototypes/xci/file/mini/var/ubuntu.yml b/prototypes/xci/file/mini/var/ubuntu.yml
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/prototypes/xci/file/noha/configure-targethosts.yml b/prototypes/xci/file/noha/configure-targethosts.yml
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/prototypes/xci/file/noha/playbooks/configure-xcihost.yml b/prototypes/xci/file/noha/playbooks/configure-xcihost.yml
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/prototypes/xci/file/noha/playbooks/inventory b/prototypes/xci/file/noha/playbooks/inventory
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/prototypes/xci/file/noha/var/ubuntu.yml b/prototypes/xci/file/noha/var/ubuntu.yml
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/prototypes/xci/flavors/aio b/prototypes/xci/flavors/aio
new file mode 100644 (file)
index 0000000..48754e5
--- /dev/null
@@ -0,0 +1,18 @@
+#-------------------------------------------------------------------------------
+# XCI Flavor Configuration
+#-------------------------------------------------------------------------------
+# You are free to modify parts of the configuration to fit into your environment.
+# But before doing that, please ensure you checked the other flavors to see
+# if one of them can be used instead, saving you some time.
+#-------------------------------------------------------------------------------
+
+#-------------------------------------------------------------------------------
+# Configure VM Nodes
+#-------------------------------------------------------------------------------
+export TEST_VM_NUM_NODES=1
+export TEST_VM_NODE_NAMES=xci
+export VM_DOMAIN_TYPE=kvm
+export VM_CPU=8
+export VM_DISK=80
+export VM_MEMORY_SIZE=8192
+export VM_DISK_CACHE=unsafe
diff --git a/prototypes/xci/flavors/ha b/prototypes/xci/flavors/ha
new file mode 100644 (file)
index 0000000..8a045a3
--- /dev/null
@@ -0,0 +1,18 @@
+#-------------------------------------------------------------------------------
+# XCI Flavor Configuration
+#-------------------------------------------------------------------------------
+# You are free to modify parts of the configuration to fit into your environment.
+# But before doing that, please ensure you checked the other flavors to see
+# if one of them can be used instead, saving you some time.
+#-------------------------------------------------------------------------------
+
+#-------------------------------------------------------------------------------
+# Configure VM Nodes
+#-------------------------------------------------------------------------------
+export TEST_VM_NUM_NODES=6
+export TEST_VM_NODE_NAMES="xci controller00 controller01 controller02 compute00 compute01"
+export VM_DOMAIN_TYPE=kvm
+export VM_CPU=8
+export VM_DISK=80
+export VM_MEMORY_SIZE=16384
+export VM_DISK_CACHE=unsafe
diff --git a/prototypes/xci/flavors/mini b/prototypes/xci/flavors/mini
new file mode 100644 (file)
index 0000000..44b015d
--- /dev/null
@@ -0,0 +1,18 @@
+#-------------------------------------------------------------------------------
+# XCI Flavor Configuration
+#-------------------------------------------------------------------------------
+# You are free to modify parts of the configuration to fit into your environment.
+# But before doing that, please ensure you checked the other flavors to see
+# if one of them can be used instead, saving you some time.
+#-------------------------------------------------------------------------------
+
+#-------------------------------------------------------------------------------
+# Configure VM Nodes
+#-------------------------------------------------------------------------------
+export TEST_VM_NUM_NODES=3
+export TEST_VM_NODE_NAMES="xci controller00 compute00"
+export VM_DOMAIN_TYPE=kvm
+export VM_CPU=8
+export VM_DISK=80
+export VM_MEMORY_SIZE=8192
+export VM_DISK_CACHE=unsafe
diff --git a/prototypes/xci/flavors/noha b/prototypes/xci/flavors/noha
new file mode 100644 (file)
index 0000000..7f686e5
--- /dev/null
@@ -0,0 +1,18 @@
+#-------------------------------------------------------------------------------
+# XCI Flavor Configuration
+#-------------------------------------------------------------------------------
+# You are free to modify parts of the configuration to fit into your environment.
+# But before doing that, please ensure you checked the other flavors to see
+# if one of them can be used instead, saving you some time.
+#-------------------------------------------------------------------------------
+
+#-------------------------------------------------------------------------------
+# Configure VM Nodes
+#-------------------------------------------------------------------------------
+export TEST_VM_NUM_NODES=4
+export TEST_VM_NODE_NAMES="xci controller00 compute00 compute01"
+export VM_DOMAIN_TYPE=kvm
+export VM_CPU=8
+export VM_DISK=80
+export VM_MEMORY_SIZE=8192
+export VM_DISK_CACHE=unsafe
diff --git a/prototypes/xci/xci-deploy.sh b/prototypes/xci/xci-deploy.sh
new file mode 100755 (executable)
index 0000000..401c098
--- /dev/null
@@ -0,0 +1,76 @@
+#!/bin/bash
+set -o errexit
+set -o nounset
+set -o pipefail
+set -o xtrace
+
+# This script must run as root
+if [[ $(whoami) != "root" ]]; then
+    echo "Error: This script must be run as root!"
+    exit 1
+fi
+
+# find out where we are
+XCI_PATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+
+# source pinned versions
+source $XCI_PATH/config/pinned-versions
+
+# source user vars
+source $XCI_PATH/config/user-vars
+
+# source flavor configuration
+source $XCI_PATH/flavors/$XCI_FLAVOR
+
+# source xci configuration
+source $XCI_PATH/config/env-vars
+
+# log info to console
+echo "Info: Starting XCI Deployment"
+echo "Info: Deployment parameters"
+echo "-------------------------------------------------------------------------"
+echo "xci flavor: $XCI_FLAVOR"
+echo "opnfv/releng version: $OPNFV_RELENG_VERSION"
+echo "openstack/bifrost version: $OPENSTACK_BIFROST_VERSION"
+echo "openstack/openstack-ansible version: $OPENSTACK_OSA_VERSION"
+echo "-------------------------------------------------------------------------"
+
+#-------------------------------------------------------------------------------
+# Cleanup the leftovers from the previous deployment
+#-------------------------------------------------------------------------------
+echo "Info: Cleaning up the previous deployment"
+$XCI_PATH/../bifrost/scripts/destroy-env.sh > /dev/null 2>&1
+/bin/rm -rf /opt/releng /opt/bifrost /opt/openstack-ansible /opt/stack
+
+#-------------------------------------------------------------------------------
+# Clone the repositories and checkout the versions
+#-------------------------------------------------------------------------------
+echo "Info: Cloning repositories and checking out versions"
+git clone --quiet $OPNFV_RELENG_GIT_URL $OPNFV_RELENG_PATH && \
+    cd $OPNFV_RELENG_PATH
+echo "Info: Cloned opnfv/releng. HEAD currently points at"
+echo "      $(git show --oneline -s --pretty=format:'%h - %s (%cr) <%an>')"
+git clone --quiet $OPENSTACK_BIFROST_GIT_URL $OPENSTACK_BIFROST_PATH && \
+    cd $OPENSTACK_BIFROST_PATH
+echo "Info: Cloned openstack/bifrost. HEAD currently points at"
+echo "      $(git show --oneline -s --pretty=format:'%h - %s (%cr) <%an>')"
+
+#-------------------------------------------------------------------------------
+# Combine opnfv and upstream scripts/playbooks
+#-------------------------------------------------------------------------------
+echo "Info: Combining opnfv/releng and opestack/bifrost scripts/playbooks"
+/bin/cp -rf $OPNFV_RELENG_PATH/prototypes/bifrost/* $OPENSTACK_BIFROST_PATH/
+
+#-------------------------------------------------------------------------------
+# Start provisioning VM nodes
+#-------------------------------------------------------------------------------
+echo "Info: Starting provisining VM nodes using openstack/bifrost"
+echo "      This might take between 10 to 20 minutes depending on the flavor and the host"
+echo "-------------------------------------------------------------------------"
+cd $OPENSTACK_BIFROST_PATH
+STARTTIME=$(date +%s)
+./scripts/bifrost-provision.sh
+ENDTIME=$(date +%s)
+echo "-----------------------------------------------------------------------"
+echo "Info: VM nodes are provisioned!"
+echo "Info: It took $(($ENDTIME - $STARTTIME)) seconds to provising the VM nodes"