Merge "Bottlenecks testcase online"
author mei mei <meimei@huawei.com>
Fri, 16 Mar 2018 03:53:27 +0000 (03:53 +0000)
committer Gerrit Code Review <gerrit@opnfv.org>
Fri, 16 Mar 2018 03:53:27 +0000 (03:53 +0000)
.gitmodules [new file with mode: 0644]
jjb/apex/apex-deploy.sh
jjb/apex/apex-download-artifact.sh
jjb/apex/apex.yml
jjb/apex/apex.yml.j2
jjb/dovetail/dovetail-run.sh
jjb/global-jjb [new submodule]
jjb/xci/xci-run-functest.sh
jjb/xci/xci-verify-jobs.yml
modules/opnfv/deployment/compass/adapter_container.py [new file with mode: 0644]
modules/opnfv/deployment/factory.py

diff --git a/.gitmodules b/.gitmodules
new file mode 100644 (file)
index 0000000..07b28be
--- /dev/null
@@ -0,0 +1,3 @@
+[submodule "jjb/global-jjb"]
+       path = jjb/global-jjb
+       url = https://github.com/lfit/releng-global-jjb
index b8ae75a..123db3e 100755 (executable)
@@ -31,7 +31,9 @@ elif [[ "$DEPLOY_SCENARIO" == *gate* ]]; then
 fi
 
 # Dev or RPM/ISO build
-if [[ "$ARTIFACT_VERSION" =~ dev ]]; then
+# For upstream deployments we currently only use git repo and not RPM
+# Need to decide after Fraser if we want to use RPM or not for upstream
+if [[ "$ARTIFACT_VERSION" =~ dev || "$DEPLOY_SCENARIO" =~ "upstream" ]]; then
   # Settings for deploying from git workspace
   DEPLOY_SETTINGS_DIR="${WORKSPACE}/config/deploy"
   NETWORK_SETTINGS_DIR="${WORKSPACE}/config/network"
@@ -134,6 +136,11 @@ else
   DEPLOY_CMD="${DEPLOY_CMD} -i ${INVENTORY_FILE}"
 fi
 
+if [[ "$DEPLOY_SCENARIO" =~ "upstream" ]]; then
+  echo "Upstream deployment detected"
+  DEPLOY_CMD="${DEPLOY_CMD} --upstream"
+fi
+
 if [ "$IPV6_FLAG" == "True" ]; then
   NETWORK_FILE="${NETWORK_SETTINGS_DIR}/network_settings_v6.yaml"
 elif echo ${DEPLOY_SCENARIO} | grep fdio; then
index 68baf59..c12406c 100755 (executable)
@@ -10,7 +10,10 @@ echo
 
 [[ -d $BUILD_DIRECTORY ]] || mkdir -p $BUILD_DIRECTORY
 
-if [[ "$ARTIFACT_VERSION" =~ dev ]]; then
+# if upstream we do not need to download anything
+if [[ "$DEPLOY_SCENARIO" =~ upstream ]]; then
+  echo "Upstream deployment detected, skipping download artifact"
+elif [[ "$ARTIFACT_VERSION" =~ dev ]]; then
   # dev build
   GERRIT_PATCHSET_NUMBER=$(echo $GERRIT_REFSPEC | grep -Eo '[0-9]+$')
   export OPNFV_ARTIFACT_VERSION="dev${GERRIT_CHANGE_NUMBER}_${GERRIT_PATCHSET_NUMBER}"
index cf29b92..e19a90e 100644 (file)
               abort-all-job: false
               git-revision: false
       - multijob:
-          name: Dovetail
+          name: Dovetail-proposed_tests
           condition: ALWAYS
           projects:
             - name: 'dovetail-apex-baremetal-proposed_tests-{scenario_stream}'
               predefined-parameters:
                 DEPLOY_SCENARIO=$DEPLOY_SCENARIO
               kill-phase-on: NEVER
-              enable-condition: "def m = '$DEPLOY_SCENARIO' ==~ /os-(nosdn-nofeature|odl-bgpvpn)-ha/"
+              enable-condition: "def m = '$DEPLOY_SCENARIO' ==~ /os-(nosdn-nofeature|odl-bgpvpn)-ha/
+                                 && $BUILD_NUMBER % 2 == 1"
+              abort-all-job: false
+              git-revision: false
+      - multijob:
+          name: Dovetail-default
+          condition: ALWAYS
+          projects:
+            - name: 'dovetail-apex-baremetal-default-{scenario_stream}'
+              node-parameters: true
+              current-parameters: false
+              predefined-parameters:
+                DEPLOY_SCENARIO=$DEPLOY_SCENARIO
+              kill-phase-on: NEVER
+              enable-condition: "def m = '$DEPLOY_SCENARIO' ==~ /os-(nosdn-nofeature|odl-bgpvpn)-ha/
+                                 && $BUILD_NUMBER % 2 == 0"
               abort-all-job: false
               git-revision: false
       - multijob:
index ab65c4e..ecc6f27 100644 (file)
               abort-all-job: false
               git-revision: false
       - multijob:
-          name: Dovetail
+          name: Dovetail-proposed_tests
           condition: ALWAYS
           projects:
             - name: 'dovetail-apex-baremetal-proposed_tests-{scenario_stream}'
               predefined-parameters:
                 DEPLOY_SCENARIO=$DEPLOY_SCENARIO
               kill-phase-on: NEVER
-              enable-condition: "def m = '$DEPLOY_SCENARIO' ==~ /os-(nosdn-nofeature|odl-bgpvpn)-ha/"
+              enable-condition: "def m = '$DEPLOY_SCENARIO' ==~ /os-(nosdn-nofeature|odl-bgpvpn)-ha/
+                                 && $BUILD_NUMBER % 2 == 1"
+              abort-all-job: false
+              git-revision: false
+      - multijob:
+          name: Dovetail-default
+          condition: ALWAYS
+          projects:
+            - name: 'dovetail-apex-baremetal-default-{scenario_stream}'
+              node-parameters: true
+              current-parameters: false
+              predefined-parameters:
+                DEPLOY_SCENARIO=$DEPLOY_SCENARIO
+              kill-phase-on: NEVER
+              enable-condition: "def m = '$DEPLOY_SCENARIO' ==~ /os-(nosdn-nofeature|odl-bgpvpn)-ha/
+                                 && $BUILD_NUMBER % 2 == 0"
               abort-all-job: false
               git-revision: false
       - multijob:
index ec879e3..a5a95f4 100755 (executable)
@@ -24,6 +24,9 @@ mkdir -p ${DOVETAIL_HOME}
 DOVETAIL_CONFIG=${DOVETAIL_HOME}/pre_config
 mkdir -p ${DOVETAIL_CONFIG}
 
+DOVETAIL_IMAGES=${DOVETAIL_HOME}/images
+mkdir -p ${DOVETAIL_IMAGES}
+
 ssh_options="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
 
 sshkey=""
@@ -189,7 +192,7 @@ if [[ ! -f ${ubuntu_image} ]]; then
     echo "Download image ubuntu-16.04-server-cloudimg-amd64-disk1.img ..."
     wget -q -nc http://artifacts.opnfv.org/sdnvpn/ubuntu-16.04-server-cloudimg-amd64-disk1.img -P ${image_path}
 fi
-sudo cp ${ubuntu_image} ${DOVETAIL_CONFIG}
+sudo cp ${ubuntu_image} ${DOVETAIL_IMAGES}
 
 # functest needs to download this image first before running
 cirros_image=${image_path}/cirros-0.3.5-x86_64-disk.img
@@ -197,7 +200,7 @@ if [[ ! -f ${cirros_image} ]]; then
     echo "Download image cirros-0.3.5-x86_64-disk.img ..."
     wget -q -nc http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img -P ${image_path}
 fi
-sudo cp ${cirros_image} ${DOVETAIL_CONFIG}
+sudo cp ${cirros_image} ${DOVETAIL_IMAGES}
 
 # snaps_smoke test case needs to download this image first before running
 ubuntu14_image=${image_path}/ubuntu-14.04-server-cloudimg-amd64-disk1.img
@@ -205,7 +208,7 @@ if [[ ! -f ${ubuntu14_image} ]]; then
     echo "Download image ubuntu-14.04-server-cloudimg-amd64-disk1.img ..."
     wget -q -nc https://cloud-images.ubuntu.com/releases/14.04/release/ubuntu-14.04-server-cloudimg-amd64-disk1.img -P ${image_path}
 fi
-sudo cp ${ubuntu14_image} ${DOVETAIL_CONFIG}
+sudo cp ${ubuntu14_image} ${DOVETAIL_IMAGES}
 
 # cloudify_ims test case needs to download these 2 images first before running
 cloudify_image=${image_path}/cloudify-manager-premium-4.0.1.qcow2
@@ -213,13 +216,13 @@ if [[ ! -f ${cloudify_image} ]]; then
     echo "Download image cloudify-manager-premium-4.0.1.qcow2 ..."
     wget -q -nc http://repository.cloudifysource.org/cloudify/4.0.1/sp-release/cloudify-manager-premium-4.0.1.qcow2 -P ${image_path}
 fi
-sudo cp ${cloudify_image} ${DOVETAIL_CONFIG}
+sudo cp ${cloudify_image} ${DOVETAIL_IMAGES}
 trusty_image=${image_path}/trusty-server-cloudimg-amd64-disk1.img
 if [[ ! -f ${trusty_image} ]]; then
     echo "Download image trusty-server-cloudimg-amd64-disk1.img ..."
     wget -q -nc http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img -P ${image_path}
 fi
-sudo cp ${trusty_image} ${DOVETAIL_CONFIG}
+sudo cp ${trusty_image} ${DOVETAIL_IMAGES}
 
 opts="--privileged=true -id"
 
@@ -236,10 +239,8 @@ fi
 echo "Dovetail: Pulling image ${DOCKER_REPO}:${DOCKER_TAG}"
 docker pull ${DOCKER_REPO}:$DOCKER_TAG >$redirect
 
-env4bgpvpn="-e INSTALLER_TYPE=${INSTALLER_TYPE} -e INSTALLER_IP=${INSTALLER_IP}"
-
 cmd="docker run ${opts} -e DOVETAIL_HOME=${DOVETAIL_HOME} ${docker_volume} ${dovetail_home_volume} \
-     ${sshkey} ${env4bgpvpn} ${DOCKER_REPO}:${DOCKER_TAG} /bin/bash"
+     ${sshkey} ${DOCKER_REPO}:${DOCKER_TAG} /bin/bash"
 echo "Dovetail: running docker run command: ${cmd}"
 ${cmd} >${redirect}
 sleep 5
diff --git a/jjb/global-jjb b/jjb/global-jjb
new file mode 160000 (submodule)
index 0000000..779110b
--- /dev/null
@@ -0,0 +1 @@
+Subproject commit 779110b5cd63f3eabb63598a1be79d9b9ba85464
index 60b48cf..1f616de 100755 (executable)
@@ -51,7 +51,7 @@ if ! sed -n "/^- scenario: $DEPLOY_SCENARIO$/,/^$/p" $OPNFV_SCENARIO_REQUIREMENT
     exit 0
 fi
 
-ssh -F $HOME/.ssh/${DISTRO}-xci-vm-config ${DISTRO}_xci_vm "cd releng-xci/xci && PATH=/home/devuser/.local/bin:$PATH ansible-playbook -i installer/osa/files/$XCI_FLAVOR/inventory playbooks/prepare-functest.yml"
+ssh -F $HOME/.ssh/${DISTRO}-xci-vm-config ${DISTRO}_xci_vm_opnfv "cd /root/releng-xci/xci/playbooks && ansible-playbook -i inventory prepare-functest.yml"
 echo "Running functest"
 ssh -F $HOME/.ssh/${DISTRO}-xci-vm-config ${DISTRO}_xci_vm_opnfv "/root/run-functest.sh"
 echo "Functest log"
index 383af2f..d78dc82 100644 (file)
               forbidden-file-paths:
                 - compare-type: ANT
                   pattern: 'xci/scripts/vm/**'
+                - compare-type: ANT
+                  pattern: 'docs/**'
+                - compare-type: ANT
+                  pattern: 'prototypes/**'
+                - compare-type: ANT
+                  pattern: 'upstream/**'
             - project-compare-type: 'REG_EXP'
               project-pattern: 'sfc|sdnvpn'
               branches:
diff --git a/modules/opnfv/deployment/compass/adapter_container.py b/modules/opnfv/deployment/compass/adapter_container.py
new file mode 100644 (file)
index 0000000..1713fe2
--- /dev/null
@@ -0,0 +1,83 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2018 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+
+from opnfv.deployment import manager
+from opnfv.utils import opnfv_logger as logger
+from opnfv.utils import ssh_utils
+
+import yaml
+import os
+
+logger = logger.Logger(__name__).getLogger()
+
+
+class ContainerizedCompassAdapter():
+
+    def __init__(self, installer_ip, installer_user, pkey_file):
+
+        self.installer = 'compass'
+        self.installer_ip = installer_ip
+        self.installer_user = installer_user
+        self.pkey_file = pkey_file
+        self.DST_PATH_UC = "/tmp/openstack_user_config.yml"
+        self.nodes = []
+        self.ROLES = {}
+
+        if pkey_file is not None and not os.path.isfile(pkey_file):
+            raise Exception(
+                'The private key file %s does not exist!' % pkey_file)
+
+    def _find_nodes(self, file):
+        nodes = file['compute_hosts']
+        for compute in nodes:
+            self.ROLES[compute] = 'compute'
+        controllers = file['haproxy_hosts']
+        for controller in controllers:
+            nodes[controller] = controllers[controller]
+            self.ROLES[controller] = 'controller'
+        return nodes
+
+    def _process_nodes(self, raw_nodes):
+        nodes = []
+
+        for node in raw_nodes:
+            name = node
+            ip = raw_nodes[node]['ip']
+            status = 'active'
+            id = None
+            if self.ROLES[node] == 'controller':
+                roles = 'controller'
+            elif self.ROLES[node] == 'compute':
+                roles = 'compute'
+            ssh_client = ssh_utils.get_ssh_client(hostname=ip,
+                                                  username=self.installer_user,
+                                                  pkey_file=self.pkey_file)
+            node = manager.Node(id, ip, name, status, roles, ssh_client)
+            nodes.append(node)
+
+        return nodes
+
+    def get_nodes(self, options=None):
+        try:
+            # if we have retrieved previously all the nodes, don't do it again
+            # This fails the first time when the constructor calls this method
+            # therefore the try/except
+            if len(self.nodes) > 0:
+                return self.nodes
+        except:
+            pass
+
+        with open(self.DST_PATH_UC, 'r') as stream:
+            try:
+                file = yaml.load(stream)
+                raw_nodes = self._find_nodes(file)
+            except yaml.YAMLError as exc:
+                logger.error(exc)
+        self.nodes = self._process_nodes(raw_nodes)
+        return self.nodes
index 2788e5e..1fd8d44 100644 (file)
@@ -9,7 +9,7 @@
 
 
 from opnfv.deployment.apex import adapter as apex_adapter
-from opnfv.deployment.compass import adapter as compass_adapter
+from opnfv.deployment.compass import adapter_container as compass_adapter
 from opnfv.deployment.fuel import adapter as fuel_adapter
 from opnfv.deployment.osa import adapter as osa_adapter
 from opnfv.deployment.daisy import adapter as daisy_adapter
@@ -44,10 +44,10 @@ class Factory(object):
                                             installer_user=installer_user,
                                             installer_pwd=installer_pwd)
         elif installer.lower() == "compass":
-            return compass_adapter.CompassAdapter(
+            return compass_adapter.ContainerizedCompassAdapter(
                 installer_ip=installer_ip,
                 installer_user=installer_user,
-                installer_pwd=installer_pwd)
+                pkey_file=pkey_file)
         elif installer.lower() == "osa":
             return osa_adapter.OSAAdapter(installer_ip=installer_ip,
                                           installer_user=installer_user,