From: Aric Gardner Date: Mon, 20 Feb 2017 16:07:56 +0000 (+0000) Subject: Merge "Initial add of VES job file" X-Git-Tag: danube.1.0~244 X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?a=commitdiff_plain;h=052bb66ad662a0a843c347bc32be03f11bc70f07;hp=19c85524b6d411909148d2281b58332848d55323;p=releng.git Merge "Initial add of VES job file" --- diff --git a/jjb/apex/apex-deploy.sh b/jjb/apex/apex-deploy.sh index dc70488e7..f2dc9945d 100755 --- a/jjb/apex/apex-deploy.sh +++ b/jjb/apex/apex-deploy.sh @@ -189,6 +189,9 @@ if [[ "$JOB_NAME" == *virtual* ]]; then NETWORK_FILE="${NETWORK_SETTINGS_DIR}/network_settings.yaml" fi DEPLOY_CMD="${DEPLOY_CMD} -v" + if [[ "${DEPLOY_SCENARIO}" =~ fdio|ovs ]]; then + DEPLOY_CMD="${DEPLOY_CMD} --virtual-ram 14" + fi if [[ "$JOB_NAME" == *csit* ]]; then DEPLOY_CMD="${DEPLOY_CMD} -e csit-environment.yaml --virtual-computes 2" fi diff --git a/jjb/bottlenecks/bottlenecks-ci-jobs.yml b/jjb/bottlenecks/bottlenecks-ci-jobs.yml index a9ccd6977..2779e316b 100644 --- a/jjb/bottlenecks/bottlenecks-ci-jobs.yml +++ b/jjb/bottlenecks/bottlenecks-ci-jobs.yml @@ -72,7 +72,8 @@ suite: - 'rubbos' - 'vstf' - - 'posca' + - 'posca_stress_traffic' + - 'posca_stress_ping' jobs: - 'bottlenecks-{installer}-{suite}-{pod}-daily-{stream}' @@ -137,65 +138,14 @@ - builder: name: bottlenecks-env-cleanup builders: - - shell: | - #!/bin/bash - set -e - [[ $GERRIT_REFSPEC_DEBUG == true ]] && redirect="/dev/stdout" || redirect="/dev/null" - - echo "Bottlenecks: docker containers/images cleaning up" - if [[ ! -z $(docker ps -a | grep opnfv/bottlenecks) ]]; then - echo "removing existing opnfv/bottlenecks containers" - docker ps -a | grep opnfv/bottlenecks | awk '{print $1}' | xargs docker rm -f >$redirect - fi - - if [[ ! -z $(docker images | grep opnfv/bottlenecks) ]]; then - echo "Bottlenecks: docker images to remove:" - docker images | head -1 && docker images | grep opnfv/bottlenecks - image_tags=($(docker images | grep opnfv/bottlenecks | awk '{print $2}')) - for tag in "${image_tags[@]}"; do - echo "Removing docker image opnfv/bottlenecks:$tag..." - docker rmi opnfv/bottlenecks:$tag >$redirect - done - fi + - shell: + !include-raw: ./bottlenecks-cleanup.sh - builder: name: bottlenecks-run-suite builders: - - shell: | - #!/bin/bash - set -e - [[ $GERRIT_REFSPEC_DEBUG == true ]] && redirect="/dev/stdout" || redirect="/dev/null" - - echo "Bottlenecks: to pull image opnfv/bottlenecks:${DOCKER_TAG}" - docker pull opnfv/bottlenecks:$DOCKER_TAG >${redirect} - - echo "Bottlenecks: docker start running" - opts="--privileged=true -id" - envs="-e INSTALLER_TYPE=${INSTALLER_TYPE} -e INSTALLER_IP=${INSTALLER_IP} \ - -e NODE_NAME=${NODE_NAME} -e EXTERNAL_NET=${EXTERNAL_NETWORK} \ - -e BOTTLENECKS_BRANCH=${BOTTLENECKS_BRANCH} -e GERRIT_REFSPEC_DEBUG=${GERRIT_REFSPEC_DEBUG} \ - -e BOTTLENECKS_DB_TARGET=${BOTTLENECKS_DB_TARGET} -e PACKAGE_URL=${PACKAGE_URL}" - cmd="sudo docker run ${opts} ${envs} opnfv/bottlenecks:${DOCKER_TAG} /bin/bash" - echo "Bottlenecks: docker cmd running ${cmd}" - ${cmd} >${redirect} - - echo "Bottlenecks: obtain docker id" - container_id=$(docker ps | grep "opnfv/bottlenecks:${DOCKER_TAG}" | awk '{print $1}' | head -1) - if [ -z ${container_id} ]; then - echo "Cannot find opnfv/bottlenecks container ID ${container_id}. Please check if it exists." 
- docker ps -a - exit 1 - fi - - echo "Bottlenecks: to prepare openstack environment" - prepare_env="${REPO_DIR}/ci/prepare_env.sh" - echo "Bottlenecks: docker cmd running: ${prepare_env}" - sudo docker exec ${container_id} ${prepare_env} - - echo "Bottlenecks: to run testsuite ${SUITE_NAME}" - run_testsuite="${REPO_DIR}/run_tests.sh -s ${SUITE_NAME}" - echo "Bottlenecks: docker cmd running: ${run_testsuite}" - sudo docker exec ${container_id} ${run_testsuite} + - shell: + !include-raw: ./bottlenecks-run-suite.sh #################### # parameter macros diff --git a/jjb/bottlenecks/bottlenecks-cleanup.sh b/jjb/bottlenecks/bottlenecks-cleanup.sh new file mode 100644 index 000000000..0ba042318 --- /dev/null +++ b/jjb/bottlenecks/bottlenecks-cleanup.sh @@ -0,0 +1,111 @@ +#!/bin/bash +set -e +[[ $GERRIT_REFSPEC_DEBUG == true ]] && redirect="/dev/stdout" || redirect="/dev/null" + +BOTTLENECKS_IMAGE=opnfv/bottlenecks +echo "Bottlenecks: docker containers/images cleaning up" + +dangling_images=($(docker images -f "dangling=true" | grep $BOTTLENECKS_IMAGE | awk '{print $3}')) +if [[ -n $dangling_images ]]; then + echo "Removing $BOTTLENECKS_IMAGE: dangling images and their containers" + docker images | head -1 && docker images | grep $dangling_images + for image_id in "${dangling_images[@]}"; do + echo "Bottlenecks: Removing dangling image $image_id" + docker rmi -f $image_id >${redirect} + done +fi + +for image_id in "${dangling_images[@]}"; do + if [[ -n $(docker ps -a | grep $image_id) ]]; then + echo "Bottlenecks: Removing containers associated with dangling image: $image_id" + docker ps -a | head -1 && docker ps -a | grep $image_id + docker ps -a | grep $image_id | awk '{print $1}'| xargs docker rm -f >${redirect} + fi +done + +if [[ -n $(docker ps -a | grep $BOTTLENECKS_IMAGE) ]]; then + echo "Removing existing $BOTTLENECKS_IMAGE containers" + docker ps -a | grep $BOTTLENECKS_IMAGE | awk '{print $1}' | xargs docker rm -f >$redirect +fi + +if [[ -n $(docker images | grep $BOTTLENECKS_IMAGE) ]]; then + echo "Bottlenecks: docker images to remove:" + docker images | head -1 && docker images | grep $BOTTLENECKS_IMAGE + image_tags=($(docker images | grep $BOTTLENECKS_IMAGE | awk '{print $2}')) + for tag in "${image_tags[@]}"; do + echo "Removing docker image $BOTTLENECKS_IMAGE:$tag..." 
+ docker rmi $BOTTLENECKS_IMAGE:$tag >$redirect + done +fi + +echo "Yardstick: docker containers/images cleaning up" +YARDSTICK_IMAGE=opnfv/yardstick + +dangling_images=($(docker images -f "dangling=true" | grep $YARDSTICK_IMAGE | awk '{print $3}')) +if [[ -n $dangling_images ]]; then + echo "Removing $YARDSTICK_IMAGE: dangling images and their containers" + docker images | head -1 && docker images | grep $dangling_images + for image_id in "${dangling_images[@]}"; do + echo "Yardstick: Removing dangling image $image_id" + docker rmi -f $image_id >${redirect} + done +fi + +for image_id in "${dangling_images[@]}"; do + if [[ -n $(docker ps -a | grep $image_id) ]]; then + echo "Yardstick: Removing containers associated with dangling image: $image_id" + docker ps -a | head -1 && docker ps -a | grep $image_id + docker ps -a | grep $image_id | awk '{print $1}'| xargs docker rm -f >${redirect} + fi +done + +if [[ -n $(docker ps -a | grep $YARDSTICK_IMAGE) ]]; then + echo "Removing existing $YARDSTICK_IMAGE containers" + docker ps -a | grep $YARDSTICK_IMAGE | awk '{print $1}' | xargs docker rm -f >$redirect +fi + +if [[ -n $(docker images | grep $YARDSTICK_IMAGE) ]]; then + echo "Yardstick: docker images to remove:" + docker images | head -1 && docker images | grep $YARDSTICK_IMAGE + image_tags=($(docker images | grep $YARDSTICK_IMAGE | awk '{print $2}')) + for tag in "${image_tags[@]}"; do + echo "Removing docker image $YARDSTICK_IMAGE:$tag..." + docker rmi $YARDSTICK_IMAGE:$tag >$redirect + done +fi + +echo "InfluxDB: docker containers/images cleaning up" +INFLUXDB_IMAGE=tutum/influxdb + +dangling_images=($(docker images -f "dangling=true" | grep $INFLUXDB_IMAGE | awk '{print $3}')) +if [[ -n $dangling_images ]]; then + echo "Removing $INFLUXDB_IMAGE: dangling images and their containers" + docker images | head -1 && docker images | grep $dangling_images + for image_id in "${dangling_images[@]}"; do + echo "InfluxDB: Removing dangling image $image_id" + docker rmi -f $image_id >${redirect} + done +fi + +for image_id in "${dangling_images[@]}"; do + if [[ -n $(docker ps -a | grep $image_id) ]]; then + echo "InfluxDB: Removing containers associated with dangling image: $image_id" + docker ps -a | head -1 && docker ps -a | grep $image_id + docker ps -a | grep $image_id | awk '{print $1}'| xargs docker rm -f >${redirect} + fi +done + +if [[ -n $(docker ps -a | grep $INFLUXDB_IMAGE) ]]; then + echo "Removing existing $INFLUXDB_IMAGE containers" + docker ps -a | grep $INFLUXDB_IMAGE | awk '{print $1}' | xargs docker rm -f >$redirect +fi + +if [[ -n $(docker images | grep $INFLUXDB_IMAGE) ]]; then + echo "InfluxDB: docker images to remove:" + docker images | head -1 && docker images | grep $INFLUXDB_IMAGE + image_tags=($(docker images | grep $INFLUXDB_IMAGE | awk '{print $2}')) + for tag in "${image_tags[@]}"; do + echo "Removing docker image $INFLUXDB_IMAGE:$tag..." 
+ docker rmi $INFLUXDB_IMAGE:$tag >$redirect + done +fi \ No newline at end of file diff --git a/jjb/bottlenecks/bottlenecks-project-jobs.yml b/jjb/bottlenecks/bottlenecks-project-jobs.yml index 12ea31b13..a0abb9331 100644 --- a/jjb/bottlenecks/bottlenecks-project-jobs.yml +++ b/jjb/bottlenecks/bottlenecks-project-jobs.yml @@ -29,7 +29,8 @@ suite: - 'rubbos' - 'vstf' - - 'posca' + - 'posca_stress_traffic' + - 'posca_stress_ping' ################################ # job templates diff --git a/jjb/bottlenecks/bottlenecks-run-suite.sh b/jjb/bottlenecks/bottlenecks-run-suite.sh new file mode 100644 index 000000000..f69463fc2 --- /dev/null +++ b/jjb/bottlenecks/bottlenecks-run-suite.sh @@ -0,0 +1,65 @@ +#!/bin/bash +#set -e +[[ $GERRIT_REFSPEC_DEBUG == true ]] && redirect="/dev/stdout" || redirect="/dev/null" +BOTTLENECKS_IMAGE=opnfv/bottlenecks + +if [[ $SUITE_NAME == rubbos || $SUITE_NAME == vstf ]]; then + echo "Bottlenecks: to pull image $BOTTLENECKS_IMAGE:${DOCKER_TAG}" + docker pull $BOTTLENECKS_IMAGE:$DOCKER_TAG >${redirect} + + echo "Bottlenecks: docker start running" + opts="--privileged=true -id" + envs="-e INSTALLER_TYPE=${INSTALLER_TYPE} -e INSTALLER_IP=${INSTALLER_IP} \ + -e NODE_NAME=${NODE_NAME} -e EXTERNAL_NET=${EXTERNAL_NETWORK} \ + -e BOTTLENECKS_BRANCH=${BOTTLENECKS_BRANCH} -e GERRIT_REFSPEC_DEBUG=${GERRIT_REFSPEC_DEBUG} \ + -e BOTTLENECKS_DB_TARGET=${BOTTLENECKS_DB_TARGET} -e PACKAGE_URL=${PACKAGE_URL}" + cmd="sudo docker run ${opts} ${envs} $BOTTLENECKS_IMAGE:${DOCKER_TAG} /bin/bash" + echo "Bottlenecks: docker cmd running ${cmd}" + ${cmd} >${redirect} + + echo "Bottlenecks: obtain docker id" + container_id=$(docker ps | grep "$BOTTLENECKS_IMAGE:${DOCKER_TAG}" | awk '{print $1}' | head -1) + if [ -z ${container_id} ]; then + echo "Cannot find $BOTTLENECKS_IMAGE container ID ${container_id}. Please check if it exists." 
+        docker ps -a
+        exit 1
+    fi
+
+    echo "Bottlenecks: to prepare openstack environment"
+    prepare_env="${REPO_DIR}/ci/prepare_env.sh"
+    echo "Bottlenecks: docker cmd running: ${prepare_env}"
+    sudo docker exec ${container_id} ${prepare_env}
+
+    echo "Bottlenecks: to run testsuite ${SUITE_NAME}"
+    run_testsuite="${REPO_DIR}/run_tests.sh -s ${SUITE_NAME}"
+    echo "Bottlenecks: docker cmd running: ${run_testsuite}"
+    sudo docker exec ${container_id} ${run_testsuite}
+else
+    echo "Bottlenecks: installing POSCA docker-compose"
+    if [ -f /usr/local/bin/docker-compose ]; then
+        rm -f /usr/local/bin/docker-compose
+    fi
+    curl -L https://github.com/docker/compose/releases/download/1.11.0/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose
+    chmod +x /usr/local/bin/docker-compose
+
+    echo "Bottlenecks: composing up dockers"
+    cd $WORKSPACE
+    docker-compose -f $WORKSPACE/docker/bottleneck-compose/docker-compose.yml up -d
+
+    echo "Bottlenecks: running traffic stress/factor testing in posca testsuite"
+    POSCA_SCRIPT=/home/opnfv/bottlenecks/testsuites/posca
+    if [[ $SUITE_NAME == posca_stress_traffic ]]; then
+        TEST_CASE=posca_factor_system_bandwidth
+        echo "Bottlenecks: pulling tutum/influxdb for yardstick"
+        docker pull tutum/influxdb:0.13
+        sleep 5
+        docker exec bottleneckcompose_bottlenecks_1 python ${POSCA_SCRIPT}/run_posca.py testcase $TEST_CASE
+    elif [[ $SUITE_NAME == posca_stress_ping ]]; then
+        TEST_CASE=posca_stress_ping
+        sleep 5
+        docker exec bottleneckcompose_bottlenecks_1 python ${POSCA_SCRIPT}/run_posca.py testcase $TEST_CASE
+    fi
+
+    echo "Bottlenecks: cleaning up docker-compose images and dockers"
+    docker-compose -f $WORKSPACE/docker/bottleneck-compose/docker-compose.yml down --rmi all
+fi
\ No newline at end of file
diff --git a/jjb/copper/copper.yml b/jjb/copper/copper.yml
index ea1af473c..b65466e01 100644
--- a/jjb/copper/copper.yml
+++ b/jjb/copper/copper.yml
@@ -64,5 +64,4 @@
             set -o nounset
             set -o pipefail
 
-            cd $WORKSPACE/ci
             shellcheck -f tty tests/*.sh
diff --git a/jjb/daisy4nfv/daisy-deploy.sh b/jjb/daisy4nfv/daisy-deploy.sh
new file mode 100755
index 000000000..b303c2c05
--- /dev/null
+++ b/jjb/daisy4nfv/daisy-deploy.sh
@@ -0,0 +1,60 @@
+#!/bin/bash
+set -o nounset
+set -o pipefail
+
+echo "--------------------------------------------------------"
+echo "This is $INSTALLER_TYPE deploy job!"
+echo "--------------------------------------------------------"
+
+DEPLOY_SCENARIO=${DEPLOY_SCENARIO:-"os-nosdn-nofeature-ha"}
+BRIDGE=${BRIDGE:-pxebr}
+LAB_NAME=${NODE_NAME/-*}
+POD_NAME=${NODE_NAME/*-}
+deploy_ret=0
+
+if [[ ! "$NODE_NAME" =~ "-virtual" ]] && [[ ! "$LAB_NAME" =~ (zte) ]]; then
+    echo "Unsupported lab $LAB_NAME for now, cannot continue!"
+    exit $deploy_ret
+fi
+
+# clone the securedlab repo
+cd $WORKSPACE
+BASE_DIR=$(cd ./;pwd)
+
+echo "Cloning securedlab repo $BRANCH"
+git clone ssh://jenkins-ericsson@gerrit.opnfv.org:29418/securedlab --quiet \
+    --branch $BRANCH
+
+DEPLOY_COMMAND="sudo ./ci/deploy/deploy.sh -b $BASE_DIR \
+    -l $LAB_NAME -p $POD_NAME -B $BRIDGE"
+
+# log info to console
+echo """
+Deployment parameters
+--------------------------------------------------------
+Scenario: $DEPLOY_SCENARIO
+LAB: $LAB_NAME
+POD: $POD_NAME
+BRIDGE: $BRIDGE
+BASE_DIR: $BASE_DIR
+
+Starting the deployment using $INSTALLER_TYPE. This could take some time...
+--------------------------------------------------------
+Issuing command
+$DEPLOY_COMMAND
+"""
+
+# start the deployment
+#$DEPLOY_COMMAND
+
+if [ $? -ne 0 ]; then
+    echo
+    echo "Deployment failed!"
+ deploy_ret=1 +else + echo + echo "--------------------------------------------------------" + echo "Deployment done!" +fi + +exit $deploy_ret diff --git a/jjb/daisy4nfv/daisy-project-jobs.yml b/jjb/daisy4nfv/daisy-project-jobs.yml index 156740980..0127ed094 100644 --- a/jjb/daisy4nfv/daisy-project-jobs.yml +++ b/jjb/daisy4nfv/daisy-project-jobs.yml @@ -196,7 +196,7 @@ - shell: !include-raw: ./daisy4nfv-download-artifact.sh - shell: - !include-raw: ./daisy4nfv-deploy.sh + !include-raw: ./daisy-deploy.sh - builder: name: 'daisy-test-daily-macro' diff --git a/jjb/daisy4nfv/daisy4nfv-deploy.sh b/jjb/daisy4nfv/daisy4nfv-deploy.sh deleted file mode 100755 index cc2c10388..000000000 --- a/jjb/daisy4nfv/daisy4nfv-deploy.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -echo "Daisy deployment WIP" diff --git a/jjb/daisy4nfv/daisy4nfv-merge-jobs.yml b/jjb/daisy4nfv/daisy4nfv-merge-jobs.yml index a6659b2bf..95d851cca 100644 --- a/jjb/daisy4nfv/daisy4nfv-merge-jobs.yml +++ b/jjb/daisy4nfv/daisy4nfv-merge-jobs.yml @@ -193,7 +193,7 @@ - shell: !include-raw: ./daisy4nfv-download-artifact.sh - shell: - !include-raw: ./daisy4nfv-virtual-deploy.sh + !include-raw: ./daisy-deploy.sh - shell: !include-raw: ./daisy4nfv-workspace-cleanup.sh diff --git a/jjb/daisy4nfv/daisy4nfv-virtual-deploy.sh b/jjb/daisy4nfv/daisy4nfv-virtual-deploy.sh deleted file mode 100755 index ef4a07b8d..000000000 --- a/jjb/daisy4nfv/daisy4nfv-virtual-deploy.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash - -echo "--------------------------------------------------------" -echo "This is diasy4nfv virtual deploy job!" -echo "--------------------------------------------------------" - -cd $WORKSPACE - -if [[ "$NODE_NAME" =~ "-virtual" ]]; then - export NETWORK_CONF=./deploy/config/vm_environment/$NODE_NAME/network.yml - export DHA_CONF=./deploy/config/vm_environment/$NODE_NAME/deploy.yml -else - # TODO: For the time being, we need to pass this script to let contributors merge their work. - echo "No support for non-virtual node" - exit 0 -fi - -sudo ./ci/deploy/deploy.sh -d ${DHA_CONF} -n ${NETWORK_CONF} -p ${NODE_NAME:-"zte-virtual1"} - -if [ $? -ne 0 ]; then - echo "depolyment failed!" - deploy_ret=1 -fi - -echo -echo "--------------------------------------------------------" -echo "Done!" 
- -exit $deploy_ret diff --git a/jjb/doctor/doctor.yml b/jjb/doctor/doctor.yml index 2333fca14..28888d673 100644 --- a/jjb/doctor/doctor.yml +++ b/jjb/doctor/doctor.yml @@ -22,9 +22,9 @@ - fuel: slave-label: 'ool-virtual2' pod: 'ool-virtual2' - - joid: - slave-label: 'ool-virtual3' - pod: 'ool-virtual3' + #- joid: + # slave-label: 'ool-virtual3' + # pod: 'ool-virtual3' inspector: - 'sample' diff --git a/jjb/fuel/fuel-daily-jobs.yml b/jjb/fuel/fuel-daily-jobs.yml index f78c4a317..237855236 100644 --- a/jjb/fuel/fuel-daily-jobs.yml +++ b/jjb/fuel/fuel-daily-jobs.yml @@ -106,6 +106,8 @@ auto-trigger-name: 'fuel-{scenario}-{pod}-daily-{stream}-trigger' - 'os-nosdn-kvm_ovs_dpdk-noha': auto-trigger-name: 'fuel-{scenario}-{pod}-daily-{stream}-trigger' + - 'os-nosdn-kvm_ovs_dpdk_bar-noha': + auto-trigger-name: 'fuel-{scenario}-{pod}-daily-{stream}-trigger' jobs: - 'fuel-{scenario}-{pod}-daily-{stream}' @@ -357,7 +359,11 @@ - trigger: name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-baremetal-daily-master-trigger' triggers: - - timed: '30 16 * * *' + - timed: '' +- trigger: + name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-baremetal-daily-master-trigger' + triggers: + - timed: '' #----------------------------------------------- # Triggers for job running on fuel-baremetal against danube branch #----------------------------------------------- @@ -447,6 +453,10 @@ name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-baremetal-daily-danube-trigger' triggers: - timed: '' +- trigger: + name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-baremetal-daily-danube-trigger' + triggers: + - timed: '' #----------------------------------------------- # Triggers for job running on fuel-virtual against master branch #----------------------------------------------- @@ -534,7 +544,11 @@ - trigger: name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-virtual-daily-master-trigger' triggers: - - timed: '' + - timed: '30 16 * * *' +- trigger: + name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-virtual-daily-master-trigger' + triggers: + - timed: '30 20 * * *' #----------------------------------------------- # Triggers for job running on fuel-virtual against danube branch #----------------------------------------------- @@ -623,6 +637,10 @@ name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-virtual-daily-danube-trigger' triggers: - timed: '' +- trigger: + name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-virtual-daily-danube-trigger' + triggers: + - timed: '' #----------------------------------------------- # ZTE POD1 Triggers running against master branch #----------------------------------------------- @@ -711,6 +729,10 @@ name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-zte-pod1-daily-master-trigger' triggers: - timed: '' +- trigger: + name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-zte-pod1-daily-master-trigger' + triggers: + - timed: '' #----------------------------------------------- # ZTE POD2 Triggers running against master branch @@ -800,6 +822,10 @@ name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-zte-pod2-daily-master-trigger' triggers: - timed: '' +- trigger: + name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-zte-pod2-daily-master-trigger' + triggers: + - timed: '' #----------------------------------------------- # ZTE POD3 Triggers running against master branch #----------------------------------------------- @@ -888,6 +914,10 @@ name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-zte-pod3-daily-master-trigger' triggers: - timed: '' +- trigger: + name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-zte-pod3-daily-master-trigger' + triggers: + - timed: '' #----------------------------------------------- # ZTE POD1 Triggers running against 
danube branch #----------------------------------------------- @@ -976,6 +1006,10 @@ name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-zte-pod1-daily-danube-trigger' triggers: - timed: '' +- trigger: + name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-zte-pod1-daily-danube-trigger' + triggers: + - timed: '' #----------------------------------------------- # ZTE POD2 Triggers running against danube branch @@ -1065,6 +1099,10 @@ name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-zte-pod2-daily-danube-trigger' triggers: - timed: '' +- trigger: + name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-zte-pod2-daily-danube-trigger' + triggers: + - timed: '' #----------------------------------------------- # ZTE POD3 Triggers running against danube branch #----------------------------------------------- @@ -1153,3 +1191,7 @@ name: 'fuel-os-nosdn-kvm_ovs_dpdk-noha-zte-pod3-daily-danube-trigger' triggers: - timed: '' +- trigger: + name: 'fuel-os-nosdn-kvm_ovs_dpdk_bar-noha-zte-pod3-daily-danube-trigger' + triggers: + - timed: '' diff --git a/jjb/functest/set-functest-env.sh b/jjb/functest/set-functest-env.sh index abec480dc..05e3d5792 100755 --- a/jjb/functest/set-functest-env.sh +++ b/jjb/functest/set-functest-env.sh @@ -17,32 +17,34 @@ if [[ ${RC_FILE_PATH} != '' ]] && [[ -f ${RC_FILE_PATH} ]] ; then echo "Credentials file detected: ${RC_FILE_PATH}" # volume if credentials file path is given to Functest rc_file_vol="-v ${RC_FILE_PATH}:/home/opnfv/functest/conf/openstack.creds" + RC_FLAG=1 fi if [[ ${INSTALLER_TYPE} == 'apex' ]]; then ssh_options="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no" - if sudo virsh list | grep instack; then - instack_mac=$(sudo virsh domiflist instack | grep default | \ - grep -Eo "[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+") - elif sudo virsh list | grep undercloud; then - instack_mac=$(sudo virsh domiflist undercloud | grep default | \ + if sudo virsh list | grep undercloud; then + echo "Installer VM detected" + undercloud_mac=$(sudo virsh domiflist undercloud | grep default | \ grep -Eo "[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+") + INSTALLER_IP=$(/usr/sbin/arp -e | grep ${undercloud_mac} | awk {'print $1'}) + sshkey_vol="-v /root/.ssh/id_rsa:/root/.ssh/id_rsa" + sudo scp $ssh_options root@${INSTALLER_IP}:/home/stack/stackrc ${HOME}/stackrc + stackrc_vol="-v ${HOME}/stackrc:/home/opnfv/functest/conf/stackrc" + + if sudo iptables -C FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable 2> ${redirect}; then + sudo iptables -D FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable + fi + if sudo iptables -C FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable 2> ${redirect}; then + sudo iptables -D FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable + fi + elif [[ "$RC_FLAG" == 1 ]]; then + echo "No available installer VM, but credentials provided...continuing" else - echo "No available installer VM exists...exiting" + echo "No available installer VM exists and no credentials provided...exiting" exit 1 fi - INSTALLER_IP=$(/usr/sbin/arp -e | grep ${instack_mac} | awk {'print $1'}) - sshkey_vol="-v /root/.ssh/id_rsa:/root/.ssh/id_rsa" - sudo scp $ssh_options root@${INSTALLER_IP}:/home/stack/stackrc ${HOME}/stackrc - stackrc_vol="-v ${HOME}/stackrc:/home/opnfv/functest/conf/stackrc" - if sudo iptables -C FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable 2> ${redirect}; then - sudo iptables -D FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable - fi - if sudo iptables -C FORWARD -i virbr0 -j REJECT 
--reject-with icmp-port-unreachable 2> ${redirect}; then - sudo iptables -D FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable - fi fi diff --git a/jjb/global/releng-macros.yml b/jjb/global/releng-macros.yml index 9b09e315f..d5eb0c974 100644 --- a/jjb/global/releng-macros.yml +++ b/jjb/global/releng-macros.yml @@ -61,7 +61,21 @@ choosing-strategy: 'gerrit' refspec: '$GERRIT_REFSPEC' <<: *git-scm-defaults - +- scm: + name: git-scm-with-submodules + scm: + - git: + credentials-id: '$SSH_CREDENTIAL_ID' + url: '$GIT_BASE' + refspec: '' + branches: + - 'refs/heads/{branch}' + skip-tag: true + wipe-workspace: true + submodule: + recursive: true + timeout: 20 + shallow-clone: true - trigger: name: 'daily-trigger-disabled' triggers: @@ -72,7 +86,6 @@ triggers: - timed: '' -# NOTE: unused macro, but we may use this for some jobs. - trigger: name: gerrit-trigger-patchset-created triggers: @@ -86,12 +99,22 @@ - draft-published-event - comment-added-contains-event: comment-contains-value: 'recheck' + - comment-added-contains-event: + comment-contains-value: 'reverify' projects: - project-compare-type: 'ANT' project-pattern: '{project}' branches: - branch-compare-type: 'ANT' branch-pattern: '**/{branch}' + file-paths: + - compare-type: 'ANT' + pattern: '{files}' + skip-vote: + successful: true + failed: true + unstable: true + notbuilt: true - trigger: name: gerrit-trigger-change-merged @@ -426,7 +449,7 @@ name: clean-workspace-log builders: - shell: | - find $WORKSPACE -type f -print -name '*.log' | xargs rm -f + find $WORKSPACE -type f -name '*.log' | xargs rm -f - publisher: name: archive-artifacts diff --git a/jjb/global/slave-params.yml b/jjb/global/slave-params.yml index 429828e8e..4b3eaaabf 100644 --- a/jjb/global/slave-params.yml +++ b/jjb/global/slave-params.yml @@ -381,6 +381,20 @@ name: GIT_BASE default: https://gerrit.opnfv.org/gerrit/$PROJECT description: 'Git URL to use on this Jenkins Slave' +- parameter: + name: 'cengn-pod1-defaults' + parameters: + - node: + name: SLAVE_NAME + description: 'Slave name on Jenkins' + allowed-slaves: + - cengn-pod1 + default-slaves: + - cengn-pod1 + - string: + name: GIT_BASE + default: https://gerrit.opnfv.org/gerrit/$PROJECT + description: 'Git URL to use on this Jenkins Slave' - parameter: name: 'intel-pod1-defaults' parameters: diff --git a/jjb/infra/bifrost-verify-jobs.yml b/jjb/infra/bifrost-verify-jobs.yml index c99023edf..d595d4bef 100644 --- a/jjb/infra/bifrost-verify-jobs.yml +++ b/jjb/infra/bifrost-verify-jobs.yml @@ -147,7 +147,7 @@ publishers: - email: - recipients: fatih.degirmenci@ericsson.com yroblamo@redhat.com mchandras@suse.de jack.morgan@intel.com zhang.jun3g@zte.com.cn + recipients: fatih.degirmenci@ericsson.com yroblamo@redhat.com mchandras@suse.de jack.morgan@intel.com julienjut@gmail.com #-------------------------------- # trigger macros #-------------------------------- diff --git a/jjb/joid/joid-daily-jobs.yml b/jjb/joid/joid-daily-jobs.yml index b28dd6025..88269d3c5 100644 --- a/jjb/joid/joid-daily-jobs.yml +++ b/jjb/joid/joid-daily-jobs.yml @@ -46,6 +46,9 @@ - orange-pod1: slave-label: orange-pod1 <<: *master + - cengn-pod1: + slave-label: cengn-pod1 + <<: *master #-------------------------------- # scenarios #-------------------------------- @@ -232,6 +235,10 @@ name: 'joid-os-nosdn-nofeature-ha-orange-pod1-master-trigger' triggers: - timed: '' +- trigger: + name: 'joid-os-nosdn-nofeature-ha-cengn-pod1-master-trigger' + triggers: + - timed: '5 2 * * *' # os-nosdn-nofeature-ha trigger - branch: danube - trigger: 
name: 'joid-os-nosdn-nofeature-ha-baremetal-danube-trigger' @@ -245,6 +252,10 @@ name: 'joid-os-nosdn-nofeature-ha-orange-pod1-danube-trigger' triggers: - timed: '' +- trigger: + name: 'joid-os-nosdn-nofeature-ha-cengn-pod1-danube-trigger' + triggers: + - timed: '' # os-odl_l2-nofeature-ha trigger - branch: master - trigger: name: 'joid-os-odl_l2-nofeature-ha-baremetal-master-trigger' @@ -258,6 +269,10 @@ name: 'joid-os-odl_l2-nofeature-ha-orange-pod1-master-trigger' triggers: - timed: '' +- trigger: + name: 'joid-os-odl_l2-nofeature-ha-cengn-pod1-master-trigger' + triggers: + - timed: '5 7 * * *' # os-odl_l2-nofeature-ha trigger - branch: danube - trigger: name: 'joid-os-odl_l2-nofeature-ha-baremetal-danube-trigger' @@ -271,6 +286,10 @@ name: 'joid-os-odl_l2-nofeature-ha-orange-pod1-danube-trigger' triggers: - timed: '' +- trigger: + name: 'joid-os-odl_l2-nofeature-ha-cengn-pod1-danube-trigger' + triggers: + - timed: '' # os-onos-nofeature-ha trigger - branch: master - trigger: name: 'joid-os-onos-nofeature-ha-baremetal-master-trigger' @@ -284,6 +303,10 @@ name: 'joid-os-onos-nofeature-ha-orange-pod1-master-trigger' triggers: - timed: '' +- trigger: + name: 'joid-os-onos-nofeature-ha-cengn-pod1-master-trigger' + triggers: + - timed: '5 12 * * *' # os-onos-nofeature-ha trigger - branch: danube - trigger: name: 'joid-os-onos-nofeature-ha-baremetal-danube-trigger' @@ -297,6 +320,10 @@ name: 'joid-os-onos-nofeature-ha-orange-pod1-danube-trigger' triggers: - timed: '' +- trigger: + name: 'joid-os-onos-nofeature-ha-cengn-pod1-danube-trigger' + triggers: + - timed: '' # os-onos-sfc-ha trigger - branch: master - trigger: name: 'joid-os-onos-sfc-ha-baremetal-master-trigger' @@ -310,6 +337,10 @@ name: 'joid-os-onos-sfc-ha-orange-pod1-master-trigger' triggers: - timed: '' +- trigger: + name: 'joid-os-onos-sfc-ha-cengn-pod1-master-trigger' + triggers: + - timed: '5 17 * * *' # os-onos-sfc-ha trigger - branch: danube - trigger: name: 'joid-os-onos-sfc-ha-baremetal-danube-trigger' @@ -323,6 +354,10 @@ name: 'joid-os-onos-sfc-ha-orange-pod1-danube-trigger' triggers: - timed: '' +- trigger: + name: 'joid-os-onos-sfc-ha-cengn-pod1-danube-trigger' + triggers: + - timed: '' # os-nosdn-lxd-noha trigger - branch: master - trigger: name: 'joid-os-nosdn-lxd-noha-baremetal-master-trigger' @@ -336,6 +371,10 @@ name: 'joid-os-nosdn-lxd-noha-orange-pod1-master-trigger' triggers: - timed: '' +- trigger: + name: 'joid-os-nosdn-lxd-noha-cengn-pod1-master-trigger' + triggers: + - timed: '5 22 * * *' # os-nosdn-lxd-noha trigger - branch: danube - trigger: name: 'joid-os-nosdn-lxd-noha-baremetal-danube-trigger' @@ -349,6 +388,10 @@ name: 'joid-os-nosdn-lxd-noha-orange-pod1-danube-trigger' triggers: - timed: '' +- trigger: + name: 'joid-os-nosdn-lxd-noha-cengn-pod1-danube-trigger' + triggers: + - timed: '' # os-nosdn-lxd-ha trigger - branch: master - trigger: name: 'joid-os-nosdn-lxd-ha-baremetal-master-trigger' @@ -362,6 +405,10 @@ name: 'joid-os-nosdn-lxd-ha-orange-pod1-master-trigger' triggers: - timed: '' +- trigger: + name: 'joid-os-nosdn-lxd-ha-cengn-pod1-master-trigger' + triggers: + - timed: '5 10 * * *' # os-nosdn-lxd-ha trigger - branch: danube - trigger: name: 'joid-os-nosdn-lxd-ha-baremetal-danube-trigger' @@ -375,6 +422,10 @@ name: 'joid-os-nosdn-lxd-ha-orange-pod1-danube-trigger' triggers: - timed: '' +- trigger: + name: 'joid-os-nosdn-lxd-ha-cengn-pod1-danube-trigger' + triggers: + - timed: '' # os-nosdn-nofeature-noha trigger - branch: master - trigger: name: 
'joid-os-nosdn-nofeature-noha-baremetal-master-trigger' @@ -388,6 +439,10 @@ name: 'joid-os-nosdn-nofeature-noha-orange-pod1-master-trigger' triggers: - timed: '' +- trigger: + name: 'joid-os-nosdn-nofeature-noha-cengn-pod1-master-trigger' + triggers: + - timed: '5 4 * * *' # os-nosdn-nofeature-noha trigger - branch: danube - trigger: name: 'joid-os-nosdn-nofeature-noha-baremetal-danube-trigger' @@ -401,6 +456,10 @@ name: 'joid-os-nosdn-nofeature-noha-orange-pod1-danube-trigger' triggers: - timed: '' +- trigger: + name: 'joid-os-nosdn-nofeature-noha-cengn-pod1-danube-trigger' + triggers: + - timed: '' # k8-nosdn-nofeature-noha trigger - branch: master - trigger: name: 'joid-k8-nosdn-nofeature-noha-baremetal-master-trigger' @@ -414,6 +473,10 @@ name: 'joid-k8-nosdn-nofeature-noha-orange-pod1-master-trigger' triggers: - timed: '' +- trigger: + name: 'joid-k8-nosdn-nofeature-noha-cengn-pod1-master-trigger' + triggers: + - timed: '5 15 * * *' # k8-nosdn-nofeature-noha trigger - branch: danube - trigger: name: 'joid-k8-nosdn-nofeature-noha-baremetal-danube-trigger' @@ -427,6 +490,10 @@ name: 'joid-k8-nosdn-nofeature-noha-orange-pod1-danube-trigger' triggers: - timed: '' +- trigger: + name: 'joid-k8-nosdn-nofeature-noha-cengn-pod1-danube-trigger' + triggers: + - timed: '' # k8-nosdn-lb-noha trigger - branch: master - trigger: name: 'joid-k8-nosdn-lb-noha-baremetal-master-trigger' @@ -440,6 +507,10 @@ name: 'joid-k8-nosdn-lb-noha-orange-pod1-master-trigger' triggers: - timed: '' +- trigger: + name: 'joid-k8-nosdn-lb-noha-cengn-pod1-master-trigger' + triggers: + - timed: '5 20 * * *' # k8-nosdn-lb-noha trigger - branch: danube - trigger: name: 'joid-k8-nosdn-lb-noha-baremetal-danube-trigger' @@ -453,3 +524,7 @@ name: 'joid-k8-nosdn-lb-noha-orange-pod1-danube-trigger' triggers: - timed: '' +- trigger: + name: 'joid-k8-nosdn-lb-noha-cengn-pod1-danube-trigger' + triggers: + - timed: '' diff --git a/jjb/opnfvdocs/docs-post-rtd.sh b/jjb/opnfvdocs/docs-post-rtd.sh new file mode 100644 index 000000000..7faa26f38 --- /dev/null +++ b/jjb/opnfvdocs/docs-post-rtd.sh @@ -0,0 +1,7 @@ +#!/bin/bash +if [ $GERRIT_BRANCH == "master" ]; then + RTD_BUILD_VERSION=latest +else + RTD_BUILD_VERSION=${{GERRIT_BRANCH/\//-}} +fi +curl -X POST --data "version_slug=$RTD_BUILD_VERSION" https://readthedocs.org/build/{rtdproject} diff --git a/jjb/opnfvdocs/docs-rtd.yaml b/jjb/opnfvdocs/docs-rtd.yaml new file mode 100644 index 000000000..151b53550 --- /dev/null +++ b/jjb/opnfvdocs/docs-rtd.yaml @@ -0,0 +1,85 @@ +- project: + name: docs-rtd + jobs: + - 'docs-merge-rtd-{stream}' + - 'docs-verify-rtd-{stream}' + + stream: + - master: + branch: 'master' + + project: 'opnfvdocs' + rtdproject: 'opnfv' + # TODO: Archive Artifacts + +- job-template: + name: 'docs-merge-rtd-{stream}' + + project-type: freestyle + + parameters: + - label: + name: SLAVE_LABEL + default: 'lf-build1' + description: 'Slave label on Jenkins' + - project-parameter: + project: '{project}' + branch: '{branch}' + - string: + name: GIT_BASE + default: https://gerrit.opnfv.org/gerrit/releng + description: 'Git URL to use on this Jenkins Slave' + scm: + - git-scm + + triggers: + - gerrit-trigger-change-merged + + builders: + - shell: !include-raw: docs-post-rtd.sh + +- job-template: + name: 'docs-verify-rtd-{stream}' + + project-type: freestyle + + parameters: + - label: + name: SLAVE_LABEL + default: 'lf-build1' + description: 'Slave label on Jenkins' + - project-parameter: + project: '{project}' + branch: '{branch}' + - string: + name: GIT_BASE + default: 
https://gerrit.opnfv.org/gerrit/opnfvdocs + description: 'Git URL to use on this Jenkins Slave' + scm: + - git-scm-with-submodules: + branch: '{branch}' + + triggers: + - gerrit-trigger-patchset-created: + server: 'gerrit.opnfv.org' + project: '**' + branch: '{branch}' + files: 'docs/**/*.rst' + - timed: 'H H * * *' + + builders: + - shell: | + if [ "$GERRIT_PROJECT" != "opnfvdocs" ]; then + cd docs/submodules/$GERRIT_PROJECT + git fetch origin $GERRIT_REFSPEC && git checkout FETCH_HEAD + else + git fetch origin $GERRIT_REFSPEC && git checkout FETCH_HEAD + fi + - shell: | + sudo pip install virtualenv + virtualenv $WORKSPACE/venv + source $WORKSPACE/venv/bin/activate + pip install --upgrade pip + pip freeze + pip install tox + tox -edocs diff --git a/modules/opnfv/deployment/apex/adapter.py b/modules/opnfv/deployment/apex/adapter.py index cb827d886..225e17438 100644 --- a/modules/opnfv/deployment/apex/adapter.py +++ b/modules/opnfv/deployment/apex/adapter.py @@ -35,28 +35,34 @@ class ApexAdapter(manager.DeploymentHandler): return None for line in lines: - if 'controller' in line: - roles = "controller" - elif 'compute' in line: - roles = "compute" - else: + roles = [] + if any(x in line for x in ['-----', 'Networks']): continue - if 'Daylight' in line: - roles += ", OpenDaylight" + if 'controller' in line: + roles.append(manager.Role.CONTROLLER) + if 'compute' in line: + roles.append(manager.Role.COMPUTE) + if 'opendaylight' in line.lower(): + roles.append(manager.Role.ODL) + fields = line.split('|') id = re.sub('[!| ]', '', fields[1]).encode() name = re.sub('[!| ]', '', fields[2]).encode() - status_node = re.sub('[!| ]', '', fields[3]).encode() + status_node = re.sub('[!| ]', '', fields[3]).encode().lower() ip = re.sub('[!| ctlplane=]', '', fields[4]).encode() - if status_node.lower() == 'active': - status = manager.Node.STATUS_OK + ssh_client = None + if 'active' in status_node: + status = manager.NodeStatus.STATUS_OK ssh_client = ssh_utils.get_ssh_client(hostname=ip, username='heat-admin', pkey_file=self.pkey_file) + elif 'error' in status_node: + status = manager.NodeStatus.STATUS_ERROR + elif 'off' in status_node: + status = manager.NodeStatus.STATUS_OFFLINE else: - status = manager.Node.STATUS_INACTIVE - ssh_client = None + status = manager.NodeStatus.STATUS_INACTIVE node = manager.Node(id, ip, name, status, roles, ssh_client) nodes.append(node) @@ -73,8 +79,9 @@ class ApexAdapter(manager.DeploymentHandler): "grep Description|sed 's/^.*\: //'") cmd_ver = ("sudo yum info opendaylight 2>/dev/null|" "grep Version|sed 's/^.*\: //'") + description = None for node in self.nodes: - if 'controller' in node.get_attribute('roles'): + if node.is_controller(): description = node.run_cmd(cmd_descr) version = node.run_cmd(cmd_ver) break diff --git a/modules/opnfv/deployment/fuel/adapter.py b/modules/opnfv/deployment/fuel/adapter.py index 3e6ef50a0..4ba9ca961 100644 --- a/modules/opnfv/deployment/fuel/adapter.py +++ b/modules/opnfv/deployment/fuel/adapter.py @@ -66,7 +66,7 @@ class FuelAdapter(manager.DeploymentHandler): if options and options['cluster'] and len(self.nodes) > 0: n = [] for node in self.nodes: - if node.info['cluster'] == options['cluster']: + if str(node.info['cluster']) == str(options['cluster']): n.append(node) return n @@ -114,7 +114,7 @@ class FuelAdapter(manager.DeploymentHandler): index_ip = i elif "mac" in fields[i]: index_mac = i - elif "roles " in fields[i]: + elif "roles " in fields[i] and "pending_roles" not in fields[i]: index_roles = i elif "online" in fields[i]: 
index_online = i @@ -124,26 +124,36 @@ class FuelAdapter(manager.DeploymentHandler): fields = lines[i].rsplit(' | ') id = fields[index_id].strip().encode() ip = fields[index_ip].strip().encode() - status_node = fields[index_status].strip().encode() + status_node = fields[index_status].strip().encode().lower() name = fields[index_name].strip().encode() - roles = fields[index_roles].strip().encode() + roles_all = fields[index_roles].strip().encode().lower() + + roles = [x for x in [manager.Role.CONTROLLER, + manager.Role.COMPUTE, + manager.Role.ODL] if x in roles_all] dict = {"cluster": fields[index_cluster].strip().encode(), "mac": fields[index_mac].strip().encode(), "status_node": status_node, "online": fields[index_online].strip().encode()} + ssh_client = None if status_node == 'ready': - status = manager.Node.STATUS_OK + status = manager.NodeStatus.STATUS_OK proxy = {'ip': self.installer_ip, 'username': self.installer_user, 'password': self.installer_pwd} ssh_client = ssh_utils.get_ssh_client(hostname=ip, username='root', proxy=proxy) + elif 'error' in status_node: + status = manager.NodeStatus.STATUS_ERROR + elif 'off' in status_node: + status = manager.NodeStatus.STATUS_OFFLINE + elif 'discover' in status_node: + status = manager.NodeStatus.STATUS_UNUSED else: - status = manager.Node.STATUS_INACTIVE - ssh_client = None + status = manager.NodeStatus.STATUS_INACTIVE node = manager.Node( id, ip, name, status, roles, ssh_client, dict) @@ -160,7 +170,7 @@ class FuelAdapter(manager.DeploymentHandler): cmd = 'source openrc;nova-manage version 2>/dev/null' version = None for node in self.nodes: - if 'controller' in node.get_attribute('roles'): + if node.is_controller(): version = node.run_cmd(cmd) break return version @@ -169,7 +179,7 @@ class FuelAdapter(manager.DeploymentHandler): cmd = "apt-cache show opendaylight|grep Version|sed 's/^.*\: //'" version = None for node in self.nodes: - if 'controller' in node.get_attribute('roles'): + if node.is_controller(): odl_version = node.run_cmd(cmd) if odl_version: version = 'OpenDaylight ' + odl_version diff --git a/modules/opnfv/deployment/manager.py b/modules/opnfv/deployment/manager.py index 8c9599b6e..e1fd9497f 100644 --- a/modules/opnfv/deployment/manager.py +++ b/modules/opnfv/deployment/manager.py @@ -27,7 +27,7 @@ class Deployment(object): status, openstack_version, sdn_controller, - nodes=[]): + nodes=None): self.deployment_info = { 'installer': installer, @@ -89,26 +89,36 @@ class Deployment(object): sdn_controller=self.deployment_info['sdn_controller']) for node in self.deployment_info['nodes']: - s += '\t\t{node_object}\n'.format(node_object=node) + s += '{node_object}\n'.format(node_object=node) return s -class Node(object): +class Role(): + CONTROLLER = 'controller' + COMPUTE = 'compute' + ODL = 'opendaylight' + ONOS = 'onos' + +class NodeStatus(): STATUS_OK = 'active' STATUS_INACTIVE = 'inactive' STATUS_OFFLINE = 'offline' - STATUS_FAILED = 'failed' + STATUS_ERROR = 'error' + STATUS_UNUSED = 'unused' + + +class Node(object): def __init__(self, id, ip, name, status, - roles, - ssh_client, - info={}): + roles=None, + ssh_client=None, + info=None): self.id = id self.ip = ip self.name = name @@ -117,11 +127,21 @@ class Node(object): self.roles = roles self.info = info + self.cpu_info = 'unknown' + self.memory = 'unknown' + self.ovs = 'unknown' + + if ssh_client: + sys_info = self.get_system_info() + self.cpu_info = sys_info['cpu_info'] + self.memory = sys_info['memory'] + self.ovs = self.get_ovs_info() + def get_file(self, src, dest): ''' 
SCP file from a node ''' - if self.status is not Node.STATUS_OK: + if self.status is not NodeStatus.STATUS_OK: logger.info("The node %s is not active" % self.ip) return 1 logger.info("Fetching %s from %s" % (src, self.ip)) @@ -137,7 +157,7 @@ class Node(object): ''' SCP file to a node ''' - if self.status is not Node.STATUS_OK: + if self.status is not NodeStatus.STATUS_OK: logger.info("The node %s is not active" % self.ip) return 1 logger.info("Copying %s to %s" % (src, self.ip)) @@ -153,9 +173,9 @@ class Node(object): ''' Run command remotely on a node ''' - if self.status is not Node.STATUS_OK: - logger.info("The node %s is not active" % self.ip) - return 1 + if self.status is not NodeStatus.STATUS_OK: + logger.error("The node %s is not active" % self.ip) + return None _, stdout, stderr = (self.ssh_client.exec_command(cmd)) error = stderr.readlines() if len(error) > 0: @@ -174,20 +194,17 @@ class Node(object): 'name': self.name, 'status': self.status, 'roles': self.roles, + 'cpu_info': self.cpu_info, + 'memory': self.memory, + 'ovs': self.ovs, 'info': self.info } - def get_attribute(self, attribute): - ''' - Returns an attribute given the name - ''' - return self.get_dict()[attribute] - def is_controller(self): ''' Returns if the node is a controller ''' - if 'controller' in self.get_attribute('roles'): + if 'controller' in self.roles: return True return False @@ -195,12 +212,61 @@ class Node(object): ''' Returns if the node is a compute ''' - if 'compute' in self.get_attribute('roles'): + if 'compute' in self.roles: return True return False + def get_ovs_info(self): + ''' + Returns the ovs version installed + ''' + cmd = "ovs-vsctl --version|head -1| sed 's/^.*) //'" + return self.run_cmd(cmd) + + def get_system_info(self): + ''' + Returns the ovs version installed + ''' + cmd = 'grep MemTotal /proc/meminfo' + memory = self.run_cmd(cmd).partition('MemTotal:')[-1].strip().encode() + + cpu_info = {} + cmd = 'lscpu' + result = self.run_cmd(cmd) + for line in result.splitlines(): + if line.startswith('CPU(s)'): + cpu_info['num_cpus'] = line.split(' ')[-1].encode() + elif line.startswith('Thread(s) per core'): + cpu_info['threads/core'] = line.split(' ')[-1].encode() + elif line.startswith('Core(s) per socket'): + cpu_info['cores/socket'] = line.split(' ')[-1].encode() + elif line.startswith('Model name'): + cpu_info['model'] = line.partition( + 'Model name:')[-1].strip().encode() + elif line.startswith('Architecture'): + cpu_info['arch'] = line.split(' ')[-1].encode() + + return {'memory': memory, 'cpu_info': cpu_info} + def __str__(self): - return str(self.get_dict()) + return ''' + name: {name} + id: {id} + ip: {ip} + status: {status} + roles: {roles} + cpu: {cpu_info} + memory: {memory} + ovs: {ovs} + info: {info}'''.format(name=self.name, + id=self.id, + ip=self.ip, + status=self.status, + roles=self.roles, + cpu_info=self.cpu_info, + memory=self.memory, + ovs=self.ovs, + info=self.info) class DeploymentHandler(object): @@ -236,7 +302,7 @@ class DeploymentHandler(object): self.installer_node = Node(id='', ip=installer_ip, name=installer, - status='active', + status=NodeStatus.STATUS_OK, ssh_client=self.installer_connection, roles='installer node') else: diff --git a/modules/opnfv/utils/ovs_logger.py b/modules/opnfv/utils/ovs_logger.py index 75b4cec80..d650eb9ab 100644 --- a/modules/opnfv/utils/ovs_logger.py +++ b/modules/opnfv/utils/ovs_logger.py @@ -7,7 +7,7 @@ # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## 
-import opnfv.utils.OPNFVLogger as OPNFVLogger +import opnfv.utils.opnfv_logger as OPNFVLogger import os import time import shutil diff --git a/prototypes/bifrost/scripts/test-bifrost-deployment.sh b/prototypes/bifrost/scripts/test-bifrost-deployment.sh index 914a906f4..3e2381fea 100755 --- a/prototypes/bifrost/scripts/test-bifrost-deployment.sh +++ b/prototypes/bifrost/scripts/test-bifrost-deployment.sh @@ -79,6 +79,11 @@ source ${ANSIBLE_INSTALL_ROOT}/ansible/hacking/env-setup ANSIBLE=$(which ansible-playbook) set -x -o nounset +logs_on_exit() { + $SCRIPT_HOME/collect-test-info.sh +} +trap logs_on_exit EXIT + # Change working directory cd $BIFROST_HOME/playbooks @@ -129,6 +134,4 @@ if [ $EXITCODE != 0 ]; then echo "****************************" fi -$SCRIPT_HOME/collect-test-info.sh - exit $EXITCODE diff --git a/utils/push-test-logs.sh b/utils/push-test-logs.sh index 5e428d07b..9099657c8 100644 --- a/utils/push-test-logs.sh +++ b/utils/push-test-logs.sh @@ -25,7 +25,7 @@ node_list=(\ 'intel-pod5' 'intel-pod6' 'intel-pod7' 'intel-pod8' \ 'ericsson-pod1' 'ericsson-pod2' \ 'ericsson-virtual1' 'ericsson-virtual2' 'ericsson-virtual3' \ -'ericsson-virtual4' 'ericsson-virtual5' \ +'ericsson-virtual4' 'ericsson-virtual5' 'ericsson-virtual12' \ 'arm-pod1' 'arm-pod3' \ 'huawei-pod1' 'huawei-pod2' 'huawei-pod3' 'huawei-pod4' 'huawei-pod5' \ 'huawei-pod6' 'huawei-pod7' 'huawei-pod12' \ diff --git a/utils/test/reporting/functest/reporting-status.py b/utils/test/reporting/functest/reporting-status.py index 158ee597b..df5632335 100755 --- a/utils/test/reporting/functest/reporting-status.py +++ b/utils/test/reporting/functest/reporting-status.py @@ -61,13 +61,13 @@ logger.info("*******************************************") # Retrieve test cases of Tier 1 (smoke) config_tiers = functest_yaml_config.get("tiers") -# we consider Tier 1 (smoke),2 (features) +# we consider Tier 0 (Healthcheck), Tier 1 (smoke),2 (features) # to validate scenarios -# Tier > 4 are not used to validate scenarios but we display the results anyway +# Tier > 2 are not used to validate scenarios but we display the results anyway # tricky thing for the API as some tests are Functest tests # other tests are declared directly in the feature projects for tier in config_tiers: - if tier['order'] > 0 and tier['order'] < 2: + if tier['order'] >= 0 and tier['order'] < 2: for case in tier['testcases']: if case['name'] not in blacklist: testValid.append(tc.TestCase(case['name'], diff --git a/utils/test/reporting/functest/testCase.py b/utils/test/reporting/functest/testCase.py index df0874e0b..22196c86b 100644 --- a/utils/test/reporting/functest/testCase.py +++ b/utils/test/reporting/functest/testCase.py @@ -43,7 +43,8 @@ class TestCase(object): 'parser': 'Parser', 'connection_check': 'Health (connection)', 'api_check': 'Health (api)', - 'snaps_smoke': 'SNAPS'} + 'snaps_smoke': 'SNAPS', + 'snaps_health_check': 'Health (dhcp)'} try: self.displayName = display_name_matrix[self.name] except: @@ -138,7 +139,8 @@ class TestCase(object): 'parser': 'parser-basics', 'connection_check': 'connection_check', 'api_check': 'api_check', - 'snaps_smoke': 'snaps_smoke' + 'snaps_smoke': 'snaps_smoke', + 'snaps_health_check': 'snaps_health_check' } try: return test_match_matrix[self.name] diff --git a/utils/test/reporting/reporting.yaml b/utils/test/reporting/reporting.yaml index 9db0890b2..2fb6b7831 100644 --- a/utils/test/reporting/reporting.yaml +++ b/utils/test/reporting/reporting.yaml @@ -36,12 +36,20 @@ functest: - ovno - security_scan - rally_sanity + - 
healthcheck + - odl_netvirt + - aaa + - cloudify_ims + - orchestra_ims + - juju_epc + - orchestra + - promise max_scenario_criteria: 50 test_conf: https://git.opnfv.org/cgit/functest/plain/functest/ci/testcases.yaml log_level: ERROR jenkins_url: https://build.opnfv.org/ci/view/functest/job/ exclude_noha: False - exclude_virtual: True + exclude_virtual: False yardstick: test_conf: https://git.opnfv.org/cgit/yardstick/plain/tests/ci/report_config.yaml diff --git a/utils/test/reporting/utils/reporting_utils.py b/utils/test/reporting/utils/reporting_utils.py index fc5d188af..1879fb628 100644 --- a/utils/test/reporting/utils/reporting_utils.py +++ b/utils/test/reporting/utils/reporting_utils.py @@ -269,7 +269,8 @@ def getJenkinsUrl(build_tag): url_base = get_config('functest.jenkins_url') try: build_id = [int(s) for s in build_tag.split("-") if s.isdigit()] - url_id = build_tag[8:-(len(build_id) + 3)] + "/" + str(build_id[0]) + url_id = (build_tag[8:-(len(str(build_id[0])) + 1)] + + "/" + str(build_id[0])) jenkins_url = url_base + url_id + "/console" except: print('Impossible to get jenkins url:') diff --git a/utils/test/testapi/opnfv_testapi/resources/scenario_handlers.py b/utils/test/testapi/opnfv_testapi/resources/scenario_handlers.py index a8c1a94fe..7c8c333a5 100644 --- a/utils/test/testapi/opnfv_testapi/resources/scenario_handlers.py +++ b/utils/test/testapi/opnfv_testapi/resources/scenario_handlers.py @@ -116,6 +116,17 @@ class ScenarioGURHandler(GenericScenarioHandler): db_keys = ['name'] self._update(query, db_keys) + @swagger.operation(nickname="deleteScenarioByName") + def delete(self, name): + """ + @description: delete a scenario by name + @return 200: delete success + @raise 404: scenario not exist: + """ + + query = {'name': name} + self._delete(query) + def _update_query(self, keys, data): query = dict() equal = True diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/test_scenario.py b/utils/test/testapi/opnfv_testapi/tests/unit/test_scenario.py index c15dc32ea..3a0abf934 100644 --- a/utils/test/testapi/opnfv_testapi/tests/unit/test_scenario.py +++ b/utils/test/testapi/opnfv_testapi/tests/unit/test_scenario.py @@ -1,11 +1,9 @@ from copy import deepcopy +from datetime import datetime import json import os -from datetime import datetime -from opnfv_testapi.common.constants import HTTP_BAD_REQUEST -from opnfv_testapi.common.constants import HTTP_FORBIDDEN -from opnfv_testapi.common.constants import HTTP_OK +from opnfv_testapi.common import constants import opnfv_testapi.resources.scenario_models as models from test_testcase import TestBase @@ -38,7 +36,7 @@ class TestScenarioBase(TestBase): return res.href.split('/')[-1] def assert_res(self, code, scenario, req=None): - self.assertEqual(code, HTTP_OK) + self.assertEqual(code, constants.HTTP_OK) if req is None: req = self.req_d scenario_dict = scenario.format_http() @@ -61,29 +59,29 @@ class TestScenarioBase(TestBase): class TestScenarioCreate(TestScenarioBase): def test_withoutBody(self): (code, body) = self.create() - self.assertEqual(code, HTTP_BAD_REQUEST) + self.assertEqual(code, constants.HTTP_BAD_REQUEST) def test_emptyName(self): req_empty = models.ScenarioCreateRequest('') (code, body) = self.create(req_empty) - self.assertEqual(code, HTTP_BAD_REQUEST) + self.assertEqual(code, constants.HTTP_BAD_REQUEST) self.assertIn('name missing', body) def test_noneName(self): req_none = models.ScenarioCreateRequest(None) (code, body) = self.create(req_none) - self.assertEqual(code, HTTP_BAD_REQUEST) + self.assertEqual(code, 
constants.HTTP_BAD_REQUEST) self.assertIn('name missing', body) def test_success(self): (code, body) = self.create_d() - self.assertEqual(code, HTTP_OK) + self.assertEqual(code, constants.HTTP_OK) self.assert_create_body(body) def test_alreadyExist(self): self.create_d() (code, body) = self.create_d() - self.assertEqual(code, HTTP_FORBIDDEN) + self.assertEqual(code, constants.HTTP_FORBIDDEN) self.assertIn('already exists', body) @@ -126,7 +124,7 @@ class TestScenarioGet(TestScenarioBase): def _query_and_assert(self, query, found=True, reqs=None): code, body = self.query(query) if not found: - self.assertEqual(code, HTTP_OK) + self.assertEqual(code, constants.HTTP_OK) self.assertEqual(0, len(body.scenarios)) else: self.assertEqual(len(reqs), len(body.scenarios)) @@ -296,10 +294,23 @@ class TestScenarioUpdate(TestScenarioBase): def _update_and_assert(self, update_req, new_scenario, name=None): code, _ = self.update(update_req, self.scenario) - self.assertEqual(code, HTTP_OK) + self.assertEqual(code, constants.HTTP_OK) self._get_and_assert(self._none_default(name, self.scenario), new_scenario) @staticmethod def _none_default(check, default): return check if check else default + + +class TestScenarioDelete(TestScenarioBase): + def test_notFound(self): + code, body = self.delete('notFound') + self.assertEqual(code, constants.HTTP_NOT_FOUND) + + def test_success(self): + scenario = self.create_return_name(self.req_d) + code, _ = self.delete(scenario) + self.assertEqual(code, constants.HTTP_OK) + code, _ = self.get(scenario) + self.assertEqual(code, constants.HTTP_NOT_FOUND)
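
A note on the jjb/bottlenecks/bottlenecks-cleanup.sh script added above: it repeats the same container/dangling-image/tagged-image sweep three times, once each for opnfv/bottlenecks, opnfv/yardstick and tutum/influxdb. A minimal generic form of that pattern is sketched below; the cleanup_image function name is illustrative rather than part of the repo, and the final docker image prune assumes Docker 1.13 or newer, which the patch itself does not require.

#!/bin/bash
# Sketch only: one function instead of three copies of the cleanup logic.
cleanup_image() {
    local image=$1
    # Remove containers created from any tag of this image.
    docker ps -a -q --filter "ancestor=${image}" | xargs -r docker rm -f
    # Remove the tagged images; -f also drops tags shared between image IDs.
    docker images "${image}" --format '{{.Repository}}:{{.Tag}}' \
        | xargs -r docker rmi -f
}

for image in opnfv/bottlenecks opnfv/yardstick tutum/influxdb; do
    cleanup_image "$image"
done
# Sweep any dangling (untagged) layers left behind (Docker >= 1.13).
docker image prune -f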
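
The prototypes/bifrost/scripts/test-bifrost-deployment.sh change above moves collect-test-info.sh from the last line of the script into an EXIT trap, so logs are still collected when an earlier step fails under set -e and the script never reaches its end. The pattern in isolation, with the collector path taken from the patch, looks like this:

#!/bin/bash
set -e

logs_on_exit() {
    # Runs on every exit path: normal completion, a command failing
    # under `set -e`, or an explicit `exit`.
    $SCRIPT_HOME/collect-test-info.sh
}
trap logs_on_exit EXIT

# ... provisioning and test steps that may abort at any point ...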
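
One subtlety in jjb/opnfvdocs/docs-post-rtd.sh above: the doubled braces in ${{GERRIT_BRANCH/\//-}} are Jenkins Job Builder escaping, not bash syntax. JJB expands the script as a Python format string (that is how {rtdproject} gets substituted), consuming one level of braces, so the script that actually runs on the slave is plain bash. With rtdproject set to 'opnfv' as in docs-rtd.yaml, the rendered script is approximately:

#!/bin/bash
# What JJB renders onto the node (one level of {} consumed):
if [ $GERRIT_BRANCH == "master" ]; then
    RTD_BUILD_VERSION=latest
else
    # e.g. stable/danube -> stable-danube
    RTD_BUILD_VERSION=${GERRIT_BRANCH/\//-}
fi
curl -X POST --data "version_slug=$RTD_BUILD_VERSION" https://readthedocs.org/build/opnfv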