Merge "prototypes: xci: scripts: Add update-osa-version-files.sh script"
author Markos Chandras <mchandras@suse.de>
Wed, 28 Jun 2017 14:41:53 +0000 (14:41 +0000)
committer Gerrit Code Review <gerrit@opnfv.org>
Wed, 28 Jun 2017 14:41:53 +0000 (14:41 +0000)
46 files changed:
jjb/barometer/barometer-build.sh [new file with mode: 0644]
jjb/barometer/barometer-upload-artifact.sh [new file with mode: 0644]
jjb/barometer/barometer.yml
jjb/ci_gate_security/anteater-clone-all-repos.sh [new file with mode: 0755]
jjb/ci_gate_security/anteater-report-to-gerrit.sh
jjb/ci_gate_security/anteater-security-audit-weekly.sh [new file with mode: 0644]
jjb/ci_gate_security/anteater-security-audit.sh
jjb/ci_gate_security/opnfv-ci-gate-security.yml
jjb/compass4nfv/compass-ci-jobs.yml
jjb/compass4nfv/compass-dovetail-jobs.yml
jjb/compass4nfv/compass-verify-jobs.yml
jjb/doctor/doctor.yml
jjb/dovetail/dovetail-ci-jobs.yml
jjb/dovetail/dovetail-cleanup.sh
jjb/dovetail/dovetail-run.sh
jjb/fuel/fuel-daily-jobs.yml
jjb/fuel/fuel-deploy.sh
jjb/fuel/fuel-download-artifact.sh
jjb/global/installer-params.yml
jjb/global/releng-macros.yml
jjb/netready/netready.yml
jjb/releng/opnfv-docker.sh
jjb/releng/opnfv-docker.yml
jjb/releng/testapi-docker-deploy.sh
jjb/storperf/storperf.yml
jjb/yardstick/yardstick-daily-jobs.yml
jjb/yardstick/yardstick-daily.sh
prototypes/xci/README.rst
prototypes/xci/config/pinned-versions
prototypes/xci/config/user-vars
prototypes/xci/file/ansible-role-requirements.yml
prototypes/xci/playbooks/provision-vm-nodes.yml
prototypes/xci/var/opnfv.yml
utils/fetch_os_creds.sh
utils/test/reporting/functest/reporting-status.py
utils/test/reporting/functest/reporting-tempest.py
utils/test/reporting/utils/reporting_utils.py
utils/test/testapi/3rd_party/static/testapi-ui/app.js
utils/test/testapi/3rd_party/static/testapi-ui/components/results/resultsController.js
utils/test/testapi/3rd_party/static/testapi-ui/config.json
utils/test/testapi/etc/config.ini
utils/test/testapi/htmlize/htmlize.py
utils/test/testapi/opnfv_testapi/common/config.py
utils/test/testapi/opnfv_testapi/resources/handlers.py
utils/test/testapi/opnfv_testapi/resources/result_handlers.py
utils/test/testapi/opnfv_testapi/tests/unit/fake_pymongo.py

diff --git a/jjb/barometer/barometer-build.sh b/jjb/barometer/barometer-build.sh
new file mode 100644 (file)
index 0000000..e40841b
--- /dev/null
@@ -0,0 +1,21 @@
+set -x
+
+OPNFV_ARTIFACT_VERSION=$(date -u +"%Y-%m-%d_%H-%M-%S")
+OPNFV_ARTIFACT_URL="$GS_URL/$OPNFV_ARTIFACT_VERSION/"
+
+# log info to console
+echo "Starting the build of Barometer RPMs"
+echo "------------------------------------"
+echo
+
+cd ci
+./install_dependencies.sh
+./build_rpm.sh
+cd $WORKSPACE
+
+# save information regarding artifact into file
+(
+    echo "OPNFV_ARTIFACT_VERSION=$OPNFV_ARTIFACT_VERSION"
+    echo "OPNFV_ARTIFACT_URL=$OPNFV_ARTIFACT_URL"
+) > $WORKSPACE/opnfv.properties
+
diff --git a/jjb/barometer/barometer-upload-artifact.sh b/jjb/barometer/barometer-upload-artifact.sh
new file mode 100644 (file)
index 0000000..887c492
--- /dev/null
@@ -0,0 +1,46 @@
+#!/bin/bash
+set -o nounset
+set -o pipefail
+
+RPM_WORKDIR=$WORKSPACE/rpmbuild
+RPM_DIR=$RPM_WORKDIR/RPMS/x86_64/
+cd $WORKSPACE/
+
+# source the opnfv.properties to get ARTIFACT_VERSION
+source $WORKSPACE/opnfv.properties
+
+# upload property files
+gsutil cp $WORKSPACE/opnfv.properties gs://$OPNFV_ARTIFACT_URL/opnfv.properties > gsutil.properties.log 2>&1
+gsutil cp $WORKSPACE/opnfv.properties gs://$GS_URL/latest.properties > gsutil.latest.log 2>&1
+
+echo "Uploading the barometer RPMs to artifacts.opnfv.org"
+echo "---------------------------------------------------"
+echo
+
+gsutil -m cp -r $RPM_DIR/* $OPNFV_ARTIFACT_URL > $WORKSPACE/gsutil.log 2>&1
+
+# Check if the RPMs were pushed
+gsutil ls $OPNFV_ARTIFACT_URL > /dev/null 2>&1
+if [[ $? -ne 0 ]]; then
+  echo "Problem while uploading barometer RPMs to $OPNFV_ARTIFACT_URL!"
+  echo "Check log $WORKSPACE/gsutil.log on the appropriate build server"
+  exit 1
+fi
+
+gsutil -m setmeta \
+    -h "Cache-Control:private, max-age=0, no-transform" \
+    gs://$OPNFV_ARTIFACT_URL/*.rpm > /dev/null 2>&1
+
+gsutil -m setmeta \
+    -h "Content-Type:text/html" \
+    -h "Cache-Control:private, max-age=0, no-transform" \
+    gs://$GS_URL/latest.properties \
+    gs://$OPNFV_ARTIFACT_URL/opnfv.properties > /dev/null 2>&1
+
+echo
+echo "--------------------------------------------------------"
+echo "Done!"
+echo "Artifact is available at $OPNFV_ARTIFACT_URL"
+
+# Clean up the RPM build directory on the build machine.
+rm -rf $RPM_WORKDIR
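
For reference, the opnfv.properties file sourced above is written by barometer-build.sh and contains only the two artifact variables; a sample, with an illustrative timestamp and assuming GS_URL resolves under artifacts.opnfv.org/barometer:

    OPNFV_ARTIFACT_VERSION=2017-06-28_14-41-53
    OPNFV_ARTIFACT_URL=artifacts.opnfv.org/barometer/2017-06-28_14-41-53/
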
index 68b8a04..c8fb9e2 100644 (file)
          - timed: '@midnight'
 
     builders:
-        - shell: |
-            pwd
-            cd ci
-            ./install_dependencies.sh
-            ./build_rpm.sh
+        - shell:
+            !include-raw-escape: ./barometer-build.sh
+        - shell:
+            !include-raw-escape: ./barometer-upload-artifact.sh
+
+########################
+# parameter macros
+########################
+- parameter:
+    name: barometer-project-parameter
+    parameters:
+        - string:
+            name: GS_URL
+            default: '$GS_BASE{gs-pathname}'
+            description: "URL to Google Storage."
diff --git a/jjb/ci_gate_security/anteater-clone-all-repos.sh b/jjb/ci_gate_security/anteater-clone-all-repos.sh
new file mode 100755 (executable)
index 0000000..8a9e73d
--- /dev/null
@@ -0,0 +1,33 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+set -o errexit
+set -o pipefail
+set -o nounset
+export PATH=$PATH:/usr/local/bin/
+
+
+#WORKSPACE="$(pwd)"
+
+cd $WORKSPACE
+if [ ! -d "$WORKSPACE/allrepos" ]; then
+  mkdir $WORKSPACE/allrepos
+fi
+
+cd $WORKSPACE/allrepos
+
+declare -a PROJECT_LIST
+EXCLUDE_PROJECTS="All-Projects|All-Users|securedlab"
+
+PROJECT_LIST=($(ssh gerrit.opnfv.org -p 29418 gerrit ls-projects | egrep -v $EXCLUDE_PROJECTS))
+echo "PROJECT_LIST=(${PROJECT_LIST[*]})" > $WORKSPACE/opnfv-projects.sh
+
+for PROJECT in ${PROJECT_LIST[@]}; do
+  echo "> Cloning $PROJECT"
+  if [ ! -d "$PROJECT" ]; then
+    git clone "https://gerrit.opnfv.org/gerrit/$PROJECT.git"
+  else
+    pushd "$PROJECT" > /dev/null
+    git pull -f
+    popd > /dev/null
+  fi
+done
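
The generated $WORKSPACE/opnfv-projects.sh consumed by the weekly audit job below is a one-line bash array definition; a sketch with illustrative project names:

    PROJECT_LIST=(apex armband bottlenecks functest releng yardstick)
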
index 71c5a06..fc3018f 100644 (file)
@@ -1,5 +1,5 @@
 #!/bin/bash
-set -o errexit
+# SPDX-license-identifier: Apache-2.0
 set -o pipefail
 export PATH=$PATH:/usr/local/bin/
 EXITSTATUS=0
diff --git a/jjb/ci_gate_security/anteater-security-audit-weekly.sh b/jjb/ci_gate_security/anteater-security-audit-weekly.sh
new file mode 100644 (file)
index 0000000..436a173
--- /dev/null
@@ -0,0 +1,37 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+
+echo "--------------------------------------------------------"
+vols="-v $WORKSPACE/allrepos/:/home/opnfv/anteater/allrepos/"
+echo "Pulling releng-anteater docker image"
+echo "--------------------------------------------------------"
+docker pull opnfv/releng-anteater
+echo "--------------------------------------------------------"
+cmd="docker run -id $vols opnfv/releng-anteater /bin/bash"
+echo "Running docker command $cmd"
+container_id=$($cmd)
+echo "Container ID is $container_id"
+source $WORKSPACE/opnfv-projects.sh
+for project in "${PROJECT_LIST[@]}"
+
+do
+  cmd="anteater --project testproj --path /home/opnfv/anteater/allrepos/$project"
+  echo "Executing command inside container"
+  echo "$cmd"
+  echo "--------------------------------------------------------"
+  docker exec $container_id $cmd > $WORKSPACE/"$project".securityaudit.log 2>&1
+done
+
+exit_code=$?
+echo "--------------------------------------------------------"
+echo "Stopping docker container with ID $container_id"
+docker stop $container_id
+
+
+#gsutil cp $WORKSPACE/securityaudit.log \
+#    gs://$GS_URL/$PROJECT-securityaudit-weekly.log 2>&1
+#
+#gsutil -m setmeta \
+#    -h "Content-Type:text/html" \
+#    -h "Cache-Control:private, max-age=0, no-transform" \
+#    gs://$GS_URL/$PROJECT-securityaudit-weekly.log > /dev/null 2>&1
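
Each iteration of the loop above is a plain docker exec; scanning a single repository by hand would look like this (repository name illustrative):

    docker exec "$container_id" anteater --project testproj --path /home/opnfv/anteater/allrepos/releng
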
index d5c0e40..9bd3cc3 100644 (file)
@@ -15,18 +15,14 @@ echo "--------------------------------------------------------"
 docker pull opnfv/releng-anteater
 echo "--------------------------------------------------------"
 
-cmd="sudo docker run --privileged=true -id $envs $vols opnfv/releng-anteater /bin/bash"
-echo "Running docker command $cmd"
-container_id=$($cmd)
-echo "Container ID is $container_id"
-cmd="anteater --project $PROJECT --patchset /home/opnfv/anteater/$PROJECT/patchset"
-echo "Executing command inside container"
+cmd="docker run -i $envs $vols --rm opnfv/releng-anteater \
+/home/opnfv/venv/bin/anteater --project $PROJECT --patchset /home/opnfv/anteater/$PROJECT/patchset"
+echo "Running docker container"
 echo "$cmd"
-echo "--------------------------------------------------------"
-docker exec $container_id $cmd > $WORKSPACE/securityaudit.log 2>&1
+$cmd > $WORKSPACE/securityaudit.log 2>&1
 exit_code=$?
 echo "--------------------------------------------------------"
-echo "Stopping docker container with ID $container_id"
-docker stop $container_id
+echo "Docker container exited with code: $exit_code"
+echo "--------------------------------------------------------"
 cat securityaudit.log
 exit 0
index e2ad03e..489dbc5 100644 (file)
@@ -1,3 +1,4 @@
+# SPDX-license-identifier: Apache-2.0
 ########################
 # Job configuration for opnfv-anteater (security audit)
 ########################
@@ -9,6 +10,7 @@
 
     jobs:
         - 'opnfv-security-audit-verify-{stream}'
+        - 'opnfv-security-audit-weekly-{stream}'
 
     stream:
         - master:
 ########################
 # job templates
 ########################
+- job-template:
+    name: 'opnfv-security-audit-weekly-{stream}'
+
+    disabled: '{obj:disabled}'
+
+    parameters:
+        - label:
+            name: SLAVE_LABEL
+            default: 'ericsson-build3'
+            description: 'Slave label on Jenkins'
+        - project-parameter:
+            project: releng
+            branch: '{branch}'
+
+    triggers:
+        - timed: '@weekly'
+
+    builders:
+        - anteater-security-audit-weekly
+
 - job-template:
     name: 'opnfv-security-audit-verify-{stream}'
 
@@ -55,7 +77,7 @@
                     comment-contains-value: 'reverify'
             projects:
               - project-compare-type: 'REG_EXP'
-                project-pattern: 'sandbox|releng'
+                project-pattern: 'sandbox|releng|octopus|pharos|functest'
                 branches:
                   - branch-compare-type: 'ANT'
                     branch-pattern: '**/{branch}'
     builders:
         - shell:
             !include-raw: ./anteater-report-to-gerrit.sh
+
+- builder:
+    name: anteater-security-audit-weekly
+    builders:
+        - shell:
+            !include-raw:
+                - ./anteater-clone-all-repos.sh
+                - ./anteater-security-audit-weekly.sh
index 3ba69fa..2472491 100644 (file)
                 unstable-threshold: 'FAILURE'
         # dovetail only runs on master for now, not synced with A/B/C branches
         # here the stream means the SUT stream; the dovetail stream is defined in its own job
-        # only run on os-(nosdn|odl_l2)-(nofeature|bgpvpn)-ha scenario
+        # only run on os-(nosdn|odl_l2|onos|odl_l3)-nofeature-ha scenario
+        # run against SUT master branch, dovetail docker image with latest tag
+        # run against SUT danube branch, dovetail docker image with latest tag (odd days) and cvp.X.X.X tag (even days)
         - conditional-step:
-            condition-kind: regex-match
-            regex: os-(nosdn|odl_l2)-(nofeature|bgpvpn)-ha
-            label: '{scenario}'
+            condition-kind: and
+            condition-operands:
+                - condition-kind: regex-match
+                  regex: danube
+                  label: '{stream}'
+                - condition-kind: regex-match
+                  regex: os-(nosdn|odl_l2|onos|odl_l3)-nofeature-ha
+                  label: '{scenario}'
+                - condition-kind: day-of-week
+                  day-selector: select-days
+                  days:
+                      MON: true
+                      WED: true
+                      FRI: true
+                      SUN: true
             steps:
                 - trigger-builds:
-                    - project: 'dovetail-compass-{pod}-proposed_tests-{stream}'
+                    - project: 'dovetail-compass-{pod}-proposed_tests-master'
                       current-parameters: false
                       predefined-parameters:
                         DEPLOY_SCENARIO={scenario}
                         build-step-failure-threshold: 'never'
                         failure-threshold: 'never'
                         unstable-threshold: 'FAILURE'
+        - conditional-step:
+            condition-kind: and
+            condition-operands:
+                - condition-kind: regex-match
+                  regex: danube
+                  label: '{stream}'
+                - condition-kind: regex-match
+                  regex: os-(nosdn|odl_l2|onos|odl_l3)-nofeature-ha
+                  label: '{scenario}'
+                - condition-kind: day-of-week
+                  day-selector: select-days
+                  days:
+                      TUE: true
+                      THU: true
+                      SAT: true
+            steps:
+                - trigger-builds:
+                    - project: 'dovetail-compass-{pod}-proposed_tests-danube'
+                      current-parameters: false
+                      predefined-parameters:
+                        DEPLOY_SCENARIO={scenario}
+                      block: true
+                      same-node: true
+                      block-thresholds:
+                        build-step-failure-threshold: 'never'
+                        failure-threshold: 'never'
+                        unstable-threshold: 'FAILURE'
+        - conditional-step:
+            condition-kind: and
+            condition-operands:
+                - condition-kind: regex-match
+                  regex: os-(nosdn|odl_l2)-(nofeature|bgpvpn)-ha
+                  label: '{scenario}'
+                - condition-kind: regex-match
+                  regex: master
+                  label: '{stream}'
+            steps:
+                - trigger-builds:
+                    - project: 'dovetail-compass-{pod}-proposed_tests-master'
 
 - job-template:
     name: 'compass-deploy-{pod}-daily-{stream}'
         - build-name:
             name: '$BUILD_NUMBER - Scenario: $DEPLOY_SCENARIO'
         - timeout:
-            timeout: 120
+            timeout: 240
             abort: true
         - fix-workspace-permissions
 
 - trigger:
     name: 'compass-os-ocl-nofeature-ha-baremetal-danube-trigger'
     triggers:
-        - timed: '0 5 * * *'
+        - timed: ''
 - trigger:
     name: 'compass-os-onos-sfc-ha-baremetal-danube-trigger'
     triggers:
-        - timed: ''
+        - timed: '0 5 * * *'
 - trigger:
     name: 'compass-os-odl_l2-moon-ha-baremetal-danube-trigger'
     triggers:
index 966dae5..67d1e4e 100644 (file)
         - build-name:
             name: '$BUILD_NUMBER - Scenario: $DEPLOY_SCENARIO'
         - timeout:
-            timeout: 120
+            timeout: 240
             abort: true
         - fix-workspace-permissions
 
index 4b05e22..e43f976 100644 (file)
@@ -74,7 +74,7 @@
     wrappers:
         - ssh-agent-wrapper
         - timeout:
-            timeout: 120
+            timeout: 240
             fail: true
         - fix-workspace-permissions
 
     wrappers:
         - ssh-agent-wrapper
         - timeout:
-            timeout: 120
+            timeout: 240
             fail: true
         - fix-workspace-permissions
 
     wrappers:
         - ssh-agent-wrapper
         - timeout:
-            timeout: 120
+            timeout: 240
             fail: true
         - fix-workspace-permissions
 
index 807d436..eb230b5 100644 (file)
             profiler: 'poc'
             auto-trigger-name: 'experimental'
 
+    pod:
+        - arm-pod2:
+            slave-label: '{pod}'
+        - arm-pod3:
+            slave-label: '{pod}'
+
     jobs:
         - 'doctor-verify-{stream}'
+        - 'doctor-{task}-{installer}-{inspector}-{pod}-{stream}'
         - 'doctor-{task}-{installer}-{inspector}-{stream}'
 
 - job-template:
     builders:
         - shell: "[ -e tests/run.sh ] && bash -n ./tests/run.sh"
 
+- job-template:
+    name: 'doctor-{task}-{installer}-{inspector}-{pod}-{stream}'
+
+    node: '{slave-label}'
+
+    disabled: '{obj:disabled}'
+
+    parameters:
+        - project-parameter:
+            project: '{project}'
+            branch: '{branch}'
+        - 'opnfv-build-ubuntu-defaults'
+
+    scm:
+        - git-scm-gerrit
+
+
+    triggers:
+        - '{auto-trigger-name}':
+            project: '{project}'
+            branch: '{branch}'
+
+    builders:
+        - shell: "[ -e tests/run.sh ] && bash -n ./tests/run.sh"
+
+
 - job-template:
     name: 'doctor-{task}-{installer}-{inspector}-{stream}'
 
index 682948d..9fdce31 100644 (file)
@@ -25,7 +25,7 @@
         branch: 'stable/{stream}'
         dovetail-branch: master
         gs-pathname: '/{stream}'
-        docker-tag: 'latest'
+        docker-tag: 'cvp.0.1.0'
 
 #-----------------------------------
 # POD, PLATFORM, AND BRANCH MAPPING
index 0ee789a..3ae0cbc 100755 (executable)
@@ -1,4 +1,11 @@
 #!/bin/bash
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
 
 [[ $CI_DEBUG == true ]] && redirect="/dev/stdout" || redirect="/dev/null"
 
index dce7e58..d423e9d 100755 (executable)
@@ -1,4 +1,11 @@
 #!/bin/bash
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
 
 # The noun INSTALLER is used in the community; this is just an example to run.
 # Multiple platforms are supported.
@@ -7,14 +14,12 @@ set -e
 [[ $CI_DEBUG == true ]] && redirect="/dev/stdout" || redirect="/dev/null"
 
 DOVETAIL_HOME=${WORKSPACE}/cvp
-if [ -d ${DOVETAIL_HOME} ]; then
-    sudo rm -rf ${DOVETAIL_HOME}/*
-else
-    sudo mkdir -p ${DOVETAIL_HOME}
-fi
+[ -d ${DOVETAIL_HOME} ] && sudo rm -rf ${DOVETAIL_HOME}
+
+mkdir -p ${DOVETAIL_HOME}
 
 DOVETAIL_CONFIG=${DOVETAIL_HOME}/pre_config
-sudo mkdir -p ${DOVETAIL_CONFIG}
+mkdir -p ${DOVETAIL_CONFIG}
 
 sshkey=""
 # The path of openrc.sh is defined in fetch_os_creds.sh
@@ -47,7 +52,7 @@ releng_repo=${WORKSPACE}/releng
 git clone https://gerrit.opnfv.org/gerrit/releng ${releng_repo} >/dev/null
 
 if [[ ${INSTALLER_TYPE} != 'joid' ]]; then
-    sudo /bin/bash ${releng_repo}/utils/fetch_os_creds.sh -d ${OPENRC} -i ${INSTALLER_TYPE} -a ${INSTALLER_IP} >${redirect}
+    ${releng_repo}/utils/fetch_os_creds.sh -d ${OPENRC} -i ${INSTALLER_TYPE} -a ${INSTALLER_IP} >${redirect}
 fi
 
 if [[ -f $OPENRC ]]; then
@@ -102,7 +107,8 @@ if [ "$INSTALLER_TYPE" == "fuel" ]; then
 fi
 
 # sdnvpn test case needs to download this image first before running
-sudo wget -nc http://artifacts.opnfv.org/sdnvpn/ubuntu-16.04-server-cloudimg-amd64-disk1.img -P ${DOVETAIL_CONFIG}
+echo "Download image ubuntu-16.04-server-cloudimg-amd64-disk1.img ..."
+wget -q -nc http://artifacts.opnfv.org/sdnvpn/ubuntu-16.04-server-cloudimg-amd64-disk1.img -P ${DOVETAIL_CONFIG}
 
 opts="--privileged=true -id"
 
index dd0590c..6867708 100644 (file)
             name: GS_URL
             default: artifacts.opnfv.org/$PROJECT{gs-pathname}
             description: "URL to Google Storage."
+        - string:
+            name: SSH_KEY
+            default: "/tmp/mcp.rsa"
+            description: "Path to private SSH key to access environment nodes. For MCP deployments only."
 ########################
 # trigger macros
 ########################
index 4d48ee5..2fb5c71 100755 (executable)
@@ -12,11 +12,13 @@ set -o pipefail
 
 export TERM="vt220"
 
-# source the file so we get OPNFV vars
-source latest.properties
+if [[ "$BRANCH" != 'master' ]]; then
+    # source the file so we get OPNFV vars
+    source latest.properties
 
-# echo the info about artifact that is used during the deployment
-echo "Using ${OPNFV_ARTIFACT_URL/*\/} for deployment"
+    # echo the info about artifact that is used during the deployment
+    echo "Using ${OPNFV_ARTIFACT_URL/*\/} for deployment"
+fi
 
 if [[ "$JOB_NAME" =~ "merge" ]]; then
     # set simplest scenario for virtual deploys to run for merges
@@ -75,7 +77,7 @@ echo "--------------------------------------------------------"
 echo "Scenario: $DEPLOY_SCENARIO"
 echo "Lab: $LAB_NAME"
 echo "POD: $POD_NAME"
-echo "ISO: ${OPNFV_ARTIFACT_URL/*\/}"
+[[ "$BRANCH" != 'master' ]] && echo "ISO: ${OPNFV_ARTIFACT_URL/*\/}"
 echo
 echo "Starting the deployment using $INSTALLER_TYPE. This could take some time..."
 echo "--------------------------------------------------------"
index 8cc552e..c3b8253 100755 (executable)
@@ -10,6 +10,9 @@
 set -o errexit
 set -o pipefail
 
+# disable Fuel ISO download for master branch
+[[ "$BRANCH" == 'master' ]] && exit 0
+
 # use the proxy URL in place of the normal one, as googleusercontent.com will be blocked randomly
 [[ "$NODE_NAME" =~ (zte) ]] && GS_URL=${GS_BASE_PROXY%%/*}/$GS_URL
 
index 40fc42c..ee154af 100644 (file)
             name: INSTALLER_IP
             default: '10.20.0.2'
             description: 'IP of the installer'
+        - string:
+            name: SALT_MASTER_IP
+            default: '192.168.10.100'
+            description: 'IP of the salt master (for mcp deployments)'
+        - string:
+            name: SSH_KEY
+            default: '/tmp/mcp.rsa'
+            description: 'Path to private SSH key to access environment nodes'
         - string:
             name: INSTALLER_TYPE
             default: fuel
index 5341db4..e4dfa8d 100644 (file)
             find "$local_path" | grep -e 'index.html$' -e 'pdf$' | \
                 sed -e "s|^$local_path|    http://$gs_path|" >> gerrit_comment.txt
 
+# To take advantage of this macro, have your build write
+# out the file 'gerrit_comment.txt' with information to post
+# back to gerrit and include this macro in the list of builders.
 - builder:
-    name: report-docs-build-result-to-gerrit
+    name: report-build-result-to-gerrit
     builders:
         - shell: |
             #!/bin/bash
     builders:
         - build-html-and-pdf-docs-output
         - upload-under-review-docs-to-opnfv-artifacts
-        - report-docs-build-result-to-gerrit
+        - report-build-result-to-gerrit
 
 - builder:
     name: upload-merged-docs
     builders:
         - build-html-and-pdf-docs-output
         - upload-generated-docs-to-opnfv-artifacts
-        - report-docs-build-result-to-gerrit
+        - report-build-result-to-gerrit
         - remove-old-docs-from-opnfv-artifacts
 
 - builder:
index 9a4d885..2702c45 100644 (file)
@@ -58,7 +58,7 @@
 - job-template:
     name: 'netready-build-gluon-packages-daily-{stream}'
 
-    disabled: false
+    disabled: true
 
     concurrent: true
 
index 2aa52ad..ebd0c9f 100644 (file)
@@ -73,6 +73,8 @@ fi
 # Get tag version
 echo "Current branch: $BRANCH"
 
+BUILD_BRANCH=$BRANCH
+
 if [[ "$BRANCH" == "master" ]]; then
     DOCKER_TAG="latest"
 elif [[ -n "${RELEASE_VERSION-}" ]]; then
@@ -82,19 +84,17 @@ else
     DOCKER_TAG="stable"
 fi
 
+if [[ -n "${COMMIT_ID-}" && -n "${RELEASE_VERSION-}" ]]; then
+    DOCKER_TAG=$RELEASE_VERSION
+    BUILD_BRANCH=$COMMIT_ID
+fi
+
 # Start the build
 echo "Building docker image: $DOCKER_REPO_NAME:$DOCKER_TAG"
 echo "--------------------------------------------------------"
 echo
-if [[ $DOCKER_REPO_NAME == *"dovetail"* ]]; then
-    if [[ -n "${RELEASE_VERSION-}" ]]; then
-        DOCKER_TAG=${RELEASE_VERSION}
-    fi
-    cmd="docker build --no-cache -t $DOCKER_REPO_NAME:$DOCKER_TAG -f $DOCKERFILE ."
-else
-    cmd="docker build --no-cache -t $DOCKER_REPO_NAME:$DOCKER_TAG --build-arg BRANCH=$BRANCH
-        -f $DOCKERFILE ."
-fi
+cmd="docker build --no-cache -t $DOCKER_REPO_NAME:$DOCKER_TAG --build-arg BRANCH=$BUILD_BRANCH
+    -f $DOCKERFILE ."
 
 echo ${cmd}
 ${cmd}
index 5fe0eb9..095ba41 100644 (file)
             name: DOCKER_REPO_NAME
             default: "opnfv/{project}"
             description: "Dockerhub repo to be pushed to."
+        - string:
+            name: COMMIT_ID
+            default: ""
+            description: "commit id to make a snapshot docker image"
         - string:
             name: RELEASE_VERSION
             default: ""
index b4e60b0..02c5e3a 100644 (file)
@@ -4,7 +4,7 @@ function check() {
 
     # Verify hosted
     sleep 5
-    cmd=`curl -s --head  --request GET http://testresults.opnfv.org/test/swagger/spec | grep '200 OK' > /dev/null`
+    cmd=`curl -s --head  --request GET http://testresults.opnfv.org/test/swagger/APIs | grep '200 OK' > /dev/null`
     rc=$?
     echo $rc
 
index be53b27..13186a1 100644 (file)
         - git-scm
 
     triggers:
-        - timed: '0 18 * * *'
+        - timed: '0 22 * * *'
 
     builders:
         - shell: |
index 5ff36f8..ff1d47e 100644 (file)
 
     publishers:
         - email:
-            recipients: jean.gaoliang@huawei.com limingjiang@huawei.com
+            recipients: jean.gaoliang@huawei.com limingjiang@huawei.com ross.b.brattain@intel.com
 
 ########################
 # builder macros
index 973f83a..1c2abad 100755 (executable)
@@ -31,7 +31,8 @@ fi
 opts="--privileged=true --rm"
 envs="-e INSTALLER_TYPE=${INSTALLER_TYPE} -e INSTALLER_IP=${INSTALLER_IP} \
     -e NODE_NAME=${NODE_NAME} -e EXTERNAL_NETWORK=${EXTERNAL_NETWORK} \
-    -e YARDSTICK_BRANCH=${BRANCH} -e DEPLOY_SCENARIO=${DEPLOY_SCENARIO}"
+    -e YARDSTICK_BRANCH=${BRANCH} -e BRANCH=${BRANCH} \
+    -e DEPLOY_SCENARIO=${DEPLOY_SCENARIO}"
 
 # Pull the image with correct tag
 echo "Yardstick: Pulling image opnfv/yardstick:${DOCKER_TAG}"
index 0d93665..b65abde 100644 (file)
@@ -185,6 +185,23 @@ continuously chasing the HEAD of corresponding branches.
 Once a working version is identified, the versions of the upstream components
 are then bumped in releng repo.
 
+==================
+XCI developer tips
+==================
+
+It is possible to run XCI in development mode in order to test the
+latest changes. When deploying in this mode, the script uses the working
+directories for releng/bifrost/OSA instead of cloning the full
+repositories on each run.
+To enable it, export the relevant DEV_PATH variables:
+
+- export OPNFV_RELENG_DEV_PATH=/opt/releng/
+- export OPENSTACK_BIFROST_DEV_PATH=/opt/bifrost
+- export OPENSTACK_OSA_DEV_PATH=/opt/openstack-ansible
+
+The deployment will then pick up the development copies stored in the
+specified directories instead of cloning the repositories on every run.
+
 ===========================================
 Limitations, Known Issues, and Improvements
 ===========================================
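
Putting the developer-tips section above together, a development-mode run is just the three exports followed by the usual deployment entry point; a minimal sketch, assuming local checkouts at the given paths and xci-deploy.sh as the entry point:

    export OPNFV_RELENG_DEV_PATH=/opt/releng/
    export OPENSTACK_BIFROST_DEV_PATH=/opt/bifrost
    export OPENSTACK_OSA_DEV_PATH=/opt/openstack-ansible
    ./xci-deploy.sh
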
index e3b49c7..c426936 100755 (executable)
@@ -22,6 +22,6 @@
 # use releng from master until the development work with the sandbox is complete
 export OPNFV_RELENG_VERSION="master"
 # HEAD of "master" as of 04.04.2017
-export OPENSTACK_BIFROST_VERSION=${OPENSTACK_BIFROST_VERSION:-"6109f824e5510e794dbf1968c3859e8b6356d280"}
+export OPENSTACK_BIFROST_VERSION=${OPENSTACK_BIFROST_VERSION:-"7c9bb5e07c6bc3b42c9a9e8457e5eef511075b38"}
 # HEAD of "master" as of 04.04.2017
 export OPENSTACK_OSA_VERSION=${OPENSTACK_OSA_VERSION:-"d9e1330c7ff9d72a604b6b4f3af765f66a01b30e"}
index 5ed5396..fd11a58 100755 (executable)
@@ -56,3 +56,6 @@ export LOG_PATH=${LOG_PATH:-${XCI_DEVEL_ROOT}/opnfv/logs}
 export RUN_TEMPEST=${RUN_TEMPEST:-false}
 # Set this to true to force XCI to re-create the target OS images
 export CLEAN_DIB_IMAGES=${CLEAN_DIB_IMAGES:-false}
+# Set this to a full path pointing to extra config files (containing
+# group_vars/all)
+export XCI_EXTRA_VARS_PATH=${XCI_EXTRA_VARS_PATH:-""}
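
XCI_EXTRA_VARS_PATH is picked up by provision-vm-nodes.yml below, which synchronizes the directory into the releng and bifrost playbook trees; a hypothetical setup:

    # the directory is expected to contain group_vars/all
    export XCI_EXTRA_VARS_PATH=/opt/xci-extra-vars
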
index 842bcc4..5a96e2a 100644 (file)
@@ -9,6 +9,10 @@
 ##############################################################################
 # these versions are extracted based on the osa commit d9e1330c7ff9d72a604b6b4f3af765f66a01b30e on 04.04.2017
 # https://review.openstack.org/gitweb?p=openstack/openstack-ansible.git;a=commit;h=d9e1330c7ff9d72a604b6b4f3af765f66a01b30e
+- name: ansible-hardening
+  scm: git
+  src: https://git.openstack.org/openstack/ansible-hardening
+  version: 051fe3195f59d1ee8db06fca5d2cce7a25e58861
 - name: apt_package_pinning
   scm: git
   src: https://git.openstack.org/openstack/openstack-ansible-apt_package_pinning
index 8be36c7..92b5c55 100644 (file)
         delete: yes
       when:
         - OPNFV_RELENG_DEV_PATH != ""
+    - name: Copy extra vars to releng and bifrost
+      synchronize:
+        src: "{{ XCI_EXTRA_VARS_PATH }}"
+        dest: "{{ item }}"
+      with_items:
+        - "{{ OPNFV_RELENG_PATH }}/prototypes/xci/playbooks"
+        - "{{ OPENSTACK_BIFROST_PATH }}/playbooks/inventory"
+      when:
+        - XCI_EXTRA_VARS_PATH != ""
 
 - hosts: localhost
   connection: local
index 85f532a..aeaface 100644 (file)
@@ -27,3 +27,4 @@ XCI_LOOP: "{{ lookup('env','XCI_LOOP') }}"
 LOG_PATH: "{{ lookup('env','LOG_PATH') }}"
 OPNFV_HOST_IP: "{{ lookup('env','OPNFV_HOST_IP') }}"
 OPNFV_SSH_HOST_KEYS_PATH: "{{ lookup('env', 'OPNFV_SSH_HOST_KEYS_PATH') }}"
+XCI_EXTRA_VARS_PATH: "{{ lookup('env', 'XCI_EXTRA_VARS_PATH') }}"
index 458bbda..993c0b9 100755 (executable)
@@ -12,8 +12,9 @@ set -o nounset
 set -o pipefail
 
 usage() {
-    echo "usage: $0 [-v] -d <destination> -i <installer_type> -a <installer_ip>" >&2
+    echo "usage: $0 [-v] -d <destination> -i <installer_type> -a <installer_ip> [-s <ssh_key>]" >&2
     echo "[-v] Virtualized deployment" >&2
+    echo "[-s <ssh_key>] Path to ssh key. For MCP deployments only" >&2
 }
 
 info ()  {
@@ -53,11 +54,12 @@ swap_to_public() {
 : ${DEPLOY_TYPE:=''}
 
 #Get options
-while getopts ":d:i:a:h:v" optchar; do
+while getopts ":d:i:a:h:s:v" optchar; do
     case "${optchar}" in
         d) dest_path=${OPTARG} ;;
         i) installer_type=${OPTARG} ;;
         a) installer_ip=${OPTARG} ;;
+        s) ssh_key=${OPTARG} ;;
         v) DEPLOY_TYPE="virt" ;;
         *) echo "Non-option argument: '-${OPTARG}'" >&2
            usage
@@ -70,6 +72,9 @@ done
 dest_path=${dest_path:-$HOME/opnfv-openrc.sh}
 installer_type=${installer_type:-$INSTALLER_TYPE}
 installer_ip=${installer_ip:-$INSTALLER_IP}
+if [ "${installer_type}" == "fuel" ] && [ "${BRANCH}" == "master" ]; then
+    installer_ip=${SALT_MASTER_IP}
+fi
 
 if [ -z $dest_path ] || [ -z $installer_type ] || [ -z $installer_ip ]; then
     usage
@@ -89,40 +94,45 @@ ssh_options="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
 
 # Start fetching the files
 if [ "$installer_type" == "fuel" ]; then
-    #ip_fuel="10.20.0.2"
     verify_connectivity $installer_ip
+    if [ "${BRANCH}" == "master" ]; then
+        ssh_key=${ssh_key:-$SSH_KEY}
+        if [ -z $ssh_key ] || [ ! -f $ssh_key ]; then
+            error "Please provide path to existing ssh key for mcp deployment."
+            exit 2
+        fi
+        ssh_options+=" -i ${ssh_key}"
 
-    env=$(sshpass -p r00tme ssh 2>/dev/null $ssh_options root@${installer_ip} \
-        'fuel env'|grep operational|head -1|awk '{print $1}') &> /dev/null
-    if [ -z $env ]; then
-        error "No operational environment detected in Fuel"
-    fi
-    env_id="${FUEL_ENV:-$env}"
-
-    # Check if controller is alive (online='True')
-    controller_ip=$(sshpass -p r00tme ssh 2>/dev/null $ssh_options root@${installer_ip} \
-        "fuel node --env ${env_id} | grep controller | grep 'True\|  1' | awk -F\| '{print \$5}' | head -1" | \
-        sed 's/ //g') &> /dev/null
+        # retrieve the controller VIP
+        controller_ip=$(ssh 2>/dev/null ${ssh_options} ubuntu@${installer_ip} \
+            "sudo salt --out txt 'ctl01*' pillar.get _param:openstack_control_address | awk '{print \$2}'" | \
+            sed 's/ //g') &> /dev/null
 
-    if [ -z $controller_ip ]; then
-        error "The controller $controller_ip is not up. Please check that the POD is correctly deployed."
-    fi
+        info "Fetching rc file from controller $controller_ip..."
+        ssh ${ssh_options} ubuntu@${controller_ip} "sudo cat /root/keystonercv3" > $dest_path
+    else
+        #ip_fuel="10.20.0.2"
+        env=$(sshpass -p r00tme ssh 2>/dev/null ${ssh_options} root@${installer_ip} \
+            'fuel env'|grep operational|head -1|awk '{print $1}') &> /dev/null
+        if [ -z $env ]; then
+            error "No operational environment detected in Fuel"
+        fi
+        env_id="${FUEL_ENV:-$env}"
 
-    info "Fetching rc file from controller $controller_ip..."
-    sshpass -p r00tme ssh 2>/dev/null $ssh_options root@${installer_ip} \
-        "scp $ssh_options ${controller_ip}:/root/openrc ." &> /dev/null
-    sshpass -p r00tme scp 2>/dev/null $ssh_options root@${installer_ip}:~/openrc $dest_path &> /dev/null
+        # Check if controller is alive (online='True')
+        controller_ip=$(sshpass -p r00tme ssh 2>/dev/null ${ssh_options} root@${installer_ip} \
+            "fuel node --env ${env_id} | grep controller | grep 'True\|  1' | awk -F\| '{print \$5}' | head -1" | \
+            sed 's/ //g') &> /dev/null
 
-    #This file contains the mgmt keystone API, we need the public one for our rc file
-    admin_ip=$(cat $dest_path | grep "OS_AUTH_URL" | sed 's/^.*\=//' | sed "s/^\([\"']\)\(.*\)\1\$/\2/g" | sed s'/\/$//')
-    public_ip=$(sshpass -p r00tme ssh $ssh_options root@${installer_ip} \
-        "ssh ${controller_ip} 'source openrc; openstack endpoint list'" \
-        | grep keystone | grep public | sed 's/ /\n/g' | grep ^http | head -1) &> /dev/null
-        #| grep http | head -1 | cut -d '|' -f 4 | sed 's/v1\/.*/v1\//' | sed 's/ //g') &> /dev/null
-    #NOTE: this is super ugly sed 's/v1\/.*/v1\//'OS_AUTH_URL
-    # but sometimes the output of endpoint-list is like this: http://172.30.9.70:8004/v1/%(tenant_id)s
-    # Fuel virtual need a fix
+        if [ -z $controller_ip ]; then
+            error "The controller $controller_ip is not up. Please check that the POD is correctly deployed."
+        fi
 
+        info "Fetching rc file from controller $controller_ip..."
+        sshpass -p r00tme ssh 2>/dev/null ${ssh_options} root@${installer_ip} \
+            "scp ${ssh_options} ${controller_ip}:/root/openrc ." &> /dev/null
+        sshpass -p r00tme scp 2>/dev/null ${ssh_options} root@${installer_ip}:~/openrc $dest_path &> /dev/null
+    fi
     #convert to v3 URL
     auth_url=$(cat $dest_path|grep AUTH_URL)
     if [[ -z `echo $auth_url |grep v3` ]]; then
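
With the new -s option, fetching credentials from an MCP (Fuel on master) deployment would be invoked roughly as follows; the IP and key path mirror the installer-params.yml defaults above and are shown for illustration only:

    ./utils/fetch_os_creds.sh -d /tmp/openrc -i fuel -a 192.168.10.100 -s /tmp/mcp.rsa
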
index e700e04..77ab784 100755 (executable)
@@ -107,7 +107,6 @@ for version in versions:
         scenario_results = rp_utils.getScenarios(healthcheck,
                                                  installer,
                                                  version)
-
         # get nb of supported architecture (x86, aarch64)
         architectures = rp_utils.getArchitectures(scenario_results)
         logger.info("Supported architectures: {}".format(architectures))
index 6e6585a..0304298 100755 (executable)
@@ -1,4 +1,15 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2017 Orange and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+# SPDX-license-identifier: Apache-2.0
+
 from urllib2 import Request, urlopen, URLError
+from datetime import datetime
 import json
 import jinja2
 import os
@@ -97,7 +108,13 @@ for version in rp_utils.get_config('general.versions'):
                     crit_rate = True
 
                 # Expect that the suite duration is inferior to 30m
-                if result['details']['duration'] < criteria_duration:
+                stop_date = datetime.strptime(result['stop_date'],
+                                              '%Y-%m-%d %H:%M:%S')
+                start_date = datetime.strptime(result['start_date'],
+                                               '%Y-%m-%d %H:%M:%S')
+
+                delta = stop_date - start_date
+                if (delta.total_seconds() < criteria_duration):
                     crit_time = True
 
                 result['criteria'] = {'tests': crit_tests,
index 599a938..0a178ba 100644 (file)
@@ -117,19 +117,29 @@ def getScenarios(case, installer, version):
     url = ("http://" + url_base + "?case=" + case +
            "&period=" + str(period) + "&installer=" + installer +
            "&version=" + version)
-    request = Request(url)
 
     try:
+        request = Request(url)
         response = urlopen(request)
         k = response.read()
         results = json.loads(k)
         test_results = results['results']
-    except URLError as e:
-        print('Got an error code:', e)
+
+        page = results['pagination']['total_pages']
+        if page > 1:
+            test_results = []
+            for i in range(1, page + 1):
+                url_page = url + "&page=" + str(i)
+                request = Request(url_page)
+                response = urlopen(request)
+                k = response.read()
+                results = json.loads(k)
+                test_results += results['results']
+    except URLError as err:
+        print('Got an error code:', err)
 
     if test_results is not None:
         test_results.reverse()
-
         scenario_results = {}
 
         for r in test_results:
@@ -157,7 +167,6 @@ def getScenarioStats(scenario_results):
     return scenario_stats
 
 
-# TODO convergence with above function getScenarios
 def getScenarioStatus(installer, version):
     period = get_config('general.period')
     url_base = get_config('testapi.url')
@@ -213,8 +222,8 @@ def getQtipResults(version, installer):
         k = response.read()
         response.close()
         results = json.loads(k)['results']
-    except URLError as e:
-        print('Got an error code:', e)
+    except URLError as err:
+        print('Got an error code:', err)
 
     result_dict = {}
     if results:
@@ -427,9 +436,9 @@ def export_csv(scenario_file_name, installer, version):
                                     "/functest/scenario_history_" +
                                     installer + ".csv")
     scenario_installer_file = open(scenario_installer_file_name, "a")
-    with open(scenario_file_name, "r") as f:
+    with open(scenario_file_name, "r") as scenario_file:
         scenario_installer_file.write("date,scenario,installer,detail,score\n")
-        for line in f:
+        for line in scenario_file:
             if installer in line:
                 scenario_installer_file.write(line)
         scenario_installer_file.close
index 4a2f23a..8c701c3 100644 (file)
         $stateProvider.
             state('home', {
                 url: '/',
-                templateUrl: '/testapi-ui/components/home/home.html'
+                templateUrl: 'testapi-ui/components/home/home.html'
             }).
             state('about', {
                 url: '/about',
-                templateUrl: '/testapi-ui/components/about/about.html'
+                templateUrl: 'testapi-ui/components/about/about.html'
             }).
             state('guidelines', {
                 url: '/guidelines',
-                templateUrl: '/testapi-ui/components/guidelines/guidelines.html',
+                templateUrl: 'testapi-ui/components/guidelines/guidelines.html',
                 controller: 'GuidelinesController as ctrl'
             }).
             state('communityResults', {
                 url: '/community_results',
-                templateUrl: '/testapi-ui/components/results/results.html',
+                templateUrl: 'testapi-ui/components/results/results.html',
                 controller: 'ResultsController as ctrl'
             }).
             state('userResults', {
-                url: '/user_results',
+                url: 'user_results',
                 templateUrl: '/testapi-ui/components/results/results.html',
                 controller: 'ResultsController as ctrl'
             }).
             state('resultsDetail', {
                 url: '/results/:testID',
-                templateUrl: '/testapi-ui/components/results-report' +
+                templateUrl: 'testapi-ui/components/results-report' +
                              '/resultsReport.html',
                 controller: 'ResultsReportController as ctrl'
             }).
             }).
             state('authFailure', {
                 url: '/auth_failure',
-                templateUrl: '/testapi-ui/components/home/home.html',
+                templateUrl: 'testapi-ui/components/home/home.html',
                 controller: 'AuthFailureController as ctrl'
             }).
             state('logout', {
                 url: '/logout',
-                templateUrl: '/testapi-ui/components/logout/logout.html',
+                templateUrl: 'testapi-ui/components/logout/logout.html',
                 controller: 'LogoutController as ctrl'
             }).
             state('userVendors', {
index 93a549a..9e3540d 100644 (file)
             ctrl.resultsRequest =
                 $http.get(content_url).success(function (data) {
                     ctrl.data = data;
-                    ctrl.totalItems = 20 // ctrl.data.pagination.total_pages * ctrl.itemsPerPage;
-                    ctrl.currentPage = 1 // ctrl.data.pagination.current_page;
+                    ctrl.totalItems = ctrl.data.pagination.total_pages * ctrl.itemsPerPage;
+                    ctrl.currentPage = ctrl.data.pagination.current_page;
                 }).error(function (error) {
                     ctrl.data = null;
                     ctrl.totalItems = 0;
index 5d48c7b..9fdd85f 100644 (file)
@@ -1 +1 @@
-{"testapiApiUrl": "http://localhost:8000/api/v1"}
+{"testapiApiUrl": "http://testresults.opnfv.org/test/api/v1"}
index 692e488..dad59d2 100644 (file)
@@ -8,8 +8,12 @@ dbname = test_results_collection
 
 [api]
 # Listening port
-url = http://localhost:8000/api/v1
+url = http://testresults.opnfv.org/test/api/v1
 port = 8000
+
+# Number of results for one page (integer value)
+#results_per_page = 20
+
 # With debug_on set to true, error traces will be shown in HTTP responses
 debug = True
 authenticate = False
@@ -18,7 +22,7 @@ authenticate = False
 base_url = http://localhost:8000
 
 [ui]
-url = http://localhost:8000
+url = http://testresults.opnfv.org/test
 
 [osid]
 
@@ -41,7 +45,7 @@ openid_ns = http://specs.openid.net/auth/2.0
 # Return endpoint in Refstack's API. Value indicating the endpoint
 # where the user should be returned to after signing in. Openstack Id
 # Idp only supports HTTPS address types. (string value)
-openid_return_to = /api/v1/auth/signin_return
+openid_return_to = v1/auth/signin_return
 
 # Claimed identifier. This value must be set to
 # "http://specs.openid.net/auth/2.0/identifier_select". or to user
index b8c4fb4..4576d9b 100644 (file)
@@ -40,13 +40,13 @@ if __name__ == '__main__':
                         type=str,
                         required=False,
                         default=('http://testresults.opnfv.org'
-                                 '/test/swagger/spec.json'),
+                                 '/test/swagger/resources.json'),
                         help='Resource Listing Spec File')
     parser.add_argument('-au', '--api-declaration-url',
                         type=str,
                         required=False,
                         default=('http://testresults.opnfv.org'
-                                 '/test/swagger/spec'),
+                                 '/test/swagger/APIs'),
                         help='API Declaration Spec File')
     parser.add_argument('-o', '--output-directory',
                         required=True,
index 46765ff..f73c0ab 100644 (file)
@@ -17,6 +17,7 @@ class Config(object):
     def __init__(self):
         self.file = self.CONFIG if self.CONFIG else self._default_config()
         self._parse()
+        self._parse_per_page()
         self.static_path = os.path.join(
             os.path.dirname(os.path.normpath(__file__)),
             os.pardir,
@@ -37,6 +38,10 @@ class Config(object):
         [setattr(self, '{}_{}'.format(section, k), self._parse_value(v))
          for k, v in config.items(section)]
 
+    def _parse_per_page(self):
+        if not hasattr(self, 'api_results_per_page'):
+            self.api_results_per_page = 20
+
     @staticmethod
     def _parse_value(value):
         try:
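
With this fallback, pagination works even when the option is absent; to change the page size, uncomment the corresponding key in the [api] section of etc/config.ini shown earlier (value illustrative):

    results_per_page = 50
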
index 2fc31ca..0234c8a 100644 (file)
@@ -104,17 +104,50 @@ class GenericApiHandler(web.RequestHandler):
         if query is None:
             query = {}
         data = []
+        sort = kwargs.get('sort')
+        page = kwargs.get('page', 0)
+        last = kwargs.get('last', 0)
+        per_page = kwargs.get('per_page', 0)
+
         cursor = self._eval_db(self.table, 'find', query)
-        if 'sort' in kwargs:
-            cursor = cursor.sort(kwargs.get('sort'))
-        if 'last' in kwargs:
-            cursor = cursor.limit(kwargs.get('last'))
+        records_count = yield cursor.count()
+        records_nr = records_count
+        if (records_count > last) and (last > 0):
+            records_nr = last
+
+        pipelines = list()
+        if query:
+            pipelines.append({'$match': query})
+        if sort:
+            pipelines.append({'$sort': sort})
+
+        if page > 0:
+            total_pages, remainder = divmod(records_nr, per_page)
+            if remainder > 0:
+                total_pages += 1
+            pipelines.append({'$skip': (page - 1) * per_page})
+            pipelines.append({'$limit': per_page})
+        else:
+            pipelines.append({'$limit': records_nr})
+
+        cursor = self._eval_db(self.table,
+                               'aggregate',
+                               pipelines,
+                               allowDiskUse=True)
+
         while (yield cursor.fetch_next):
             data.append(self.format_data(cursor.next_object()))
         if res_op is None:
             res = {self.table: data}
         else:
             res = res_op(data, *args)
+        if page:
+            res.update({
+                'pagination': {
+                    'current_page': page,
+                    'total_pages': total_pages
+                }
+            })
         self.finish_request(res)
 
     @web.asynchronous
index 824a89e..1773216 100644 (file)
@@ -11,12 +11,15 @@ from datetime import timedelta
 
 from bson import objectid
 
+from opnfv_testapi.common import config
 from opnfv_testapi.common import message
 from opnfv_testapi.common import raises
 from opnfv_testapi.resources import handlers
 from opnfv_testapi.resources import result_models
 from opnfv_testapi.tornado_swagger import swagger
 
+CONF = config.Config()
+
 
 class GenericResultHandler(handlers.GenericApiHandler):
     def __init__(self, application, request, **kwargs):
@@ -135,22 +138,28 @@ class ResultsCLHandler(GenericResultHandler):
             @type last: L{string}
             @in last: query
             @required last: False
+            @param page: which page to list
+            @type page: L{int}
+            @in page: query
+            @required page: False
             @param trust_indicator: must be float
             @type trust_indicator: L{float}
             @in trust_indicator: query
             @required trust_indicator: False
         """
+        limitations = {'sort': {'start_date': -1}}
         last = self.get_query_argument('last', 0)
         if last is not None:
             last = self.get_int('last', last)
+            limitations.update({'last': last})
 
-        page = self.get_query_argument('page', 0)
-        if page:
-            last = 20
+        page = self.get_query_argument('page', None)
+        if page is not None:
+            page = self.get_int('page', page)
+            limitations.update({'page': page,
+                                'per_page': CONF.api_results_per_page})
 
-        self._list(query=self.set_query(),
-                   sort=[('start_date', -1)],
-                   last=last)
+        self._list(query=self.set_query(), **limitations)
 
     @swagger.operation(nickname="createTestResult")
     def post(self):
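
Client-side, the new parameter can be exercised directly against the results endpoint; a hypothetical paginated query using the base URL from config.json above:

    curl "http://testresults.opnfv.org/test/api/v1/results?page=2"

When page is given, the response includes a pagination object with current_page and total_pages, as assembled in handlers.py above.
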
index ef74a08..adaf6f7 100644 (file)
@@ -20,38 +20,52 @@ def thread_execute(method, *args, **kwargs):
 class MemCursor(object):
     def __init__(self, collection):
         self.collection = collection
-        self.count = len(self.collection)
+        self.length = len(self.collection)
         self.sorted = []
 
     def _is_next_exist(self):
-        return self.count != 0
+        return self.length != 0
 
     @property
     def fetch_next(self):
         return thread_execute(self._is_next_exist)
 
     def next_object(self):
-        self.count -= 1
+        self.length -= 1
         return self.collection.pop()
 
     def sort(self, key_or_list):
-        key = key_or_list[0][0]
-        if key_or_list[0][1] == -1:
-            reverse = True
-        else:
-            reverse = False
+        for k, v in key_or_list.iteritems():
+            if v == -1:
+                reverse = True
+            else:
+                reverse = False
 
-        if key_or_list is not None:
             self.collection = sorted(self.collection,
-                                     key=itemgetter(key), reverse=reverse)
+                                     key=itemgetter(k), reverse=reverse)
         return self
 
     def limit(self, limit):
         if limit != 0 and limit < len(self.collection):
-            self.collection = self.collection[0:limit]
-            self.count = limit
+            self.collection = self.collection[0: limit]
+            self.length = limit
+        return self
+
+    def skip(self, skip):
+        if skip < self.length and (skip > 0):
+            self.collection = self.collection[self.length - skip: -1]
+            self.length -= skip
+        elif skip >= self.length:
+            self.collection = []
+            self.length = 0
         return self
 
+    def _count(self):
+        return self.length
+
+    def count(self):
+        return thread_execute(self._count)
+
 
 class MemDb(object):
 
@@ -187,6 +201,27 @@ class MemDb(object):
     def find(self, *args):
         return MemCursor(self._find(*args))
 
+    def _aggregate(self, *args, **kwargs):
+        res = self.contents
+        print args
+        for arg in args[0]:
+            for k, v in arg.iteritems():
+                if k == '$match':
+                    res = self._find(v)
+        cursor = MemCursor(res)
+        for arg in args[0]:
+            for k, v in arg.iteritems():
+                if k == '$sort':
+                    cursor = cursor.sort(v)
+                elif k == '$skip':
+                    cursor = cursor.skip(v)
+                elif k == '$limit':
+                    cursor = cursor.limit(v)
+        return cursor
+
+    def aggregate(self, *args, **kwargs):
+        return self._aggregate(*args, **kwargs)
+
     def _update(self, spec, document, check_keys=True):
         updated = False