Merge "Make arm-virtual1 pod not available for CI jobs"
author    Aric Gardner <agardner@linuxfoundation.org>
          Thu, 20 Apr 2017 15:25:24 +0000 (15:25 +0000)
committer Gerrit Code Review <gerrit@opnfv.org>
          Thu, 20 Apr 2017 15:25:24 +0000 (15:25 +0000)
28 files changed:
jjb/apex/apex-iso-verify.sh [new file with mode: 0755]
jjb/apex/apex-upload-artifact.sh
jjb/apex/apex.yml
prototypes/xci/file/exports [deleted file]
prototypes/xci/file/ha/flavor-vars.yml
prototypes/xci/file/ha/openstack_user_config.yml
prototypes/xci/file/mini/configure-targethosts.yml [deleted file]
prototypes/xci/file/mini/flavor-vars.yml
prototypes/xci/file/mini/openstack_user_config.yml
prototypes/xci/file/modules [deleted file]
prototypes/xci/file/noha/configure-targethosts.yml [deleted file]
prototypes/xci/file/noha/flavor-vars.yml
prototypes/xci/file/noha/openstack_user_config.yml
prototypes/xci/playbooks/configure-localhost.yml
prototypes/xci/playbooks/configure-targethosts.yml [moved from prototypes/xci/file/ha/configure-targethosts.yml with 62% similarity]
prototypes/xci/playbooks/roles/configure-network/tasks/main.yml
prototypes/xci/playbooks/roles/configure-nfs/tasks/main.yml
prototypes/xci/playbooks/roles/synchronize-time/tasks/main.yml [new file with mode: 0644]
prototypes/xci/template/compute.interface.j2
prototypes/xci/template/controller.interface.j2
prototypes/xci/template/opnfv.interface.j2
utils/test/testapi/opnfv_testapi/common/check.py [new file with mode: 0644]
utils/test/testapi/opnfv_testapi/resources/handlers.py
utils/test/testapi/opnfv_testapi/resources/pod_handlers.py
utils/test/testapi/opnfv_testapi/resources/project_handlers.py
utils/test/testapi/opnfv_testapi/resources/result_handlers.py
utils/test/testapi/opnfv_testapi/resources/scenario_handlers.py
utils/test/testapi/opnfv_testapi/resources/testcase_handlers.py

diff --git a/jjb/apex/apex-iso-verify.sh b/jjb/apex/apex-iso-verify.sh
new file mode 100755 (executable)
index 0000000..900a3eb
--- /dev/null
@@ -0,0 +1,102 @@
+#!/bin/bash
+set -o errexit
+set -o nounset
+set -o pipefail
+
+# log info to console
+echo "Starting the Apex iso verify."
+echo "--------------------------------------------------------"
+echo
+
+source $BUILD_DIRECTORY/../opnfv.properties
+
+if ! rpm -q virt-install > /dev/null; then
+  sudo yum -y install virt-install
+fi
+
+# define a clean function
+rm_apex_iso_verify () {
+if sudo virsh list --all | grep apex-iso-verify | grep running; then
+    sudo virsh destroy apex-iso-verify
+fi
+if sudo virsh list --all | grep apex-iso-verify; then
+    sudo virsh undefine apex-iso-verify
+fi
+}
+
+# Make sure a pre-existing iso-verify isn't there
+rm_apex_iso_verify
+
+# run an install from the iso
+# This streams a serial console to tcp port 3737 on localhost
+sudo virt-install -n apex-iso-verify -r 4096 --vcpus 4 --os-variant=rhel7 \
+ --accelerate -v --noautoconsole --nographics \
+ --disk path=/var/lib/libvirt/images/apex-iso-verify.qcow2,size=30,format=qcow2 \
+ -l $BUILD_DIRECTORY/release/OPNFV-CentOS-7-x86_64-$OPNFV_ARTIFACT_VERSION.iso \
+ --extra-args 'console=ttyS0 console=ttyS0,115200n8 serial inst.ks=file:/iso-verify.ks inst.stage2=hd:LABEL=OPNFV\x20CentOS\x207\x20x86_64:/' \
+ --initrd-inject $BUILD_DIRECTORY/../ci/iso-verify.ks \
+ --serial tcp,host=:3737,protocol=raw
+
+# Attach to TCP port 3737 and echo the output to stdout.
+# Watch for a 5-minute timeout, a power-off message, or a TCP disconnect.
+python << EOP
+#!/usr/bin/env python
+
+import sys
+import socket
+from time import sleep
+from time import time
+
+
+TCP_IP = '127.0.0.1'
+TCP_PORT = 3737
+BUFFER_SIZE = 1024
+
+try:
+    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+    s.connect((TCP_IP, TCP_PORT))
+except Exception, e:
+    print "Failed to connect to the iso-verofy vm's serial console"
+    print "this probably means that the VM failed to start"
+    raise e
+
+activity = time()
+data = s.recv(BUFFER_SIZE)
+last_data = data
+while time() - activity < 300:
+    try:
+        if data != last_data:
+            activity = time()
+        last_data = data
+        data = s.recv(BUFFER_SIZE)
+        sys.stdout.write(data)
+        if 'Powering off' in data:
+            break
+        sleep(.5)
+    except socket.error, e:
+        # For now, assume the connection was closed, which is good:
+        # it means the VM finished installing.
+        # Print the error output just in case we need to debug.
+        print "VM console connection lost: %s" % e
+        break
+s.close()
+
+if time() - activity > 300:
+    print "failing due to console inactivity"
+    exit(1)
+else:
+    print "Success!"
+EOP
+
+# save the python return code for after cleanup
+python_rc=$?
+
+# clean up
+rm_apex_iso_verify
+
+echo
+echo "--------------------------------------------------------"
+echo "Done!"
+
+# Exit with the RC of the Python job
+exit $python_rc
diff --git a/jjb/apex/apex-upload-artifact.sh b/jjb/apex/apex-upload-artifact.sh
index c2de7d7..e93a46b 100755 (executable)
@@ -3,8 +3,13 @@ set -o errexit
 set -o nounset
 set -o pipefail
 
+if [ -z "$ARTIFACT_TYPE" ]; then
+  echo "ERROR: ARTIFACT_TYPE not provided...exiting"
+  exit 1
+fi
+
 # log info to console
-echo "Uploading the Apex artifact. This could take some time..."
+echo "Uploading the Apex ${ARTIFACT_TYPE} artifact. This could take some time..."
 echo "--------------------------------------------------------"
 echo
 
@@ -18,7 +23,7 @@ echo "Cloning releng repository..."
 [ -d releng ] && rm -rf releng
 git clone https://gerrit.opnfv.org/gerrit/releng $WORKSPACE/releng/ &> /dev/null
#this is where we import the signing key
-if [ -f $WORKSPACE/releng/utils/gpg_import_key.sh ]; then 
+if [ -f $WORKSPACE/releng/utils/gpg_import_key.sh ]; then
   source $WORKSPACE/releng/utils/gpg_import_key.sh
 fi
 
@@ -84,21 +89,25 @@ uploadsnap () {
   echo "Upload complete for Snapshot"
 }
 
-if echo $WORKSPACE | grep promote > /dev/null; then
+if [ "$ARTIFACT_TYPE" == 'snapshot' ]; then
   uploadsnap
 elif gpg2 --list-keys | grep "opnfv-helpdesk@rt.linuxfoundation.org"; then
   echo "Signing Key avaliable"
-  signiso
-  uploadiso
-  signrpm
-  uploadrpm
+  if [ "$ARTIFACT_TYPE" == 'iso' ]; then
+    signiso
+    uploadiso
+  fi
+  if [ "$ARTIFACT_TYPE" == 'rpm' ]; then
+    signrpm
+    uploadrpm
+  fi
 else
-  uploadiso
-  uploadrpm
+  if [ "$ARTIFACT_TYPE" == 'iso' ]; then uploadiso; fi
+  if [ "$ARTIFACT_TYPE" == 'rpm' ]; then uploadrpm; fi
 fi
 
 echo
 echo "--------------------------------------------------------"
 echo "Done!"
-echo "ISO Artifact is available as http://$GS_URL/opnfv-$OPNFV_ARTIFACT_VERSION.iso"
-echo "RPM Artifact is available as http://$GS_URL/$(basename $OPNFV_RPM_URL)"
+if [ "$ARTIFACT_TYPE" == 'iso' ]; then echo "ISO Artifact is available as http://$GS_URL/opnfv-$OPNFV_ARTIFACT_VERSION.iso"; fi
+if [ "$ARTIFACT_TYPE" == 'rpm' ]; then echo "RPM Artifact is available as http://$GS_URL/$(basename $OPNFV_RPM_URL)"; fi
diff --git a/jjb/apex/apex.yml b/jjb/apex/apex.yml
index e7982ba..3f5ef5a 100644 (file)
@@ -12,6 +12,7 @@
         - 'apex-daily-{stream}'
         - 'apex-csit-promote-daily-{stream}'
         - 'apex-fdio-promote-daily-{stream}'
+        - 'apex-verify-iso-{stream}'
 
     # stream:    branch with - in place of / (eg. stable-arno)
     # branch:    branch (eg. stable/arno)
             git-revision: false
             same-node: true
             block: true
+        - inject:
+           properties-content: ARTIFACT_TYPE=rpm
+        - 'apex-upload-artifact'
+        - trigger-builds:
+          - project: 'apex-verify-iso-{stream}'
+            predefined-parameters: |
+              BUILD_DIRECTORY=apex-build-{stream}/.build
+            git-revision: false
+            block: true
+            same-node: true
+        - inject:
+           properties-content: ARTIFACT_TYPE=iso
         - 'apex-upload-artifact'
 
+# ISO verify job
+- job-template:
+    name: 'apex-verify-iso-{stream}'
+
+    # Job template for builds
+    #
+    # Required Variables:
+    #     stream:    branch with - in place of / (eg. stable)
+    #     branch:    branch (eg. stable)
+    node: '{daily-slave}'
+
+    disabled: false
+
+    concurrent: true
+
+    parameters:
+        - project-parameter:
+            project: '{project}'
+            branch: '{branch}'
+        - apex-parameter:
+            gs-pathname: '{gs-pathname}'
+        - string:
+            name: GIT_BASE
+            default: https://gerrit.opnfv.org/gerrit/$PROJECT
+            description: "Used for overriding the GIT URL coming from parameters macro."
+
+    scm:
+        - git-scm
+
+    properties:
+        - logrotate-default
+        - build-blocker:
+            use-build-blocker: true
+            block-level: 'NODE'
+            blocking-jobs:
+                - 'apex-deploy.*'
+        - throttle:
+            max-per-node: 1
+            max-total: 10
+            option: 'project'
+
+    builders:
+        - 'apex-iso-verify'
+
 - job-template:
     name: 'apex-deploy-virtual-{scenario}-{stream}'
 
             same-node: true
         - shell:
             !include-raw-escape: ./apex-snapshot-create.sh
-        - shell:
-            !include-raw-escape: ./apex-upload-artifact.sh
+        - inject:
+           properties-content: ARTIFACT_TYPE=snapshot
+        - 'apex-upload-artifact'
 
 # FDIO promote
 - job-template:
             same-node: true
         - shell:
             !include-raw-escape: ./apex-snapshot-create.sh
-        - shell:
-            !include-raw-escape: ./apex-upload-artifact.sh
+        - inject:
+           properties-content: ARTIFACT_TYPE=snapshot
+        - 'apex-upload-artifact'
 
 - job-template:
     name: 'apex-gs-clean-{stream}'
         - shell:
             !include-raw: ./apex-workspace-cleanup.sh
 
+- builder:
+    name: 'apex-iso-verify'
+    builders:
+        - shell:
+            !include-raw: ./apex-iso-verify.sh
+
+
 - builder:
     name: 'apex-upload-artifact'
     builders:
diff --git a/prototypes/xci/file/exports b/prototypes/xci/file/exports
deleted file mode 100644 (file)
index af64d61..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-# /etc/exports: the access control list for filesystems which may be exported
-#               to NFS clients.  See exports(5).
-#
-# Example for NFSv2 and NFSv3:
-# /srv/homes       hostname1(rw,sync,no_subtree_check) hostname2(ro,sync,no_subtree_check)
-#
-# Example for NFSv4:
-# /srv/nfs4        gss/krb5i(rw,sync,fsid=0,crossmnt,no_subtree_check)
-# /srv/nfs4/homes  gss/krb5i(rw,sync,no_subtree_check)
-#
-# glance images are stored on compute host and made available to image hosts via nfs
-# see image_hosts section in openstack_user_config.yml for details
-/images         *(rw,sync,no_subtree_check,no_root_squash)
-
diff --git a/prototypes/xci/file/ha/flavor-vars.yml b/prototypes/xci/file/ha/flavor-vars.yml
index 3cd1d62..167502c 100644 (file)
@@ -1,37 +1,39 @@
 ---
 host_info: {
     'opnfv': {
-        'MGMT_IP': '172.29.236.10',
         'VLAN_IP': '192.168.122.2',
+        'MGMT_IP': '172.29.236.10',
+        'VXLAN_IP': '172.29.240.10',
         'STORAGE_IP': '172.29.244.10'
     },
     'controller00': {
-        'MGMT_IP': '172.29.236.11',
         'VLAN_IP': '192.168.122.3',
+        'MGMT_IP': '172.29.236.11',
+        'VXLAN_IP': '172.29.240.11',
         'STORAGE_IP': '172.29.244.11'
     },
     'controller01': {
-        'MGMT_IP': '172.29.236.12',
         'VLAN_IP': '192.168.122.4',
+        'MGMT_IP': '172.29.236.12',
+        'VXLAN_IP': '172.29.240.12',
         'STORAGE_IP': '172.29.244.12'
     },
     'controller02': {
-        'MGMT_IP': '172.29.236.13',
         'VLAN_IP': '192.168.122.5',
+        'MGMT_IP': '172.29.236.13',
+        'VXLAN_IP': '172.29.240.13',
         'STORAGE_IP': '172.29.244.13'
     },
     'compute00': {
-        'MGMT_IP': '172.29.236.14',
         'VLAN_IP': '192.168.122.6',
-        'STORAGE_IP': '172.29.244.14',
-        'VLAN_IP_SECOND': '173.29.241.1',
-        'VXLAN_IP': '172.29.240.14'
+        'MGMT_IP': '172.29.236.14',
+        'VXLAN_IP': '172.29.240.14',
+        'STORAGE_IP': '172.29.244.14'
     },
     'compute01': {
-        'MGMT_IP': '172.29.236.15',
         'VLAN_IP': '192.168.122.7',
-        'STORAGE_IP': '172.29.244.15',
-        'VLAN_IP_SECOND': '173.29.241.2',
-        'VXLAN_IP': '172.29.240.15'
+        'MGMT_IP': '172.29.236.15',
+        'VXLAN_IP': '172.29.240.15',
+        'STORAGE_IP': '172.29.244.15'
     }
 }
diff --git a/prototypes/xci/file/ha/openstack_user_config.yml b/prototypes/xci/file/ha/openstack_user_config.yml
index 0c43702..09fb734 100644 (file)
@@ -138,7 +138,7 @@ image_hosts:
     container_vars:
       limit_container_types: glance
       glance_nfs_client:
-        - server: "172.29.244.15"
+        - server: "172.29.244.14"
           remote_path: "/images"
           local_path: "/var/lib/glance/images"
           type: "nfs"
@@ -148,7 +148,7 @@ image_hosts:
     container_vars:
       limit_container_types: glance
       glance_nfs_client:
-        - server: "172.29.244.15"
+        - server: "172.29.244.14"
           remote_path: "/images"
           local_path: "/var/lib/glance/images"
           type: "nfs"
@@ -158,7 +158,7 @@ image_hosts:
     container_vars:
       limit_container_types: glance
       glance_nfs_client:
-        - server: "172.29.244.15"
+        - server: "172.29.244.14"
           remote_path: "/images"
           local_path: "/var/lib/glance/images"
           type: "nfs"
@@ -218,28 +218,37 @@ storage_hosts:
     container_vars:
       cinder_backends:
         limit_container_types: cinder_volume
-        lvm:
-          volume_group: cinder-volumes
-          volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
-          volume_backend_name: LVM_iSCSI
-          iscsi_ip_address: "172.29.244.11"
+        nfs_volume:
+          volume_backend_name: NFS_VOLUME1
+          volume_driver: cinder.volume.drivers.nfs.NfsDriver
+          nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+          nfs_shares_config: /etc/cinder/nfs_shares
+          shares:
+            - ip: "172.29.244.14"
+              share: "/volumes"
   controller01:
     ip: 172.29.236.12
     container_vars:
       cinder_backends:
         limit_container_types: cinder_volume
-        lvm:
-          volume_group: cinder-volumes
-          volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
-          volume_backend_name: LVM_iSCSI
-          iscsi_ip_address: "172.29.244.12"
+        nfs_volume:
+          volume_backend_name: NFS_VOLUME1
+          volume_driver: cinder.volume.drivers.nfs.NfsDriver
+          nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+          nfs_shares_config: /etc/cinder/nfs_shares
+          shares:
+            - ip: "172.29.244.14"
+              share: "/volumes"
   controller02:
     ip: 172.29.236.13
     container_vars:
       cinder_backends:
         limit_container_types: cinder_volume
-        lvm:
-          volume_group: cinder-volumes
-          volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
-          volume_backend_name: LVM_iSCSI
-          iscsi_ip_address: "172.29.244.13"
+        nfs_volume:
+          volume_backend_name: NFS_VOLUME1
+          volume_driver: cinder.volume.drivers.nfs.NfsDriver
+          nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+          nfs_shares_config: /etc/cinder/nfs_shares
+          shares:
+            - ip: "172.29.244.14"
+              share: "/volumes"
diff --git a/prototypes/xci/file/mini/configure-targethosts.yml b/prototypes/xci/file/mini/configure-targethosts.yml
deleted file mode 100644 (file)
index 395f44a..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
----
-- hosts: all
-  remote_user: root
-  tasks:
-    - name: add public key to host
-      copy:
-        src: ../file/authorized_keys
-        dest: /root/.ssh/authorized_keys
-    - name: configure modules
-      copy:
-        src: ../file/modules
-        dest: /etc/modules
-
-- hosts: controller
-  remote_user: root
-  vars_files:
-    - ../var/{{ ansible_os_family }}.yml
-    - ../var/flavor-vars.yml
-  roles:
-    # TODO: this only works for ubuntu/xenial and need to be adjusted for other distros
-    - { role: configure-network, when: ansible_distribution_release == "xenial", src: "../template/controller.interface.j2", dest: "/etc/network/interfaces" }
-
-- hosts: compute
-  remote_user: root
-  vars_files:
-    - ../var/{{ ansible_os_family }}.yml
-    - ../var/flavor-vars.yml
-  roles:
-    # TODO: this only works for ubuntu/xenial and need to be adjusted for other distros
-    - { role: configure-network, when: ansible_distribution_release == "xenial", src: "../template/compute.interface.j2", dest: "/etc/network/interfaces" }
-  # TODO: this role is for configuring NFS on xenial and adjustment needed for other distros
-    - role: configure-nfs
diff --git a/prototypes/xci/file/mini/flavor-vars.yml b/prototypes/xci/file/mini/flavor-vars.yml
index 01fba71..0d446ba 100644 (file)
@@ -1,19 +1,20 @@
 ---
 host_info: {
     'opnfv': {
-        'MGMT_IP': '172.29.236.10',
         'VLAN_IP': '192.168.122.2',
+        'MGMT_IP': '172.29.236.10',
+        'VXLAN_IP': '172.29.240.10',
         'STORAGE_IP': '172.29.244.10'
     },
     'controller00': {
-        'MGMT_IP': '172.29.236.11',
         'VLAN_IP': '192.168.122.3',
+        'MGMT_IP': '172.29.236.11',
+        'VXLAN_IP': '172.29.240.11',
         'STORAGE_IP': '172.29.244.11'
     },
     'compute00': {
-        'MGMT_IP': '172.29.236.12',
         'VLAN_IP': '192.168.122.4',
-        'VLAN_IP_SECOND': '173.29.241.1',
+        'MGMT_IP': '172.29.236.12',
         'VXLAN_IP': '172.29.240.12',
         'STORAGE_IP': '172.29.244.12'
     },
diff --git a/prototypes/xci/file/mini/openstack_user_config.yml b/prototypes/xci/file/mini/openstack_user_config.yml
index 70429ce..f9ccee2 100644 (file)
@@ -160,8 +160,11 @@ storage_hosts:
     container_vars:
       cinder_backends:
         limit_container_types: cinder_volume
-        lvm:
-          volume_group: cinder-volumes
-          volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
-          volume_backend_name: LVM_iSCSI
-          iscsi_ip_address: "172.29.244.11"
+        nfs_volume:
+          volume_backend_name: NFS_VOLUME1
+          volume_driver: cinder.volume.drivers.nfs.NfsDriver
+          nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+          nfs_shares_config: /etc/cinder/nfs_shares
+          shares:
+            - ip: "172.29.244.12"
+              share: "/volumes"
diff --git a/prototypes/xci/file/modules b/prototypes/xci/file/modules
deleted file mode 100644 (file)
index 60a517f..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-# /etc/modules: kernel modules to load at boot time.
-#
-# This file contains the names of kernel modules that should be loaded
-# at boot time, one per line. Lines beginning with "#" are ignored.
-# Parameters can be specified after the module name.
-
-bonding
-8021q
diff --git a/prototypes/xci/file/noha/configure-targethosts.yml b/prototypes/xci/file/noha/configure-targethosts.yml
deleted file mode 100644 (file)
index 6dc147f..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
----
-- hosts: all
-  remote_user: root
-  tasks:
-    - name: add public key to host
-      copy:
-        src: ../file/authorized_keys
-        dest: /root/.ssh/authorized_keys
-    - name: configure modules
-      copy:
-        src: ../file/modules
-        dest: /etc/modules
-
-- hosts: controller
-  remote_user: root
-  vars_files:
-    - ../var/{{ ansible_os_family }}.yml
-    - ../var/flavor-vars.yml
-  roles:
-    # TODO: this only works for ubuntu/xenial and need to be adjusted for other distros
-    - { role: configure-network, when: ansible_distribution_release == "xenial", src: "../template/controller.interface.j2", dest: "/etc/network/interfaces" }
-
-- hosts: compute
-  remote_user: root
-  vars_files:
-    - ../var/{{ ansible_os_family }}.yml
-    - ../var/flavor-vars.yml
-  roles:
-    # TODO: this only works for ubuntu/xenial and need to be adjusted for other distros
-    - { role: configure-network, when: ansible_distribution_release == "xenial", src: "../template/compute.interface.j2", dest: "/etc/network/interfaces" }
-
-- hosts: compute01
-  remote_user: root
-  # TODO: this role is for configuring NFS on xenial and adjustment needed for other distros
-  roles:
-    - role: configure-nfs
diff --git a/prototypes/xci/file/noha/flavor-vars.yml b/prototypes/xci/file/noha/flavor-vars.yml
index 7f52d34..3c69a34 100644 (file)
@@ -1,26 +1,26 @@
 ---
 host_info: {
     'opnfv': {
-        'MGMT_IP': '172.29.236.10',
         'VLAN_IP': '192.168.122.2',
+        'MGMT_IP': '172.29.236.10',
+        'VXLAN_IP': '172.29.240.10',
         'STORAGE_IP': '172.29.244.10'
     },
     'controller00': {
-        'MGMT_IP': '172.29.236.11',
         'VLAN_IP': '192.168.122.3',
+        'MGMT_IP': '172.29.236.11',
+        'VXLAN_IP': '172.29.240.11',
         'STORAGE_IP': '172.29.244.11'
     },
     'compute00': {
-        'MGMT_IP': '172.29.236.12',
         'VLAN_IP': '192.168.122.4',
-        'VLAN_IP_SECOND': '173.29.241.1',
+        'MGMT_IP': '172.29.236.12',
         'VXLAN_IP': '172.29.240.12',
         'STORAGE_IP': '172.29.244.12'
     },
     'compute01': {
-        'MGMT_IP': '172.29.236.13',
         'VLAN_IP': '192.168.122.5',
-        'VLAN_IP_SECOND': '173.29.241.2',
+        'MGMT_IP': '172.29.236.13',
         'VXLAN_IP': '172.29.240.13',
         'STORAGE_IP': '172.29.244.13'
     }
diff --git a/prototypes/xci/file/noha/openstack_user_config.yml b/prototypes/xci/file/noha/openstack_user_config.yml
index 05de6a9..fb12655 100644 (file)
@@ -118,7 +118,7 @@ image_hosts:
     container_vars:
       limit_container_types: glance
       glance_nfs_client:
-        - server: "172.29.244.13"
+        - server: "172.29.244.12"
           remote_path: "/images"
           local_path: "/var/lib/glance/images"
           type: "nfs"
@@ -162,8 +162,11 @@ storage_hosts:
     container_vars:
       cinder_backends:
         limit_container_types: cinder_volume
-        lvm:
-          volume_group: cinder-volumes
-          volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
-          volume_backend_name: LVM_iSCSI
-          iscsi_ip_address: "172.29.244.11"
+        nfs_volume:
+          volume_backend_name: NFS_VOLUME1
+          volume_driver: cinder.volume.drivers.nfs.NfsDriver
+          nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+          nfs_shares_config: /etc/cinder/nfs_shares
+          shares:
+            - ip: "172.29.244.12"
+              share: "/volumes"
diff --git a/prototypes/xci/playbooks/configure-localhost.yml b/prototypes/xci/playbooks/configure-localhost.yml
index 2a55964..34b974c 100644 (file)
         path: "{{LOG_PATH}}"
         state: directory
         recurse: no
-    # when the deployment is not aio, we use playbook, configure-targethosts.yml, to configure all the hosts
-    - name: copy multihost playbook
-      copy:
-        src: "{{XCI_FLAVOR_ANSIBLE_FILE_PATH}}/configure-targethosts.yml"
-        dest: "{{OPNFV_RELENG_PATH}}/prototypes/xci/playbooks"
-      when: XCI_FLAVOR != "aio"
     # when the deployment is aio, we overwrite and use playbook, configure-opnfvhost.yml, since everything gets installed on opnfv host
     - name: copy aio playbook
       copy:
diff --git a/prototypes/xci/file/ha/configure-targethosts.yml b/prototypes/xci/playbooks/configure-targethosts.yml
similarity index 62%
rename from prototypes/xci/file/ha/configure-targethosts.yml
rename to prototypes/xci/playbooks/configure-targethosts.yml
@@ -6,10 +6,6 @@
       copy:
         src: ../file/authorized_keys
         dest: /root/.ssh/authorized_keys
-    - name: configure modules
-      copy:
-        src: ../file/modules
-        dest: /etc/modules
 
 - hosts: controller
   remote_user: root
@@ -18,7 +14,9 @@
     - ../var/flavor-vars.yml
   roles:
    # TODO: this only works for ubuntu/xenial and needs to be adjusted for other distros
-    - { role: configure-network, when: ansible_distribution_release == "xenial", src: "../template/controller.interface.j2", dest: "/etc/network/interfaces" }
+    - { role: configure-network, src: "../template/controller.interface.j2", dest: "/etc/network/interfaces" }
+    # force a time sync with ntp, otherwise the nodes' clocks drift out of sync
+    - role: synchronize-time
 
 - hosts: compute
   remote_user: root
     - ../var/flavor-vars.yml
   roles:
    # TODO: this only works for ubuntu/xenial and needs to be adjusted for other distros
-    - { role: configure-network, when: ansible_distribution_release == "xenial", src: "../template/compute.interface.j2", dest: "/etc/network/interfaces" }
+    - { role: configure-network, src: "../template/compute.interface.j2", dest: "/etc/network/interfaces" }
+    # force a time sync with ntp, otherwise the nodes' clocks drift out of sync
+    - role: synchronize-time
 
-- hosts: compute01
+- hosts: compute00
   remote_user: root
   # TODO: this role is for configuring NFS on xenial and adjustment needed for other distros
   roles:
diff --git a/prototypes/xci/playbooks/roles/configure-network/tasks/main.yml b/prototypes/xci/playbooks/roles/configure-network/tasks/main.yml
index 8bc8482..aafadf7 100644 (file)
@@ -8,9 +8,27 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 # TODO: this role needs to be adjusted for different distros
-- name: configure network for {{ ansible_os_family }} on interface {{ interface }}
-  template:
-    src: "{{ src }}"
-    dest: "{{ dest }}"
-- name: restart ubuntu xenial network service
-  shell: "/sbin/ifconfig {{ interface }} 0 &&/sbin/ifdown -a && /sbin/ifup -a"
+- block:
+    - name: configure modules
+      lineinfile:
+        dest: /etc/modules
+        state: present
+        create: yes
+        line: "8021q"
+    - name: add modules
+      modprobe:
+        name: 8021q
+        state: present
+    - name: ensure glean rules are removed
+      file:
+        path: "/etc/udev/rules.d/99-glean.rules"
+        state: absent
+    - name: ensure interfaces.d folder is empty
+      shell: "/bin/rm -rf /etc/network/interfaces.d/*"
+    - name: ensure interfaces file is updated
+      template:
+        src: "{{ src }}"
+        dest: "{{ dest }}"
+    - name: restart network service
+      shell: "/sbin/ifconfig {{ interface }} 0 && /sbin/ifdown -a && /sbin/ifup -a"
+  when: ansible_distribution_release == "xenial"
diff --git a/prototypes/xci/playbooks/roles/configure-nfs/tasks/main.yml b/prototypes/xci/playbooks/roles/configure-nfs/tasks/main.yml
index 66dd0af..c52da0b 100644 (file)
@@ -9,11 +9,14 @@
 ##############################################################################
 # TODO: this is for xenial and needs to be adjusted for different distros
 - block:
-    - name: make NFS dir
+    - name: make NFS directories
       file:
-        dest: /images
+        dest: "{{ item }}"
         mode: 0777
         state: directory
+      with_items:
+        - "/images"
+        - "/volumes"
     - name: configure NFS service
       lineinfile:
         dest: /etc/services
       with_items:
         - "nfs        2049/tcp"
         - "nfs        2049/udp"
-    - name: configure NFS exports on ubuntu xenial
-      copy:
-        src: ../file/exports
+    - name: configure NFS exports
+      lineinfile:
         dest: /etc/exports
-      when: ansible_distribution_release == "xenial"
+        state: present
+        create: yes
+        line: "{{ item }}"
+      with_items:
+        - "/images         *(rw,sync,no_subtree_check,no_root_squash)"
+        - "/volumes        *(rw,sync,no_subtree_check,no_root_squash)"
     # TODO: the service name might be different on other distros and needs to be adjusted
     - name: restart ubuntu xenial NFS service
       service:
diff --git a/prototypes/xci/playbooks/roles/synchronize-time/tasks/main.yml b/prototypes/xci/playbooks/roles/synchronize-time/tasks/main.yml
new file mode 100644 (file)
index 0000000..5c39d89
--- /dev/null
@@ -0,0 +1,18 @@
+---
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2017 Ericsson AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+# TODO: this role needs to be adjusted for different distros
+- block:
+    - name: restart chrony
+      service:
+        name: chrony
+        state: restarted
+    - name: synchronize time
+      shell: "chronyc -a 'burst 4/4' && chronyc -a makestep"
+  when: ansible_distribution_release == "xenial"
diff --git a/prototypes/xci/template/compute.interface.j2 b/prototypes/xci/template/compute.interface.j2
index 0c5147c..094544c 100644 (file)
@@ -1,11 +1,7 @@
-# This file describes the network interfaces available on your system
-# and how to activate them. For more information, see interfaces(5).
-
 # The loopback network interface
 auto lo
 iface lo inet loopback
 
-
 # Physical interface
 auto {{ interface }}
 iface {{ interface }} inet manual
@@ -20,7 +16,7 @@ auto {{ interface }}.30
 iface {{ interface }}.30 inet manual
     vlan-raw-device {{ interface }}
 
-# Storage network VLAN interface (optional)
+# Storage network VLAN interface
 auto {{ interface }}.20
 iface {{ interface }}.20 inet manual
     vlan-raw-device {{ interface }}
@@ -55,6 +51,7 @@ iface br-vlan inet static
     address {{host_info[inventory_hostname].VLAN_IP}}
     netmask 255.255.255.0
     gateway 192.168.122.1
+    dns-nameserver 8.8.8.8 8.8.4.4
     offload-sg off
     # Create veth pair, don't bomb if already exists
     pre-up ip link add br-vlan-veth type veth peer name eth12 || true
@@ -65,17 +62,7 @@ iface br-vlan inet static
     post-down ip link del br-vlan-veth || true
     bridge_ports br-vlan-veth
 
-# Add an additional address to br-vlan
-iface br-vlan inet static
-    # Flat network default gateway
-    # -- This needs to exist somewhere for network reachability
-    # -- from the router namespace for floating IP paths.
-    # -- Putting this here is primarily for tempest to work.
-    address {{host_info[inventory_hostname].VLAN_IP_SECOND}}
-    netmask 255.255.252.0
-    dns-nameserver 8.8.8.8 8.8.4.4
-
-# compute1 Storage bridge
+# OpenStack Storage bridge
 auto br-storage
 iface br-storage inet static
     bridge_stp off
diff --git a/prototypes/xci/template/controller.interface.j2 b/prototypes/xci/template/controller.interface.j2
index fbaa8b8..638e78e 100644 (file)
@@ -1,6 +1,3 @@
-# This file describes the network interfaces available on your system
-# and how to activate them. For more information, see interfaces(5).
-
 # The loopback network interface
 auto lo
 iface lo inet loopback
@@ -35,18 +32,14 @@ iface br-mgmt inet static
     netmask 255.255.252.0
 
 # OpenStack Networking VXLAN (tunnel/overlay) bridge
-#
-# Only the COMPUTE and NETWORK nodes must have an IP address
-# on this bridge. When used by infrastructure nodes, the
-# IP addresses are assigned to containers which use this
-# bridge.
-#
 auto br-vxlan
-iface br-vxlan inet manual
+iface br-vxlan inet static
     bridge_stp off
     bridge_waitport 0
     bridge_fd 0
     bridge_ports {{ interface }}.30
+    address {{host_info[inventory_hostname].VXLAN_IP}}
+    netmask 255.255.252.0
 
 # OpenStack Networking VLAN bridge
 auto br-vlan
@@ -60,7 +53,7 @@ iface br-vlan inet static
     gateway 192.168.122.1
     dns-nameserver 8.8.8.8 8.8.4.4
 
-# compute1 Storage bridge
+# OpenStack Storage bridge
 auto br-storage
 iface br-storage inet static
     bridge_stp off
diff --git a/prototypes/xci/template/opnfv.interface.j2 b/prototypes/xci/template/opnfv.interface.j2
index fbaa8b8..e9f8649 100644 (file)
@@ -1,6 +1,3 @@
-# This file describes the network interfaces available on your system
-# and how to activate them. For more information, see interfaces(5).
-
 # The loopback network interface
 auto lo
 iface lo inet loopback
@@ -35,18 +32,14 @@ iface br-mgmt inet static
     netmask 255.255.252.0
 
 # OpenStack Networking VXLAN (tunnel/overlay) bridge
-#
-# Only the COMPUTE and NETWORK nodes must have an IP address
-# on this bridge. When used by infrastructure nodes, the
-# IP addresses are assigned to containers which use this
-# bridge.
-#
 auto br-vxlan
-iface br-vxlan inet manual
+iface br-vxlan inet static
     bridge_stp off
     bridge_waitport 0
     bridge_fd 0
     bridge_ports {{ interface }}.30
+    address {{ host_info[inventory_hostname].VXLAN_IP }}
+    netmask 255.255.252.0
 
 # OpenStack Networking VLAN bridge
 auto br-vlan
@@ -60,7 +53,7 @@ iface br-vlan inet static
     gateway 192.168.122.1
     dns-nameserver 8.8.8.8 8.8.4.4
 
-# compute1 Storage bridge
+# OpenStack Storage bridge
 auto br-storage
 iface br-storage inet static
     bridge_stp off
diff --git a/utils/test/testapi/opnfv_testapi/common/check.py b/utils/test/testapi/opnfv_testapi/common/check.py
new file mode 100644 (file)
index 0000000..be4b1df
--- /dev/null
@@ -0,0 +1,111 @@
+##############################################################################
+# Copyright (c) 2017 ZTE Corp
+# feng.xiaowei@zte.com.cn
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import functools
+
+from tornado import web, gen
+
+from opnfv_testapi.common import raises, message
+
+
+def authenticate(method):
+    @web.asynchronous
+    @gen.coroutine
+    @functools.wraps(method)
+    def wrapper(self, *args, **kwargs):
+        if self.auth:
+            try:
+                token = self.request.headers['X-Auth-Token']
+            except KeyError:
+                raises.Unauthorized(message.unauthorized())
+            query = {'access_token': token}
+            check = yield self._eval_db_find_one(query, 'tokens')
+            if not check:
+                raises.Forbidden(message.invalid_token())
+        ret = yield gen.coroutine(method)(self, *args, **kwargs)
+        raise gen.Return(ret)
+    return wrapper
+
+
+def not_exist(xstep):
+    @functools.wraps(xstep)
+    def wrap(self, *args, **kwargs):
+        query = kwargs.get('query')
+        data = yield self._eval_db_find_one(query)
+        if not data:
+            raises.NotFound(message.not_found(self.table, query))
+        ret = yield gen.coroutine(xstep)(self, data, *args, **kwargs)
+        raise gen.Return(ret)
+
+    return wrap
+
+
+def no_body(xstep):
+    @functools.wraps(xstep)
+    def wrap(self, *args, **kwargs):
+        if self.json_args is None:
+            raises.BadRequest(message.no_body())
+        ret = yield gen.coroutine(xstep)(self, *args, **kwargs)
+        raise gen.Return(ret)
+
+    return wrap
+
+
+def miss_fields(xstep):
+    @functools.wraps(xstep)
+    def wrap(self, *args, **kwargs):
+        fields = kwargs.get('miss_fields')
+        if fields:
+            for miss in fields:
+                miss_data = self.json_args.get(miss)
+                if miss_data is None or miss_data == '':
+                    raises.BadRequest(message.missing(miss))
+        ret = yield gen.coroutine(xstep)(self, *args, **kwargs)
+        raise gen.Return(ret)
+    return wrap
+
+
+def carriers_exist(xstep):
+    @functools.wraps(xstep)
+    def wrap(self, *args, **kwargs):
+        carriers = kwargs.get('carriers')
+        if carriers:
+            for table, query in carriers:
+                exist = yield self._eval_db_find_one(query(), table)
+                if not exist:
+                    raises.Forbidden(message.not_found(table, query()))
+        ret = yield gen.coroutine(xstep)(self, *args, **kwargs)
+        raise gen.Return(ret)
+    return wrap
+
+
+def new_not_exists(xstep):
+    @functools.wraps(xstep)
+    def wrap(self, *args, **kwargs):
+        query = kwargs.get('query')
+        if query:
+            to_data = yield self._eval_db_find_one(query())
+            if to_data:
+                raises.Forbidden(message.exist(self.table, query()))
+        ret = yield gen.coroutine(xstep)(self, *args, **kwargs)
+        raise gen.Return(ret)
+    return wrap
+
+
+def updated_one_not_exist(xstep):
+    @functools.wraps(xstep)
+    def wrap(self, data, *args, **kwargs):
+        db_keys = kwargs.get('db_keys')
+        query = self._update_query(db_keys, data)
+        if query:
+            to_data = yield self._eval_db_find_one(query)
+            if to_data:
+                raises.Forbidden(message.exist(self.table, query))
+        ret = yield gen.coroutine(xstep)(self, data, *args, **kwargs)
+        raise gen.Return(ret)
+    return wrap
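
For readers following the testapi refactor below: the new check.py moves per-request validation out of the handler bodies and into stackable decorators. A toy, framework-free sketch of that pattern (hypothetical names; no tornado, coroutines, or database involved), showing how stacked decorators short-circuit before the handler body runs:

    import functools

    def no_body(method):
        @functools.wraps(method)
        def wrap(self, *args, **kwargs):
            if self.json_args is None:
                raise ValueError('no body provided')
            return method(self, *args, **kwargs)
        return wrap

    def miss_fields(method):
        @functools.wraps(method)
        def wrap(self, *args, **kwargs):
            for field in kwargs.get('miss_fields', []):
                if not self.json_args.get(field):
                    raise ValueError('missing field: %s' % field)
            return method(self, *args, **kwargs)
        return wrap

    class ToyHandler(object):
        def __init__(self, json_args):
            self.json_args = json_args

        @no_body
        @miss_fields
        def create(self, **kwargs):
            # validation already happened; only the real work remains
            return 'created %s' % self.json_args['name']

    print ToyHandler({'name': 'pod1'}).create(miss_fields=['name'])  # created pod1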
diff --git a/utils/test/testapi/opnfv_testapi/resources/handlers.py b/utils/test/testapi/opnfv_testapi/resources/handlers.py
index 522bbe7..955fbbe 100644 (file)
 ##############################################################################
 
 from datetime import datetime
-import functools
 import json
 
 from tornado import gen
 from tornado import web
 
 import models
+from opnfv_testapi.common import check
 from opnfv_testapi.common import message
 from opnfv_testapi.common import raises
 from opnfv_testapi.tornado_swagger import swagger
@@ -73,48 +73,20 @@ class GenericApiHandler(web.RequestHandler):
         cls_data = self.table_cls.from_dict(data)
         return cls_data.format_http()
 
-    def authenticate(method):
-        @web.asynchronous
-        @gen.coroutine
-        @functools.wraps(method)
-        def wrapper(self, *args, **kwargs):
-            if self.auth:
-                try:
-                    token = self.request.headers['X-Auth-Token']
-                except KeyError:
-                    raises.Unauthorized(message.unauthorized())
-                query = {'access_token': token}
-                check = yield self._eval_db_find_one(query, 'tokens')
-                if not check:
-                    raises.Forbidden(message.invalid_token())
-            ret = yield gen.coroutine(method)(self, *args, **kwargs)
-            raise gen.Return(ret)
-        return wrapper
-
-    @authenticate
-    def _create(self, miss_checks, db_checks, **kwargs):
+    @check.authenticate
+    @check.no_body
+    @check.miss_fields
+    @check.carriers_exist
+    @check.new_not_exists
+    def _create(self, **kwargs):
         """
-        :param miss_checks: [miss1, miss2]
-        :param db_checks: [(table, exist, query, error)]
         """
-        if self.json_args is None:
-            raises.BadRequest(message.no_body())
-
         data = self.table_cls.from_dict(self.json_args)
-        for miss in miss_checks:
-            miss_data = data.__getattribute__(miss)
-            if miss_data is None or miss_data == '':
-                raises.BadRequest(message.missing(miss))
-
         for k, v in kwargs.iteritems():
             data.__setattr__(k, v)
 
-        for table, exist, query, error in db_checks:
-            check = yield self._eval_db_find_one(query(data), table)
-            if (exist and not check) or (not exist and check):
-                code, msg = error(data)
-                raises.CodeTBD(code, msg)
-
         if self.table != 'results':
             data.creation_date = datetime.now()
         _id = yield self._eval_db(self.table, 'insert', data.format(),
@@ -146,47 +118,27 @@ class GenericApiHandler(web.RequestHandler):
 
     @web.asynchronous
     @gen.coroutine
-    def _get_one(self, query):
-        data = yield self._eval_db_find_one(query)
-        if data is None:
-            raises.NotFound(message.not_found(self.table, query))
+    @check.not_exist
+    def _get_one(self, data, query=None):
         self.finish_request(self.format_data(data))
 
-    @authenticate
-    def _delete(self, query):
-        data = yield self._eval_db_find_one(query)
-        if data is None:
-            raises.NotFound(message.not_found(self.table, query))
-
+    @check.authenticate
+    @check.not_exist
+    def _delete(self, data, query=None):
         yield self._eval_db(self.table, 'remove', query)
         self.finish_request()
 
-    @authenticate
-    def _update(self, query, db_keys):
-        if self.json_args is None:
-            raises.BadRequest(message.no_body())
-
-        # check old data exist
-        from_data = yield self._eval_db_find_one(query)
-        if from_data is None:
-            raises.NotFound(message.not_found(self.table, query))
-
-        data = self.table_cls.from_dict(from_data)
-        # check new data exist
-        equal, new_query = self._update_query(db_keys, data)
-        if not equal:
-            to_data = yield self._eval_db_find_one(new_query)
-            if to_data is not None:
-                raises.Forbidden(message.exist(self.table, new_query))
-
-        # we merge the whole document """
-        edit_request = self._update_requests(data)
-
-        """ Updating the DB """
-        yield self._eval_db(self.table, 'update', query, edit_request,
+    @check.authenticate
+    @check.no_body
+    @check.not_exist
+    @check.updated_one_not_exist
+    def _update(self, data, query=None, **kwargs):
+        data = self.table_cls.from_dict(data)
+        update_req = self._update_requests(data)
+        yield self._eval_db(self.table, 'update', query, update_req,
                             check_keys=False)
-        edit_request['_id'] = str(data._id)
-        self.finish_request(edit_request)
+        update_req['_id'] = str(data._id)
+        self.finish_request(update_req)
 
     def _update_requests(self, data):
         request = dict()
@@ -219,13 +171,13 @@ class GenericApiHandler(web.RequestHandler):
         equal = True
         for key in keys:
             new = self.json_args.get(key)
-            old = data.__getattribute__(key)
+            old = data.get(key)
             if new is None:
                 new = old
             elif new != old:
                 equal = False
             query[key] = new
-        return equal, query
+        return query if not equal else dict()
 
     def _eval_db(self, table, method, *args, **kwargs):
         exec_collection = self.db.__getattr__(table)
diff --git a/utils/test/testapi/opnfv_testapi/resources/pod_handlers.py b/utils/test/testapi/opnfv_testapi/resources/pod_handlers.py
index 2c303c9..e21841d 100644 (file)
@@ -6,10 +6,7 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
-import httplib
-
 import handlers
-from opnfv_testapi.common import message
 from opnfv_testapi.tornado_swagger import swagger
 import pod_models
 
@@ -43,15 +40,10 @@ class PodCLHandler(GenericPodHandler):
             @raise 403: pod already exists
             @raise 400: body or name not provided
         """
-        def query(data):
-            return {'name': data.name}
-
-        def error(data):
-            return httplib.FORBIDDEN, message.exist('pod', data.name)
-
-        miss_checks = ['name']
-        db_checks = [(self.table, False, query, error)]
-        self._create(miss_checks, db_checks)
+        def query():
+            return {'name': self.json_args.get('name')}
+        miss_fields = ['name']
+        self._create(miss_fields=miss_fields, query=query)
 
 
 class PodGURHandler(GenericPodHandler):
@@ -63,9 +55,7 @@ class PodGURHandler(GenericPodHandler):
             @return 200: pod exist
             @raise 404: pod not exist
         """
-        query = dict()
-        query['name'] = pod_name
-        self._get_one(query)
+        self._get_one(query={'name': pod_name})
 
     def delete(self, pod_name):
         """ Remove a POD
diff --git a/utils/test/testapi/opnfv_testapi/resources/project_handlers.py b/utils/test/testapi/opnfv_testapi/resources/project_handlers.py
index 59e0b88..d79cd3b 100644 (file)
@@ -6,10 +6,8 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
-import httplib
 
 import handlers
-from opnfv_testapi.common import message
 from opnfv_testapi.tornado_swagger import swagger
 import project_models
 
@@ -45,15 +43,10 @@ class ProjectCLHandler(GenericProjectHandler):
             @raise 403: project already exists
             @raise 400:  body or name not provided
         """
-        def query(data):
-            return {'name': data.name}
-
-        def error(data):
-            return httplib.FORBIDDEN, message.exist('project', data.name)
-
-        miss_checks = ['name']
-        db_checks = [(self.table, False, query, error)]
-        self._create(miss_checks, db_checks)
+        def query():
+            return {'name': self.json_args.get('name')}
+        miss_fields = ['name']
+        self._create(miss_fields=miss_fields, query=query)
 
 
 class ProjectGURHandler(GenericProjectHandler):
@@ -65,7 +58,7 @@ class ProjectGURHandler(GenericProjectHandler):
             @return 200: project exist
             @raise 404: project not exist
         """
-        self._get_one({'name': project_name})
+        self._get_one(query={'name': project_name})
 
     @swagger.operation(nickname="updateProjectByName")
     def put(self, project_name):
@@ -81,7 +74,7 @@ class ProjectGURHandler(GenericProjectHandler):
         """
         query = {'name': project_name}
         db_keys = ['name']
-        self._update(query, db_keys)
+        self._update(query=query, db_keys=db_keys)
 
     @swagger.operation(nickname='deleteProjectByName')
     def delete(self, project_name):
@@ -90,4 +83,4 @@ class ProjectGURHandler(GenericProjectHandler):
             @return 200: delete success
             @raise 404: project not exist
         """
-        self._delete({'name': project_name})
+        self._delete(query={'name': project_name})
diff --git a/utils/test/testapi/opnfv_testapi/resources/result_handlers.py b/utils/test/testapi/opnfv_testapi/resources/result_handlers.py
index fb5ed9e..214706f 100644 (file)
@@ -8,7 +8,6 @@
 ##############################################################################
 from datetime import datetime
 from datetime import timedelta
-import httplib
 
 from bson import objectid
 
@@ -127,7 +126,9 @@ class ResultsCLHandler(GenericResultHandler):
         if last is not None:
             last = self.get_int('last', last)
 
-        self._list(self.set_query(), sort=[('start_date', -1)], last=last)
+        self._list(query=self.set_query(),
+                   sort=[('start_date', -1)],
+                   last=last)
 
     @swagger.operation(nickname="createTestResult")
     def post(self):
@@ -141,31 +142,21 @@ class ResultsCLHandler(GenericResultHandler):
             @raise 404: pod/project/testcase not exist
             @raise 400: body/pod_name/project_name/case_name not provided
         """
-        def pod_query(data):
-            return {'name': data.pod_name}
+        def pod_query():
+            return {'name': self.json_args.get('pod_name')}
 
-        def pod_error(data):
-            return httplib.FORBIDDEN, message.not_found('pod', data.pod_name)
+        def project_query():
+            return {'name': self.json_args.get('project_name')}
 
-        def project_query(data):
-            return {'name': data.project_name}
+        def testcase_query():
+            return {'project_name': self.json_args.get('project_name'),
+                    'name': self.json_args.get('case_name')}
 
-        def project_error(data):
-            return httplib.FORBIDDEN, message.not_found('project',
-                                                        data.project_name)
-
-        def testcase_query(data):
-            return {'project_name': data.project_name, 'name': data.case_name}
-
-        def testcase_error(data):
-            return httplib.FORBIDDEN, message.not_found('testcase',
-                                                        data.case_name)
-
-        miss_checks = ['pod_name', 'project_name', 'case_name']
-        db_checks = [('pods', True, pod_query, pod_error),
-                     ('projects', True, project_query, project_error),
-                     ('testcases', True, testcase_query, testcase_error)]
-        self._create(miss_checks, db_checks)
+        miss_fields = ['pod_name', 'project_name', 'case_name']
+        carriers = [('pods', pod_query),
+                    ('projects', project_query),
+                    ('testcases', testcase_query)]
+        self._create(miss_fields=miss_fields, carriers=carriers)
 
 
 class ResultsGURHandler(GenericResultHandler):
@@ -179,7 +170,7 @@ class ResultsGURHandler(GenericResultHandler):
         """
         query = dict()
         query["_id"] = objectid.ObjectId(result_id)
-        self._get_one(query)
+        self._get_one(query=query)
 
     @swagger.operation(nickname="updateTestResultById")
     def put(self, result_id):
@@ -195,4 +186,4 @@ class ResultsGURHandler(GenericResultHandler):
         """
         query = {'_id': objectid.ObjectId(result_id)}
         db_keys = []
-        self._update(query, db_keys)
+        self._update(query=query, db_keys=db_keys)
diff --git a/utils/test/testapi/opnfv_testapi/resources/scenario_handlers.py b/utils/test/testapi/opnfv_testapi/resources/scenario_handlers.py
index bad79fd..5d420a5 100644 (file)
@@ -1,5 +1,4 @@
 import functools
-import httplib
 
 from opnfv_testapi.common import message
 from opnfv_testapi.common import raises
@@ -65,7 +64,7 @@ class ScenariosCLHandler(GenericScenarioHandler):
                 query['installers'] = {'$elemMatch': elem_query}
             return query
 
-        self._list(_set_query())
+        self._list(query=_set_query())
 
     @swagger.operation(nickname="createScenario")
     def post(self):
@@ -79,15 +78,10 @@ class ScenariosCLHandler(GenericScenarioHandler):
             @raise 403: scenario already exists
             @raise 400:  body or name not provided
         """
-        def query(data):
-            return {'name': data.name}
-
-        def error(data):
-            return httplib.FORBIDDEN, message.exist('scenario', data.name)
-
-        miss_checks = ['name']
-        db_checks = [(self.table, False, query, error)]
-        self._create(miss_checks=miss_checks, db_checks=db_checks)
+        def query():
+            return {'name': self.json_args.get('name')}
+        miss_fields = ['name']
+        self._create(miss_fields=miss_fields, query=query)
 
 
 class ScenarioGURHandler(GenericScenarioHandler):
@@ -99,7 +93,7 @@ class ScenarioGURHandler(GenericScenarioHandler):
             @return 200: scenario exist
             @raise 404: scenario not exist
         """
-        self._get_one({'name': name})
+        self._get_one(query={'name': name})
         pass
 
     @swagger.operation(nickname="updateScenarioByName")
@@ -116,7 +110,7 @@ class ScenarioGURHandler(GenericScenarioHandler):
         """
         query = {'name': name}
         db_keys = ['name']
-        self._update(query, db_keys)
+        self._update(query=query, db_keys=db_keys)
 
     @swagger.operation(nickname="deleteScenarioByName")
     def delete(self, name):
@@ -126,19 +120,16 @@ class ScenarioGURHandler(GenericScenarioHandler):
         @raise 404: scenario not exist:
         """
 
-        query = {'name': name}
-        self._delete(query)
+        self._delete(query={'name': name})
 
     def _update_query(self, keys, data):
         query = dict()
-        equal = True
         if self._is_rename():
             new = self._term.get('name')
-            if data.name != new:
-                equal = False
+            if data.get('name') != new:
                 query['name'] = new
 
-        return equal, query
+        return query
 
     def _update_requests(self, data):
         updates = {
diff --git a/utils/test/testapi/opnfv_testapi/resources/testcase_handlers.py b/utils/test/testapi/opnfv_testapi/resources/testcase_handlers.py
index bc22b74..9399326 100644 (file)
@@ -6,9 +6,7 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
-import httplib
 
-from opnfv_testapi.common import message
 from opnfv_testapi.resources import handlers
 from opnfv_testapi.resources import testcase_models
 from opnfv_testapi.tornado_swagger import swagger
@@ -32,9 +30,7 @@ class TestcaseCLHandler(GenericTestcaseHandler):
                          empty list is no testcase exist in this project
             @rtype: L{TestCases}
         """
-        query = dict()
-        query['project_name'] = project_name
-        self._list(query)
+        self._list(query={'project_name': project_name})
 
     @swagger.operation(nickname="createTestCase")
     def post(self, project_name):
@@ -49,26 +45,18 @@ class TestcaseCLHandler(GenericTestcaseHandler):
                         or testcase already exists in this project
             @raise 400: body or name not provided
         """
-        def p_query(data):
-            return {'name': data.project_name}
-
-        def tc_query(data):
-            return {
-                'project_name': data.project_name,
-                'name': data.name
-            }
-
-        def p_error(data):
-            return httplib.FORBIDDEN, message.not_found('project',
-                                                        data.project_name)
-
-        def tc_error(data):
-            return httplib.FORBIDDEN, message.exist('testcase', data.name)
+        def project_query():
+            return {'name': project_name}
 
-        miss_checks = ['name']
-        db_checks = [(self.db_projects, True, p_query, p_error),
-                     (self.db_testcases, False, tc_query, tc_error)]
-        self._create(miss_checks, db_checks, project_name=project_name)
+        def testcase_query():
+            return {'project_name': project_name,
+                    'name': self.json_args.get('name')}
+        miss_fields = ['name']
+        carriers = [(self.db_projects, project_query)]
+        self._create(miss_fields=miss_fields,
+                     carriers=carriers,
+                     query=testcase_query,
+                     project_name=project_name)
 
 
 class TestcaseGURHandler(GenericTestcaseHandler):
@@ -84,7 +72,7 @@ class TestcaseGURHandler(GenericTestcaseHandler):
         query = dict()
         query['project_name'] = project_name
         query["name"] = case_name
-        self._get_one(query)
+        self._get_one(query=query)
 
     @swagger.operation(nickname="updateTestCaseByName")
     def put(self, project_name, case_name):
@@ -102,7 +90,7 @@ class TestcaseGURHandler(GenericTestcaseHandler):
         """
         query = {'project_name': project_name, 'name': case_name}
         db_keys = ['name', 'project_name']
-        self._update(query, db_keys)
+        self._update(query=query, db_keys=db_keys)
 
     @swagger.operation(nickname='deleteTestCaseByName')
     def delete(self, project_name, case_name):
@@ -112,4 +100,4 @@ class TestcaseGURHandler(GenericTestcaseHandler):
             @raise 404: testcase not exist
         """
         query = {'project_name': project_name, 'name': case_name}
-        self._delete(query)
+        self._delete(query=query)