Merge "[release] Yardstick release note 6.1.0"
author Ross Brattain <ross.b.brattain@intel.com>
Fri, 25 May 2018 21:13:35 +0000 (21:13 +0000)
committer Gerrit Code Review <gerrit@opnfv.org>
Fri, 25 May 2018 21:13:35 +0000 (21:13 +0000)
api/resources/v2/openrcs.py
api/server.py
docker/Dockerfile
docker/Dockerfile.aarch64.patch
docs/testing/user/userguide/15-list-of-tcs.rst
docs/testing/user/userguide/opnfv_yardstick_tc092.rst [new file with mode: 0644]
gui/bower.json
yardstick/common/constants.py
yardstick/common/exceptions.py

index cb506d0..4706b85 100644 (file)
@@ -21,6 +21,7 @@ from yardstick.common import constants as consts
 from yardstick.common.utils import result_handler
 from yardstick.common.utils import makedirs
 from yardstick.common.utils import source_env
+from yardstick.common import exceptions as y_exc
 
 LOG = logging.getLogger(__name__)
 LOG.setLevel(logging.DEBUG)
@@ -57,7 +58,7 @@ class V2Openrcs(ApiResource):
             openrc_data = self._get_openrc_dict()
         except Exception:
             LOG.exception('parse openrc failed')
-            return result_handler(consts.API_ERROR, 'parse openrc failed')
+            raise y_exc.UploadOpenrcError()
 
         openrc_id = str(uuid.uuid4())
         self._write_into_database(environment_id, openrc_id, openrc_data)
@@ -67,7 +68,7 @@ class V2Openrcs(ApiResource):
             self._generate_ansible_conf_file(openrc_data)
         except Exception:
             LOG.exception('write cloud conf failed')
-            return result_handler(consts.API_ERROR, 'genarate ansible conf failed')
+            raise y_exc.UploadOpenrcError()
         LOG.info('finish writing ansible cloud conf')
 
         return result_handler(consts.API_SUCCESS, {'openrc': openrc_data, 'uuid': openrc_id})
@@ -102,7 +103,7 @@ class V2Openrcs(ApiResource):
             source_env(consts.OPENRC)
         except Exception:
             LOG.exception('source openrc failed')
-            return result_handler(consts.API_ERROR, 'source openrc failed')
+            raise y_exc.UpdateOpenrcError()
         LOG.info('source openrc: Done')
 
         openrc_id = str(uuid.uuid4())
@@ -113,7 +114,7 @@ class V2Openrcs(ApiResource):
             self._generate_ansible_conf_file(openrc_vars)
         except Exception:
             LOG.exception('write cloud conf failed')
-            return result_handler(consts.API_ERROR, 'genarate ansible conf failed')
+            raise y_exc.UpdateOpenrcError()
         LOG.info('finish writing ansible cloud conf')
 
         return result_handler(consts.API_SUCCESS, {'openrc': openrc_vars, 'uuid': openrc_id})
@@ -174,7 +175,7 @@ class V2Openrcs(ApiResource):
 
         makedirs(consts.OPENSTACK_CONF_DIR)
         with open(consts.CLOUDS_CONF, 'w') as f:
-            yaml.dump(ansible_conf, f, default_flow_style=False)
+            yaml.safe_dump(ansible_conf, f, default_flow_style=False)
 
 
 class V2Openrc(ApiResource):
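
Note on the yaml.safe_dump change above: safe_dump only serializes plain YAML types, so stray Python objects can never end up in the generated clouds.yaml as !!python/object tags. A minimal sketch of the difference, assuming only PyYAML is installed; the Credentials class below is illustrative and not part of the patch:

import yaml


class Credentials(object):  # illustrative only, not part of the patch
    pass


# safe_dump emits clean YAML for plain dicts, lists and strings ...
print(yaml.safe_dump({'clouds': {'opnfv': {'auth': {'username': 'admin'}}}},
                     default_flow_style=False))

# ... but refuses to serialize arbitrary Python objects instead of tagging them.
try:
    yaml.safe_dump({'auth': Credentials()})
except yaml.YAMLError as exc:  # RepresenterError is a YAMLError subclass
    print('refused to serialize: %s' % exc)
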
index 37a1ab6..914fe84 100644 (file)
@@ -39,11 +39,13 @@ app.config['MAX_CONTENT_LENGTH'] = 2 * 1024 * 1024 * 1024
 
 Swagger(app)
 
-api = Api(app)
+api = Api(app, errors=consts.API_ERRORS)
 
 
 @app.teardown_request
 def shutdown_session(exception=None):
+    if exception:
+        LOG.warning(exception.message)
     db_session.remove()
 
 
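
Context for the Api(app, errors=consts.API_ERRORS) change above: Flask-RESTful looks up the class name of an exception raised inside a resource in the errors mapping and builds the JSON error response from it, which is what lets the openrc handlers raise y_exc.UploadOpenrcError / y_exc.UpdateOpenrcError instead of returning result_handler() payloads. A self-contained sketch of the pattern; the HTTP status 500 below is illustrative, not Yardstick's API_ERROR constant:

from flask import Flask
from flask_restful import Api, Resource


class UploadOpenrcError(Exception):
    """Raised when parsing or storing an openrc file fails."""


API_ERRORS = {
    'UploadOpenrcError': {
        'message': 'Upload openrc ERROR!',
        'status': 500,  # illustrative HTTP status, not consts.API_ERROR
    },
}

app = Flask(__name__)
api = Api(app, errors=API_ERRORS)


class Openrcs(Resource):
    def post(self):
        # Raising the mapped exception makes Flask-RESTful answer with the
        # message/status configured in API_ERRORS instead of a bare 500 page.
        raise UploadOpenrcError()


api.add_resource(Openrcs, '/openrcs')

if __name__ == '__main__':
    resp = app.test_client().post('/openrcs')
    print(resp.status_code, resp.data)  # expected: 500 and the mapped message
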
index 62ea0d0..7f85cbd 100644 (file)
@@ -24,7 +24,7 @@ ENV YARDSTICK_REPO_DIR="${REPOS_DIR}/yardstick/" \
     RELENG_REPO_DIR="${REPOS_DIR}/releng" \
     STORPERF_REPO_DIR="${REPOS_DIR}/storperf"
 
-RUN apt-get update && apt-get install -y git python python-setuptools python-pip && apt-get -y autoremove && apt-get clean
+RUN apt-get update && apt-get install -y git python python-setuptools python-pip iputils-ping && apt-get -y autoremove && apt-get clean
 RUN easy_install -U setuptools==30.0.0
 RUN pip install appdirs==1.4.0 pyopenssl==17.5.0 python-openstackclient==3.11.0 python-heatclient==1.11.0 ansible==2.4.2
 
index 6c73812..21095cb 100644 (file)
@@ -27,8 +27,8 @@ index 62ea0d0..f2f41771 100644
      RELENG_REPO_DIR="${REPOS_DIR}/releng" \
      STORPERF_REPO_DIR="${REPOS_DIR}/storperf"
 
--RUN apt-get update && apt-get install -y git python python-setuptools python-pip && apt-get -y autoremove && apt-get clean
-+RUN apt-get update && apt-get install -y git python python-setuptools python-pip && apt-get -y autoremove && \
+-RUN apt-get update && apt-get install -y git python python-setuptools python-pip iputils-ping && apt-get -y autoremove && apt-get clean
++RUN apt-get update && apt-get install -y git python python-setuptools python-pip iputils-ping && apt-get -y autoremove && \
 +    apt-get install -y libssl-dev && apt-get -y install libffi-dev && apt-get clean
  RUN easy_install -U setuptools==30.0.0
  RUN pip install appdirs==1.4.0 pyopenssl==17.5.0 python-openstackclient==3.11.0 python-heatclient==1.11.0 ansible==2.4.2
index 190df07..37ce819 100644 (file)
@@ -84,6 +84,7 @@ H A
    opnfv_yardstick_tc057.rst
    opnfv_yardstick_tc058.rst
    opnfv_yardstick_tc087.rst
+   opnfv_yardstick_tc092.rst
    opnfv_yardstick_tc093.rst
 
 IPv6
diff --git a/docs/testing/user/userguide/opnfv_yardstick_tc092.rst b/docs/testing/user/userguide/opnfv_yardstick_tc092.rst
new file mode 100644 (file)
index 0000000..895074a
--- /dev/null
@@ -0,0 +1,196 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International
+.. License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) OPNFV, Ericsson and others.
+
+*************************************
+Yardstick Test Case Description TC092
+*************************************
+
++-----------------------------------------------------------------------------+
+|SDN Controller resilience in HA configuration                                |
+|                                                                             |
++--------------+--------------------------------------------------------------+
+|test case id  | OPNFV_YARDSTICK_TC092: SDN controller resilience and high    |
+|              | availability in HA configuration                             |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|test purpose  | This test validates SDN controller node high availability by |
+|              | verifying there is no impact on the data plane connectivity  |
+|              | when one SDN controller fails in a HA configuration,         |
+|              | i.e. all existing configured network services (DHCP, ARP,    |
+|              | L2, L3VPN, Security Groups) should continue to operate       |
+|              | between the existing VMs while one SDN controller instance   |
+|              | is offline and rebooting.                                    |
+|              |                                                              |
+|              | The test also validates that network service operations such |
+|              | as creating a new VM in an existing or new L2 network        |
+|              | remain operational while one instance of the                 |
+|              | SDN controller is offline and recovers from the failure.     |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|test method   | This test case:                                              |
+|              |  1. fails one instance of a SDN controller cluster running   |
+|              |     in a HA configuration on the OpenStack controller node   |
+|              |                                                              |
+|              |  2. checks if already configured L2 connectivity between     |
+|              |     existing VMs is not impacted                             |
+|              |                                                              |
+|              |  3. verifies that the system never loses the ability to      |
+|              |     execute virtual network operations, even when the        |
+|              |     failed SDN Controller is still recovering                |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|attackers     | In this test case, an attacker called “kill-process” is      |
+|              | needed. This attacker includes three parameters:             |
+|              |  1. ``fault_type``: which is used for finding the attacker's |
+|              |     scripts. It should be set to 'kill-process' in this test |
+|              |                                                              |
+|              |  2. ``process_name``: should be set to the SDN controller    |
+|              |     process                                                  |
+|              |                                                              |
+|              |  3. ``host``: which is the name of a control node where      |
+|              |     the opendaylight process is running                      |
+|              |                                                              |
+|              | example:                                                     |
+|              |   - ``fault_type``: “kill-process”                           |
+|              |   - ``process_name``: “opendaylight-karaf” (TBD)             |
+|              |   - ``host``: node1                                          |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|monitors      | In this test case, the following monitors are needed:        |
+|              |  1. ``ping_same_network_l2``: monitor pinging traffic        |
+|              |     between the VMs in the same neutron network              |
+|              |                                                              |
+|              |  2. ``ping_external_snat``: monitor ping traffic from VMs to |
+|              |     external destinations (e.g. google.com)                  |
+|              |                                                              |
+|              |  3. ``SDN controller process monitor``: a monitor checking   |
+|              |     the state of a specified SDN controller process. It      |
+|              |     measures the recovery time of the given process.         |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|operations    | In this test case, the following operations are needed:      |
+|              |  1. "nova-create-instance-in_network": create a VM instance  |
+|              |     in one of the existing neutron networks.                 |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|metrics       | In this test case, there are two metrics:                    |
+|              |  1. process_recover_time: which indicates the maximum        |
+|              |     time (seconds) from the process being killed to          |
+|              |     recovered                                                |
+|              |                                                              |
+|              |  2. packet_drop: measures the packets that have been dropped |
+|              |     by the monitors using pktgen.                            |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|test tool     | Developed by the project. Please see folder:                 |
+|              | "yardstick/benchmark/scenarios/availability/ha_tools"        |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|references    | TBD                                                          |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|configuration | This test case needs two configuration files:                |
+|              |  1. test case file: opnfv_yardstick_tc092.yaml               |
+|              |     - Attackers: see above “attackers” description           |
+|              |     - Monitors: see above “monitors” description             |
+|              |       - waiting_time: which is the time (seconds) from the   |
+|              |         process being killed to stopping the monitors        |
+|              |     - SLA: see above “metrics” description                   |
+|              |                                                              |
+|              |  2. POD file: pod.yaml. The POD configuration should be      |
+|              |     recorded in pod.yaml first. The “host” item in this      |
+|              |     test case uses the node name from pod.yaml.              |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|test sequence | Description and expected result                              |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|pre-action    |  1. The OpenStack cluster is set up with an SDN controller   |
+|              |     running in a three node cluster configuration.           |
+|              |                                                              |
+|              |  2. One or more neutron networks are created with two or     |
+|              |     more VMs attached to each of the neutron networks.       |
+|              |                                                              |
+|              |  3. The neutron networks are attached to a neutron router    |
+|              |     which is attached to an external network towards the     |
+|              |     DCGW.                                                    |
+|              |                                                              |
+|              |  4. The master node of SDN controller cluster is known.      |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 1        | Start ip connectivity monitors:                              |
+|              |  1. Check the L2 connectivity between the VMs in the same    |
+|              |     neutron network.                                         |
+|              |                                                              |
+|              |  2. Check the external connectivity of the VMs.              |
+|              |                                                              |
+|              | Each monitor runs in an independent process.                 |
+|              |                                                              |
+|              | Result: The monitor info will be collected.                  |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 2        | Start attacker:                                              |
+|              | SSH to the VIM node and kill the SDN controller process      |
+|              | determined in step 2.                                        |
+|              |                                                              |
+|              | Result: One SDN controller service will be shut down         |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 3        | Restart the SDN controller.                                  |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 4        | Create a new VM in the existing Neutron network while the    |
+|              | SDN controller is offline or still recovering.               |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 5        | Stop IP connectivity monitors after a period of time         |
+|              | specified by “waiting_time”                                  |
+|              |                                                              |
+|              | Result: The monitor info will be aggregated                  |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 6        | Verify the IP connectivity monitor result                    |
+|              |                                                              |
+|              | Result: IP connectivity monitor should not have any packet   |
+|              | drop failures reported                                       |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 7        | Verify process_recover_time, which indicates the maximum     |
+|              | time (seconds) from the process being killed to recovered,   |
+|              | is within the SLA. This step blocks until either the         |
+|              | process has recovered or a timeout occurred.                 |
+|              |                                                              |
+|              | Result: process_recover_time is within SLA limits; if not,   |
+|              | the test case fails and stops.                                |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 8        | Start IP connectivity monitors for the new VM:               |
+|              |  1. Check the L2 connectivity from the existing VMs to the   |
+|              |     new VM in the Neutron network.                           |
+|              |                                                              |
+|              |  2. Check connectivity from one VM to an external host on    |
+|              |     the Internet to verify SNAT functionality.               |
+|              |                                                              |
+|              | Result: The monitor info will be collected.                  |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 9        | Stop IP connectivity monitors after a period of time         |
+|              | specified by “waiting_time”                                  |
+|              |                                                              |
+|              | Result: The monitor info will be aggregated                  |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 10       | Verify the IP connectivity monitor result                    |
+|              |                                                              |
+|              | Result: IP connectivity monitor should not have any packet   |
+|              | drop failures reported                                       |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|test verdict  | Fails only if SLA is not passed, or if there is a test case  |
+|              | execution problem.                                           |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+
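
The "SDN controller process monitor" described in the table above reports the process_recover_time metric. The following is an illustrative sketch of that idea only, not Yardstick's monitor implementation; it assumes pgrep is available on the target node and uses a placeholder process name:

import subprocess
import time


def wait_for_process_recovery(process_name='opendaylight', timeout=120.0,
                              interval=1.0):
    """Poll for process_name and return the recovery time in seconds.

    Returns None if the process does not reappear before the timeout.
    """
    start = time.time()
    while time.time() - start < timeout:
        # pgrep exits 0 when at least one matching process is found.
        if subprocess.call(['pgrep', '-f', process_name],
                           stdout=subprocess.DEVNULL) == 0:
            return time.time() - start
        time.sleep(interval)
    return None


if __name__ == '__main__':
    print('process_recover_time: %s' % wait_for_process_recovery())
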
index d1d934f..311c759 100644 (file)
@@ -22,7 +22,7 @@
     "angular-sanitize": "^1.6.5"
   },
    "resolutions": {
-    "angular": "~1.6.x"
+    "angular": "~1.7.x"
   },
   "devDependencies": {
     "angular-mocks": "^1.4.0"
index 8640afb..f6e4ab7 100644 (file)
@@ -145,6 +145,21 @@ BASE_URL = 'http://localhost:5000'
 ENV_ACTION_API = BASE_URL + '/yardstick/env/action'
 ASYNC_TASK_API = BASE_URL + '/yardstick/asynctask'
 
+API_ERRORS = {
+    'UploadOpenrcError': {
+        'message': "Upload openrc ERROR!",
+        'status': API_ERROR,
+    },
+    'UpdateOpenrcError': {
+        'message': "Update openrc ERROR!",
+        'status': API_ERROR,
+    },
+    'ApiServerError': {
+        'message': "An unknown exception happened to Api Server!",
+        'status': API_ERROR,
+    },
+}
+
 # flags
 IS_EXISTING = 'is_existing'
 IS_PUBLIC = 'is_public'
index c7ba562..8a0c52d 100644 (file)
@@ -247,3 +247,15 @@ class ScenarioDeleteVolumeError(YardstickException):
 
 class ScenarioDetachVolumeError(YardstickException):
     message = 'Cinder Detach Volume Scenario failed'
+
+
+class ApiServerError(YardstickException):
+    message = 'An unknown exception happened to Api Server!'
+
+
+class UploadOpenrcError(ApiServerError):
+    message = 'Upload openrc ERROR!'
+
+
+class UpdateOpenrcError(ApiServerError):
+    message = 'Update openrc ERROR!'
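
The new exception classes above rely on the message attribute defined on their base. A hedged sketch of the message-template pattern such bases typically follow; the real YardstickException lives in yardstick/common/exceptions.py and may differ in detail:

class BaseError(Exception):
    """Illustrative base class, not the actual YardstickException."""
    message = 'An unknown exception occurred.'

    def __init__(self, **kwargs):
        try:
            # Subclasses may embed %(name)s placeholders in `message`.
            msg = self.message % kwargs
        except KeyError:
            msg = self.message
        super(BaseError, self).__init__(msg)


class UploadOpenrcError(BaseError):
    message = 'Upload openrc ERROR!'


# raise UploadOpenrcError() surfaces "Upload openrc ERROR!" to the caller and,
# via the API_ERRORS mapping in constants.py, to the REST client as well.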