Merge "Cleanup vPE VNF unit tests"
author    Rodolfo Alonso Hernandez <rodolfo.alonso.hernandez@intel.com>
          Tue, 3 Jul 2018 16:24:36 +0000 (16:24 +0000)
committer Gerrit Code Review <gerrit@opnfv.org>
          Tue, 3 Jul 2018 16:24:36 +0000 (16:24 +0000)
15 files changed:
ansible/install.yaml
ansible/roles/configure_uwsgi/templates/yardstick.ini.j2
docker/Dockerfile
docs/release/release-notes/release-notes.rst
docs/testing/developer/devguide/devguide_nsb_prox.rst
docs/testing/developer/devguide/images/PROX_Grafana_1.png [new file with mode: 0644]
docs/testing/developer/devguide/images/PROX_Grafana_2.png [new file with mode: 0644]
docs/testing/developer/devguide/images/PROX_Grafana_3.png [new file with mode: 0644]
docs/testing/developer/devguide/images/PROX_Grafana_4.png [new file with mode: 0644]
docs/testing/developer/devguide/images/PROX_Test_BM_Script.png
docs/testing/developer/devguide/images/PROX_Test_HEAT_Script.png [deleted file]
docs/testing/developer/devguide/images/PROX_Test_HEAT_Script1.png [new file with mode: 0644]
docs/testing/developer/devguide/images/PROX_Test_HEAT_Script2.png [new file with mode: 0644]
docs/testing/developer/devguide/index.rst
yardstick/tests/unit/benchmark/runner/test_proxduration.py

index d174579..e93232d 100644 (file)
@@ -35,6 +35,7 @@
     - install_yardstick
     - configure_uwsgi
     - configure_nginx
+    - configure_gui
     - download_trex
     - install_trex
     - configure_rabbitmq
index c049daf..044f42a 100644 (file)
@@ -1,7 +1,7 @@
 [uwsgi]
 master = true
 debug = true
-chdir = {{ yardstick_dir }}api
+chdir = {{ yardstick_dir }}/api
 module = server
 plugins = python
 processes = 10
@@ -15,4 +15,4 @@ close-on-exec = 1
 daemonize = {{ log_dir }}uwsgi.log
 socket = {{ socket_file }}
 {# If virtual environment, we need to add:
-   virtualenv = <virtual_env> #}
\ No newline at end of file
+   virtualenv = <virtual_env> #}
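
The first hunk matters because the rendered path depends on how the Ansible
variables are defined; a minimal sketch with assumed values (the real
defaults live in the role's variables, so these are hypothetical) shows the
effect::

    # Hypothetical variable values, for illustration only.
    yardstick_dir: /home/opnfv/repos/yardstick   # no trailing slash, so the
                                                 # template must insert one
    log_dir: /var/log/yardstick/                 # trailing slash already present
    socket_file: /var/run/yardstick.sock

With these values the template renders ``chdir = /home/opnfv/repos/yardstick/api``
instead of the broken ``chdir = /home/opnfv/repos/yardstickapi``; the second
hunk simply restores the newline at end of file.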
index 7f85cbd..4aa7237 100644 (file)
@@ -40,7 +40,7 @@ RUN git clone --depth 1 -b $BRANCH https://gerrit.opnfv.org/gerrit/yardstick ${Y
 RUN git clone --depth 1 https://gerrit.opnfv.org/gerrit/releng ${RELENG_REPO_DIR}
 RUN git clone --depth 1 -b $BRANCH https://gerrit.opnfv.org/gerrit/storperf ${STORPERF_REPO_DIR}
 
-RUN ansible-playbook -c local -vvv -e INSTALLATION_MODE="container" ${YARDSTICK_REPO_DIR}/ansible/install.yaml
+RUN ansible-playbook -i ${YARDSTICK_REPO_DIR}/ansible/install-inventory.ini -c local -vvv -e INSTALLATION_MODE="container" ${YARDSTICK_REPO_DIR}/ansible/install.yaml
 
 RUN ${YARDSTICK_REPO_DIR}/docker/supervisor.sh
 
index 7ea2616..daa4b81 100644 (file)
@@ -36,6 +36,12 @@ Version History
 | *Date*            | *Version* | *Comment*                       |
 |                   |           |                                 |
 +-------------------+-----------+---------------------------------+
+| Jul 2, 2018       | 6.2.1     | Yardstick for Fraser release    |
+|                   |           |                                 |
++-------------------+-----------+---------------------------------+
+| Jun 29, 2018      | 6.2.0     | Yardstick for Fraser release    |
+|                   |           |                                 |
++-------------------+-----------+---------------------------------+
 | May 25, 2018      | 6.1.0     | Yardstick for Fraser release    |
 |                   |           |                                 |
 +-------------------+-----------+---------------------------------+
@@ -120,19 +126,19 @@ Release Data
 | **Project**                    | Yardstick             |
 |                                |                       |
 +--------------------------------+-----------------------+
-| **Repo/tag**                   | yardstick/opnfv-6.1.0 |
+| **Repo/tag**                   | yardstick/opnfv-6.2.0 |
 |                                |                       |
 +--------------------------------+-----------------------+
-| **Yardstick Docker image tag** | opnfv-6.1.0           |
+| **Yardstick Docker image tag** | opnfv-6.2.0           |
 |                                |                       |
 +--------------------------------+-----------------------+
 | **Release designation**        | Fraser                |
 |                                |                       |
 +--------------------------------+-----------------------+
-| **Release date**               | May 25, 2018          |
+| **Release date**               | Jun 29, 2018          |
 |                                |                       |
 +--------------------------------+-----------------------+
-| **Purpose of the delivery**    | OPNFV Fraser 6.1.0    |
+| **Purpose of the delivery**    | OPNFV Fraser 6.2.0    |
 |                                |                       |
 +--------------------------------+-----------------------+
 
@@ -151,7 +157,7 @@ Documents
 Software Deliverables
 ---------------------
 
- - The Yardstick Docker image: https://hub.docker.com/r/opnfv/yardstick (tag: opnfv-6.1.0)
+ - The Yardstick Docker image: https://hub.docker.com/r/opnfv/yardstick (tag: opnfv-6.2.0)
 
 List of Contexts
 ^^^^^^^^^^^^^^^^
@@ -391,6 +397,110 @@ Known Issues/Faults
 Corrected Faults
 ----------------
 
+Fraser 6.2.1:
+
++--------------------+--------------------------------------------------------------------------+
+| **JIRA REFERENCE** |                             **DESCRIPTION**                              |
++====================+==========================================================================+
+|   YARDSTICK-1147   | Fix ansible scripts for running in container                             |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1157   | Bug Fix: correct the file path to build docker file                      |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1276   | Bugfix: docker build failed                                              |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1280   | Bugfix: uwsgi config file yardstick.ini output error                     |
++--------------------+--------------------------------------------------------------------------+
+
+Fraser 6.2.0:
+
++--------------------+--------------------------------------------------------------------------+
+| **JIRA REFERENCE** |                             **DESCRIPTION**                              |
++====================+==========================================================================+
+|   YARDSTICK-1246   | Update pmd/lcore mask for OVS-DPDK context                               |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-837    | Move tests: unit/network_services/{lib/,collector/,*.py}                 |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1144   | Correctly set PYTHONPATH in Dockerfile                                   |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1205   | Set "cmd2" library to version 0.8.6                                      |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1204   | Bump oslo.messaging version to 5.36.0                                    |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1210   | Remove __init__ method overriding in HeatContextTestCase                 |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1189   | Error when adding SR-IOV interfaces in SR-IOV context                    |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1214   | Remove AnsibleCommon class method mock                                   |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1159   | Add --hwlb options as a command line argument for SampleVNF              |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1203   | Add scale out TCs with availability zone support                         |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1167   | Do not start collectd twice when SampleVNF is running on Baremetal       |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1188   | Add "host_name_separator" variable to Context class                      |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1112   | MQ startup process refactor                                              |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1229   | Cleanup BaseMonitor unit tests                                           |
++--------------------+--------------------------------------------------------------------------+
+|         -          | Configure ACL via static file                                            |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1191   | Use TRex release v2.41 to support both x86 and aarch64                   |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1106   | Add IxNetwork API Python Binding package                                 |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1224   | Cleanup TestYardstickNSCli class                                         |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1225   | Remove print out of logger exception in TestUtils                        |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1194   | Add "duration" parameter to test case definition                         |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1209   | Remove instantiated contexts in "test_task"                              |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1192   | Standalone XML machine type is no longer valid                           |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1197   | Refactor RFC2455 TRex traffic profile injection                          |
++--------------------+--------------------------------------------------------------------------+
+|         -          | Fix "os.path" mock problems during tests                                 |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1218   | Refactor "utils.parse_ini_file" testing                                  |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1179   | Start nginx and uwsgi services only in non-container mode                |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1177   | Install dependencies: bare-metal, standalone                             |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1126   | Migrate install.sh script to ansible                                     |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1146   | Fix nsb_setup.sh script                                                  |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1247   | NSB setup inventory name changed                                         |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1116   | Changed IxNextgen library load in IXIA RFC2544 traffic generator call.   |
++--------------------+--------------------------------------------------------------------------+
+|         -          | Corrected scale-up command line arguments                                |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-878    | OpenStack client replacement                                             |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1222   | Bugfix: HA kill process recovery has a conflict                          |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1139   | Add "os_cloud_config" as a new context flag parameter                    |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1255   | Extended Context class with get_physical_nodes functionality             |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1244   | NSB NFVi BNG test fails to run - stops after one step                    |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1219   | Decrease Sampling interval                                               |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1101   | NSB NFVi PROX BNG losing many packets                                    |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1217   | Fix NSB NfVi support for 25 and 40Gbps                                   |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1185   | NSB Topology fix for Prox 4 port test case                               |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-966    | Convert SLA asserts to raises                                            |
++--------------------+--------------------------------------------------------------------------+
+
 Fraser 6.1.0:
 
 +--------------------+--------------------------------------------------------------------------+
index 2262841..7999005 100755 (executable)
@@ -244,10 +244,13 @@ Now let's examine the components of the file in detail
 3. ``nodes`` - This names the Traffic Generator and the System
    under Test. Does not need to change.
 
-4. ``prox_path`` - Location of the Prox executable on the traffic
+4. ``interface_speed_gbps`` - This is an optional parameter that defines
+   the speed of the interfaces. If not present, the system defaults to 10 Gbps.
+
+5. ``prox_path`` - Location of the Prox executable on the traffic
    generator (either baremetal or an OpenStack virtual machine)
 
-5. ``prox_config`` - This is the ``SUT Config File``.
+6. ``prox_config`` - This is the ``SUT Config File``.
    In this case it is ``handle_l2fwd-2.cfg``
 
    A number of additional parameters can be added. This example
@@ -285,16 +288,31 @@ Now let's examine the components of the file in detail
    of a file called ``parameters.lua``, which contains information
    retrieved from either the hardware or the openstack configuration.
 
-6. ``prox_args`` - this specifies the command line arguments to start
+7. ``prox_args`` - this specifies the command line arguments to start
    prox. See `prox command line`_.
 
-7. ``prox_config`` - This specifies the Traffic Generator config file.
+8. ``prox_config`` - This specifies the Traffic Generator config file.
+
+9. ``runner`` - This is set to ``ProxDuration``, which specifies that the
+   test runs for a set duration. Other runner types are available,
+   but it is recommended to use ``ProxDuration``.
+
+   The following parameters are supported (see the sketch after this list):
+
+   ``interval`` - (optional) - This specifies the sampling interval.
+   Default is 1 second.
+
+   ``sampled`` - (optional) - This specifies whether sampling information is
+   required. Default is ``no``.
+
+   ``duration`` - This is the length of the test in seconds. Default
+   is 60 seconds.
 
-8. ``runner`` - This is set to ``Duration`` - This specified that the
-   test run for a set duration. Other runner types are available
-   but it is recommend to use ``Duration``
+   ``confirmation`` - This specifies the number of confirmation retests to
+   be made before deciding to increase or decrease line speed. Default is 0.
+
+10. ``context`` - This is the ``context`` for a 2 port Baremetal configuration.
 
-9. ``context`` - This is ``context`` for a 2 port Baremetal configuration.
    If a 4 port configuration was required, then the file
    ``prox-baremetal-4.yaml`` would be used. This is the NSB Prox
    baremetal configuration file.
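
To make items 4 to 10 concrete, here is a minimal sketch of how these
fields sit together in a test description file. The field names come from
the items above; the scenario type, paths and values are assumed for
illustration only, and some nesting is simplified::

    scenarios:
    - type: NSPerf                        # assumed scenario type
      interface_speed_gbps: 10            # item 4, optional, defaults to 10 Gbps
      prox_path: /opt/nsb_bin/prox        # item 5, hypothetical location
      prox_config: configs/handle_l2fwd-2.cfg   # item 6, the SUT config file
      prox_args:                          # item 7, prox command line arguments
        '-t': ''
      runner:                             # item 9
        type: ProxDuration
        interval: 1                       # optional sampling interval, default 1 second
        sampled: yes                      # optional, default "no"
        duration: 60                      # test length in seconds, default 60
        confirmation: 1                   # confirmation retests, default 0
    context:
      type: Node                          # assumed context type for baremetal
      file: prox-baremetal-2.yaml         # item 10, the 2 port context file
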
@@ -304,7 +322,8 @@ Now let's examine the components of the file in detail
 *Traffic Profile file*
 ----------------------
 
-This describes the details of the traffic flow. In this case ``prox_binsearch.yaml`` is used.
+This describes the details of the traffic flow. In this case
+``prox_binsearch.yaml`` is used.
 
 .. image:: images/PROX_Traffic_profile.png
    :width: 800px
@@ -326,21 +345,29 @@ This describes the details of the traffic flow. In this case ``prox_binsearch.ya
 
    Custom traffic types can be created by creating a new traffic profile class.
 
-3. ``tolerated_loss`` - This specifies the percentage of packets that can be lost/dropped before
-   we declare success or failure. Success is Transmitted-Packets from Traffic Generator is greater than or equal to
+3. ``tolerated_loss`` - This specifies the percentage of packets that
+   can be lost/dropped before we declare success or failure. Success
+   means the packets transmitted by the Traffic Generator are greater
+   than or equal to the
    packets received by the Traffic Generator plus the tolerated loss.
 
-4. ``test_precision`` - This specifies the precision of the test results. For some tests the success criteria
-   may never be achieved because the test precision may be greater than the successful throughput. For finer
-   results increase the precision by making this value smaller.
+4. ``test_precision`` - This specifies the precision of the test
+   results. For some tests the success criteria may never be
+   achieved because the test precision may be greater than the
+   successful throughput. For finer results, increase the precision
+   by making this value smaller.
 
-5. ``packet_sizes`` - This specifies the range of packets size this test is run for.
+5. ``packet_sizes`` - This specifies the range of packet sizes this
+   test is run for.
 
-6. ``duration`` - This specifies the sample duration that the test uses to check for success or failure.
+6. ``duration`` - This specifies the sample duration that the test
+   uses to check for success or failure.
 
-7. ``lower_bound`` - This specifies the test initial lower bound sample rate. On success this value is increased.
+7. ``lower_bound`` - This specifies the initial lower bound sample rate
+   of the test. On success this value is increased.
 
-8. ``upper_bound`` - This specifies the test initial upper bound sample rate. On success this value is decreased.
+8. ``upper_bound`` - This specifies the initial upper bound sample rate
+   of the test. On success this value is decreased. A sketch of these
+   fields follows this list.
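
Putting fields 3 to 8 together, a sketch of a binary-search traffic
profile follows. The field names are the ones described above; the numeric
values are placeholders, not the contents of ``prox_binsearch.yaml``::

    traffic_profile:
      tolerated_loss: 0.001      # field 3: loss percentage allowed before failure
      test_precision: 0.1        # field 4: smaller value gives finer results
      packet_sizes: [64, 128, 256, 512, 1024, 1280, 1518]   # field 5
      duration: 10               # field 6: sample duration in seconds
      lower_bound: 0.0           # field 7: raised on success
      upper_bound: 100.0         # field 8: lowered on success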
 
 Other traffic profiles exist, e.g. ``prox_ACL.yaml``, which does not
 compare what is received with what is transmitted. It just
@@ -371,14 +398,18 @@ See this prox_vpe.yaml as example::
 We will use ``tc_prox_heat_context_l2fwd-2.yaml`` as an example to show
 you how to understand the test description file.
 
-.. image:: images/PROX_Test_HEAT_Script.png
+.. image:: images/PROX_Test_HEAT_Script1.png
    :width: 800px
-   :alt: NSB PROX Test Description File
+   :alt: NSB PROX Test Description File - Part 1
+
+.. image:: images/PROX_Test_HEAT_Script2.png
+   :width: 800px
+   :alt: NSB PROX Test Description File - Part 2
 
 Now let's examine the components of the file in detail
 
-Sections 1 to 8 are exactly the same in Baremetal and in Heat. Section
-``9`` is replaced with sections A to F. Section 9 was for a baremetal
+Sections 1 to 9 are exactly the same in Baremetal and in Heat. Section
+``10`` is replaced with sections A to F. Section 10 was for a baremetal
 configuration file. This has no place in a Heat configuration.
 
 A. ``image`` - yardstick-samplevnfs. This is the name of the image
@@ -418,12 +449,12 @@ F. ``networks`` - is composed of a management network labeled ``mgmt``
         gateway_ip: 'null'
         port_security_enabled: False
         enable_dhcp: 'false'
-      downlink_1:
+      uplink_1:
         cidr: '10.0.4.0/24'
         gateway_ip: 'null'
         port_security_enabled: False
         enable_dhcp: 'false'
-      downlink_2:
+      downlink_1:
         cidr: '10.0.5.0/24'
         gateway_ip: 'null'
         port_security_enabled: False
@@ -1033,7 +1064,7 @@ If PROX NSB does not work on baremetal, problem is either in network configurati
      1. What is received on 0 is transmitted on 1, received on 1 transmitted on 0,
         received on 2 transmitted on 3 and received on 3 transmitted on 2.
      2. No packets are failed.
-     3. No Packets are discarded.
+     3. No packets are discarded.
 
   We can also dump the packets being received or transmitted via the following commands. ::
 
@@ -1228,7 +1259,69 @@ Where
     4) ir.intel.com = local no proxy
 
 
+*How to Understand the Grafana output?*
+---------------------------------------
+
+.. image:: images/PROX_Grafana_1.png
+   :width: 1000px
+   :alt: NSB PROX Grafana_1
+
+.. image:: images/PROX_Grafana_2.png
+   :width: 1000px
+   :alt: NSB PROX Grafana_2
+
+.. image:: images/PROX_Grafana_3.png
+   :width: 1000px
+   :alt: NSB PROX Grafana_3
+
+.. image:: images/PROX_Grafana_4.png
+   :width: 1000px
+   :alt: NSB PROX Grafana_4
+
+A. Test Parameters - Test Interval, Duration, Tolerated Loss and Test Precision
+
+B. Overall number of packets sent and received during the test
+
+C. Generator Stats - packets sent, received and attempted by the Generator
+
+D. Packet Size
+
+E. Number of packets received by the SUT
+
+F. Number of packets forwarded by the SUT
+
+G. This is the number of packets sent by the generator per port, for each interval.
+
+H. This is the number of packets received by the generator per port, for each interval.
+
+I. This is the number of packets sent and received by the generator, and
+   lost by the SUT, that meet the success criteria.
+
+J. This shows the change in the percentage of line rate used over a test.
+   The MAX and the MIN should converge to within the interval specified
+   as the ``test-precision``.
+
+K. This is the packet size supported during the test. If "N/A" appears in
+   any field, the result has not been decided.
+
+L. This is the calculated throughput in Mpps (million packets per second)
+   for this line rate.
+
+M. This is the actual number of packets sent by the generator in Mpps.
+
+N. This is the actual number of packets received by the generator in Mpps.
+
+O. This is the total number of packets sent by the SUT.
+
+P. This is the total number of packets received by the SUT.
+
+Q. This is the total number of packets dropped. (These packets were sent
+   by the generator but not received back by the generator; they may have
+   been dropped by the SUT or by the generator.)
+
+R. This is the tolerated number of packets that can be dropped.
+
+S. This is the test throughput in Gbps.
+
+T. This is the latency per port.
+
+U. This is the CPU utilization.
 
 
 
diff --git a/docs/testing/developer/devguide/images/PROX_Grafana_1.png b/docs/testing/developer/devguide/images/PROX_Grafana_1.png
new file mode 100644 (file)
index 0000000..d272edc
Binary files /dev/null and b/docs/testing/developer/devguide/images/PROX_Grafana_1.png differ
diff --git a/docs/testing/developer/devguide/images/PROX_Grafana_2.png b/docs/testing/developer/devguide/images/PROX_Grafana_2.png
new file mode 100644 (file)
index 0000000..4f7fd4c
Binary files /dev/null and b/docs/testing/developer/devguide/images/PROX_Grafana_2.png differ
diff --git a/docs/testing/developer/devguide/images/PROX_Grafana_3.png b/docs/testing/developer/devguide/images/PROX_Grafana_3.png
new file mode 100644 (file)
index 0000000..5ae9676
Binary files /dev/null and b/docs/testing/developer/devguide/images/PROX_Grafana_3.png differ
diff --git a/docs/testing/developer/devguide/images/PROX_Grafana_4.png b/docs/testing/developer/devguide/images/PROX_Grafana_4.png
new file mode 100644 (file)
index 0000000..5353d1c
Binary files /dev/null and b/docs/testing/developer/devguide/images/PROX_Grafana_4.png differ
index 32530eb..c09f7bb 100644 (file)
Binary files a/docs/testing/developer/devguide/images/PROX_Test_BM_Script.png and b/docs/testing/developer/devguide/images/PROX_Test_BM_Script.png differ
diff --git a/docs/testing/developer/devguide/images/PROX_Test_HEAT_Script.png b/docs/testing/developer/devguide/images/PROX_Test_HEAT_Script.png
deleted file mode 100644 (file)
index 754973b..0000000
Binary files a/docs/testing/developer/devguide/images/PROX_Test_HEAT_Script.png and /dev/null differ
diff --git a/docs/testing/developer/devguide/images/PROX_Test_HEAT_Script1.png b/docs/testing/developer/devguide/images/PROX_Test_HEAT_Script1.png
new file mode 100644 (file)
index 0000000..bd375db
Binary files /dev/null and b/docs/testing/developer/devguide/images/PROX_Test_HEAT_Script1.png differ
diff --git a/docs/testing/developer/devguide/images/PROX_Test_HEAT_Script2.png b/docs/testing/developer/devguide/images/PROX_Test_HEAT_Script2.png
new file mode 100644 (file)
index 0000000..99d9d24
Binary files /dev/null and b/docs/testing/developer/devguide/images/PROX_Test_HEAT_Script2.png differ
index 92a18f6..9a76a32 100644 (file)
@@ -14,3 +14,4 @@ Yardstick Developer Guide
    :numbered:
 
    devguide
+   devguide_nsb_prox
index be1715a..3299c5b 100644 (file)
@@ -1,23 +1,29 @@
-##############################################################################
-# Copyright (c) 2018 Nokia and others.
+# Copyright (c) 2018 Intel Corporation
 #
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 
 import mock
 import unittest
 import multiprocessing
 import os
-import time
 
 from yardstick.benchmark.runners import proxduration
+from yardstick.common import constants
 from yardstick.common import exceptions as y_exc
 
 
 class ProxDurationRunnerTest(unittest.TestCase):
+
     class MyMethod(object):
         SLA_VALIDATION_ERROR_SIDE_EFFECT = 1
         BROAD_EXCEPTION_SIDE_EFFECT = 2
@@ -69,38 +75,37 @@ class ProxDurationRunnerTest(unittest.TestCase):
     @mock.patch.object(os, 'getpid')
     def test__worker_process_runner_id(self, mock_os_getpid):
         mock_os_getpid.return_value = 101
-        self.scenario_cfg["runner"] = {"sampled": True, "duration": 1}
-        proxduration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
-                                 self.scenario_cfg, {},
-                                 multiprocessing.Event(), mock.Mock())
+        self.scenario_cfg["runner"] = {"sampled": True, "duration": 0.1}
+        proxduration._worker_process(
+            mock.Mock(), self.benchmark_cls, 'my_method', self.scenario_cfg,
+            {}, multiprocessing.Event(), mock.Mock())
 
-        self.assertEqual(self.scenario_cfg['runner']['runner_id'], 101)
+        self.assertEqual(101, self.scenario_cfg['runner']['runner_id'])
 
     def test__worker_process_called_with_cfg(self):
-        self.scenario_cfg["runner"] = {"sampled": True, "duration": 1}
-        proxduration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
-                                 self.scenario_cfg, {},
-                                 multiprocessing.Event(), mock.Mock())
+        self.scenario_cfg["runner"] = {"sampled": True, "duration": 0.1}
+        proxduration._worker_process(
+            mock.Mock(), self.benchmark_cls, 'my_method', self.scenario_cfg,
+            {}, multiprocessing.Event(), mock.Mock())
 
         self._assert_defaults__worker_run_setup_and_teardown()
 
     def test__worker_process_called_with_cfg_loop(self):
-        self.scenario_cfg["runner"] = {"sampled": True, "duration": 0.01}
-        proxduration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
-                                 self.scenario_cfg, {},
-                                 multiprocessing.Event(), mock.Mock())
+        self.scenario_cfg["runner"] = {"sampled": True, "duration": 0.1}
+        proxduration._worker_process(
+            mock.Mock(), self.benchmark_cls, 'my_method', self.scenario_cfg,
+            {}, multiprocessing.Event(), mock.Mock())
 
         self._assert_defaults__worker_run_setup_and_teardown()
         self.assertGreater(self.benchmark.my_method.call_count, 2)
 
     def test__worker_process_called_without_cfg(self):
         scenario_cfg = {'runner': {}}
-
         aborted = multiprocessing.Event()
         aborted.set()
-
-        proxduration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
-                                 scenario_cfg, {}, aborted, mock.Mock())
+        proxduration._worker_process(
+            mock.Mock(), self.benchmark_cls, 'my_method', scenario_cfg, {},
+            aborted, mock.Mock())
 
         self.benchmark_cls.assert_called_once_with(scenario_cfg, {})
         self.benchmark.setup.assert_called_once()
@@ -108,188 +113,174 @@ class ProxDurationRunnerTest(unittest.TestCase):
 
     def test__worker_process_output_queue(self):
         self.benchmark.my_method = mock.Mock(return_value='my_result')
-        self.scenario_cfg["runner"] = {"sampled": True, "duration": 1}
-        output_queue = multiprocessing.Queue()
-        proxduration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
-                                 self.scenario_cfg, {},
-                                 multiprocessing.Event(), output_queue)
-        time.sleep(0.1)
+        self.scenario_cfg["runner"] = {"sampled": True, "duration": 0.1}
+        output_queue = mock.Mock()
+        proxduration._worker_process(
+            mock.Mock(), self.benchmark_cls, 'my_method', self.scenario_cfg,
+            {}, multiprocessing.Event(), output_queue)
 
         self._assert_defaults__worker_run_setup_and_teardown()
-        self.assertEquals(output_queue.get(), 'my_result')
+        output_queue.put.assert_has_calls(
+            [mock.call('my_result', True, constants.QUEUE_PUT_TIMEOUT)])
 
     def test__worker_process_output_queue_multiple_iterations(self):
-        self.scenario_cfg["runner"] = {"sampled": True, "duration": 1}
+        self.scenario_cfg["runner"] = {"sampled": True, "duration": 0.1}
         self.benchmark.my_method = self.MyMethod()
-
-        output_queue = multiprocessing.Queue()
-        proxduration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
-                                 self.scenario_cfg, {},
-                                 multiprocessing.Event(), output_queue)
-        time.sleep(0.1)
+        output_queue = mock.Mock()
+        proxduration._worker_process(
+            mock.Mock(), self.benchmark_cls, 'my_method', self.scenario_cfg,
+            {}, multiprocessing.Event(), output_queue)
 
         self._assert_defaults__worker_run_setup_and_teardown()
-        self.assertGreater(self.benchmark.my_method.count, 103)
-
-        count = 101
-        while not output_queue.empty():
-            count += 1
-            self.assertEquals(output_queue.get(), count)
+        for idx in range(102, 101 + len(output_queue.method_calls)):
+            output_queue.put.assert_has_calls(
+                [mock.call(idx, True, constants.QUEUE_PUT_TIMEOUT)])
 
     def test__worker_process_queue(self):
         self.benchmark.my_method = self.MyMethod()
-        self.scenario_cfg["runner"] = {"sampled": True, "duration": 1}
-        queue = multiprocessing.Queue()
-        timestamp = time.time()
-        proxduration._worker_process(queue, self.benchmark_cls, 'my_method',
-                                 self.scenario_cfg, {},
-                                 multiprocessing.Event(), mock.Mock())
-        time.sleep(0.1)
+        self.scenario_cfg["runner"] = {"sampled": True, "duration": 0.1}
+        queue = mock.Mock()
+        proxduration._worker_process(
+            queue, self.benchmark_cls, 'my_method', self.scenario_cfg, {},
+            multiprocessing.Event(), mock.Mock())
 
         self._assert_defaults__worker_run_setup_and_teardown()
-
-        result = queue.get()
-        self.assertGreater(result['timestamp'], timestamp)
-        self.assertEqual(result['errors'], '')
-        self.assertEqual(result['data'], {'my_key': 102})
-        self.assertEqual(result['sequence'], 1)
+        benchmark_output = {'timestamp': mock.ANY,
+                            'sequence': 1,
+                            'data': {'my_key': 102},
+                            'errors': ''}
+        queue.put.assert_has_calls(
+            [mock.call(benchmark_output, True, constants.QUEUE_PUT_TIMEOUT)])
 
     def test__worker_process_queue_multiple_iterations(self):
-        self.scenario_cfg["runner"] = {"sampled": True, "duration": 1}
+        self.scenario_cfg["runner"] = {"sampled": True, "duration": 0.1}
         self.benchmark.my_method = self.MyMethod()
-
-        queue = multiprocessing.Queue()
-        timestamp = time.time()
-        proxduration._worker_process(queue, self.benchmark_cls, 'my_method',
-                                 self.scenario_cfg, {},
-                                 multiprocessing.Event(), mock.Mock())
-        time.sleep(0.1)
+        queue = mock.Mock()
+        proxduration._worker_process(
+            queue, self.benchmark_cls, 'my_method', self.scenario_cfg, {},
+            multiprocessing.Event(), mock.Mock())
 
         self._assert_defaults__worker_run_setup_and_teardown()
-        self.assertGreater(self.benchmark.my_method.count, 103)
-
-        count = 0
-        while not queue.empty():
-            count += 1
-            result = queue.get()
-            self.assertGreater(result['timestamp'], timestamp)
-            self.assertEqual(result['errors'], '')
-            self.assertEqual(result['data'], {'my_key': count + 101})
-            self.assertEqual(result['sequence'], count)
+        for idx in range(102, 101 + len(queue.method_calls)):
+            benchmark_output = {'timestamp': mock.ANY,
+                                'sequence': idx - 101,
+                                'data': {'my_key': idx},
+                                'errors': ''}
+            queue.put.assert_has_calls(
+                [mock.call(benchmark_output, True,
+                           constants.QUEUE_PUT_TIMEOUT)])
 
     def test__worker_process_except_sla_validation_error_no_sla_cfg(self):
         self.benchmark.my_method = mock.Mock(
             side_effect=y_exc.SLAValidationError)
-        self.scenario_cfg["runner"] = {"sampled": True, "duration": 1}
-        proxduration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
-                                 self.scenario_cfg, {},
-                                 multiprocessing.Event(), mock.Mock())
+        self.scenario_cfg["runner"] = {"sampled": True, "duration": 0.1}
+        proxduration._worker_process(
+            mock.Mock(), self.benchmark_cls, 'my_method', self.scenario_cfg,
+            {}, multiprocessing.Event(), mock.Mock())
 
         self._assert_defaults__worker_run_setup_and_teardown()
 
-    def test__worker_process_except_sla_validation_error_sla_cfg_monitor(self):
+    @mock.patch.object(proxduration.LOG, 'warning')
+    def test__worker_process_except_sla_validation_error_sla_cfg_monitor(
+            self, *args):
         self.scenario_cfg['sla'] = {'action': 'monitor'}
-        self.scenario_cfg["runner"] = {"sampled": True, "duration": 1}
+        self.scenario_cfg["runner"] = {"sampled": True, "duration": 0.1}
         self.benchmark.my_method = mock.Mock(
             side_effect=y_exc.SLAValidationError)
-
-        proxduration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
-                                 self.scenario_cfg, {},
-                                 multiprocessing.Event(), mock.Mock())
+        proxduration._worker_process(
+            mock.Mock(), self.benchmark_cls, 'my_method', self.scenario_cfg,
+            {}, multiprocessing.Event(), mock.Mock())
 
         self._assert_defaults__worker_run_setup_and_teardown()
 
     def test__worker_process_raise_sla_validation_error_sla_cfg_default(self):
         self.scenario_cfg['sla'] = {}
-        self.scenario_cfg["runner"] = {"sampled": True, "duration": 1}
+        self.scenario_cfg["runner"] = {"sampled": True, "duration": 0.1}
         self.benchmark.my_method = mock.Mock(
             side_effect=y_exc.SLAValidationError)
-
         with self.assertRaises(y_exc.SLAValidationError):
-            proxduration._worker_process(mock.Mock(), self.benchmark_cls,
-                                     'my_method', self.scenario_cfg, {},
-                                     multiprocessing.Event(), mock.Mock())
+            proxduration._worker_process(
+                mock.Mock(), self.benchmark_cls, 'my_method',
+                self.scenario_cfg, {}, multiprocessing.Event(), mock.Mock())
 
         self.benchmark_cls.assert_called_once_with(self.scenario_cfg, {})
         self.benchmark.setup.assert_called_once()
         self.benchmark.my_method.assert_called_once_with({})
 
     def test__worker_process_raise_sla_validation_error_sla_cfg_assert(self):
-        self.scenario_cfg["runner"] = {"sampled": True, "duration": 1}
+        self.scenario_cfg["runner"] = {"sampled": True, "duration": 0.1}
         self.scenario_cfg['sla'] = {'action': 'assert'}
         self.benchmark.my_method = mock.Mock(
             side_effect=y_exc.SLAValidationError)
 
         with self.assertRaises(y_exc.SLAValidationError):
-            proxduration._worker_process(mock.Mock(), self.benchmark_cls,
-                                     'my_method', self.scenario_cfg, {},
-                                     multiprocessing.Event(), mock.Mock())
+            proxduration._worker_process(
+                mock.Mock(), self.benchmark_cls, 'my_method',
+                self.scenario_cfg, {}, multiprocessing.Event(), mock.Mock())
 
         self.benchmark_cls.assert_called_once_with(self.scenario_cfg, {})
         self.benchmark.setup.assert_called_once()
         self.benchmark.my_method.assert_called_once_with({})
 
-    def test__worker_process_queue_on_sla_validation_error_monitor(self):
+    @mock.patch.object(proxduration.LOG, 'warning')
+    def test__worker_process_queue_on_sla_validation_error_monitor(
+            self, *args):
         self.scenario_cfg['sla'] = {'action': 'monitor'}
-        self.scenario_cfg["runner"] = {"sampled": True, "duration": 1}
+        self.scenario_cfg["runner"] = {"sampled": True, "duration": 0.1}
         self.benchmark.my_method = self.MyMethod(
             side_effect=self.MyMethod.SLA_VALIDATION_ERROR_SIDE_EFFECT)
-
-        queue = multiprocessing.Queue()
-        timestamp = time.time()
-        proxduration._worker_process(queue, self.benchmark_cls, 'my_method',
-                                 self.scenario_cfg, {},
-                                 multiprocessing.Event(), mock.Mock())
-        time.sleep(0.1)
+        queue = mock.Mock()
+        proxduration._worker_process(
+            queue, self.benchmark_cls, 'my_method', self.scenario_cfg, {},
+            multiprocessing.Event(), mock.Mock())
 
         self._assert_defaults__worker_run_setup_and_teardown()
-
-        result = queue.get()
-        self.assertGreater(result['timestamp'], timestamp)
-        self.assertEqual(result['errors'], ('My Case SLA validation failed. '
-                                            'Error: my error message',))
-        self.assertEqual(result['data'], {'my_key': 102})
-        self.assertEqual(result['sequence'], 1)
-
-    def test__worker_process_broad_exception(self):
+        benchmark_output = {'timestamp': mock.ANY,
+                            'sequence': 1,
+                            'data': {'my_key': 102},
+                            'errors': ('My Case SLA validation failed. '
+                                       'Error: my error message', )}
+        queue.put.assert_has_calls(
+            [mock.call(benchmark_output, True, constants.QUEUE_PUT_TIMEOUT)])
+
+    @mock.patch.object(proxduration.LOG, 'exception')
+    def test__worker_process_broad_exception(self, *args):
         self.benchmark.my_method = mock.Mock(
             side_effect=y_exc.YardstickException)
-        self.scenario_cfg["runner"] = {"sampled": True, "duration": 1}
-        proxduration._worker_process(mock.Mock(), self.benchmark_cls, 'my_method',
-                                 self.scenario_cfg, {},
-                                 multiprocessing.Event(), mock.Mock())
+        self.scenario_cfg["runner"] = {"sampled": True, "duration": 0.1}
+        proxduration._worker_process(
+            mock.Mock(), self.benchmark_cls, 'my_method',
+            self.scenario_cfg, {}, multiprocessing.Event(), mock.Mock())
 
         self._assert_defaults__worker_run_setup_and_teardown()
 
-    def test__worker_process_queue_on_broad_exception(self):
+    @mock.patch.object(proxduration.LOG, 'exception')
+    def test__worker_process_queue_on_broad_exception(self, *args):
         self.benchmark.my_method = self.MyMethod(
             side_effect=self.MyMethod.BROAD_EXCEPTION_SIDE_EFFECT)
-
-        self.scenario_cfg["runner"] = {"sampled": True, "duration": 1}
-        queue = multiprocessing.Queue()
-        timestamp = time.time()
-        proxduration._worker_process(queue, self.benchmark_cls, 'my_method',
-                                 self.scenario_cfg, {},
-                                 multiprocessing.Event(), mock.Mock())
-        time.sleep(0.1)
-
-        self._assert_defaults__worker_run_setup_and_teardown()
-
-        result = queue.get()
-        self.assertGreater(result['timestamp'], timestamp)
-        self.assertNotEqual(result['errors'], '')
-        self.assertEqual(result['data'], {'my_key': 102})
-        self.assertEqual(result['sequence'], 1)
-
-    def test__worker_process_benchmark_teardown_on_broad_exception(self):
+        self.scenario_cfg["runner"] = {"sampled": True, "duration": 0.1}
+        queue = mock.Mock()
+        proxduration._worker_process(
+            queue, self.benchmark_cls, 'my_method', self.scenario_cfg, {},
+            multiprocessing.Event(), mock.Mock())
+
+        benchmark_output = {'timestamp': mock.ANY,
+                            'sequence': 1,
+                            'data': {'my_key': 102},
+                            'errors': mock.ANY}
+        queue.put.assert_has_calls(
+            [mock.call(benchmark_output, True, constants.QUEUE_PUT_TIMEOUT)])
+
+    @mock.patch.object(proxduration.LOG, 'exception')
+    def test__worker_process_benchmark_teardown_on_broad_exception(
+            self, *args):
         self.benchmark.teardown = mock.Mock(
             side_effect=y_exc.YardstickException)
-
-        self.scenario_cfg["runner"] = {"sampled": True, "duration": 1}
+        self.scenario_cfg["runner"] = {"sampled": True, "duration": 0.1}
 
         with self.assertRaises(SystemExit) as raised:
-            proxduration._worker_process(mock.Mock(), self.benchmark_cls,
-                                     'my_method', self.scenario_cfg, {},
-                                     multiprocessing.Event(), mock.Mock())
-        self.assertEqual(raised.exception.code, 1)
+            proxduration._worker_process(
+                mock.Mock(), self.benchmark_cls, 'my_method',
+                self.scenario_cfg, {}, multiprocessing.Event(), mock.Mock())
+        self.assertEqual(1, raised.exception.code)
         self._assert_defaults__worker_run_setup_and_teardown()