X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?a=blobdiff_plain;f=yardstick%2Fbenchmark%2Frunners%2Fduration.py;h=60f1fa5368e4e7402620b632e63850aae8d372d9;hb=01711acbd1a8b9f5bcf20073f33ad6cc83ede970;hp=6a09131e1a13985928fdb0c68884223f4ff6ab56;hpb=def87da57962d932086840fc20950dc90de5c567;p=yardstick.git

diff --git a/yardstick/benchmark/runners/duration.py b/yardstick/benchmark/runners/duration.py
index 6a09131e1..60f1fa536 100644
--- a/yardstick/benchmark/runners/duration.py
+++ b/yardstick/benchmark/runners/duration.py
@@ -27,20 +27,19 @@ import traceback
 import time
 
 from yardstick.benchmark.runners import base
+from yardstick.common import exceptions as y_exc
 
 LOG = logging.getLogger(__name__)
 
 
+QUEUE_PUT_TIMEOUT = 10
+
+
 def _worker_process(queue, cls, method_name, scenario_cfg,
                     context_cfg, aborted, output_queue):
 
     sequence = 1
 
-    # if we don't do this we can hang waiting for the queue to drain
-    # have to do this in the subprocess
-    queue.cancel_join_thread()
-    output_queue.cancel_join_thread()
-
     runner_cfg = scenario_cfg['runner']
 
     interval = runner_cfg.get("interval", 1)
@@ -68,25 +67,29 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
         data = {}
         errors = ""
 
+        benchmark.pre_run_wait_time(interval)
+
         try:
             result = method(data)
-        except AssertionError as assertion:
+        except y_exc.SLAValidationError as error:
             # SLA validation failed in scenario, determine what to do now
             if sla_action == "assert":
                 raise
             elif sla_action == "monitor":
-                LOG.warning("SLA validation failed: %s", assertion.args)
-                errors = assertion.args
+                LOG.warning("SLA validation failed: %s", error.args)
+                errors = error.args
         # catch all exceptions because with multiprocessing we can have un-picklable exception
         # problems https://bugs.python.org/issue9400
-        except Exception:
+        except Exception:  # pylint: disable=broad-except
             errors = traceback.format_exc()
             LOG.exception("")
         else:
             if result:
-                output_queue.put(result)
+                # add timeout for put so we don't block test
+                # if we do timeout we don't care about dropping individual KPIs
+                output_queue.put(result, True, QUEUE_PUT_TIMEOUT)
 
-        time.sleep(interval)
+        benchmark.post_run_wait_time(interval)
 
         benchmark_output = {
             'timestamp': time.time(),
@@ -95,7 +98,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
             'errors': errors
         }
 
-        queue.put(benchmark_output)
+        queue.put(benchmark_output, True, QUEUE_PUT_TIMEOUT)
 
         LOG.debug("runner=%(runner)s seq=%(sequence)s END",
                   {"runner": runner_cfg["runner_id"], "sequence": sequence})
@@ -116,6 +119,9 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
         LOG.exception("")
         raise SystemExit(1)
 
+    LOG.debug("queue.qsize() = %s", queue.qsize())
+    LOG.debug("output_queue.qsize() = %s", output_queue.qsize())
+
 
 class DurationRunner(base.Runner):
     """Run a scenario for a certain amount of time
@@ -135,7 +141,9 @@ If the scenario ends before the time has elapsed, it will be started again.
     __execution_type__ = 'Duration'
 
     def _run_benchmark(self, cls, method, scenario_cfg, context_cfg):
+        name = "{}-{}-{}".format(self.__execution_type__, scenario_cfg.get("type"), os.getpid())
        self.process = multiprocessing.Process(
+            name=name,
             target=_worker_process,
             args=(self.result_queue, cls, method, scenario_cfg,
                   context_cfg, self.aborted, self.output_queue))
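
Note on the timed puts introduced above: the standard multiprocessing.Queue.put(obj, block, timeout)
blocks for at most "timeout" seconds and then raises queue.Full, so a stalled reader can no longer
hang the worker process indefinitely. The sketch below only demonstrates that library semantics; it
is not part of the yardstick change. The names _try_put and demo_queue are illustrative, and unlike
the runner above (which calls put() directly), the wrapper here also swallows queue.Full to mirror
the "we don't care about dropping individual KPIs" comment in the diff.

    import multiprocessing
    import queue  # only needed for the queue.Full exception type (Python 3)

    QUEUE_PUT_TIMEOUT = 10  # same bound the runner uses for its puts

    def _try_put(q, item):
        # Block for at most QUEUE_PUT_TIMEOUT seconds; if the queue is still
        # full after that, drop the item instead of hanging the caller.
        try:
            q.put(item, True, QUEUE_PUT_TIMEOUT)
        except queue.Full:
            pass

    if __name__ == '__main__':
        demo_queue = multiprocessing.Queue(maxsize=1)
        _try_put(demo_queue, {'sequence': 1})  # enqueued immediately
        _try_put(demo_queue, {'sequence': 2})  # waits up to 10 s, then dropped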