"""
from __future__ import absolute_import
-import os
-import multiprocessing
+
import logging
-import traceback
+import multiprocessing
+import os
import time
+import traceback
+
from yardstick.benchmark.runners import base
LOG = logging.getLogger(__name__)
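+# timeout in seconds for queue.put() so a blocked queue cannot stall the test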
+QUEUE_PUT_TIMEOUT = 10
+
+
def _worker_process(queue, cls, method_name, scenario_cfg,
                    context_cfg, aborted, output_queue):
        data = {}
        errors = ""
+        benchmark.pre_run_wait_time(interval)
+
        try:
            result = method(data)
        except AssertionError as assertion:
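+            # SLA check failed: back off the offered rate and restart
+            # the sequence (rate control)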
            scenario_cfg['options']['rate'] -= delta
            sequence = 1
            continue
-        except Exception as e:
+        except Exception:  # pylint: disable=broad-except
            errors = traceback.format_exc()
-            LOG.exception(e)
+            LOG.exception("")
        else:
            if result:
-                LOG.debug("output_queue.put %s", result)
-                output_queue.put(result, True, 1)
+                # time-limit the put so a stalled queue cannot block the test;
+                # if we time out, dropping individual KPIs is acceptable
+                output_queue.put(result, True, QUEUE_PUT_TIMEOUT)
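+        # scenario-defined wait replaces the fixed sleep between runs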
-        time.sleep(interval)
+        benchmark.post_run_wait_time(interval)
        benchmark_output = {
            'timestamp': time.time(),
            'sequence': sequence,
            'data': data,
            'errors': errors
        }
- LOG.debug("queue.put, %s", benchmark_output)
- queue.put(benchmark_output, True, 1)
+ queue.put(benchmark_output, True, QUEUE_PUT_TIMEOUT)
LOG.debug("runner=%(runner)s seq=%(sequence)s END",
{"runner": runner_cfg["runner_id"],
    __execution_type__ = 'Iteration'

    def _run_benchmark(self, cls, method, scenario_cfg, context_cfg):
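+        # name the worker process after execution type, scenario type and PID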
+ name = "{}-{}-{}".format(self.__execution_type__, scenario_cfg.get("type"), os.getpid())
        self.process = multiprocessing.Process(
+            name=name,
            target=_worker_process,
            args=(self.result_queue, cls, method, scenario_cfg,
                  context_cfg, self.aborted, self.output_queue))