Code Review
/
yardstick.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
review
|
tree
raw
|
inline
| side by side
Merge changes from topics 'feat/keep_vnf', 'YARDSTICK-886'
[yardstick.git]
/
yardstick
/
benchmark
/
runners
/
arithmetic.py
diff --git
a/yardstick/benchmark/runners/arithmetic.py
b/yardstick/benchmark/runners/arithmetic.py
index
974fb21
..
6aaaed8
100755
(executable)
--- a/
yardstick/benchmark/runners/arithmetic.py
+++ b/
yardstick/benchmark/runners/arithmetic.py
@@
-46,11
+46,6
@@
def _worker_process(queue, cls, method_name, scenario_cfg,
sequence = 1
sequence = 1
- # if we don't do this we can hang waiting for the queue to drain
- # have to do this in the subprocess
- queue.cancel_join_thread()
- output_queue.cancel_join_thread()
-
runner_cfg = scenario_cfg['runner']
interval = runner_cfg.get("interval", 1)
runner_cfg = scenario_cfg['runner']
interval = runner_cfg.get("interval", 1)
@@
-143,8
+138,18
@@
def _worker_process(queue, cls, method_name, scenario_cfg,
if errors and sla_action is None:
break
if errors and sla_action is None:
break
- benchmark.teardown()
+ try:
+ benchmark.teardown()
+ except Exception:
# catch any exception raised in teardown and convert it to a simple exception;
# never pass exceptions back to multiprocessing, because some exceptions
# can be unpicklable
# https://bugs.python.org/issue9400
+ LOG.exception("")
+ raise SystemExit(1)
LOG.info("worker END")
LOG.info("worker END")
+ LOG.debug("queue.qsize() = %s", queue.qsize())
+ LOG.debug("output_queue.qsize() = %s", output_queue.qsize())
class ArithmeticRunner(base.Runner):
class ArithmeticRunner(base.Runner):
@@
-186,7
+191,9
@@
class ArithmeticRunner(base.Runner):
__execution_type__ = 'Arithmetic'
def _run_benchmark(self, cls, method, scenario_cfg, context_cfg):
__execution_type__ = 'Arithmetic'
def _run_benchmark(self, cls, method, scenario_cfg, context_cfg):
+ name = "{}-{}-{}".format(self.__execution_type__, scenario_cfg.get("type"), os.getpid())
self.process = multiprocessing.Process(
self.process = multiprocessing.Process(
+ name=name,
target=_worker_process,
args=(self.result_queue, cls, method, scenario_cfg,
context_cfg, self.aborted, self.output_queue))
target=_worker_process,
args=(self.result_queue, cls, method, scenario_cfg,
context_cfg, self.aborted, self.output_queue))