Merge "Adding Grafana dashboard for visualizing the vEPC default bearer metrics."
diff --git a/yardstick/benchmark/runners/sequence.py b/yardstick/benchmark/runners/sequence.py
index f08ca5d..0148a45 100644
--- a/yardstick/benchmark/runners/sequence.py
+++ b/yardstick/benchmark/runners/sequence.py
@@ -21,13 +21,16 @@
 The input value in the sequence is specified in a list in the input file.
 """
 
 from __future__ import absolute_import
-import os
-import multiprocessing
+
 import logging
-import traceback
+import multiprocessing
 import time
+import traceback
+
+import os
 
 from yardstick.benchmark.runners import base
+from yardstick.common import exceptions as y_exc
 
 LOG = logging.getLogger(__name__)
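The import hunk above also brings in yardstick.common.exceptions as y_exc, which the next hunk uses to replace a bare AssertionError. A minimal sketch of the idea, assuming only that SLAValidationError is an ordinary Exception subclass (the class and check_sla helper below are illustrative stand-ins, not the real yardstick.common.exceptions API): a dedicated type separates SLA breaches from genuine programming-error assertions, and unlike a plain assert statement it still fires under "python -O".

# Illustrative sketch only, not the real yardstick.common.exceptions module.

class SLAValidationError(Exception):
    """Raised by a scenario when a measured value violates its SLA."""


def check_sla(measured_latency_ms, sla_max_latency_ms):
    # Hypothetical helper; yardstick scenarios perform their own SLA checks.
    if measured_latency_ms > sla_max_latency_ms:
        raise SLAValidationError(
            "latency %.1f ms exceeds SLA limit %.1f ms"
            % (measured_latency_ms, sla_max_latency_ms))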
@@ -72,14 +75,14 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
 
         try:
             result = method(data)
-        except AssertionError as assertion:
+        except y_exc.SLAValidationError as error:
             # SLA validation failed in scenario, determine what to do now
             if sla_action == "assert":
                 raise
             elif sla_action == "monitor":
-                LOG.warning("SLA validation failed: %s", assertion.args)
-                errors = assertion.args
-        except Exception as e:
+                LOG.warning("SLA validation failed: %s", error.args)
+                errors = error.args
+        except Exception as e:  # pylint: disable=broad-except
             errors = traceback.format_exc()
             LOG.exception(e)
         else:
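This hunk only changes the exception path: SLA breaches now arrive as y_exc.SLAValidationError instead of a bare AssertionError, so the broad "except Exception" fallback (now explicitly annotated for pylint) no longer conflates an SLA breach with an unexpected crash, and assert failures from unrelated code are no longer misread as SLA results. A condensed, self-contained sketch of the resulting control flow, using hypothetical stand-ins for the yardstick internals:

import logging
import traceback

LOG = logging.getLogger(__name__)


class SLAValidationError(Exception):
    """Stand-in for yardstick.common.exceptions.SLAValidationError."""


def run_one_iteration(method, data, sla_action):
    # Hypothetical condensation of the worker loop body shown above.
    result, errors = None, ""
    try:
        result = method(data)
    except SLAValidationError as error:
        if sla_action == "assert":
            raise                    # fatal: abort the whole run
        elif sla_action == "monitor":
            LOG.warning("SLA validation failed: %s", error.args)
            errors = error.args      # record the breach, keep iterating
    except Exception:  # pylint: disable=broad-except
        errors = traceback.format_exc()  # unexpected crash: keep the traceback
    return result, errors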
@@ -140,7 +143,9 @@ class SequenceRunner(base.Runner):
     __execution_type__ = 'Sequence'
 
     def _run_benchmark(self, cls, method, scenario_cfg, context_cfg):
+        name = "{}-{}-{}".format(self.__execution_type__, scenario_cfg.get("type"), os.getpid())
         self.process = multiprocessing.Process(
+            name=name,
             target=_worker_process,
             args=(self.result_queue, cls, method, scenario_cfg,
                   context_cfg, self.aborted, self.output_queue))
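The second change gives the worker a descriptive process name such as Sequence-ping-4242 (scenario type and PID vary; "ping" is only an example value here). Note that os.getpid() runs in the parent while the Process object is built, so the embedded PID is the parent's, not the child's. The name does not alter ps output, but the child sees it as multiprocessing.current_process().name and log records can carry it via %(processName)s, as this small standalone demo shows:

import logging
import multiprocessing
import os


def _worker():
    # %(processName)s picks up the name= passed to Process in the parent.
    logging.basicConfig(
        level=logging.INFO,
        format="%(processName)s %(levelname)s %(message)s")
    logging.info("running in pid %s", os.getpid())


if __name__ == "__main__":
    # Mirrors the runner's naming scheme; "Sequence" and "ping" stand in
    # for __execution_type__ and scenario_cfg.get("type").
    name = "{}-{}-{}".format("Sequence", "ping", os.getpid())
    worker = multiprocessing.Process(name=name, target=_worker)
    worker.start()
    worker.join()
    # Expected log line: "Sequence-ping-<parent pid> INFO running in pid <child pid>"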