Merge "Separate out test_parse_to_value_exception()"
diff --git a/yardstick/benchmark/runners/duration.py b/yardstick/benchmark/runners/duration.py
index fbf72a7..14fd8bb 100644
--- a/yardstick/benchmark/runners/duration.py
+++ b/yardstick/benchmark/runners/duration.py
@@ -27,6 +27,7 @@ import traceback
 import time
 
 from yardstick.benchmark.runners import base
+from yardstick.common import exceptions as y_exc
 
 LOG = logging.getLogger(__name__)
 
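Review note: the only change in this hunk is the new y_exc alias for yardstick.common.exceptions, which the SLA handling below relies on. As a hedged sketch (the real definitions live in yardstick/common/exceptions.py and may differ in detail), the exception hierarchy this import pulls in looks roughly like this:

    # Illustrative sketch only; not copied from the yardstick source.
    class YardstickException(Exception):
        """Base exception carrying a printf-style message template."""
        message = "An unknown exception occurred."

        def __init__(self, **kwargs):
            super(YardstickException, self).__init__(self.message % kwargs)


    class SLAValidationError(YardstickException):
        """Raised by a scenario when an SLA check fails."""
        message = "%(case_name)s SLA validation failed. Error: %(error_msg)s"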
@@ -66,18 +67,21 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
         data = {}
         errors = ""
 
+        benchmark.pre_run_wait_time(interval)
+
         try:
             result = method(data)
-        except AssertionError as assertion:
+        except y_exc.SLAValidationError as error:
             # SLA validation failed in scenario, determine what to do now
             if sla_action == "assert":
+                benchmark.teardown()
                 raise
             elif sla_action == "monitor":
-                LOG.warning("SLA validation failed: %s", assertion.args)
-                errors = assertion.args
+                LOG.warning("SLA validation failed: %s", error.args)
+                errors = error.args
         # catch all exceptions because with multiprocessing we can have un-picklable exception
         # problems https://bugs.python.org/issue9400
-        except Exception:
+        except Exception:  # pylint: disable=broad-except
             errors = traceback.format_exc()
             LOG.exception("")
         else:
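Review note: catching the typed y_exc.SLAValidationError instead of a bare AssertionError makes the SLA path explicit, and the added benchmark.teardown() ensures the scenario is cleaned up before the exception propagates when sla_action == "assert". A scenario would reach this handler from its own SLA check, roughly as in the sketch below (the helper name and SLA keys are illustrative, not taken from this diff):

    # Illustrative sketch of a scenario-side SLA check raising the new error.
    from yardstick.common import exceptions as y_exc

    def _check_latency_sla(scenario_cfg, measured_ms):
        sla = scenario_cfg.get('sla', {})
        max_ms = float(sla.get('max_latency', float('inf')))
        if measured_ms > max_ms:
            raise y_exc.SLAValidationError(
                case_name='latency_check',
                error_msg='measured %.1f ms > allowed %.1f ms'
                          % (measured_ms, max_ms))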
@@ -86,7 +90,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
         # if we do timeout we don't care about dropping individual KPIs
         output_queue.put(result, True, QUEUE_PUT_TIMEOUT)
 
-        time.sleep(interval)
+        benchmark.post_run_wait_time(interval)
 
         benchmark_output = {
             'timestamp': time.time(),
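Review note: the final hunk swaps the hard-coded time.sleep(interval) for benchmark.post_run_wait_time(interval), pairing with the pre_run_wait_time() call added earlier so a scenario can choose whether the pacing delay runs before or after each iteration. A minimal sketch of the hook pair on the scenario base class, assuming defaults that keep the old sleep-after-run behaviour:

    import time

    # Sketch of the hooks assumed by the runner; the real defaults live in
    # yardstick/benchmark/scenarios/base.py and may differ.
    class Scenario(object):
        def pre_run_wait_time(self, time_seconds):
            """Wait before executing the run method (no-op by default)."""
            pass

        def post_run_wait_time(self, time_seconds):
            """Wait after executing the run method (sleeps by default)."""
            time.sleep(time_seconds)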