runners: add timeout to queue put
[yardstick.git] / yardstick / benchmark / runners / iteration.py
1 # Copyright 2014: Mirantis Inc.
2 # All Rights Reserved.
3 #
4 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
5 #    not use this file except in compliance with the License. You may obtain
6 #    a copy of the License at
7 #
8 #         http://www.apache.org/licenses/LICENSE-2.0
9 #
10 #    Unless required by applicable law or agreed to in writing, software
11 #    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 #    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 #    License for the specific language governing permissions and limitations
14 #    under the License.
15
16 # yardstick comment: this is a modified copy of
17 # rally/rally/benchmark/runners/constant.py
18
19 """A runner that runs a configurable number of times before it returns
20 """
21
from __future__ import absolute_import

import logging
import multiprocessing
import os
import time
import traceback
from queue import Full

from yardstick.benchmark.runners import base
30
LOG = logging.getLogger(__name__)


# Upper bound, in seconds, on blocking queue.put() calls made by the worker
# process, so a stalled queue consumer cannot hang the worker indefinitely.
QUEUE_PUT_TIMEOUT = 10
35
36
def _worker_process(queue, cls, method_name, scenario_cfg,
                    context_cfg, aborted, output_queue):
    """Run a benchmark scenario a configured number of iterations.

    Instantiates ``cls(scenario_cfg, context_cfg)`` and repeatedly invokes
    the method named ``method_name`` with a fresh ``data`` dict.  After each
    invocation a record (timestamp, sequence, data, errors) is pushed onto
    ``queue`` and any truthy scenario result onto ``output_queue``.  The
    loop ends when the iteration count is reached, ``aborted`` is set, or
    an error occurs with no SLA action configured.

    :param queue: queue for per-iteration result records
    :param cls: benchmark scenario class
    :param method_name: name of the scenario method invoked each iteration
    :param scenario_cfg: scenario configuration dict; must contain 'runner'
    :param context_cfg: context configuration passed through to the scenario
    :param aborted: event object checked for early termination
    :param output_queue: queue for scenario output (KPIs)
    :raises AssertionError: re-raised from the scenario when the SLA
        action is "assert"
    :raises SystemExit: if teardown fails (exceptions are not sent across
        process boundaries because they may be unpicklable)
    """
    sequence = 1

    runner_cfg = scenario_cfg['runner']

    interval = runner_cfg.get("interval", 1)
    iterations = runner_cfg.get("iterations", 1)
    run_step = runner_cfg.get("run_step", "setup,run,teardown")

    # rate-control: amount by which 'rate' is lowered after each SLA failure
    delta = runner_cfg.get("delta", 2)
    LOG.info("worker START, iterations %d times, class %s", iterations, cls)

    runner_cfg['runner_id'] = os.getpid()

    benchmark = cls(scenario_cfg, context_cfg)
    if "setup" in run_step:
        benchmark.setup()

    method = getattr(benchmark, method_name)

    sla_action = None
    if "sla" in scenario_cfg:
        sla_action = scenario_cfg["sla"].get("action", "assert")
    if "run" in run_step:
        while True:

            LOG.debug("runner=%(runner)s seq=%(sequence)s START",
                      {"runner": runner_cfg["runner_id"],
                       "sequence": sequence})

            data = {}
            errors = ""

            try:
                result = method(data)
            except AssertionError as assertion:
                # SLA validation failed in scenario, determine what to do now
                if sla_action == "assert":
                    raise
                elif sla_action == "monitor":
                    LOG.warning("SLA validation failed: %s", assertion.args)
                    errors = assertion.args
                elif sla_action == "rate-control":
                    # start from a default rate of 100 if none configured,
                    # lower it by delta and restart the iteration sequence
                    scenario_cfg.setdefault('options', {}).setdefault('rate',
                                                                      100)
                    scenario_cfg['options']['rate'] -= delta
                    sequence = 1
                    continue
            except Exception as e:
                errors = traceback.format_exc()
                LOG.exception(e)
            else:
                if result:
                    # add timeout for put so we don't block test
                    # if we do timeout we don't care about dropping individual KPIs
                    try:
                        output_queue.put(result, True, QUEUE_PUT_TIMEOUT)
                    except Full:
                        # losing one KPI is preferable to killing the worker
                        LOG.error("output_queue.put timeout, dropping KPI")

            time.sleep(interval)

            benchmark_output = {
                'timestamp': time.time(),
                'sequence': sequence,
                'data': data,
                'errors': errors
            }

            # likewise, a put timeout here must not crash the worker;
            # drop the record and keep iterating
            try:
                queue.put(benchmark_output, True, QUEUE_PUT_TIMEOUT)
            except Full:
                LOG.error("queue.put timeout, dropping record seq=%s",
                          sequence)

            LOG.debug("runner=%(runner)s seq=%(sequence)s END",
                      {"runner": runner_cfg["runner_id"],
                       "sequence": sequence})

            sequence += 1

            if (errors and sla_action is None) or \
                    (sequence > iterations or aborted.is_set()):
                LOG.info("worker END")
                break
    if "teardown" in run_step:
        try:
            benchmark.teardown()
        except Exception:
            # catch any exception in teardown and convert to simple exception
            # never pass exceptions back to multiprocessing, because some exceptions can
            # be unpicklable
            # https://bugs.python.org/issue9400
            LOG.exception("")
            raise SystemExit(1)

    LOG.debug("queue.qsize() = %s", queue.qsize())
    LOG.debug("output_queue.qsize() = %s", output_queue.qsize())
134
135
class IterationRunner(base.Runner):
    """Run a scenario for a configurable number of times

Each iteration invokes the scenario once; the runner stops after the
configured number of iterations (or earlier, on abort or on an error with
no SLA action configured).

  Parameters
    iterations - amount of times the scenario will be run for
        type:    int
        unit:    na
        default: 1
    interval - time to wait between each scenario invocation
        type:    int
        unit:    seconds
        default: 1 sec
    """
    __execution_type__ = 'Iteration'

    def _run_benchmark(self, cls, method, scenario_cfg, context_cfg):
        # Spawn the worker in a separate process; results flow back through
        # self.result_queue and self.output_queue, and self.aborted signals
        # early termination to the worker loop.
        self.process = multiprocessing.Process(
            target=_worker_process,
            args=(self.result_queue, cls, method, scenario_cfg,
                  context_cfg, self.aborted, self.output_queue))
        self.process.start()