Merge "Add testsuite "os-odl-ovs-noha""
yardstick/benchmark/runners/iteration.py
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

# yardstick comment: this is a modified copy of
# rally/rally/benchmark/runners/constant.py

"""A runner that runs a scenario a configurable number of times before it
returns.
"""

from __future__ import absolute_import

import logging
import multiprocessing
import os
import time
import traceback

from yardstick.benchmark.runners import base
from yardstick.common import exceptions as y_exc

LOG = logging.getLogger(__name__)


QUEUE_PUT_TIMEOUT = 10


def _worker_process(queue, cls, method_name, scenario_cfg,
                    context_cfg, aborted, output_queue):
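    """Run a scenario in a child process for a configured number of iterations.

    Instantiates ``cls`` with the scenario and context configuration, runs the
    setup/run/teardown phases selected by ``run_step``, and calls
    ``method_name`` once per iteration.  Per-iteration records are pushed onto
    ``queue``; any scenario output goes onto ``output_queue``.
    """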

    sequence = 1

    runner_cfg = scenario_cfg['runner']

    interval = runner_cfg.get("interval", 1)
    iterations = runner_cfg.get("iterations", 1)
    run_step = runner_cfg.get("run_step", "setup,run,teardown")

    delta = runner_cfg.get("delta", 2)
    LOG.info("worker START, iterations: %d, class: %s", iterations, cls)

    runner_cfg['runner_id'] = os.getpid()

    benchmark = cls(scenario_cfg, context_cfg)
    if "setup" in run_step:
        benchmark.setup()

    method = getattr(benchmark, method_name)

    sla_action = None
    if "sla" in scenario_cfg:
        sla_action = scenario_cfg["sla"].get("action", "assert")
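    # For example (illustrative values, not taken from this module), an SLA
    # section in scenario_cfg might look like:
    #     scenario_cfg['sla'] = {'max_rtt': 10, 'action': 'monitor'}
    # The threshold key is scenario-specific; 'action' selects how an SLA
    # violation is handled in the loop below.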
    if "run" in run_step:
        while True:

            LOG.debug("runner=%(runner)s seq=%(sequence)s START",
                      {"runner": runner_cfg["runner_id"],
                       "sequence": sequence})

            data = {}
            errors = ""

            benchmark.pre_run_wait_time(interval)

            try:
                result = method(data)
            except y_exc.SLAValidationError as error:
                # SLA validation failed in the scenario; decide what to do now
                if sla_action == "assert":
                    raise
                elif sla_action == "monitor":
                    LOG.warning("SLA validation failed: %s", error.args)
                    errors = error.args
                elif sla_action == "rate-control":
                    scenario_cfg.setdefault('options', {})
                    scenario_cfg['options'].setdefault('rate', 100)

                    scenario_cfg['options']['rate'] -= delta
                    sequence = 1
                    continue
            except Exception:  # pylint: disable=broad-except
                errors = traceback.format_exc()
                LOG.exception("")
            else:
                if result:
                    # Add a timeout to the put so we don't block the test;
                    # if we do time out, dropping individual KPIs is acceptable
                    output_queue.put(result, True, QUEUE_PUT_TIMEOUT)

            benchmark.post_run_wait_time(interval)

            benchmark_output = {
                'timestamp': time.time(),
                'sequence': sequence,
                'data': data,
                'errors': errors
            }

            queue.put(benchmark_output, True, QUEUE_PUT_TIMEOUT)

            LOG.debug("runner=%(runner)s seq=%(sequence)s END",
                      {"runner": runner_cfg["runner_id"],
                       "sequence": sequence})

            sequence += 1

            if (errors and sla_action is None) or \
                    (sequence > iterations or aborted.is_set()):
                LOG.info("worker END")
                break
    if "teardown" in run_step:
        try:
            benchmark.teardown()
        except Exception:
            # catch any exception in teardown and convert it to a simple
            # exception; never pass exceptions back to multiprocessing,
            # because some exceptions can be unpicklable
            # https://bugs.python.org/issue9400
            LOG.exception("")
            raise SystemExit(1)

    LOG.debug("queue.qsize() = %s", queue.qsize())
    LOG.debug("output_queue.qsize() = %s", output_queue.qsize())


class IterationRunner(base.Runner):
    """Run a scenario for a configurable number of times

The scenario is invoked repeatedly until the configured number of iterations
has been reached.

  Parameters
    iterations - number of times the scenario will be run
        type:    int
        unit:    na
        default: 1
    interval - time to wait between each scenario invocation
        type:    int
        unit:    seconds
        default: 1 sec
    """
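    # For example (illustrative values), a scenario could select this runner
    # with a configuration equivalent to:
    #     scenario_cfg['runner'] = {
    #         'type': 'Iteration',
    #         'iterations': 10,
    #         'interval': 1,
    #     }
    # Keys that are omitted fall back to the defaults documented above.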
    __execution_type__ = 'Iteration'

    def _run_benchmark(self, cls, method, scenario_cfg, context_cfg):
        name = "{}-{}-{}".format(self.__execution_type__, scenario_cfg.get("type"), os.getpid())
        self.process = multiprocessing.Process(
            name=name,
            target=_worker_process,
            args=(self.result_queue, cls, method, scenario_cfg,
                  context_cfg, self.aborted, self.output_queue))
        self.process.start()
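
# A minimal sketch of how ``_worker_process`` can be driven directly; the
# ``DummyScenario`` class below is a hypothetical stand-in for a real
# yardstick scenario class and is not part of this module:
#
#     import multiprocessing
#
#     class DummyScenario(object):
#         def __init__(self, scenario_cfg, context_cfg):
#             pass
#         def setup(self):
#             pass
#         def run(self, result):
#             result['rtt'] = 1
#         def teardown(self):
#             pass
#         def pre_run_wait_time(self, interval):
#             pass
#         def post_run_wait_time(self, interval):
#             pass
#
#     queue = multiprocessing.Queue()
#     output_queue = multiprocessing.Queue()
#     aborted = multiprocessing.Event()
#     scenario_cfg = {'runner': {'iterations': 3, 'interval': 0}}
#     _worker_process(queue, DummyScenario, 'run', scenario_cfg, {},
#                     aborted, output_queue)
#
# After the call, ``queue`` holds three per-iteration records with the keys
# 'timestamp', 'sequence', 'data' and 'errors'.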