# Commit: "Pass parameters between scenarios"
# File: yardstick/benchmark/runners/iteration.py (yardstick.git)
1 # Copyright 2014: Mirantis Inc.
2 # All Rights Reserved.
3 #
4 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
5 #    not use this file except in compliance with the License. You may obtain
6 #    a copy of the License at
7 #
8 #         http://www.apache.org/licenses/LICENSE-2.0
9 #
10 #    Unless required by applicable law or agreed to in writing, software
11 #    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 #    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 #    License for the specific language governing permissions and limitations
14 #    under the License.
15
16 # yardstick comment: this is a modified copy of
17 # rally/rally/benchmark/runners/constant.py
18
19 """A runner that runs a configurable number of times before it returns
20 """
21
22 from __future__ import absolute_import
23 import os
24 import multiprocessing
25 import logging
26 import traceback
27 import time
28
29 from yardstick.benchmark.runners import base
30
31 LOG = logging.getLogger(__name__)
32
33
def _worker_process(queue, cls, method_name, scenario_cfg,
                    context_cfg, aborted, output_queue):
    """Execute a benchmark scenario a configurable number of times.

    Runs in a child process.  Instantiates ``cls(scenario_cfg, context_cfg)``,
    optionally calls ``setup()``, then invokes the method named
    ``method_name`` once per iteration, posting a per-iteration record on
    ``queue`` and any value the scenario returns on ``output_queue``.

    :param queue: queue for per-iteration benchmark records
    :param cls: scenario class to instantiate
    :param method_name: name of the scenario method invoked each iteration
    :param scenario_cfg: scenario configuration; reads 'runner', 'options'
        and the optional 'sla' section
    :param context_cfg: context configuration, passed through to the scenario
    :param aborted: event object used to request early termination
    :param output_queue: queue for data returned by the scenario method
    """
    sequence = 1  # 1-based iteration counter

    runner_cfg = scenario_cfg['runner']

    interval = runner_cfg.get("interval", 1)
    iterations = runner_cfg.get("iterations", 1)
    run_step = runner_cfg.get("run_step", "setup,run,teardown")
    # Rate decrement applied on each SLA failure when action is rate-control.
    delta = runner_cfg.get("delta", 2)
    options_cfg = scenario_cfg['options']
    # Guarantee a starting rate is present so the scenario (and the
    # rate-control branch below) can rely on it.  setdefault is equivalent
    # to the old get-then-assign pair but does it in one step.
    options_cfg.setdefault("rate", 100)
    LOG.info("worker START, iterations %d times, class %s", iterations, cls)

    runner_cfg['runner_id'] = os.getpid()

    benchmark = cls(scenario_cfg, context_cfg)
    if "setup" in run_step:
        benchmark.setup()

    method = getattr(benchmark, method_name)

    # Publish the (possibly updated) configuration so the parent process can
    # pass parameters between scenarios.
    queue.put({'runner_id': runner_cfg['runner_id'],
               'scenario_cfg': scenario_cfg,
               'context_cfg': context_cfg})

    sla_action = None
    if "sla" in scenario_cfg:
        sla_action = scenario_cfg["sla"].get("action", "assert")
    if "run" in run_step:
        while True:

            LOG.debug("runner=%(runner)s seq=%(sequence)s START",
                      {"runner": runner_cfg["runner_id"],
                       "sequence": sequence})

            data = {}
            errors = ""

            try:
                result = method(data)
            except AssertionError as assertion:
                # SLA validation failed in scenario, determine what to do now
                if sla_action == "assert":
                    raise
                elif sla_action == "monitor":
                    LOG.warning("SLA validation failed: %s", assertion.args)
                    errors = assertion.args
                elif sla_action == "rate-control":
                    # Back the rate off and restart the iteration count.
                    scenario_cfg['options']['rate'] -= delta
                    sequence = 1
                    # Bug fix: the original `continue` skipped the abort
                    # check at the bottom of the loop, so a worker stuck in
                    # rate-control could spin forever after an abort request.
                    if aborted.is_set():
                        LOG.info("worker END")
                        break
                    continue
            except Exception as e:
                errors = traceback.format_exc()
                LOG.exception(e)
            else:
                if result:
                    output_queue.put(result)

            time.sleep(interval)

            benchmark_output = {
                'timestamp': time.time(),
                'sequence': sequence,
                'data': data,
                'errors': errors
            }

            record = {'runner_id': runner_cfg['runner_id'],
                      'benchmark': benchmark_output}

            queue.put(record)

            LOG.debug("runner=%(runner)s seq=%(sequence)s END",
                      {"runner": runner_cfg["runner_id"],
                       "sequence": sequence})

            sequence += 1

            # Stop on an unhandled error (no SLA policy), once the requested
            # number of iterations completed, or when an abort was requested.
            if (errors and sla_action is None) or \
                    (sequence > iterations or aborted.is_set()):
                LOG.info("worker END")
                break
    # NOTE(review): teardown is skipped when the SLA "assert" action
    # re-raises above — pre-existing behavior, kept intact.
    if "teardown" in run_step:
        benchmark.teardown()
121
122
class IterationRunner(base.Runner):
    """Run a scenario for a configurable number of iterations.

Each iteration invokes the scenario once, waiting 'interval' seconds
between invocations.

  Parameters
    iterations - amount of times the scenario will be run for
        type:    int
        unit:    na
        default: 1
    interval - time to wait between each scenario invocation
        type:    int
        unit:    seconds
        default: 1 sec
    """
    __execution_type__ = 'Iteration'

    def _run_benchmark(self, cls, method, scenario_cfg, context_cfg):
        # Fan the work out to a child process; results flow back through
        # the runner's result/output queues.
        worker_args = (self.result_queue, cls, method, scenario_cfg,
                       context_cfg, self.aborted, self.output_queue)
        self.process = multiprocessing.Process(target=_worker_process,
                                               args=worker_args)
        self.process.start()