1 # Copyright 2014: Mirantis Inc.
4 # Licensed under the Apache License, Version 2.0 (the "License"); you may
5 # not use this file except in compliance with the License. You may obtain
6 # a copy of the License at
8 # http://www.apache.org/licenses/LICENSE-2.0
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 # License for the specific language governing permissions and limitations
16 # yardstick comment: this is a modified copy of
17 # rally/rally/benchmark/runners/constant.py
19 """A runner that runs a specific time before it returns
22 from __future__ import absolute_import
24 import multiprocessing
29 from collections import Mapping
30 from contextlib import contextmanager
31 from itertools import takewhile
32 from six.moves import zip
34 from yardstick.benchmark.runners import base
36 LOG = logging.getLogger(__name__)
class SearchRunnerHelper(object):
    """Callable wrapper that drives one benchmark scenario method.

    Owns the benchmark instance life-cycle (setup / run / teardown, each
    gated by the ``run_step`` runner option) and provides the iteration
    helpers used by SearchRunner's worker loop.
    """

    def __init__(self, cls, method_name, scenario_cfg, context_cfg, aborted):
        super(SearchRunnerHelper, self).__init__()
        # benchmark/method are created lazily in get_benchmark_instance().
        self.benchmark = None
        self.method = None
        self.cls = cls
        self.method_name = method_name
        self.scenario_cfg = scenario_cfg
        self.context_cfg = context_cfg
        self.aborted = aborted
        self.runner_cfg = scenario_cfg['runner']
        self.run_step = self.runner_cfg.get("run_step", "setup,run,teardown")
        self.timeout = self.runner_cfg.get("timeout", 60)
        self.interval = self.runner_cfg.get("interval", 1)

    def __call__(self, *args, **kwargs):
        # The scenario method only exists once get_benchmark_instance()
        # has been entered; calling earlier is a programming error.
        if self.method is None:
            raise RuntimeError("benchmark instance not created yet")
        return self.method(*args, **kwargs)

    @contextmanager
    def get_benchmark_instance(self):
        """Create the benchmark, run setup, yield, always teardown."""
        self.benchmark = self.cls(self.scenario_cfg, self.context_cfg)

        if 'setup' in self.run_step:
            self.benchmark.setup()

        self.method = getattr(self.benchmark, self.method_name)
        LOG.info("worker START, timeout %d sec, class %s", self.timeout, self.cls)
        try:
            yield self
        finally:
            # Teardown even if the worker loop raised.
            if 'teardown' in self.run_step:
                self.benchmark.teardown()

    def is_not_done(self):
        """Generate 1-based sequence numbers until timeout or abort."""
        if 'run' not in self.run_step:
            # PEP 479: end the generator with a plain return, never by
            # raising StopIteration (that would become a RuntimeError).
            return

        max_time = time.time() + self.timeout

        # Stops as soon as the abort event is set ...
        abort_iter = iter(self.aborted.is_set, True)
        # ... or as soon as the deadline passes.
        time_iter = takewhile(lambda t_now: t_now <= max_time, iter(time.time, -1))

        for seq, _ in enumerate(zip(abort_iter, time_iter), 1):
            yield seq
            time.sleep(self.interval)
class SearchRunner(base.Runner):
    """Run a scenario for a certain amount of time

    If the scenario ends before the time has elapsed, it will be started again.

      Parameters
        timeout - amount of time the scenario will be run for
        type:    int
        unit:    seconds
        default: 60 sec

        interval - time to wait between each scenario invocation
        type:    int
        unit:    seconds
        default: 1 sec
    """
    __execution_type__ = 'Search'

    def __init__(self, config):
        super(SearchRunner, self).__init__(config)
        # All four are populated by _worker_run() inside the worker process.
        self.runner_cfg = None
        self.runner_id = None
        self.sla_action = None
        self.worker_helper = None
113 def _worker_run_once(self, sequence):
114 LOG.debug("runner=%s seq=%s START", self.runner_id, sequence)
120 self.worker_helper(data)
121 except AssertionError as assertion:
122 # SLA validation failed in scenario, determine what to do now
123 if self.sla_action == "assert":
125 elif self.sla_action == "monitor":
126 LOG.warning("SLA validation failed: %s", assertion.args)
127 errors = assertion.args
128 except Exception as e:
129 errors = traceback.format_exc()
133 'runner_id': self.runner_id,
135 'timestamp': time.time(),
136 'sequence': sequence,
142 self.result_queue.put(record)
144 LOG.debug("runner=%s seq=%s END", self.runner_id, sequence)
146 # Have to search through all the VNF KPIs
147 kpi_done = any(kpi.get('done') for kpi in data.values() if isinstance(kpi, Mapping))
149 return kpi_done or (errors and self.sla_action is None)
151 def _worker_run(self, cls, method_name, scenario_cfg, context_cfg):
152 self.runner_cfg = scenario_cfg['runner']
153 self.runner_id = self.runner_cfg['runner_id'] = os.getpid()
155 self.worker_helper = SearchRunnerHelper(cls, method_name, scenario_cfg,
156 context_cfg, self.aborted)
159 self.sla_action = scenario_cfg['sla'].get('action', 'assert')
161 self.sla_action = None
163 self.result_queue.put({
164 'runner_id': self.runner_id,
165 'scenario_cfg': scenario_cfg,
166 'context_cfg': context_cfg
169 with self.worker_helper.get_benchmark_instance():
170 for sequence in self.worker_helper.is_not_done():
171 if self._worker_run_once(sequence):
172 LOG.info("worker END")
    def _run_benchmark(self, cls, method, scenario_cfg, context_cfg):
        # Run the worker loop in a separate process so a misbehaving
        # scenario cannot block or crash the main runner.
        # NOTE(review): no self.process.start() is visible in this chunk —
        # confirm the process is started right after creation.
        self.process = multiprocessing.Process(
            target=self._worker_run,
            args=(cls, method, scenario_cfg, context_cfg))