1 # Copyright 2014: Mirantis Inc.
4 # Licensed under the Apache License, Version 2.0 (the "License"); you may
5 # not use this file except in compliance with the License. You may obtain
6 # a copy of the License at
8 # http://www.apache.org/licenses/LICENSE-2.0
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 # License for the specific language governing permissions and limitations
16 # yardstick comment: this is a modified copy of
17 # rally/rally/benchmark/runners/constant.py
19 """A runner that runs a specific time before it returns
from __future__ import absolute_import

import logging
import multiprocessing
import os
import time
import traceback
from collections import Mapping
from contextlib import contextmanager
from itertools import takewhile

from six.moves import zip

from yardstick.benchmark.runners import base
37 LOG = logging.getLogger(__name__)
class SearchRunnerHelper(object):
    """Callable wrapper that builds and drives one benchmark scenario instance.

    Holds the scenario class and its configuration, exposes the scenario
    method via ``__call__``, manages setup/teardown as a context manager
    (``get_benchmark_instance``), and produces the iteration sequence
    (``is_not_done``) bounded by ``timeout`` and the shared ``aborted`` event.
    """

    def __init__(self, cls, method_name, scenario_cfg, context_cfg, aborted):
        """Store scenario class/config and read runner options.

        :param cls: scenario class, instantiated lazily in
            ``get_benchmark_instance``
        :param method_name: name of the scenario method to invoke per iteration
        :param scenario_cfg: scenario configuration; must contain a 'runner' key
        :param context_cfg: context configuration passed to the scenario
        :param aborted: shared event-like object; ``is_set()`` True stops
            iteration
        """
        super(SearchRunnerHelper, self).__init__()
        self.cls = cls
        self.method_name = method_name
        self.scenario_cfg = scenario_cfg
        self.context_cfg = context_cfg
        self.aborted = aborted
        self.runner_cfg = scenario_cfg['runner']
        # which lifecycle steps to execute; any subset of setup/run/teardown
        self.run_step = self.runner_cfg.get("run_step", "setup,run,teardown")
        self.timeout = self.runner_cfg.get("timeout", 60)
        self.interval = self.runner_cfg.get("interval", 1)
        # benchmark/method are bound inside get_benchmark_instance()
        self.benchmark = None
        self.method = None

    def __call__(self, *args, **kwargs):
        """Invoke the bound scenario method.

        :raises RuntimeError: if called before ``get_benchmark_instance``
            has bound the method.
        """
        if self.method is None:
            raise RuntimeError
        return self.method(*args, **kwargs)

    @contextmanager
    def get_benchmark_instance(self):
        """Context manager: instantiate the scenario, set it up, bind its
        method, and guarantee teardown on exit (if enabled by run_step)."""
        self.benchmark = self.cls(self.scenario_cfg, self.context_cfg)

        if 'setup' in self.run_step:
            self.benchmark.setup()

        self.method = getattr(self.benchmark, self.method_name)
        LOG.info("worker START, timeout %d sec, class %s", self.timeout,
                 self.cls)
        try:
            yield self
        finally:
            if 'teardown' in self.run_step:
                self.benchmark.teardown()

    def is_not_done(self):
        """Generator of 1-based sequence numbers until timeout or abort.

        Yields nothing when 'run' is not in run_step. Stops when either
        the wall-clock deadline passes or ``aborted.is_set()`` turns True,
        sleeping ``interval`` seconds between iterations.
        """
        if 'run' not in self.run_step:
            # plain return, not `raise StopIteration` (PEP 479)
            return

        max_time = time.time() + self.timeout

        # iter(callable, sentinel): keeps yielding until aborted.is_set()
        # returns True
        abort_iter = iter(self.aborted.is_set, True)
        time_iter = takewhile(lambda t_now: t_now <= max_time,
                              iter(time.time, -1))

        for seq, _ in enumerate(zip(abort_iter, time_iter), 1):
            yield seq
            time.sleep(self.interval)
90 class SearchRunner(base.Runner):
91 """Run a scenario for a certain amount of time
93 If the scenario ends before the time has elapsed, it will be started again.
96 timeout - amount of time the scenario will be run for
100 interval - time to wait between each scenario invocation
105 __execution_type__ = 'Search'
107 def __init__(self, config):
108 super(SearchRunner, self).__init__(config)
109 self.runner_cfg = None
110 self.runner_id = None
111 self.sla_action = None
112 self.worker_helper = None
114 def _worker_run_once(self, sequence):
115 LOG.debug("runner=%s seq=%s START", self.runner_id, sequence)
121 self.worker_helper(data)
122 except AssertionError as assertion:
123 # SLA validation failed in scenario, determine what to do now
124 if self.sla_action == "assert":
126 elif self.sla_action == "monitor":
127 LOG.warning("SLA validation failed: %s", assertion.args)
128 errors = assertion.args
129 except Exception as e:
130 errors = traceback.format_exc()
134 'runner_id': self.runner_id,
136 'timestamp': time.time(),
137 'sequence': sequence,
143 self.result_queue.put(record)
145 LOG.debug("runner=%s seq=%s END", self.runner_id, sequence)
147 # Have to search through all the VNF KPIs
148 kpi_done = any(kpi.get('done') for kpi in data.values() if isinstance(kpi, Mapping))
150 return kpi_done or (errors and self.sla_action is None)
152 def _worker_run(self, cls, method_name, scenario_cfg, context_cfg):
153 self.runner_cfg = scenario_cfg['runner']
154 self.runner_id = self.runner_cfg['runner_id'] = os.getpid()
156 self.worker_helper = SearchRunnerHelper(cls, method_name, scenario_cfg,
157 context_cfg, self.aborted)
160 self.sla_action = scenario_cfg['sla'].get('action', 'assert')
162 self.sla_action = None
164 self.result_queue.put({
165 'runner_id': self.runner_id,
166 'scenario_cfg': scenario_cfg,
167 'context_cfg': context_cfg
170 with self.worker_helper.get_benchmark_instance():
171 for sequence in self.worker_helper.is_not_done():
172 if self._worker_run_once(sequence):
173 LOG.info("worker END")
176 def _run_benchmark(self, cls, method, scenario_cfg, context_cfg):
177 name = "{}-{}-{}".format(self.__execution_type__, scenario_cfg.get("type"), os.getpid())
178 self.process = multiprocessing.Process(
180 target=self._worker_run,
181 args=(cls, method, scenario_cfg, context_cfg))