Add iteration type for Runner
author    kubi <jean.gaoliang@huawei.com>
Sat, 25 Jul 2015 09:08:14 +0000 (09:08 +0000)
committer kubi <jean.gaoliang@huawei.com>
Mon, 17 Aug 2015 03:20:02 +0000 (23:20 -0400)
An Iteration runner is roughly the same as a Duration runner, but it runs
for a configurable number of iterations instead of for a fixed amount of
time. Essentially only the termination condition is different.

Also modify the unit and default value of the iteration parameter, and
rename 'iteration' to 'iterations'.
JIRA: YARDSTICK-49

Change-Id: I67f4014dc3cf923cd31cc2e990e2f7219bce40fe
Signed-off-by: kubi <jean.gaoliang@huawei.com>
samples/ping-iteration.yaml [new file with mode: 0755]
yardstick/benchmark/runners/iteration.py [new file with mode: 0755]
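The gist of the change, reduced to a sketch: the new runner keeps the
Duration runner's loop shape and swaps the exit test from elapsed time to an
iteration count. The two helper functions below are illustrative only
(simplified loop bodies; the real worker also handles SLA checks, logging
and a result queue, as the new module further down shows):

import time

def run_for_duration(method, duration=60, interval=1):
    # Duration-style termination: stop once 'duration' seconds have passed.
    start = time.time()
    while True:
        method()
        time.sleep(interval)
        if time.time() - start > duration:
            break

def run_for_iterations(method, iterations=1, interval=1):
    # Iteration-style termination: stop after 'iterations' invocations.
    sequence = 1
    while True:
        method()
        time.sleep(interval)
        sequence += 1
        if sequence > iterations:
            break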

diff --git a/samples/ping-iteration.yaml b/samples/ping-iteration.yaml
new file mode 100755 (executable)
index 0000000..810530c
--- /dev/null
@@ -0,0 +1,45 @@
+---
+# Sample benchmark task config file
+# measure network latency using ping
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+  type: Ping
+  options:
+    packetsize: 200
+  host: athena.demo
+  target: ares.demo
+
+  runner:
+    type: Iteration
+    iterations: 60
+    interval: 1
+
+  sla:
+    max_rtt: 10
+    action: monitor
+
+context:
+  name: demo
+  image: cirros-0.3.3
+  flavor: m1.tiny
+  user: cirros
+
+  placement_groups:
+    pgrp1:
+      policy: "availability"
+
+  servers:
+    athena:
+      floating_ip: true
+      placement: "pgrp1"
+    ares:
+      placement: "pgrp1"
+
+  networks:
+    test:
+      cidr: '10.0.1.0/24'
+      external_network: "net04_ext"
+
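A note on the sample: with iterations: 60 and interval: 1 the Ping scenario
above is invoked 60 times, one second apart, so the task takes roughly a
minute. Because the SLA action is "monitor", an RTT above max_rtt is logged
as a warning and the run continues; with "assert" the worker would abort on
the first violation (see the SLA handling in the runner below).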
diff --git a/yardstick/benchmark/runners/iteration.py b/yardstick/benchmark/runners/iteration.py
new file mode 100755 (executable)
index 0000000..03dcfae
--- /dev/null
@@ -0,0 +1,111 @@
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+'''A runner that runs a scenario a configurable number of times before it returns
+'''
+
+import os
+import multiprocessing
+import logging
+import traceback
+import time
+
+from yardstick.benchmark.runners import base
+
+LOG = logging.getLogger(__name__)
+
+
+def _worker_process(queue, cls, method_name, context, scenario_args):
+
+    sequence = 1
+
+    interval = context.get("interval", 1)
+    iterations = context.get("iterations", 1)
+    LOG.info("worker START, iterations %d times, class %s", iterations, cls)
+
+    context['runner'] = os.getpid()
+
+    benchmark = cls(context)
+    benchmark.setup()
+    method = getattr(benchmark, method_name)
+
+    record_context = {"runner": context["runner"],
+                      "host": context["host"]}
+
+    sla_action = None
+    if "sla" in scenario_args:
+        sla_action = scenario_args["sla"].get("action", "assert")
+
+    while True:
+
+        LOG.debug("runner=%(runner)s seq=%(sequence)s START" %
+                  {"runner": context["runner"], "sequence": sequence})
+
+        data = {}
+        errors = ""
+
+        try:
+            data = method(scenario_args)
+        except AssertionError as assertion:
+            # SLA validation failed in scenario, determine what to do now
+            if sla_action == "assert":
+                raise
+            elif sla_action == "monitor":
+                LOG.warning("SLA validation failed: %s" % assertion.args)
+                errors = assertion.args
+        except Exception as e:
+            errors = traceback.format_exc()
+            LOG.exception(e)
+
+        time.sleep(interval)
+
+        benchmark_output = {
+            'timestamp': time.time(),
+            'sequence': sequence,
+            'data': data,
+            'errors': errors
+        }
+
+        queue.put({'context': record_context, 'sargs': scenario_args,
+                   'benchmark': benchmark_output})
+
+        LOG.debug("runner=%(runner)s seq=%(sequence)s END" %
+                  {"runner": context["runner"], "sequence": sequence})
+
+        sequence += 1
+
+        if (errors and sla_action is None) or (sequence > iterations):
+            LOG.info("worker END")
+            break
+
+    benchmark.teardown()
+
+
+class IterationRunner(base.Runner):
+    '''Run a scenario for a configurable number of times
+
+The scenario is run the configured number of times, sleeping 'interval' seconds between runs.
+
+  Parameters
+    iterations - number of times the scenario will be run
+        type:    int
+        unit:    na
+        default: 1
+    interval - time to wait between each scenario invocation
+        type:    int
+        unit:    seconds
+        default: 1 sec
+    '''
+    __execution_type__ = 'Iteration'
+
+    def _run_benchmark(self, cls, method, scenario_args):
+        self.process = multiprocessing.Process(
+            target=_worker_process,
+            args=(self.result_queue, cls, method, self.config, scenario_args))
+        self.process.start()
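To see the termination and queueing behaviour end to end, here is a quick
driver sketch. FakePing is a hypothetical stand-in for a scenario class, and
calling _worker_process inline (rather than through IterationRunner and a
child process) is purely for illustration:

import multiprocessing

from yardstick.benchmark.runners import iteration

class FakePing(object):
    '''Hypothetical scenario stand-in: same setup/run/teardown shape.'''
    def __init__(self, context):
        pass
    def setup(self):
        pass
    def run(self, args):
        return {"rtt": 0.5}
    def teardown(self):
        pass

queue = multiprocessing.Queue()
context = {"interval": 0, "iterations": 3, "host": "athena.demo"}
iteration._worker_process(queue, FakePing, "run", context,
                          {"sla": {"action": "monitor"}})

while not queue.empty():
    record = queue.get()
    print(record["benchmark"]["sequence"])  # prints 1, 2, 3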