Add support for multiple contexts (stacks)
author Hans Feldt <hans.feldt@ericsson.com>
Fri, 5 Jun 2015 08:17:01 +0000 (10:17 +0200)
committer Hans Feldt <hans.feldt@ericsson.com>
Tue, 9 Jun 2015 13:05:12 +0000 (13:05 +0000)
A list of contexts can be specified; cross-referencing between
contexts is supported and shown in the added sample file.

TBD: can placement groups work between stacks?

Change-Id: I26dbe94e52ba0be5e49f50fd70540a57de2204cb
JIRA: YARDSTICK-31
Signed-off-by: Hans Feldt <hans.feldt@ericsson.com>
samples/ping-multiple-context.yaml [new file with mode: 0644]
yardstick/benchmark/context/model.py
yardstick/main.py
yardstick/orchestrator/heat.py

diff --git a/samples/ping-multiple-context.yaml b/samples/ping-multiple-context.yaml
new file mode 100644
index 0000000..8e1b1b7
--- /dev/null
@@ -0,0 +1,56 @@
+---
+# Sample benchmark task config file to measure network latency using ping
+# Client/server parts are located in different contexts (stacks)
+# => longer communication path and higher latency
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+  type: Ping
+  options:
+    packetsize: 100
+  host: client.demo1
+  target: server.demo2
+  runner:
+    type: Duration
+    duration: 60
+    interval: 1
+  sla:
+    max_rtt: 15
+    action: monitor
+
+contexts:
+-
+  name: demo1
+  image: cirros-0.3.3
+  flavor: m1.tiny
+  user: cirros
+  placement_groups:
+    pgrp1:
+      policy: "availability"
+  servers:
+    client:
+      floating_ip: true
+      placement: "pgrp1"
+  networks:
+    test:
+      cidr: '10.0.1.0/24'
+      external_network: "net04_ext"
+-
+  name: demo2
+  image: cirros-0.3.3
+  flavor: m1.tiny
+  user: cirros
+  placement_groups:
+    pgrp1:
+      policy: "availability"
+  servers:
+    server:
+      floating_ip: true
+      placement: "pgrp1"
+  networks:
+    test:
+      cidr: '10.0.1.0/24'
+      external_network: "net04_ext"
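
The scenario above addresses its endpoints as client.demo1 and server.demo2,
i.e. "<server>.<context>" distinguished names (DNs). The helper below is a
minimal, illustrative sketch of that naming convention only; the real lookup
is the static Context.get_server() in the model.py hunk further down.

def split_dn(dn):
    '''Split a "<server>.<context>" DN such as "client.demo1".'''
    server, sep, context = dn.partition(".")
    if not sep:
        raise ValueError("dn '%s' is malformed" % dn)
    return server, context

assert split_dn("client.demo1") == ("client", "demo1")
assert split_dn("server.demo2") == ("server", "demo2")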
+
diff --git a/yardstick/benchmark/context/model.py b/yardstick/benchmark/context/model.py
index 768c1d3..afb2d56 100644
@@ -123,6 +123,7 @@ class Server(Object):
         self.stack_name = context.name + "-" + self.name
         self.keypair_name = context.keypair_name
         self.secgroup_name = context.secgroup_name
+        self.context = context
 
         if attrs is None:
             attrs = {}
@@ -372,8 +373,8 @@ class Context(object):
         except Exception as err:
             sys.exit("error: failed to deploy stack: '%s'" % err)
 
-        # copy some vital stack output into context
-        for server in Server.list:
+        # Iterate the servers in this context and copy out needed info
+        for server in self.servers:
             for port in server.ports.itervalues():
                 port["ipaddr"] = self.stack.outputs[port["stack_name"]]
 
@@ -391,6 +392,16 @@ class Context(object):
             self.stack = None
             print "Context undeployed"
 
-    def get_server(self, name):
-        '''lookup server object by name from context'''
-        return self._server_map[name]
+    @staticmethod
+    def get_server(dn):
+        '''lookup server object by DN
+
+        dn is a distinguished name including the context name'''
+        if "." not in dn:
+            raise ValueError("dn '%s' is malformed" % dn)
+
+        for context in Context.list:
+            if dn in context._server_map:
+                return context._server_map[dn]
+
+        return None
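
A condensed, runnable sketch of the lookup pattern this hunk introduces:
every context registers itself in a class-level list, and the now-static
get_server() searches each context's _server_map by DN. The constructor and
map population below are simplified stand-ins, not the real model.py code.

class Context(object):
    list = []  # class-level registry of all created contexts

    def __init__(self, name):
        self.name = name
        self._server_map = {}  # keyed by "<server>.<context>" DN
        Context.list.append(self)

    @staticmethod
    def get_server(dn):
        '''lookup server object by DN across all known contexts'''
        if "." not in dn:
            raise ValueError("dn '%s' is malformed" % dn)
        for context in Context.list:
            if dn in context._server_map:
                return context._server_map[dn]
        return None

demo1 = Context("demo1")
demo1._server_map["client.demo1"] = "client-object"
assert Context.get_server("client.demo1") == "client-object"
assert Context.get_server("client.demo2") is None
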
diff --git a/yardstick/main.py b/yardstick/main.py
index 050a564..5669fde 100755
@@ -23,7 +23,6 @@ import pkg_resources
 from yardstick.benchmark.context.model import Context
 from yardstick.benchmark.runners import base as base_runner
 from yardstick.cmdparser import CmdParser
-from yardstick.orchestrator.heat import HeatStack
 
 
 class TaskParser(object):
@@ -43,43 +42,58 @@ class TaskParser(object):
         if cfg["schema"] != "yardstick:task:0.1":
             sys.exit("error: file %s has unknown schema %s" % (self.path,
                                                                cfg["schema"]))
-        context = Context()
-        context.init(cfg["context"])
+
+        # TODO: support one or many contexts? Many would be simpler and more precise
+        if "context" in cfg:
+            context_cfgs = [cfg["context"]]
+        else:
+            context_cfgs = cfg["contexts"]
+
+        for cfg_attrs in context_cfgs:
+            context = Context()
+            context.init(cfg_attrs)
 
         run_in_parallel = cfg.get("run_in_parallel", False)
 
         # TODO we need something better here, a class that represent the file
-        return cfg["scenarios"], run_in_parallel, context
+        return cfg["scenarios"], run_in_parallel
 
 
 def atexit_handler():
     '''handler for process termination'''
     base_runner.Runner.terminate_all()
 
-    if HeatStack.stacks_exist():
-        print "Deleting all stacks"
-    HeatStack.delete_all()
+    if len(Context.list) > 0:
+        print "Undeploying all contexts"
+        for context in Context.list:
+            context.undeploy()
 
 
-def run_one_scenario(scenario_cfg, context, output_file):
+def run_one_scenario(scenario_cfg, output_file):
     '''run one scenario using context'''
     key_filename = pkg_resources.resource_filename(
         'yardstick.resources', 'files/yardstick_key')
 
-    host = context.get_server(scenario_cfg["host"])
+    host = Context.get_server(scenario_cfg["host"])
 
     runner_cfg = scenario_cfg["runner"]
     runner_cfg['host'] = host.floating_ip["ipaddr"]
-    runner_cfg['user'] = context.user
+    runner_cfg['user'] = host.context.user
     runner_cfg['key_filename'] = key_filename
     runner_cfg['output_filename'] = output_file
 
-    target = context.get_server(scenario_cfg["target"])
+    # TODO target should be optional to support single VM scenarios
+    target = Context.get_server(scenario_cfg["target"])
     if target.floating_ip:
         runner_cfg['target'] = target.floating_ip["ipaddr"]
 
-    # TODO hardcoded name below, a server can be attached to several nets
-    scenario_cfg["ipaddr"] = target.ports["test"]["ipaddr"]
+    # TODO scenario_cfg["ipaddr"] is bad, "dest_ip" is better
+    if host.context != target.context:
+        # target is in another context, get its public IP
+        scenario_cfg["ipaddr"] = target.floating_ip["ipaddr"]
+    else:
+        # TODO hardcoded name below, a server can be attached to several nets
+        scenario_cfg["ipaddr"] = target.ports["test"]["ipaddr"]
 
     runner = base_runner.Runner.get(runner_cfg)
 
@@ -105,17 +119,18 @@ def main():
     prog_args = CmdParser().parse_args()
 
     parser = TaskParser(prog_args.taskfile[0])
-    scenarios, run_in_parallel, context = parser.parse()
+    scenarios, run_in_parallel = parser.parse()
 
     if prog_args.parse_only:
         sys.exit(0)
 
-    context.deploy()
+    for context in Context.list:
+        context.deploy()
 
     runners = []
     if run_in_parallel:
         for scenario in scenarios:
-            runner = run_one_scenario(scenario, context, prog_args.output_file)
+            runner = run_one_scenario(scenario, prog_args.output_file)
             runners.append(runner)
 
         # Wait for runners to finish
@@ -125,15 +140,16 @@ def main():
     else:
         # run serially
         for scenario in scenarios:
-            runner = run_one_scenario(scenario, context, prog_args.output_file)
+            runner = run_one_scenario(scenario, prog_args.output_file)
             runner_join(runner)
             print "Runner ended, output in", prog_args.output_file
 
     if prog_args.keep_deploy:
         # keep deployment, forget about stack (hide it for exit handler)
-        context.stack = None
+        Context.list = []
     else:
-        context.undeploy()
+        for context in Context.list:
+            context.undeploy()
 
     print "Done, exiting"
 
diff --git a/yardstick/orchestrator/heat.py b/yardstick/orchestrator/heat.py
index 9c0d0f1..ddab896 100644
@@ -66,9 +66,9 @@ class HeatStack(HeatObject):
     ''' Represents a Heat stack (deployed template) '''
     stacks = []
 
-    def __init__(self, uuid, name):
+    def __init__(self, name):
         super(HeatStack, self).__init__()
-        self.uuid = uuid
+        self.uuid = None
         self.name = name
         self.outputs = None
         HeatStack.stacks.append(self)
@@ -80,6 +80,9 @@ class HeatStack(HeatObject):
 
     def _delete(self):
         '''deletes a stack from the target cloud using heat'''
+        if self.uuid is None:
+            return
+
         log.info("Deleting stack '%s', uuid:%s", self.name, self.uuid)
         heat = self._get_heat_client()
         template = heat.stacks.get(self.uuid)
@@ -131,7 +134,7 @@ class HeatStack(HeatObject):
 
     @staticmethod
     def delete_all():
-        for stack in HeatStack.stacks:
+        for stack in HeatStack.stacks[:]:
             stack.delete()
 
     def update(self):
@@ -394,14 +397,14 @@ class HeatTemplate(HeatObject):
         returns a dict with the requested output values from the template'''
         log.info("Creating stack '%s'", self.name)
 
+        # create stack early to support cleanup, e.g. ctrl-c while waiting
+        stack = HeatStack(self.name)
+
         heat = self._get_heat_client()
         json_template = json.dumps(self._template)
         start_time = time.time()
-        self.uuid = heat.stacks.create(stack_name=self.name,
-                                       template=json_template)['stack']['id']
-
-        # create stack early to support cleanup, e.g. ctrl-c while waiting
-        stack = HeatStack(self.uuid, self.name)
+        stack.uuid = self.uuid = heat.stacks.create(
+            stack_name=self.name, template=json_template)['stack']['id']
 
         status = self.status()
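
Taken together, the reordering in create() plus the new uuid-is-None guard in
_delete() implement a "register the handle before the remote call" cleanup
pattern, and the stacks[:] slice in delete_all() guards against mutating the
list while iterating it. A self-contained sketch of the combined idea,
assuming an atexit/SIGINT handler calls delete_all(); all names here are
illustrative, not the real heat.py API.

class Handle(object):
    '''illustrative stand-in for HeatStack'''
    registry = []

    def __init__(self, name):
        self.name = name
        self.uuid = None               # remote resource not created yet
        Handle.registry.append(self)   # visible to cleanup immediately

    def delete(self):
        if self.uuid is None:
            return                     # interrupted before creation finished
        print "deleting '%s' uuid:%s" % (self.name, self.uuid)
        Handle.registry.remove(self)

    @staticmethod
    def delete_all():
        for handle in Handle.registry[:]:  # iterate a copy, delete() mutates
            handle.delete()

h = Handle("demo")
Handle.delete_all()   # no-op: uuid still None, e.g. ctrl-c mid-create
h.uuid = "a1b2c3"     # as if heat.stacks.create() just returned
Handle.delete_all()   # now actually deletes the stack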