Merge "Add a new monitor type: MultiMonitor that can run any number of other monitors...
yardstick/benchmark/contexts/heat.py
##############################################################################
# Copyright (c) 2015 Ericsson AB and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

from __future__ import absolute_import
from __future__ import print_function

import collections
import logging
import os
import sys
import uuid

import paramiko
import pkg_resources

from yardstick.benchmark.contexts.base import Context
from yardstick.benchmark.contexts.model import Network
from yardstick.benchmark.contexts.model import PlacementGroup, ServerGroup
from yardstick.benchmark.contexts.model import Server
from yardstick.benchmark.contexts.model import update_scheduler_hints
from yardstick.orchestrator.heat import HeatTemplate, get_short_key_uuid
from yardstick.common.constants import YARDSTICK_ROOT_PATH

LOG = logging.getLogger(__name__)


class HeatContext(Context):
    """Class that represents a context in the logical model"""

    __context_type__ = "Heat"

    def __init__(self):
        self.name = None
        self.stack = None
        self.networks = []
        self.servers = []
        self.placement_groups = []
        self.server_groups = []
        self.keypair_name = None
        self.secgroup_name = None
        self._server_map = {}
        self._image = None
        self._flavor = None
        self.flavors = set()
        self._user = None
        self.template_file = None
        self.heat_parameters = None
        # generate a UUID to identify yardstick_key;
        # the first 8 digits of the UUID are used in the filename
        self.key_uuid = uuid.uuid4()
        self.key_filename = ''.join(
            [YARDSTICK_ROOT_PATH, 'yardstick/resources/files/yardstick_key-',
             get_short_key_uuid(self.key_uuid)])
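        # e.g. .../yardstick/resources/files/yardstick_key-3b755b47
        # (the 3b755b47 suffix is an illustrative short UUID)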
        super(HeatContext, self).__init__()

    def assign_external_network(self, networks):
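        """Mark one of the given networks as the external network.

        A minimal sketch of the in-place update (names illustrative):

            nets = {"mgmt": {}, "test": {}}
            context.assign_external_network(nets)
            # no network declared "external_network", so the first one
            # (sorted by name) now has nets["mgmt"]["external_network"]
            # set to $EXTERNAL_NETWORK (default "net04_ext")
        """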
        sorted_networks = sorted(networks.items())
        external_network = os.environ.get("EXTERNAL_NETWORK", "net04_ext")
        have_external_network = [(name, net)
                                 for name, net in sorted_networks if
                                 net.get("external_network")]
        # no external net defined, assign it to the first network
        # using os.environ
        if sorted_networks and not have_external_network:
            sorted_networks[0][1]["external_network"] = external_network

    def init(self, attrs):     # pragma: no cover
        """Initialize the context from the supplied attrs mapping.
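
        A minimal attrs mapping looks roughly like this (values are
        illustrative; the keys mirror the lookups done below):

            name: demo
            image: cirros-0.3.5
            flavor: m1.tiny
            user: cirros
            networks:
              test: {cidr: '10.0.1.0/24'}
            servers:
              athena: {}
        """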
        self.name = attrs["name"]

        self._user = attrs.get("user")

        self.template_file = attrs.get("heat_template")
        if self.template_file:
            self.heat_parameters = attrs.get("heat_parameters")
            return

        self.keypair_name = self.name + "-key"
        self.secgroup_name = self.name + "-secgroup"

        self._image = attrs.get("image")

        self._flavor = attrs.get("flavor")

        self.placement_groups = [PlacementGroup(name, self, pgattrs["policy"])
                                 for name, pgattrs in attrs.get(
                                 "placement_groups", {}).items()]

        self.server_groups = [ServerGroup(name, self, sgattrs["policy"])
                              for name, sgattrs in attrs.get(
                              "server_groups", {}).items()]

        self.assign_external_network(attrs["networks"])

        self.networks = [Network(name, self, netattrs) for name, netattrs in
                         sorted(attrs["networks"].items())]

        for name, serverattrs in attrs["servers"].items():
            server = Server(name, self, serverattrs)
            self.servers.append(server)
            self._server_map[server.dn] = server

        rsa_key = paramiko.RSAKey.generate(bits=2048, progress_func=None)
        rsa_key.write_private_key_file(self.key_filename)
        print("Writing %s ..." % self.key_filename)
        with open(self.key_filename + ".pub", "w") as pubkey_file:
            pubkey_file.write(
                "%s %s\n" % (rsa_key.get_name(), rsa_key.get_base64()))
        del rsa_key

    @property
    def image(self):
        """returns application's default image name"""
        return self._image

    @property
    def flavor(self):
        """returns application's default flavor name"""
        return self._flavor

    @property
    def user(self):
        """return login user name corresponding to image"""
        return self._user

    def _add_resources_to_template(self, template):
        """add to the template the resources represented by this context"""

        if self.flavor:
            if isinstance(self.flavor, dict):
                flavor = self.flavor.setdefault("name", self.name + "-flavor")
                template.add_flavor(**self.flavor)
                self.flavors.add(flavor)
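                # a dict flavor, e.g. {"vcpus": 2, "ram": 2048} (keys are
                # illustrative and forwarded to template.add_flavor), gets
                # the generated name "<context name>-flavor" when no "name"
                # key is present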

        template.add_keypair(self.keypair_name, self.key_uuid)
        template.add_security_group(self.secgroup_name)

        for network in self.networks:
            template.add_network(network.stack_name,
                                 network.physical_network,
                                 network.provider)
            template.add_subnet(network.subnet_stack_name, network.stack_name,
                                network.subnet_cidr)

            if network.router:
                template.add_router(network.router.stack_name,
                                    network.router.external_gateway_info,
                                    network.subnet_stack_name)
                template.add_router_interface(network.router.stack_if_name,
                                              network.router.stack_name,
                                              network.subnet_stack_name)

        # create a list of servers sorted by increasing number of
        # placement groups
        list_of_servers = sorted(self.servers,
                                 key=lambda s: len(s.placement_groups))

        #
        # add servers with scheduler hints derived from placement groups
        #

        # create list of servers with availability policy
        availability_servers = []
        for server in list_of_servers:
            for pg in server.placement_groups:
                if pg.policy == "availability":
                    availability_servers.append(server)
                    break

        for server in availability_servers:
            if isinstance(server.flavor, dict):
                try:
                    self.flavors.add(server.flavor["name"])
                except KeyError:
                    self.flavors.add(server.stack_name + "-flavor")

        # add servers with availability policy
        added_servers = []
        for server in availability_servers:
            scheduler_hints = {}
            for pg in server.placement_groups:
                update_scheduler_hints(scheduler_hints, added_servers, pg)
            # workaround for an OpenStack Nova bug; see JIRA: YARDSTICK-200
            # for details
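            # at this point scheduler_hints typically looks like, e.g.,
            # {"different_host": [<stack names of already-added servers>]}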
            if len(availability_servers) == 2:
                if not scheduler_hints.get("different_host"):
                    scheduler_hints.pop("different_host", None)
                    server.add_to_template(template,
                                           self.networks,
                                           scheduler_hints)
                else:
                    scheduler_hints["different_host"] = \
                        scheduler_hints["different_host"][0]
                    server.add_to_template(template,
                                           self.networks,
                                           scheduler_hints)
            else:
                server.add_to_template(template,
                                       self.networks,
                                       scheduler_hints)
            added_servers.append(server.stack_name)

        # create list of servers with affinity policy
        affinity_servers = []
        for server in list_of_servers:
            for pg in server.placement_groups:
                if pg.policy == "affinity":
                    affinity_servers.append(server)
                    break

        # add servers with affinity policy
        for server in affinity_servers:
            if server.stack_name in added_servers:
                continue
            scheduler_hints = {}
            for pg in server.placement_groups:
                update_scheduler_hints(scheduler_hints, added_servers, pg)
            server.add_to_template(template, self.networks, scheduler_hints)
            added_servers.append(server.stack_name)

        # add server groups
        for sg in self.server_groups:
            template.add_server_group(sg.name, sg.policy)

        # add remaining servers with no placement group configured
        for server in list_of_servers:
            # TODO placement_group and server_group should be combined
            if not server.placement_groups:
                scheduler_hints = {}
                # affinity/anti-affinity server group
                sg = server.server_group
                if sg:
                    scheduler_hints["group"] = {'get_resource': sg.name}
                server.add_to_template(template,
                                       self.networks, scheduler_hints)

    def deploy(self):
        """deploys the template into a stack in the cloud"""
        print("Deploying context '%s'" % self.name)

        heat_template = HeatTemplate(self.name, self.template_file,
                                     self.heat_parameters)

        if self.template_file is None:
            self._add_resources_to_template(heat_template)

        try:
            self.stack = heat_template.create()
        except KeyboardInterrupt:
            sys.exit("\nStack create interrupted")
        except RuntimeError as err:
            sys.exit("error: failed to deploy stack: '%s'" % err.args)
        except Exception as err:
            sys.exit("error: failed to deploy stack: '%s'" % err)

        # copy some vital stack output into server objects
        for server in self.servers:
            if server.ports:
                # TODO(hafe) can only handle one internal network for now
                port = next(iter(server.ports.values()))
                server.private_ip = self.stack.outputs[port["stack_name"]]

            if server.floating_ip:
                server.public_ip = \
                    self.stack.outputs[server.floating_ip["stack_name"]]

        print("Context '%s' deployed" % self.name)

    def undeploy(self):
        """undeploys the stack from the cloud"""
        if self.stack:
            print("Undeploying context '%s'" % self.name)
            self.stack.delete()
            self.stack = None
            print("Context '%s' undeployed" % self.name)

        if os.path.exists(self.key_filename):
            try:
                os.remove(self.key_filename)
                os.remove(self.key_filename + ".pub")
            except OSError:
                LOG.exception("Failed to delete key file %s",
                              self.key_filename)

        super(HeatContext, self).undeploy()

    def _get_server(self, attr_name):
        """Look up server info by name from the context.

        attr_name: either a name for a server created by yardstick, or a
        dict with an attribute-name mapping when using external heat
        templates.
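
        An illustrative mapping (names are made up; "demo" must match the
        context name for a hit):

            attr_name = {
                "name": "server.demo",
                "public_ip_attr": "server_public_ip",    # optional
                "private_ip_attr": "server_private_ip",  # optional
            }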
        """
        key_filename = pkg_resources.resource_filename(
            'yardstick.resources',
            'files/yardstick_key-' + get_short_key_uuid(self.key_uuid))

        if isinstance(attr_name, collections.Mapping):
            cname = attr_name["name"].split(".")[1]
            if cname != self.name:
                return None

            public_ip = None
            private_ip = None
            if "public_ip_attr" in attr_name:
                public_ip = self.stack.outputs[attr_name["public_ip_attr"]]
            if "private_ip_attr" in attr_name:
                private_ip = self.stack.outputs[
                    attr_name["private_ip_attr"]]

            # Create a dummy server instance for holding the *_ip attributes
            server = Server(attr_name["name"].split(".")[0], self, {})
            server.public_ip = public_ip
            server.private_ip = private_ip
        else:
            if attr_name not in self._server_map:
                return None
            server = self._server_map[attr_name]

        if server is None:
            return None

        result = {
            "user": server.context.user,
            "key_filename": key_filename,
            "private_ip": server.private_ip
        }
        # Target server may only have private_ip
        if server.public_ip:
            result["ip"] = server.public_ip

        return result
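
# Typical lifecycle, as driven by a yardstick task (sketch; attrs comes
# from the "context" section of a task file):
#
#   context = HeatContext()
#   context.init(attrs)
#   context.deploy()
#   ...run scenarios, resolving hosts through the context...
#   context.undeploy()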