# Merge "Yardstick output format unified"
# yardstick.git / yardstick / benchmark / contexts / heat.py
1 ##############################################################################
2 # Copyright (c) 2015 Ericsson AB and others.
3 #
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
9
from __future__ import absolute_import
from __future__ import print_function

import collections
import logging
import os
import uuid
from collections import OrderedDict

try:
    # collections.Mapping was removed from the collections namespace in
    # Python 3.10; collections.abc is the canonical location.
    from collections.abc import Mapping
except ImportError:  # Python 2 fallback
    from collections import Mapping

import ipaddress
import paramiko
import pkg_resources

from yardstick.benchmark.contexts.base import Context
from yardstick.benchmark.contexts.model import Network
from yardstick.benchmark.contexts.model import PlacementGroup, ServerGroup
from yardstick.benchmark.contexts.model import Server
from yardstick.benchmark.contexts.model import update_scheduler_hints
from yardstick.common.constants import YARDSTICK_ROOT_PATH
from yardstick.orchestrator.heat import HeatTemplate, get_short_key_uuid
30
31 LOG = logging.getLogger(__name__)
32
33 DEFAULT_HEAT_TIMEOUT = 3600
34
35
36 class HeatContext(Context):
37     """Class that represents a context in the logical model"""
38
39     __context_type__ = "Heat"
40
41     def __init__(self):
42         self.name = None
43         self.stack = None
44         self.networks = OrderedDict()
45         self.servers = []
46         self.placement_groups = []
47         self.server_groups = []
48         self.keypair_name = None
49         self.secgroup_name = None
50         self._server_map = {}
51         self._image = None
52         self._flavor = None
53         self.flavors = set()
54         self._user = None
55         self.template_file = None
56         self.heat_parameters = None
57         # generate an uuid to identify yardstick_key
58         # the first 8 digits of the uuid will be used
59         self.key_uuid = uuid.uuid4()
60         self.key_filename = ''.join(
61             [YARDSTICK_ROOT_PATH, 'yardstick/resources/files/yardstick_key-',
62              get_short_key_uuid(self.key_uuid)])
63         super(HeatContext, self).__init__()
64
65     def assign_external_network(self, networks):
66         sorted_networks = sorted(networks.items())
67         external_network = os.environ.get("EXTERNAL_NETWORK", "net04_ext")
68         have_external_network = [(name, net)
69                                  for name, net in sorted_networks if
70                                  net.get("external_network")]
71         # no external net defined, assign it to first network usig os.environ
72         if sorted_networks and not have_external_network:
73             sorted_networks[0][1]["external_network"] = external_network
74         return sorted_networks
75
    def init(self, attrs):     # pragma: no cover
        """Initialize the context from the scenario's "context" attributes.

        Two modes:
        * "heat_template" supplied: the external template (plus optional
          "heat_parameters") is used as-is; no model objects are built.
        * otherwise: build the logical model (placement/server groups,
          networks, servers) from *attrs*, then generate an RSA keypair on
          disk (self.key_filename and self.key_filename + ".pub").
        """
        self.name = attrs["name"]

        self._user = attrs.get("user")

        self.template_file = attrs.get("heat_template")
        if self.template_file:
            # external template mode: only the parameters are relevant
            self.heat_parameters = attrs.get("heat_parameters")
            return

        # per-context resource names derived from the context name
        self.keypair_name = self.name + "-key"
        self.secgroup_name = self.name + "-secgroup"

        self._image = attrs.get("image")

        self._flavor = attrs.get("flavor")

        self.heat_timeout = attrs.get("timeout", DEFAULT_HEAT_TIMEOUT)

        self.placement_groups = [PlacementGroup(name, self, pgattrs["policy"])
                                 for name, pgattrs in attrs.get(
                                 "placement_groups", {}).items()]

        self.server_groups = [ServerGroup(name, self, sgattrs["policy"])
                              for name, sgattrs in attrs.get(
                              "server_groups", {}).items()]

        # we have to do this first, because we are injecting external_network
        # into the dict
        sorted_networks = self.assign_external_network(attrs["networks"])

        self.networks = OrderedDict(
            (name, Network(name, self, netattrs)) for name, netattrs in
            sorted_networks)

        for name, serverattrs in sorted(attrs["servers"].items()):
            server = Server(name, self, serverattrs)
            self.servers.append(server)
            self._server_map[server.dn] = server

        # generate the SSH keypair used to reach the deployed servers
        rsa_key = paramiko.RSAKey.generate(bits=2048, progress_func=None)
        rsa_key.write_private_key_file(self.key_filename)
        print("Writing %s ..." % self.key_filename)
        with open(self.key_filename + ".pub", "w") as pubkey_file:
            pubkey_file.write(
                "%s %s\n" % (rsa_key.get_name(), rsa_key.get_base64()))
        # drop the private key object once both files are written
        del rsa_key
124
125     @property
126     def image(self):
127         """returns application's default image name"""
128         return self._image
129
130     @property
131     def flavor(self):
132         """returns application's default flavor name"""
133         return self._flavor
134
135     @property
136     def user(self):
137         """return login user name corresponding to image"""
138         return self._user
139
140     def _add_resources_to_template(self, template):
141         """add to the template the resources represented by this context"""
142
143         if self.flavor:
144             if isinstance(self.flavor, dict):
145                 flavor = self.flavor.setdefault("name", self.name + "-flavor")
146                 template.add_flavor(**self.flavor)
147                 self.flavors.add(flavor)
148
149         template.add_keypair(self.keypair_name, self.key_uuid)
150         template.add_security_group(self.secgroup_name)
151
152         for network in self.networks.values():
153             template.add_network(network.stack_name,
154                                  network.physical_network,
155                                  network.provider)
156             template.add_subnet(network.subnet_stack_name, network.stack_name,
157                                 network.subnet_cidr)
158
159             if network.router:
160                 template.add_router(network.router.stack_name,
161                                     network.router.external_gateway_info,
162                                     network.subnet_stack_name)
163                 template.add_router_interface(network.router.stack_if_name,
164                                               network.router.stack_name,
165                                               network.subnet_stack_name)
166
167         # create a list of servers sorted by increasing no of placement groups
168         list_of_servers = sorted(self.servers,
169                                  key=lambda s: len(s.placement_groups))
170
171         #
172         # add servers with scheduler hints derived from placement groups
173         #
174
175         # create list of servers with availability policy
176         availability_servers = []
177         for server in list_of_servers:
178             for pg in server.placement_groups:
179                 if pg.policy == "availability":
180                     availability_servers.append(server)
181                     break
182
183         for server in availability_servers:
184             if isinstance(server.flavor, dict):
185                 try:
186                     self.flavors.add(server.flavor["name"])
187                 except KeyError:
188                     self.flavors.add(server.stack_name + "-flavor")
189
190         # add servers with availability policy
191         added_servers = []
192         for server in availability_servers:
193             scheduler_hints = {}
194             for pg in server.placement_groups:
195                 update_scheduler_hints(scheduler_hints, added_servers, pg)
196             # workround for openstack nova bug, check JIRA: YARDSTICK-200
197             # for details
198             if len(availability_servers) == 2:
199                 if not scheduler_hints["different_host"]:
200                     scheduler_hints.pop("different_host", None)
201                     server.add_to_template(template,
202                                            list(self.networks.values()),
203                                            scheduler_hints)
204                 else:
205                     scheduler_hints["different_host"] = \
206                         scheduler_hints["different_host"][0]
207                     server.add_to_template(template,
208                                            list(self.networks.values()),
209                                            scheduler_hints)
210             else:
211                 server.add_to_template(template,
212                                        list(self.networks.values()),
213                                        scheduler_hints)
214             added_servers.append(server.stack_name)
215
216         # create list of servers with affinity policy
217         affinity_servers = []
218         for server in list_of_servers:
219             for pg in server.placement_groups:
220                 if pg.policy == "affinity":
221                     affinity_servers.append(server)
222                     break
223
224         # add servers with affinity policy
225         for server in affinity_servers:
226             if server.stack_name in added_servers:
227                 continue
228             scheduler_hints = {}
229             for pg in server.placement_groups:
230                 update_scheduler_hints(scheduler_hints, added_servers, pg)
231             server.add_to_template(template, list(self.networks.values()),
232                                    scheduler_hints)
233             added_servers.append(server.stack_name)
234
235         # add server group
236         for sg in self.server_groups:
237             template.add_server_group(sg.name, sg.policy)
238
239         # add remaining servers with no placement group configured
240         for server in list_of_servers:
241             # TODO placement_group and server_group should combine
242             if not server.placement_groups:
243                 scheduler_hints = {}
244                 # affinity/anti-aff server group
245                 sg = server.server_group
246                 if sg:
247                     scheduler_hints["group"] = {'get_resource': sg.name}
248                 server.add_to_template(template,
249                                        list(self.networks.values()),
250                                        scheduler_hints)
251
252     def deploy(self):
253         """deploys template into a stack using cloud"""
254         print("Deploying context '%s'" % self.name)
255
256         heat_template = HeatTemplate(self.name, self.template_file,
257                                      self.heat_parameters)
258
259         if self.template_file is None:
260             self._add_resources_to_template(heat_template)
261
262         try:
263             self.stack = heat_template.create(block=True,
264                                               timeout=self.heat_timeout)
265         except KeyboardInterrupt:
266             raise SystemExit("\nStack create interrupted")
267         except:
268             LOG.exception("stack failed")
269             raise
270         # let the other failures happend, we want stack trace
271
272         # copy some vital stack output into server objects
273         for server in self.servers:
274             if server.ports:
275                 # TODO(hafe) can only handle one internal network for now
276                 port = next(iter(server.ports.values()))
277                 server.private_ip = self.stack.outputs[port["stack_name"]]
278                 server.interfaces = {}
279                 for network_name, port in server.ports.items():
280                     self.make_interface_dict(network_name, port['stack_name'],
281                                              server,
282                                              self.stack.outputs)
283
284             if server.floating_ip:
285                 server.public_ip = \
286                     self.stack.outputs[server.floating_ip["stack_name"]]
287
288         print("Context '%s' deployed" % self.name)
289
290     def make_interface_dict(self, network_name, stack_name, server, outputs):
291         server.interfaces[network_name] = {
292             "private_ip": outputs[stack_name],
293             "subnet_id": outputs[stack_name + "-subnet_id"],
294             "subnet_cidr": outputs[
295                 "{}-{}-subnet-cidr".format(self.name, network_name)],
296             "netmask": str(ipaddress.ip_network(
297                 outputs["{}-{}-subnet-cidr".format(self.name,
298                                                    network_name)]).netmask),
299             "gateway_ip": outputs[
300                 "{}-{}-subnet-gateway_ip".format(self.name, network_name)],
301             "mac_address": outputs[stack_name + "-mac_address"],
302             "device_id": outputs[stack_name + "-device_id"],
303             "network_id": outputs[stack_name + "-network_id"],
304             "network_name": network_name,
305             # to match vnf_generic
306             "local_mac": outputs[stack_name + "-mac_address"],
307             "local_ip": outputs[stack_name],
308             "vld_id": self.networks[network_name].vld_id,
309         }
310
311     def undeploy(self):
312         """undeploys stack from cloud"""
313         if self.stack:
314             print("Undeploying context '%s'" % self.name)
315             self.stack.delete()
316             self.stack = None
317             print("Context '%s' undeployed" % self.name)
318
319         if os.path.exists(self.key_filename):
320             try:
321                 os.remove(self.key_filename)
322                 os.remove(self.key_filename + ".pub")
323             except OSError:
324                 LOG.exception("Key filename %s", self.key_filename)
325
326         super(HeatContext, self).undeploy()
327
328     def _get_server(self, attr_name):
329         """lookup server info by name from context
330         attr_name: either a name for a server created by yardstick or a dict
331         with attribute name mapping when using external heat templates
332         """
333         key_filename = pkg_resources.resource_filename(
334             'yardstick.resources',
335             'files/yardstick_key-' + get_short_key_uuid(self.key_uuid))
336
337         if isinstance(attr_name, collections.Mapping):
338             cname = attr_name["name"].split(".")[1]
339             if cname != self.name:
340                 return None
341
342             public_ip = None
343             private_ip = None
344             if "public_ip_attr" in attr_name:
345                 public_ip = self.stack.outputs[attr_name["public_ip_attr"]]
346             if "private_ip_attr" in attr_name:
347                 private_ip = self.stack.outputs[
348                     attr_name["private_ip_attr"]]
349
350             # Create a dummy server instance for holding the *_ip attributes
351             server = Server(attr_name["name"].split(".")[0], self, {})
352             server.public_ip = public_ip
353             server.private_ip = private_ip
354         else:
355             if attr_name not in self._server_map:
356                 return None
357             server = self._server_map[attr_name]
358
359         if server is None:
360             return None
361
362         result = {
363             "user": server.context.user,
364             "key_filename": key_filename,
365             "private_ip": server.private_ip,
366             "interfaces": server.interfaces,
367         }
368         # Target server may only have private_ip
369         if server.public_ip:
370             result["ip"] = server.public_ip
371
372         return result