Merge "ansible: disable Extra cloud image kernel stub"
[yardstick.git] / yardstick / benchmark / contexts / heat.py
1 ##############################################################################
2 # Copyright (c) 2015 Ericsson AB and others.
3 #
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
9
10 from __future__ import absolute_import
11 from __future__ import print_function
12
13 import collections
14 import logging
15 import os
16 import errno
17 from collections import OrderedDict
18
19 import ipaddress
20 import pkg_resources
21
22 from yardstick.benchmark.contexts.base import Context
23 from yardstick.benchmark.contexts.model import Network
24 from yardstick.benchmark.contexts.model import PlacementGroup, ServerGroup
25 from yardstick.benchmark.contexts.model import Server
26 from yardstick.benchmark.contexts.model import update_scheduler_hints
27 from yardstick.common import exceptions as y_exc
28 from yardstick.common.openstack_utils import get_shade_client
29 from yardstick.orchestrator.heat import HeatStack
30 from yardstick.orchestrator.heat import HeatTemplate
31 from yardstick.common import constants as consts
32 from yardstick.common import utils
33 from yardstick.common.utils import source_env
34 from yardstick.ssh import SSH
35
LOG = logging.getLogger(__name__)

# Default timeout (in seconds) to wait for Heat stack creation to complete;
# can be overridden per-context via the "timeout" attribute.
DEFAULT_HEAT_TIMEOUT = 3600
39
40
def join_args(sep, *args):
    """Concatenate all positional arguments with *sep* between them."""
    joined = sep.join(args)
    return joined
43
44
45 def h_join(*args):
46     return '-'.join(args)
47
48
class HeatContext(Context):
    """Context that models an OpenStack Heat stack.

    Builds a Heat template from the task-file description (networks,
    servers, placement and server groups) or uses a user-supplied
    template, deploys/undeploys it as a stack, and exposes the
    resulting server and network details to the framework.
    """

    __context_type__ = "Heat"
53
54     def __init__(self):
55         self.stack = None
56         self.networks = OrderedDict()
57         self.heat_timeout = None
58         self.servers = []
59         self.placement_groups = []
60         self.server_groups = []
61         self.keypair_name = None
62         self.secgroup_name = None
63         self._server_map = {}
64         self.attrs = {}
65         self._image = None
66         self._flavor = None
67         self.flavors = set()
68         self._user = None
69         self.template_file = None
70         self.heat_parameters = None
71         self.shade_client = None
72         self.heat_timeout = None
73         self.key_filename = None
74         super(HeatContext, self).__init__()
75
76     @staticmethod
77     def assign_external_network(networks):
78         sorted_networks = sorted(networks.items())
79         external_network = os.environ.get("EXTERNAL_NETWORK", "net04_ext")
80
81         have_external_network = any(net.get("external_network") for net in networks.values())
82         if not have_external_network:
83             # try looking for mgmt network first
84             try:
85                 networks['mgmt']["external_network"] = external_network
86             except KeyError:
87                 if sorted_networks:
88                     # otherwise assign it to first network using os.environ
89                     sorted_networks[0][1]["external_network"] = external_network
90
91         return sorted_networks
92
93     def init(self, attrs):
94         """Initializes itself from the supplied arguments"""
95         super(HeatContext, self).init(attrs)
96
97         self.check_environment()
98         self._user = attrs.get("user")
99
100         self.template_file = attrs.get("heat_template")
101
102         self.heat_timeout = attrs.get("timeout", DEFAULT_HEAT_TIMEOUT)
103         if self.template_file:
104             self.heat_parameters = attrs.get("heat_parameters")
105             return
106
107         self.keypair_name = h_join(self.name, "key")
108         self.secgroup_name = h_join(self.name, "secgroup")
109
110         self._image = attrs.get("image")
111
112         self._flavor = attrs.get("flavor")
113
114         self.placement_groups = [PlacementGroup(name, self, pg_attrs["policy"])
115                                  for name, pg_attrs in attrs.get(
116                                  "placement_groups", {}).items()]
117
118         self.server_groups = [ServerGroup(name, self, sg_attrs["policy"])
119                               for name, sg_attrs in attrs.get(
120                               "server_groups", {}).items()]
121
122         # we have to do this first, because we are injecting external_network
123         # into the dict
124         sorted_networks = self.assign_external_network(attrs["networks"])
125
126         self.networks = OrderedDict(
127             (name, Network(name, self, net_attrs)) for name, net_attrs in
128             sorted_networks)
129
130         for name, server_attrs in sorted(attrs["servers"].items()):
131             server = Server(name, self, server_attrs)
132             self.servers.append(server)
133             self._server_map[server.dn] = server
134
135         self.attrs = attrs
136
137     def check_environment(self):
138         try:
139             os.environ['OS_AUTH_URL']
140         except KeyError:
141             try:
142                 source_env(consts.OPENRC)
143             except IOError as e:
144                 if e.errno != errno.EEXIST:
145                     LOG.error('OPENRC file not found')
146                     raise
147                 else:
148                     LOG.error('OS_AUTH_URL not found')
149
    @property
    def image(self):
        """returns application's default image name (the "image" attr)"""
        return self._image
154
    @property
    def flavor(self):
        """returns application's default flavor name or flavor dict
        (the "flavor" attr; may be None when servers define their own)
        """
        return self._flavor
159
    @property
    def user(self):
        """return login user name corresponding to image"""
        return self._user
164
    def _add_resources_to_template(self, template):
        """Add to the template the resources represented by this context.

        Populates *template* with: an optional context-level flavor, the
        keypair and security group, all non-existing networks (with
        subnets and optional routers), and every server -- availability
        placement-group servers first, then affinity ones, then servers
        with no placement group at all.
        """

        # context-level default flavor: only created when given as a dict
        # (a plain string names a pre-existing flavor)
        if self.flavor:
            if isinstance(self.flavor, dict):
                flavor = self.flavor.setdefault("name", self.name + "-flavor")
                template.add_flavor(**self.flavor)
                self.flavors.add(flavor)

        template.add_keypair(self.keypair_name, self.name)
        template.add_security_group(self.secgroup_name)

        for network in self.networks.values():
            # Using existing network: nothing to create in the stack
            if network.is_existing():
                continue
            template.add_network(network.stack_name,
                                 network.physical_network,
                                 network.provider,
                                 network.segmentation_id,
                                 network.port_security_enabled,
                                 network.network_type)
            template.add_subnet(network.subnet_stack_name, network.stack_name,
                                network.subnet_cidr,
                                network.enable_dhcp,
                                network.gateway_ip)

            if network.router:
                template.add_router(network.router.stack_name,
                                    network.router.external_gateway_info,
                                    network.subnet_stack_name)
                template.add_router_interface(network.router.stack_if_name,
                                              network.router.stack_name,
                                              network.subnet_stack_name)

        # create a list of servers sorted by increasing no of placement groups
        list_of_servers = sorted(self.servers,
                                 key=lambda s: len(s.placement_groups))

        #
        # add servers with scheduler hints derived from placement groups
        #

        # create list of servers with availability policy
        availability_servers = []
        for server in list_of_servers:
            for pg in server.placement_groups:
                if pg.policy == "availability":
                    availability_servers.append(server)
                    break

        # register per-server flavors (dict form) so they can be cleaned up
        for server in availability_servers:
            if isinstance(server.flavor, dict):
                try:
                    self.flavors.add(server.flavor["name"])
                except KeyError:
                    self.flavors.add(h_join(server.stack_name, "flavor"))

        # add servers with availability policy
        added_servers = []
        for server in availability_servers:
            scheduler_hints = {}
            for pg in server.placement_groups:
                update_scheduler_hints(scheduler_hints, added_servers, pg)
            # workaround for openstack nova bug, check JIRA: YARDSTICK-200
            # for details: with exactly two servers, "different_host" must be
            # a single host (or absent), not a list
            if len(availability_servers) == 2:
                if not scheduler_hints["different_host"]:
                    scheduler_hints.pop("different_host", None)
                    server.add_to_template(template,
                                           list(self.networks.values()),
                                           scheduler_hints)
                else:
                    scheduler_hints["different_host"] = \
                        scheduler_hints["different_host"][0]
                    server.add_to_template(template,
                                           list(self.networks.values()),
                                           scheduler_hints)
            else:
                server.add_to_template(template,
                                       list(self.networks.values()),
                                       scheduler_hints)
            added_servers.append(server.stack_name)

        # create list of servers with affinity policy
        affinity_servers = []
        for server in list_of_servers:
            for pg in server.placement_groups:
                if pg.policy == "affinity":
                    affinity_servers.append(server)
                    break

        # add servers with affinity policy (skipping any already added above)
        for server in affinity_servers:
            if server.stack_name in added_servers:
                continue
            scheduler_hints = {}
            for pg in server.placement_groups:
                update_scheduler_hints(scheduler_hints, added_servers, pg)
            server.add_to_template(template, list(self.networks.values()),
                                   scheduler_hints)
            added_servers.append(server.stack_name)

        # add server group
        for sg in self.server_groups:
            template.add_server_group(sg.name, sg.policy)

        # add remaining servers with no placement group configured
        for server in list_of_servers:
            # TODO placement_group and server_group should combine
            if not server.placement_groups:
                scheduler_hints = {}
                # affinity/anti-aff server group
                sg = server.server_group
                if sg:
                    scheduler_hints["group"] = {'get_resource': sg.name}
                server.add_to_template(template,
                                       list(self.networks.values()),
                                       scheduler_hints)
284
285     def get_neutron_info(self):
286         if not self.shade_client:
287             self.shade_client = get_shade_client()
288
289         networks = self.shade_client.list_networks()
290         for network in self.networks.values():
291             for neutron_net in (net for net in networks if net.name == network.stack_name):
292                     network.segmentation_id = neutron_net.get('provider:segmentation_id')
293                     # we already have physical_network
294                     # network.physical_network = neutron_net.get('provider:physical_network')
295                     network.network_type = neutron_net.get('provider:network_type')
296                     network.neutron_info = neutron_net
297
298     def _create_new_stack(self, heat_template):
299          try:
300              return heat_template.create(block=True,
301                                          timeout=self.heat_timeout)
302          except KeyboardInterrupt:
303              raise y_exc.StackCreationInterrupt
304          except Exception:
305              LOG.exception("stack failed")
306              # let the other failures happen, we want stack trace
307              raise
308
309     def _retrieve_existing_stack(self, stack_name):
310         stack = HeatStack(stack_name)
311         if stack.get():
312             return stack
313         else:
314             LOG.warning("Stack %s does not exist", self.name)
315             return None
316
    def deploy(self):
        """deploys template into a stack using cloud

        Generates (or reuses) the SSH key pair, builds the Heat template
        (from attrs, unless an external template file was given), creates
        or retrieves the stack, then copies the stack outputs (port IPs,
        floating IPs) into the Server objects.
        """
        LOG.info("Deploying context '%s' START", self.name)

        self.key_filename = ''.join(
            [consts.YARDSTICK_ROOT_PATH,
             'yardstick/resources/files/yardstick_key-',
             self.name])
        # Permissions may have changed since creation; this can be fixed. If we
        # overwrite the file, we lose future access to VMs using this key.
        # As long as the file exists, even if it is unreadable, keep it intact
        if not os.path.exists(self.key_filename):
            SSH.gen_keys(self.key_filename)

        heat_template = HeatTemplate(self.name, self.template_file,
                                     self.heat_parameters)

        # only build resources from attrs when no external template was given
        if self.template_file is None:
            self._add_resources_to_template(heat_template)

        if self._flags.no_setup:
            # Try to get an existing stack, returns a stack or None
            self.stack = self._retrieve_existing_stack(self.name)
            if not self.stack:
                self.stack = self._create_new_stack(heat_template)

        else:
            self.stack = self._create_new_stack(heat_template)

        # TODO: use Neutron to get segmentation-id
        self.get_neutron_info()

        # copy some vital stack output into server objects
        for server in self.servers:
            if server.ports:
                self.add_server_port(server)

            if server.floating_ip:
                server.public_ip = \
                    self.stack.outputs[server.floating_ip["stack_name"]]

        LOG.info("Deploying context '%s' DONE", self.name)
359
360     @staticmethod
361     def _port_net_is_existing(port_info):
362         net_flags = port_info.get('net_flags', {})
363         return net_flags.get(consts.IS_EXISTING)
364
365     @staticmethod
366     def _port_net_is_public(port_info):
367         net_flags = port_info.get('net_flags', {})
368         return net_flags.get(consts.IS_PUBLIC)
369
    def add_server_port(self, server):
        """Fill in *server*'s IPs and interfaces from the stack outputs.

        A port on an existing network that is public (or the only port)
        supplies the public IP; the private IP is taken from the first
        port seen, or overridden when there is exactly one port.
        Interfaces are built for non-existing networks only.
        """
        server_ports = server.ports.values()
        for server_port in server_ports:
            # only the first port entry of each network is used for IPs
            port_info = server_port[0]
            port_ip = self.stack.outputs[port_info["stack_name"]]
            port_net_is_existing = self._port_net_is_existing(port_info)
            port_net_is_public = self._port_net_is_public(port_info)
            if port_net_is_existing and (port_net_is_public or
                                         len(server_ports) == 1):
                server.public_ip = port_ip
            if not server.private_ip or len(server_ports) == 1:
                server.private_ip = port_ip

        server.interfaces = {}
        for network_name, ports in server.ports.items():
            for port in ports:
                # port['port'] is either port name from mapping or default network_name
                if self._port_net_is_existing(port):
                    continue
                server.interfaces[port['port']] = self.make_interface_dict(network_name,
                                                                           port['port'],
                                                                           port['stack_name'],
                                                                           self.stack.outputs)
                server.override_ip(network_name, port)
394
395     def make_interface_dict(self, network_name, port, stack_name, outputs):
396         private_ip = outputs[stack_name]
397         mac_address = outputs[h_join(stack_name, "mac_address")]
398         # these are attributes of the network, not the port
399         output_subnet_cidr = outputs[h_join(self.name, network_name,
400                                             'subnet', 'cidr')]
401
402         # these are attributes of the network, not the port
403         output_subnet_gateway = outputs[h_join(self.name, network_name,
404                                                'subnet', 'gateway_ip')]
405
406         return {
407             # add default port name
408             "name": port,
409             "private_ip": private_ip,
410             "subnet_id": outputs[h_join(stack_name, "subnet_id")],
411             "subnet_cidr": output_subnet_cidr,
412             "network": str(ipaddress.ip_network(output_subnet_cidr).network_address),
413             "netmask": str(ipaddress.ip_network(output_subnet_cidr).netmask),
414             "gateway_ip": output_subnet_gateway,
415             "mac_address": mac_address,
416             "device_id": outputs[h_join(stack_name, "device_id")],
417             "network_id": outputs[h_join(stack_name, "network_id")],
418             # this should be == vld_id for NSB tests
419             "network_name": network_name,
420             # to match vnf_generic
421             "local_mac": mac_address,
422             "local_ip": private_ip,
423         }
424
425     def _delete_key_file(self):
426         try:
427             utils.remove_file(self.key_filename)
428             utils.remove_file(self.key_filename + ".pub")
429         except OSError:
430             LOG.exception("There was an error removing the key file %s",
431                           self.key_filename)
432
433     def undeploy(self):
434         """undeploys stack from cloud"""
435         if self._flags.no_teardown:
436             LOG.info("Undeploying context '%s' SKIP", self.name)
437             return
438
439         if self.stack:
440             LOG.info("Undeploying context '%s' START", self.name)
441             self.stack.delete()
442             self.stack = None
443             LOG.info("Undeploying context '%s' DONE", self.name)
444
445             self._delete_key_file()
446
447         super(HeatContext, self).undeploy()
448
449     @staticmethod
450     def generate_routing_table(server):
451         routes = [
452             {
453                 "network": intf["network"],
454                 "netmask": intf["netmask"],
455                 "if": name,
456                 # We have to encode a None gateway as '' for Jinja2 to YAML conversion
457                 "gateway": intf["gateway_ip"] if intf["gateway_ip"] else '',
458             }
459             for name, intf in server.interfaces.items()
460         ]
461         return routes
462
    def _get_server(self, attr_name):
        """lookup server info by name from context
        attr_name: either a name for a server created by yardstick or a dict
        with attribute name mapping when using external heat templates

        Returns a dict with SSH credentials, IPs, interfaces and routing
        info, or None when the server is not part of this context.
        """
        # NOTE(review): collections.Mapping was removed in Python 3.10;
        # this should become collections.abc.Mapping once py2 support
        # is dropped -- confirm against the project's supported versions
        if isinstance(attr_name, collections.Mapping):
            node_name, cname = self.split_name(attr_name['name'])
            if cname is None or cname != self.name:
                return None

            # Create a dummy server instance for holding the *_ip attributes
            server = Server(node_name, self, {})
            # object() is a guaranteed-missing sentinel key -> default None
            server.public_ip = self.stack.outputs.get(
                attr_name.get("public_ip_attr", object()), None)

            server.private_ip = self.stack.outputs.get(
                attr_name.get("private_ip_attr", object()), None)
        else:
            try:
                server = self._server_map[attr_name]
            except KeyError:
                # fall back to the name with any "-<suffix>" stripped
                attr_name_no_suffix = attr_name.split("-")[0]
                server = self._server_map.get(attr_name_no_suffix, None)
            if server is None:
                return None

        pkey = pkg_resources.resource_string(
            'yardstick.resources',
            h_join('files/yardstick_key', self.name)).decode('utf-8')

        result = {
            "user": server.context.user,
            "pkey": pkey,
            "private_ip": server.private_ip,
            "interfaces": server.interfaces,
            "routing_table": self.generate_routing_table(server),
            # empty IPv6 routing table
            "nd_route_tbl": [],
            # we want to save the context name so we can generate pod.yaml
            "name": server.name,
        }
        # Target server may only have private_ip
        if server.public_ip:
            result["ip"] = server.public_ip

        return result
509
510     def _get_network(self, attr_name):
511         if not isinstance(attr_name, collections.Mapping):
512             network = self.networks.get(attr_name, None)
513
514         else:
515             # Only take the first key, value
516             key, value = next(iter(attr_name.items()), (None, None))
517             if key is None:
518                 return None
519             network_iter = (n for n in self.networks.values() if getattr(n, key) == value)
520             network = next(network_iter, None)
521
522         if network is None:
523             return None
524
525         result = {
526             "name": network.name,
527             "segmentation_id": network.segmentation_id,
528             "network_type": network.network_type,
529             "physical_network": network.physical_network,
530         }
531         return result