##############################################################################
# Copyright (c) 2015 Ericsson AB and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

import collections
import logging
import os
import errno
from collections import OrderedDict

import ipaddress
import pkg_resources

from yardstick.benchmark import contexts
from yardstick.benchmark.contexts.base import Context
from yardstick.benchmark.contexts.model import Network
from yardstick.benchmark.contexts.model import PlacementGroup, ServerGroup
from yardstick.benchmark.contexts.model import Server
from yardstick.benchmark.contexts.model import update_scheduler_hints
from yardstick.common import exceptions as y_exc
from yardstick.common.openstack_utils import get_shade_client
from yardstick.orchestrator.heat import HeatStack
from yardstick.orchestrator.heat import HeatTemplate
from yardstick.common import constants as consts
from yardstick.common import utils
from yardstick.common.utils import source_env
from yardstick.ssh import SSH
from yardstick.common import openstack_utils

LOG = logging.getLogger(__name__)

DEFAULT_HEAT_TIMEOUT = 3600


def join_args(sep, *args):
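    """Join the given args with the supplied separator."""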
    return sep.join(args)


def h_join(*args):
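    """Join the given args with '-' (the Heat resource/output naming convention)."""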
    return '-'.join(args)


class HeatContext(Context):
    """Class that represents a context in the logical model"""

    __context_type__ = contexts.CONTEXT_HEAT

    def __init__(self):
        self.stack = None
        self.networks = OrderedDict()
        self.heat_timeout = None
        self.servers = []
        self.placement_groups = []
        self.server_groups = []
        self.keypair_name = None
        self.secgroup_name = None
        self.security_group = None
        self._server_map = {}
        self.attrs = {}
        self._image = None
        self._flavor = None
        self.flavors = set()
        self._user = None
        self.template_file = None
        self.heat_parameters = None
        self.shade_client = None
        self.key_filename = None
        self.operator_client = None
        self.nodes = []
        self.controllers = []
        self.computes = []
        self.baremetals = []
        super(HeatContext, self).__init__()

    @staticmethod
    def assign_external_network(networks):
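        """Make sure one of the context networks carries the external network.

        If no network has "external_network" set, use the name from the
        EXTERNAL_NETWORK environment variable (default "net04_ext") and
        attach it to the 'mgmt' network if present, otherwise to the first
        network. Returns the networks as a name-sorted list of
        (name, attrs) tuples.
        """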
        sorted_networks = sorted(networks.items())
        external_network = os.environ.get("EXTERNAL_NETWORK", "net04_ext")

        have_external_network = any(net.get("external_network") for net in networks.values())
        if not have_external_network:
            # try looking for mgmt network first
            try:
                networks['mgmt']["external_network"] = external_network
            except KeyError:
                if sorted_networks:
                    # otherwise assign it to the first network, sorted by name
                    sorted_networks[0][1]["external_network"] = external_network

        return sorted_networks

    def init(self, attrs):
        """Initializes itself from the supplied arguments"""
        super(HeatContext, self).init(attrs)

        self.check_environment()
        self._user = attrs.get("user")

        self.template_file = attrs.get("heat_template")

        self.shade_client = openstack_utils.get_shade_client()
        self.operator_client = openstack_utils.get_shade_operator_client()

        try:
            self.read_pod_file(attrs)
        except IOError:
            LOG.warning("No pod file specified. NFVi metrics will be disabled")

        self.heat_timeout = attrs.get("timeout", DEFAULT_HEAT_TIMEOUT)
        if self.template_file:
            self.heat_parameters = attrs.get("heat_parameters")
            return

        self.keypair_name = h_join(self.name, "key")

        self.secgroup_name = h_join(self.name, "secgroup")

        self.security_group = attrs.get("security_group")

        self._image = attrs.get("image")

        self._flavor = attrs.get("flavor")

        self.placement_groups = [PlacementGroup(name, self, pg_attrs["policy"])
                                 for name, pg_attrs in attrs.get(
                                 "placement_groups", {}).items()]

        self.server_groups = [ServerGroup(name, self, sg_attrs["policy"])
                              for name, sg_attrs in attrs.get(
                              "server_groups", {}).items()]

        # we have to do this first, because we are injecting external_network
        # into the dict
        sorted_networks = self.assign_external_network(attrs["networks"])

        self.networks = OrderedDict(
            (name, Network(name, self, net_attrs)) for name, net_attrs in
            sorted_networks)

        for name, server_attrs in sorted(attrs["servers"].items()):
            server = Server(name, self, server_attrs)
            self.servers.append(server)
            self._server_map[server.dn] = server

        self.attrs = attrs

    def check_environment(self):
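        """Check that OpenStack credentials are available.

        If OS_AUTH_URL is not set in the environment, try to source the
        openrc file referenced by consts.OPENRC before giving up.
        """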
        try:
            os.environ['OS_AUTH_URL']
        except KeyError:
            try:
                source_env(consts.OPENRC)
            except IOError as e:
                if e.errno == errno.ENOENT:
                    LOG.error('OPENRC file not found')
                else:
                    LOG.error('OS_AUTH_URL not found')
                raise

    @property
    def image(self):
        """returns application's default image name"""
        return self._image

    @property
    def flavor(self):
        """returns application's default flavor name"""
        return self._flavor

    @property
    def user(self):
        """return login user name corresponding to image"""
        return self._user

    def _add_resources_to_template(self, template):
        """add to the template the resources represented by this context"""

        if self.flavor:
            if isinstance(self.flavor, dict):
                flavor = self.flavor.setdefault("name", self.name + "-flavor")
                template.add_flavor(**self.flavor)
                self.flavors.add(flavor)

        template.add_keypair(self.keypair_name, self.name)
        template.add_security_group(self.secgroup_name, self.security_group)

        for network in self.networks.values():
            # Using existing network
            if network.is_existing():
                continue
            template.add_network(network.stack_name,
                                 network.physical_network,
                                 network.provider,
                                 network.segmentation_id,
                                 network.port_security_enabled,
                                 network.network_type)
            template.add_subnet(network.subnet_stack_name, network.stack_name,
                                network.subnet_cidr,
                                network.enable_dhcp,
                                network.gateway_ip)

            if network.router:
                template.add_router(network.router.stack_name,
                                    network.router.external_gateway_info,
                                    network.subnet_stack_name)
                template.add_router_interface(network.router.stack_if_name,
                                              network.router.stack_name,
                                              network.subnet_stack_name)

        # create a list of servers sorted by increasing number of placement groups
        list_of_servers = sorted(self.servers,
                                 key=lambda s: len(s.placement_groups))

        #
        # add servers with scheduler hints derived from placement groups
        #

        # create list of servers with availability policy
        availability_servers = []
        for server in list_of_servers:
            for pg in server.placement_groups:
                if pg.policy == "availability":
                    availability_servers.append(server)
                    break

        for server in availability_servers:
            if isinstance(server.flavor, dict):
                try:
                    self.flavors.add(server.flavor["name"])
                except KeyError:
                    self.flavors.add(h_join(server.stack_name, "flavor"))

        # add servers with availability policy
        added_servers = []
        for server in availability_servers:
            scheduler_hints = {}
            for pg in server.placement_groups:
                update_scheduler_hints(scheduler_hints, added_servers, pg)
            # workaround for openstack nova bug, check JIRA: YARDSTICK-200
            # for details
            if len(availability_servers) == 2:
                if not scheduler_hints["different_host"]:
                    scheduler_hints.pop("different_host", None)
                    server.add_to_template(template,
                                           list(self.networks.values()),
                                           scheduler_hints)
                else:
                    scheduler_hints["different_host"] = \
                        scheduler_hints["different_host"][0]
                    server.add_to_template(template,
                                           list(self.networks.values()),
                                           scheduler_hints)
            else:
                server.add_to_template(template,
                                       list(self.networks.values()),
                                       scheduler_hints)
            added_servers.append(server.stack_name)

        # create list of servers with affinity policy
        affinity_servers = []
        for server in list_of_servers:
            for pg in server.placement_groups:
                if pg.policy == "affinity":
                    affinity_servers.append(server)
                    break

        # add servers with affinity policy
        for server in affinity_servers:
            if server.stack_name in added_servers:
                continue
            scheduler_hints = {}
            for pg in server.placement_groups:
                update_scheduler_hints(scheduler_hints, added_servers, pg)
            server.add_to_template(template, list(self.networks.values()),
                                   scheduler_hints)
            added_servers.append(server.stack_name)

        # add server group
        for sg in self.server_groups:
            template.add_server_group(sg.name, sg.policy)

        # add remaining servers with no placement group configured
        for server in list_of_servers:
            # TODO: placement_group and server_group should be combined
            if not server.placement_groups:
                scheduler_hints = {}
                # affinity/anti-affinity server group
                sg = server.server_group
                if sg:
                    scheduler_hints["group"] = {'get_resource': sg.name}
                server.add_to_template(template,
                                       list(self.networks.values()),
                                       scheduler_hints)

    def get_neutron_info(self):
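        """Fill in Neutron provider attributes for each context network.

        Looks up each network by stack name via the shade client and copies
        the provider segmentation id and network type onto the Network
        object.
        """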
        if not self.shade_client:
            self.shade_client = get_shade_client()

        networks = self.shade_client.list_networks()
        for network in self.networks.values():
            for neutron_net in (net for net in networks if net.name == network.stack_name):
                network.segmentation_id = neutron_net.get('provider:segmentation_id')
                # we already have physical_network
                # network.physical_network = neutron_net.get('provider:physical_network')
                network.network_type = neutron_net.get('provider:network_type')
                network.neutron_info = neutron_net

    def _create_new_stack(self, heat_template):
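        """Create the Heat stack, blocking until it is complete or times out."""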
        try:
            return heat_template.create(block=True,
                                        timeout=self.heat_timeout)
        except KeyboardInterrupt:
            raise y_exc.StackCreationInterrupt
        except Exception:
            LOG.exception("stack failed")
            # let the other failures happen, we want stack trace
            raise

    def _retrieve_existing_stack(self, stack_name):
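        """Return the existing Heat stack with the given name, or None."""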
        stack = HeatStack(stack_name)
        if stack.get():
            return stack
        else:
            LOG.warning("Stack %s does not exist", stack_name)
            return None

    def deploy(self):
        """deploys template into a stack using cloud"""
        LOG.info("Deploying context '%s' START", self.name)

        self.key_filename = ''.join(
            [consts.YARDSTICK_ROOT_PATH,
             'yardstick/resources/files/yardstick_key-',
             self.name])
        # Permissions may have changed since creation; this can be fixed. If we
        # overwrite the file, we lose future access to VMs using this key.
        # As long as the file exists, even if it is unreadable, keep it intact
        if not os.path.exists(self.key_filename):
            SSH.gen_keys(self.key_filename)

        heat_template = HeatTemplate(
            self.name, template_file=self.template_file,
            heat_parameters=self.heat_parameters,
            os_cloud_config=self._flags.os_cloud_config)

        if self.template_file is None:
            self._add_resources_to_template(heat_template)

        if self._flags.no_setup:
            # Try to get an existing stack, returns a stack or None
            self.stack = self._retrieve_existing_stack(self.name)
            if not self.stack:
                self.stack = self._create_new_stack(heat_template)
        else:
            self.stack = self._create_new_stack(heat_template)

        # TODO: use Neutron to get segmentation-id
        self.get_neutron_info()

        # copy some vital stack output into server objects
        for server in self.servers:
            if server.ports:
                self.add_server_port(server)

            if server.floating_ip:
                server.public_ip = \
                    self.stack.outputs[server.floating_ip["stack_name"]]

        LOG.info("Deploying context '%s' DONE", self.name)

    @staticmethod
    def _port_net_is_existing(port_info):
        net_flags = port_info.get('net_flags', {})
        return net_flags.get(consts.IS_EXISTING)

    @staticmethod
    def _port_net_is_public(port_info):
        net_flags = port_info.get('net_flags', {})
        return net_flags.get(consts.IS_PUBLIC)

    def add_server_port(self, server):
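        """Copy port-related stack outputs onto the server object.

        Sets the server's public/private IPs and builds an interface
        dictionary for every port that is not on a pre-existing network.
        """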
        server_ports = server.ports.values()
        for server_port in server_ports:
            port_info = server_port[0]
            port_ip = self.stack.outputs[port_info["stack_name"]]
            port_net_is_existing = self._port_net_is_existing(port_info)
            port_net_is_public = self._port_net_is_public(port_info)
            if port_net_is_existing and (port_net_is_public or
                                         len(server_ports) == 1):
                server.public_ip = port_ip
            if not server.private_ip or len(server_ports) == 1:
                server.private_ip = port_ip

        server.interfaces = {}
        for network_name, ports in server.ports.items():
            for port in ports:
                # port['port'] is either port name from mapping or default network_name
                if self._port_net_is_existing(port):
                    continue
                server.interfaces[port['port']] = self.make_interface_dict(network_name,
                                                                           port['port'],
                                                                           port['stack_name'],
                                                                           self.stack.outputs)
                server.override_ip(network_name, port)

    def make_interface_dict(self, network_name, port, stack_name, outputs):
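        """Build the interface description for one port from the stack outputs."""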
        private_ip = outputs[stack_name]
        mac_address = outputs[h_join(stack_name, "mac_address")]
        # these are attributes of the network, not the port
        output_subnet_cidr = outputs[h_join(self.name, network_name,
                                            'subnet', 'cidr')]

        # these are attributes of the network, not the port
        output_subnet_gateway = outputs[h_join(self.name, network_name,
                                               'subnet', 'gateway_ip')]

        return {
            # add default port name
            "name": port,
            "private_ip": private_ip,
            "subnet_id": outputs[h_join(stack_name, "subnet_id")],
            "subnet_cidr": output_subnet_cidr,
            "network": str(ipaddress.ip_network(output_subnet_cidr).network_address),
            "netmask": str(ipaddress.ip_network(output_subnet_cidr).netmask),
            "gateway_ip": output_subnet_gateway,
            "mac_address": mac_address,
            "device_id": outputs[h_join(stack_name, "device_id")],
            "network_id": outputs[h_join(stack_name, "network_id")],
            # this should be == vld_id for NSB tests
            "network_name": network_name,
            # to match vnf_generic
            "local_mac": mac_address,
            "local_ip": private_ip,
        }

    def _delete_key_file(self):
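        """Remove the SSH key pair generated for this context."""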
        try:
            utils.remove_file(self.key_filename)
            utils.remove_file(self.key_filename + ".pub")
        except OSError:
            LOG.exception("There was an error removing the key file %s",
                          self.key_filename)

    def undeploy(self):
        """undeploys stack from cloud"""
        if self._flags.no_teardown:
            LOG.info("Undeploying context '%s' SKIP", self.name)
            return

        if self.stack:
            LOG.info("Undeploying context '%s' START", self.name)
            self.stack.delete()
            self.stack = None
            LOG.info("Undeploying context '%s' DONE", self.name)

            self._delete_key_file()

        super(HeatContext, self).undeploy()

    @staticmethod
    def generate_routing_table(server):
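        """Build one routing-table entry per server interface."""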
        routes = [
            {
                "network": intf["network"],
                "netmask": intf["netmask"],
                "if": name,
                # We have to encode a None gateway as '' for Jinja2 to YAML conversion
                "gateway": intf["gateway_ip"] if intf["gateway_ip"] else '',
            }
            for name, intf in server.interfaces.items()
        ]
        return routes

    def _get_server(self, attr_name):
        """lookup server info by name from context
        attr_name: either a name for a server created by yardstick or a dict
        with attribute name mapping when using external heat templates
        """
        if isinstance(attr_name, collections.Mapping):
            node_name, cname = self.split_host_name(attr_name['name'])
            if cname is None or cname != self.name:
                return None

            # Create a dummy server instance for holding the *_ip attributes
            server = Server(node_name, self, {})
            server.public_ip = self.stack.outputs.get(
                attr_name.get("public_ip_attr", object()), None)

            server.private_ip = self.stack.outputs.get(
                attr_name.get("private_ip_attr", object()), None)
        else:
            try:
                server = self._server_map[attr_name]
            except KeyError:
                attr_name_no_suffix = attr_name.split("-")[0]
                server = self._server_map.get(attr_name_no_suffix, None)
            if server is None:
                return None

        pkey = pkg_resources.resource_string(
            'yardstick.resources',
            h_join('files/yardstick_key', self.name)).decode('utf-8')
        key_filename = pkg_resources.resource_filename(
            'yardstick.resources',
            h_join('files/yardstick_key', self.name))
        result = {
            "user": server.context.user,
            "pkey": pkey,
            "key_filename": key_filename,
            "private_ip": server.private_ip,
            "interfaces": server.interfaces,
            "routing_table": self.generate_routing_table(server),
            # empty IPv6 routing table
            "nd_route_tbl": [],
            # we want to save the context name so we can generate pod.yaml
            "name": server.name,
        }
        # Target server may only have private_ip
        if server.public_ip:
            result["ip"] = server.public_ip

        return result

    def _get_network(self, attr_name):
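        """Look up a context network by name or by a single attribute match
        and return its name and provider attributes, or None if not found.
        """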
        if not isinstance(attr_name, collections.Mapping):
            network = self.networks.get(attr_name, None)
        else:
            # Only take the first key, value
            key, value = next(iter(attr_name.items()), (None, None))
            if key is None:
                return None
            network_iter = (n for n in self.networks.values() if getattr(n, key) == value)
            network = next(network_iter, None)

        if network is None:
            return None

        result = {
            "name": network.name,
            "segmentation_id": network.segmentation_id,
            "network_type": network.network_type,
            "physical_network": network.physical_network,
        }
        return result

    def _get_physical_nodes(self):
        return self.nodes

    def _get_physical_node_for_server(self, server_name):
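        """Return "<node name>.<context name>" of the compute host running the server.

        Returns None if the server does not belong to this context or the
        hypervisor cannot be matched to a known physical node.
        """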
        node_name, ctx_name = self.split_host_name(server_name)
        if ctx_name is None or self.name != ctx_name:
            return None

        matching_nodes = [s for s in self.servers if s.name == node_name]
        if len(matching_nodes) == 0:
            return None

        server = openstack_utils.get_server(self.shade_client,
                                            name_or_id=server_name)

        if server:
            server = server.toDict()
            list_hypervisors = self.operator_client.list_hypervisors()

            for hypervisor in list_hypervisors:
                if hypervisor.hypervisor_hostname == \
                        server['OS-EXT-SRV-ATTR:hypervisor_hostname']:
                    for node in self.nodes:
                        if node['ip'] == hypervisor.host_ip:
                            return "{}.{}".format(node['name'], self._name)

        return None