Merge "kubernetes_utils: avoid 404 error code in delete_custom_resource_definition()"
[yardstick.git] / yardstick / benchmark / contexts / heat.py
1 ##############################################################################
2 # Copyright (c) 2015 Ericsson AB and others.
3 #
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
9
import collections
import collections.abc
import errno
import logging
import os
from collections import OrderedDict

import ipaddress
import pkg_resources

from yardstick.benchmark import contexts
from yardstick.benchmark.contexts.base import Context
from yardstick.benchmark.contexts.model import Network
from yardstick.benchmark.contexts.model import PlacementGroup, ServerGroup
from yardstick.benchmark.contexts.model import Server
from yardstick.benchmark.contexts.model import update_scheduler_hints
from yardstick.common import constants as consts
from yardstick.common import exceptions as y_exc
from yardstick.common import openstack_utils
from yardstick.common import utils
from yardstick.common.openstack_utils import get_shade_client
from yardstick.common.utils import source_env
from yardstick.orchestrator.heat import HeatStack
from yardstick.orchestrator.heat import HeatTemplate
from yardstick.ssh import SSH
34
35 LOG = logging.getLogger(__name__)
36
37 DEFAULT_HEAT_TIMEOUT = 3600
38
39
def join_args(sep, *args):
    """Concatenate all positional *args* into one string separated by *sep*."""
    joined = sep.join(args)
    return joined
42
43
def h_join(*args):
    """Join *args* with ``-`` (the naming convention for Heat resources)."""
    return "-".join(args)
46
47
class HeatContext(Context):
    """Class that represents a context in the logical model"""

    __context_type__ = contexts.CONTEXT_HEAT

    def __init__(self):
        """Set up empty state; real configuration happens in init().

        Fix: the original assigned ``self.heat_timeout`` and
        ``self.shade_client`` twice each; the duplicates are removed.
        """
        self.stack = None
        self.networks = OrderedDict()
        self.heat_timeout = None
        self.servers = []
        self.placement_groups = []
        self.server_groups = []
        self.keypair_name = None
        self.secgroup_name = None
        self._server_map = {}
        self.attrs = {}
        self._image = None
        self._flavor = None
        self.flavors = set()
        self._user = None
        self.template_file = None
        self.heat_parameters = None
        self.shade_client = None
        self.operator_client = None
        self.key_filename = None
        self.nodes = []
        self.controllers = []
        self.computes = []
        self.baremetals = []
        super(HeatContext, self).__init__()
80
81     @staticmethod
82     def assign_external_network(networks):
83         sorted_networks = sorted(networks.items())
84         external_network = os.environ.get("EXTERNAL_NETWORK", "net04_ext")
85
86         have_external_network = any(net.get("external_network") for net in networks.values())
87         if not have_external_network:
88             # try looking for mgmt network first
89             try:
90                 networks['mgmt']["external_network"] = external_network
91             except KeyError:
92                 if sorted_networks:
93                     # otherwise assign it to first network using os.environ
94                     sorted_networks[0][1]["external_network"] = external_network
95
96         return sorted_networks
97
    def init(self, attrs):
        """Initializes itself from the supplied arguments"""
        super(HeatContext, self).init(attrs)

        self.check_environment()
        self._user = attrs.get("user")

        self.template_file = attrs.get("heat_template")

        self.shade_client = openstack_utils.get_shade_client()
        self.operator_client = openstack_utils.get_shade_operator_client()

        # the pod file is optional; without it NFVi metrics are unavailable
        try:
            self.read_pod_file(attrs)
        except IOError:
            LOG.warning("No pod file specified. NVFi metrics will be disabled")

        self.heat_timeout = attrs.get("timeout", DEFAULT_HEAT_TIMEOUT)
        # external template: only record its parameters, nothing to build here
        if self.template_file:
            self.heat_parameters = attrs.get("heat_parameters")
            return

        self.keypair_name = h_join(self.name, "key")
        self.secgroup_name = h_join(self.name, "secgroup")

        self._image = attrs.get("image")

        self._flavor = attrs.get("flavor")

        self.placement_groups = [PlacementGroup(name, self, pg_attrs["policy"])
                                 for name, pg_attrs in attrs.get(
                                 "placement_groups", {}).items()]

        self.server_groups = [ServerGroup(name, self, sg_attrs["policy"])
                              for name, sg_attrs in attrs.get(
                              "server_groups", {}).items()]

        # we have to do this first, because we are injecting external_network
        # into the dict
        sorted_networks = self.assign_external_network(attrs["networks"])

        self.networks = OrderedDict(
            (name, Network(name, self, net_attrs)) for name, net_attrs in
            sorted_networks)

        # sorted for deterministic server ordering; _server_map is keyed by
        # the server's distinguished name (server.dn)
        for name, server_attrs in sorted(attrs["servers"].items()):
            server = Server(name, self, server_attrs)
            self.servers.append(server)
            self._server_map[server.dn] = server

        self.attrs = attrs
149
    def check_environment(self):
        """Ensure OpenStack credentials are available.

        If OS_AUTH_URL is not in the environment, try sourcing the OpenRC
        file; re-raise when that file cannot be read.
        """
        try:
            os.environ['OS_AUTH_URL']
        except KeyError:
            try:
                source_env(consts.OPENRC)
            except IOError as e:
                # NOTE(review): comparing against EEXIST looks suspicious —
                # a missing OpenRC file raises ENOENT, so the "file not
                # found" branch fires for any errno except EEXIST. Confirm
                # the intended errno before changing behavior.
                if e.errno != errno.EEXIST:
                    LOG.error('OPENRC file not found')
                    raise
                else:
                    LOG.error('OS_AUTH_URL not found')
162
    @property
    def image(self):
        """returns application's default image name"""
        return self._image

    @property
    def flavor(self):
        """returns application's default flavor name (str, or dict spec)"""
        return self._flavor

    @property
    def user(self):
        """return login user name corresponding to image"""
        return self._user
177
    def _add_resources_to_template(self, template):
        """add to the template the resources represented by this context

        Adds (in order): flavor, keypair, security group, networks with
        subnets/routers, then servers — availability-policy servers first,
        then affinity-policy servers, then server groups, and finally
        servers with no placement group at all.
        """

        if self.flavor:
            if isinstance(self.flavor, dict):
                # default flavor name derives from the context name
                flavor = self.flavor.setdefault("name", self.name + "-flavor")
                template.add_flavor(**self.flavor)
                self.flavors.add(flavor)

        template.add_keypair(self.keypair_name, self.name)
        template.add_security_group(self.secgroup_name)

        for network in self.networks.values():
            # Using existing network
            if network.is_existing():
                continue
            template.add_network(network.stack_name,
                                 network.physical_network,
                                 network.provider,
                                 network.segmentation_id,
                                 network.port_security_enabled,
                                 network.network_type)
            template.add_subnet(network.subnet_stack_name, network.stack_name,
                                network.subnet_cidr,
                                network.enable_dhcp,
                                network.gateway_ip)

            if network.router:
                template.add_router(network.router.stack_name,
                                    network.router.external_gateway_info,
                                    network.subnet_stack_name)
                template.add_router_interface(network.router.stack_if_name,
                                              network.router.stack_name,
                                              network.subnet_stack_name)

        # create a list of servers sorted by increasing no of placement groups
        list_of_servers = sorted(self.servers,
                                 key=lambda s: len(s.placement_groups))

        #
        # add servers with scheduler hints derived from placement groups
        #

        # create list of servers with availability policy
        availability_servers = []
        for server in list_of_servers:
            for pg in server.placement_groups:
                if pg.policy == "availability":
                    availability_servers.append(server)
                    break

        # record per-server flavors so they can be created alongside
        for server in availability_servers:
            if isinstance(server.flavor, dict):
                try:
                    self.flavors.add(server.flavor["name"])
                except KeyError:
                    self.flavors.add(h_join(server.stack_name, "flavor"))

        # add servers with availability policy
        added_servers = []
        for server in availability_servers:
            scheduler_hints = {}
            for pg in server.placement_groups:
                update_scheduler_hints(scheduler_hints, added_servers, pg)
            # workaround for openstack nova bug, check JIRA: YARDSTICK-200
            # for details
            # NOTE(review): assumes "different_host" is always present in the
            # hints for the two-server case — confirm update_scheduler_hints
            # guarantees this, otherwise this raises KeyError.
            if len(availability_servers) == 2:
                if not scheduler_hints["different_host"]:
                    scheduler_hints.pop("different_host", None)
                    server.add_to_template(template,
                                           list(self.networks.values()),
                                           scheduler_hints)
                else:
                    # nova accepts a single host here, not a one-element list
                    scheduler_hints["different_host"] = \
                        scheduler_hints["different_host"][0]
                    server.add_to_template(template,
                                           list(self.networks.values()),
                                           scheduler_hints)
            else:
                server.add_to_template(template,
                                       list(self.networks.values()),
                                       scheduler_hints)
            added_servers.append(server.stack_name)

        # create list of servers with affinity policy
        affinity_servers = []
        for server in list_of_servers:
            for pg in server.placement_groups:
                if pg.policy == "affinity":
                    affinity_servers.append(server)
                    break

        # add servers with affinity policy
        for server in affinity_servers:
            if server.stack_name in added_servers:
                continue
            scheduler_hints = {}
            for pg in server.placement_groups:
                update_scheduler_hints(scheduler_hints, added_servers, pg)
            server.add_to_template(template, list(self.networks.values()),
                                   scheduler_hints)
            added_servers.append(server.stack_name)

        # add server group
        for sg in self.server_groups:
            template.add_server_group(sg.name, sg.policy)

        # add remaining servers with no placement group configured
        for server in list_of_servers:
            # TODO placement_group and server_group should combine
            if not server.placement_groups:
                scheduler_hints = {}
                # affinity/anti-aff server group
                sg = server.server_group
                if sg:
                    scheduler_hints["group"] = {'get_resource': sg.name}
                server.add_to_template(template,
                                       list(self.networks.values()),
                                       scheduler_hints)
297
298     def get_neutron_info(self):
299         if not self.shade_client:
300             self.shade_client = get_shade_client()
301
302         networks = self.shade_client.list_networks()
303         for network in self.networks.values():
304             for neutron_net in (net for net in networks if net.name == network.stack_name):
305                     network.segmentation_id = neutron_net.get('provider:segmentation_id')
306                     # we already have physical_network
307                     # network.physical_network = neutron_net.get('provider:physical_network')
308                     network.network_type = neutron_net.get('provider:network_type')
309                     network.neutron_info = neutron_net
310
311     def _create_new_stack(self, heat_template):
312          try:
313              return heat_template.create(block=True,
314                                          timeout=self.heat_timeout)
315          except KeyboardInterrupt:
316              raise y_exc.StackCreationInterrupt
317          except Exception:
318              LOG.exception("stack failed")
319              # let the other failures happen, we want stack trace
320              raise
321
322     def _retrieve_existing_stack(self, stack_name):
323         stack = HeatStack(stack_name)
324         if stack.get():
325             return stack
326         else:
327             LOG.warning("Stack %s does not exist", self.name)
328             return None
329
    def deploy(self):
        """deploys template into a stack using cloud"""
        LOG.info("Deploying context '%s' START", self.name)

        # per-context SSH keypair path under the yardstick resources tree
        self.key_filename = ''.join(
            [consts.YARDSTICK_ROOT_PATH,
             'yardstick/resources/files/yardstick_key-',
             self.name])
        # Permissions may have changed since creation; this can be fixed. If we
        # overwrite the file, we lose future access to VMs using this key.
        # As long as the file exists, even if it is unreadable, keep it intact
        if not os.path.exists(self.key_filename):
            SSH.gen_keys(self.key_filename)

        heat_template = HeatTemplate(
            self.name, template_file=self.template_file,
            heat_parameters=self.heat_parameters,
            os_cloud_config=self._flags.os_cloud_config)

        # external templates are used as-is; otherwise build resources here
        if self.template_file is None:
            self._add_resources_to_template(heat_template)

        if self._flags.no_setup:
            # Try to get an existing stack, returns a stack or None
            self.stack = self._retrieve_existing_stack(self.name)
            if not self.stack:
                self.stack = self._create_new_stack(heat_template)

        else:
            self.stack = self._create_new_stack(heat_template)

        # TODO: use Neutron to get segmentation-id
        self.get_neutron_info()

        # copy some vital stack output into server objects
        for server in self.servers:
            if server.ports:
                self.add_server_port(server)

            if server.floating_ip:
                server.public_ip = \
                    self.stack.outputs[server.floating_ip["stack_name"]]

        LOG.info("Deploying context '%s' DONE", self.name)
374
375     @staticmethod
376     def _port_net_is_existing(port_info):
377         net_flags = port_info.get('net_flags', {})
378         return net_flags.get(consts.IS_EXISTING)
379
380     @staticmethod
381     def _port_net_is_public(port_info):
382         net_flags = port_info.get('net_flags', {})
383         return net_flags.get(consts.IS_PUBLIC)
384
    def add_server_port(self, server):
        """Fill server public/private IPs and interfaces from stack outputs.

        Mutates *server* in place: sets public_ip / private_ip from the
        per-port stack outputs, then builds server.interfaces for every
        port on a Heat-created (non-existing) network.
        """
        server_ports = server.ports.values()
        for server_port in server_ports:
            # only the first port of each network carries the primary IP
            port_info = server_port[0]
            port_ip = self.stack.outputs[port_info["stack_name"]]
            port_net_is_existing = self._port_net_is_existing(port_info)
            port_net_is_public = self._port_net_is_public(port_info)
            if port_net_is_existing and (port_net_is_public or
                                         len(server_ports) == 1):
                server.public_ip = port_ip
            # first port wins unless the server has a single port
            if not server.private_ip or len(server_ports) == 1:
                server.private_ip = port_ip

        server.interfaces = {}
        for network_name, ports in server.ports.items():
            for port in ports:
                # port['port'] is either port name from mapping or default network_name
                if self._port_net_is_existing(port):
                    continue
                server.interfaces[port['port']] = self.make_interface_dict(network_name,
                                                                           port['port'],
                                                                           port['stack_name'],
                                                                           self.stack.outputs)
                server.override_ip(network_name, port)
409
410     def make_interface_dict(self, network_name, port, stack_name, outputs):
411         private_ip = outputs[stack_name]
412         mac_address = outputs[h_join(stack_name, "mac_address")]
413         # these are attributes of the network, not the port
414         output_subnet_cidr = outputs[h_join(self.name, network_name,
415                                             'subnet', 'cidr')]
416
417         # these are attributes of the network, not the port
418         output_subnet_gateway = outputs[h_join(self.name, network_name,
419                                                'subnet', 'gateway_ip')]
420
421         return {
422             # add default port name
423             "name": port,
424             "private_ip": private_ip,
425             "subnet_id": outputs[h_join(stack_name, "subnet_id")],
426             "subnet_cidr": output_subnet_cidr,
427             "network": str(ipaddress.ip_network(output_subnet_cidr).network_address),
428             "netmask": str(ipaddress.ip_network(output_subnet_cidr).netmask),
429             "gateway_ip": output_subnet_gateway,
430             "mac_address": mac_address,
431             "device_id": outputs[h_join(stack_name, "device_id")],
432             "network_id": outputs[h_join(stack_name, "network_id")],
433             # this should be == vld_id for NSB tests
434             "network_name": network_name,
435             # to match vnf_generic
436             "local_mac": mac_address,
437             "local_ip": private_ip,
438         }
439
440     def _delete_key_file(self):
441         try:
442             utils.remove_file(self.key_filename)
443             utils.remove_file(self.key_filename + ".pub")
444         except OSError:
445             LOG.exception("There was an error removing the key file %s",
446                           self.key_filename)
447
448     def undeploy(self):
449         """undeploys stack from cloud"""
450         if self._flags.no_teardown:
451             LOG.info("Undeploying context '%s' SKIP", self.name)
452             return
453
454         if self.stack:
455             LOG.info("Undeploying context '%s' START", self.name)
456             self.stack.delete()
457             self.stack = None
458             LOG.info("Undeploying context '%s' DONE", self.name)
459
460             self._delete_key_file()
461
462         super(HeatContext, self).undeploy()
463
464     @staticmethod
465     def generate_routing_table(server):
466         routes = [
467             {
468                 "network": intf["network"],
469                 "netmask": intf["netmask"],
470                 "if": name,
471                 # We have to encode a None gateway as '' for Jinja2 to YAML conversion
472                 "gateway": intf["gateway_ip"] if intf["gateway_ip"] else '',
473             }
474             for name, intf in server.interfaces.items()
475         ]
476         return routes
477
478     def _get_server(self, attr_name):
479         """lookup server info by name from context
480         attr_name: either a name for a server created by yardstick or a dict
481         with attribute name mapping when using external heat templates
482         """
483         if isinstance(attr_name, collections.Mapping):
484             node_name, cname = self.split_host_name(attr_name['name'])
485             if cname is None or cname != self.name:
486                 return None
487
488             # Create a dummy server instance for holding the *_ip attributes
489             server = Server(node_name, self, {})
490             server.public_ip = self.stack.outputs.get(
491                 attr_name.get("public_ip_attr", object()), None)
492
493             server.private_ip = self.stack.outputs.get(
494                 attr_name.get("private_ip_attr", object()), None)
495         else:
496             try:
497                 server = self._server_map[attr_name]
498             except KeyError:
499                 attr_name_no_suffix = attr_name.split("-")[0]
500                 server = self._server_map.get(attr_name_no_suffix, None)
501             if server is None:
502                 return None
503
504         pkey = pkg_resources.resource_string(
505             'yardstick.resources',
506             h_join('files/yardstick_key', self.name)).decode('utf-8')
507
508         result = {
509             "user": server.context.user,
510             "pkey": pkey,
511             "private_ip": server.private_ip,
512             "interfaces": server.interfaces,
513             "routing_table": self.generate_routing_table(server),
514             # empty IPv6 routing table
515             "nd_route_tbl": [],
516             # we want to save the contex name so we can generate pod.yaml
517             "name": server.name,
518         }
519         # Target server may only have private_ip
520         if server.public_ip:
521             result["ip"] = server.public_ip
522
523         return result
524
525     def _get_network(self, attr_name):
526         if not isinstance(attr_name, collections.Mapping):
527             network = self.networks.get(attr_name, None)
528
529         else:
530             # Only take the first key, value
531             key, value = next(iter(attr_name.items()), (None, None))
532             if key is None:
533                 return None
534             network_iter = (n for n in self.networks.values() if getattr(n, key) == value)
535             network = next(network_iter, None)
536
537         if network is None:
538             return None
539
540         result = {
541             "name": network.name,
542             "segmentation_id": network.segmentation_id,
543             "network_type": network.network_type,
544             "physical_network": network.physical_network,
545         }
546         return result
547
    def _get_physical_nodes(self):
        """Return the physical nodes read from the pod file (may be empty)."""
        return self.nodes
550
    def _get_physical_node_for_server(self, server_name):
        """Return "<node_name>.<context>" for the compute hosting the server.

        Returns None when the server does not belong to this context, is
        unknown, or no pod node matches the hosting hypervisor's IP.
        """
        node_name, ctx_name = self.split_host_name(server_name)
        if ctx_name is None or self.name != ctx_name:
            return None

        matching_nodes = [s for s in self.servers if s.name == node_name]
        if len(matching_nodes) == 0:
            return None

        server = openstack_utils.get_server(self.shade_client,
                                            name_or_id=server_name)

        if server:
            server = server.toDict()
            list_hypervisors = self.operator_client.list_hypervisors()

            # match the hosting hypervisor, then map it to a pod node by IP
            for hypervisor in list_hypervisors:
                if hypervisor.hypervisor_hostname == server['OS-EXT-SRV-ATTR:hypervisor_hostname']:
                    for node in self.nodes:
                        if node['ip'] == hypervisor.host_ip:
                            # NOTE(review): uses self._name while the rest of
                            # the class uses self.name — presumably equivalent
                            # via the Context base class; confirm.
                            return "{}.{}".format(node['name'], self._name)

        return None