Merge "Do not start collectd twice when SampleVNF is running on Baremetal"
[yardstick.git] / yardstick / benchmark / contexts / heat.py
1 ##############################################################################
2 # Copyright (c) 2015 Ericsson AB and others.
3 #
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
9
import collections
import collections.abc
import errno
import logging
import os
from collections import OrderedDict

import ipaddress
import pkg_resources

from yardstick.benchmark.contexts.base import Context
from yardstick.benchmark.contexts.model import Network
from yardstick.benchmark.contexts.model import PlacementGroup, ServerGroup
from yardstick.benchmark.contexts.model import Server
from yardstick.benchmark.contexts.model import update_scheduler_hints
from yardstick.common import constants as consts
from yardstick.common import exceptions as y_exc
from yardstick.common import utils
from yardstick.common.openstack_utils import get_shade_client
from yardstick.common.utils import source_env
from yardstick.orchestrator.heat import HeatStack
from yardstick.orchestrator.heat import HeatTemplate
from yardstick.ssh import SSH
32
33 LOG = logging.getLogger(__name__)
34
35 DEFAULT_HEAT_TIMEOUT = 3600
36
37
def join_args(sep, *args):
    """Concatenate the given string fragments with *sep* between them."""
    joined = sep.join(args)
    return joined
40
41
def h_join(*args):
    """Join the given name fragments with hyphens (Heat naming convention)."""
    separator = '-'
    return separator.join(args)
44
45
class HeatContext(Context):
    """Context that deploys scenario servers/networks as an OpenStack Heat stack.

    Builds a Heat template from the scenario attributes (or uses a
    user-supplied external template file) and manages the stack's full
    deploy/undeploy lifecycle, including the SSH key pair used to reach
    the created servers.
    """

    __context_type__ = "Heat"
50
51     def __init__(self):
52         self.stack = None
53         self.networks = OrderedDict()
54         self.heat_timeout = None
55         self.servers = []
56         self.placement_groups = []
57         self.server_groups = []
58         self.keypair_name = None
59         self.secgroup_name = None
60         self._server_map = {}
61         self.attrs = {}
62         self._image = None
63         self._flavor = None
64         self.flavors = set()
65         self._user = None
66         self.template_file = None
67         self.heat_parameters = None
68         self.shade_client = None
69         self.heat_timeout = None
70         self.key_filename = None
71         super(HeatContext, self).__init__()
72
73     @staticmethod
74     def assign_external_network(networks):
75         sorted_networks = sorted(networks.items())
76         external_network = os.environ.get("EXTERNAL_NETWORK", "net04_ext")
77
78         have_external_network = any(net.get("external_network") for net in networks.values())
79         if not have_external_network:
80             # try looking for mgmt network first
81             try:
82                 networks['mgmt']["external_network"] = external_network
83             except KeyError:
84                 if sorted_networks:
85                     # otherwise assign it to first network using os.environ
86                     sorted_networks[0][1]["external_network"] = external_network
87
88         return sorted_networks
89
    def init(self, attrs):
        """Initializes itself from the supplied arguments.

        When attrs contains a "heat_template" path, only the timeout and
        heat_parameters are read and the rest of the model (networks,
        servers, groups) is skipped -- the external template defines those.
        """
        super(HeatContext, self).init(attrs)

        # fails early if OpenStack credentials are not available
        self.check_environment()
        self._user = attrs.get("user")

        self.template_file = attrs.get("heat_template")

        self.heat_timeout = attrs.get("timeout", DEFAULT_HEAT_TIMEOUT)
        if self.template_file:
            # external template: parameters pass straight through to Heat
            self.heat_parameters = attrs.get("heat_parameters")
            return

        # per-context resource names, derived from the context name
        self.keypair_name = h_join(self.name, "key")
        self.secgroup_name = h_join(self.name, "secgroup")

        self._image = attrs.get("image")

        self._flavor = attrs.get("flavor")

        self.placement_groups = [PlacementGroup(name, self, pg_attrs["policy"])
                                 for name, pg_attrs in attrs.get(
                                 "placement_groups", {}).items()]

        self.server_groups = [ServerGroup(name, self, sg_attrs["policy"])
                              for name, sg_attrs in attrs.get(
                              "server_groups", {}).items()]

        # we have to do this first, because we are injecting external_network
        # into the dict
        sorted_networks = self.assign_external_network(attrs["networks"])

        self.networks = OrderedDict(
            (name, Network(name, self, net_attrs)) for name, net_attrs in
            sorted_networks)

        # build Server objects in deterministic (sorted) order and index
        # them by distinguished name for later lookup in _get_server()
        for name, server_attrs in sorted(attrs["servers"].items()):
            server = Server(name, self, server_attrs)
            self.servers.append(server)
            self._server_map[server.dn] = server

        self.attrs = attrs
133
    def check_environment(self):
        """Verify OpenStack credentials are available in the environment.

        If OS_AUTH_URL is not set, try sourcing the OPENRC file to populate
        the environment.  A missing OPENRC file is logged and re-raised.
        """
        try:
            os.environ['OS_AUTH_URL']
        except KeyError:
            try:
                source_env(consts.OPENRC)
            except IOError as e:
                # NOTE(review): this compares against EEXIST, but a missing
                # OPENRC file raises ENOENT, so the "file not found" branch
                # below is taken for every errno EXCEPT EEXIST.  ENOENT was
                # presumably intended -- the missing-file case does end up
                # logged and raised, but e.g. EACCES takes the same path.
                # Confirm intent before changing.
                if e.errno != errno.EEXIST:
                    LOG.error('OPENRC file not found')
                    raise
                else:
                    LOG.error('OS_AUTH_URL not found')
146
    @property
    def image(self):
        """Returns the default image name configured for this context (attrs["image"])."""
        return self._image
151
    @property
    def flavor(self):
        """Returns the default flavor (name or dict) configured for this context."""
        return self._flavor
156
    @property
    def user(self):
        """Returns the login user name corresponding to the context image."""
        return self._user
161
    def _add_resources_to_template(self, template):
        """Add to the template the resources represented by this context.

        Emits, in order: optional flavor, keypair, security group, networks
        (with subnets and optional routers), then servers.  Servers are
        added in three passes -- availability-policy servers, affinity-policy
        servers, then servers with no placement group -- so scheduler hints
        can reference servers added earlier.
        """

        if self.flavor:
            # only a dict flavor describes a flavor to create; a plain string
            # names an existing flavor and needs no template resource
            if isinstance(self.flavor, dict):
                flavor = self.flavor.setdefault("name", self.name + "-flavor")
                template.add_flavor(**self.flavor)
                self.flavors.add(flavor)

        template.add_keypair(self.keypair_name, self.name)
        template.add_security_group(self.secgroup_name)

        for network in self.networks.values():
            # Using existing network
            if network.is_existing():
                continue
            template.add_network(network.stack_name,
                                 network.physical_network,
                                 network.provider,
                                 network.segmentation_id,
                                 network.port_security_enabled,
                                 network.network_type)
            template.add_subnet(network.subnet_stack_name, network.stack_name,
                                network.subnet_cidr,
                                network.enable_dhcp,
                                network.gateway_ip)

            if network.router:
                template.add_router(network.router.stack_name,
                                    network.router.external_gateway_info,
                                    network.subnet_stack_name)
                template.add_router_interface(network.router.stack_if_name,
                                              network.router.stack_name,
                                              network.subnet_stack_name)

        # create a list of servers sorted by increasing no of placement groups
        list_of_servers = sorted(self.servers,
                                 key=lambda s: len(s.placement_groups))

        #
        # add servers with scheduler hints derived from placement groups
        #

        # create list of servers with availability policy
        availability_servers = []
        for server in list_of_servers:
            for pg in server.placement_groups:
                if pg.policy == "availability":
                    availability_servers.append(server)
                    break

        # register the flavors those servers will create inline
        for server in availability_servers:
            if isinstance(server.flavor, dict):
                try:
                    self.flavors.add(server.flavor["name"])
                except KeyError:
                    self.flavors.add(h_join(server.stack_name, "flavor"))

        # add servers with availability policy
        added_servers = []
        for server in availability_servers:
            scheduler_hints = {}
            for pg in server.placement_groups:
                update_scheduler_hints(scheduler_hints, added_servers, pg)
            # workaround for openstack nova bug, check JIRA: YARDSTICK-200
            # for details
            # NOTE(review): this indexes scheduler_hints["different_host"]
            # without a default -- presumably update_scheduler_hints always
            # sets that key for availability groups; confirm, otherwise a
            # KeyError is possible here.
            if len(availability_servers) == 2:
                if not scheduler_hints["different_host"]:
                    scheduler_hints.pop("different_host", None)
                    server.add_to_template(template,
                                           list(self.networks.values()),
                                           scheduler_hints)
                else:
                    # nova accepts a single host here, not a one-element list
                    scheduler_hints["different_host"] = \
                        scheduler_hints["different_host"][0]
                    server.add_to_template(template,
                                           list(self.networks.values()),
                                           scheduler_hints)
            else:
                server.add_to_template(template,
                                       list(self.networks.values()),
                                       scheduler_hints)
            added_servers.append(server.stack_name)

        # create list of servers with affinity policy
        affinity_servers = []
        for server in list_of_servers:
            for pg in server.placement_groups:
                if pg.policy == "affinity":
                    affinity_servers.append(server)
                    break

        # add servers with affinity policy (skipping any already added above)
        for server in affinity_servers:
            if server.stack_name in added_servers:
                continue
            scheduler_hints = {}
            for pg in server.placement_groups:
                update_scheduler_hints(scheduler_hints, added_servers, pg)
            server.add_to_template(template, list(self.networks.values()),
                                   scheduler_hints)
            added_servers.append(server.stack_name)

        # add server group
        for sg in self.server_groups:
            template.add_server_group(sg.name, sg.policy)

        # add remaining servers with no placement group configured
        for server in list_of_servers:
            # TODO placement_group and server_group should combine
            if not server.placement_groups:
                scheduler_hints = {}
                # affinity/anti-aff server group
                sg = server.server_group
                if sg:
                    scheduler_hints["group"] = {'get_resource': sg.name}
                server.add_to_template(template,
                                       list(self.networks.values()),
                                       scheduler_hints)
281
282     def get_neutron_info(self):
283         if not self.shade_client:
284             self.shade_client = get_shade_client()
285
286         networks = self.shade_client.list_networks()
287         for network in self.networks.values():
288             for neutron_net in (net for net in networks if net.name == network.stack_name):
289                     network.segmentation_id = neutron_net.get('provider:segmentation_id')
290                     # we already have physical_network
291                     # network.physical_network = neutron_net.get('provider:physical_network')
292                     network.network_type = neutron_net.get('provider:network_type')
293                     network.neutron_info = neutron_net
294
295     def _create_new_stack(self, heat_template):
296          try:
297              return heat_template.create(block=True,
298                                          timeout=self.heat_timeout)
299          except KeyboardInterrupt:
300              raise y_exc.StackCreationInterrupt
301          except Exception:
302              LOG.exception("stack failed")
303              # let the other failures happen, we want stack trace
304              raise
305
306     def _retrieve_existing_stack(self, stack_name):
307         stack = HeatStack(stack_name)
308         if stack.get():
309             return stack
310         else:
311             LOG.warning("Stack %s does not exist", self.name)
312             return None
313
    def deploy(self):
        """Deploys the template into a stack using the cloud.

        Generates (or reuses) the context SSH key pair, builds the Heat
        template (unless an external one was supplied), creates or re-attaches
        to the stack, then copies port IPs and floating IPs from the stack
        outputs into the Server objects.
        """
        LOG.info("Deploying context '%s' START", self.name)

        self.key_filename = ''.join(
            [consts.YARDSTICK_ROOT_PATH,
             'yardstick/resources/files/yardstick_key-',
             self.name])
        # Permissions may have changed since creation; this can be fixed. If we
        # overwrite the file, we lose future access to VMs using this key.
        # As long as the file exists, even if it is unreadable, keep it intact
        if not os.path.exists(self.key_filename):
            SSH.gen_keys(self.key_filename)

        heat_template = HeatTemplate(
            self.name, template_file=self.template_file,
            heat_parameters=self.heat_parameters,
            os_cloud_config=self._flags.os_cloud_config)

        # only generate resources when no external template was supplied
        if self.template_file is None:
            self._add_resources_to_template(heat_template)

        if self._flags.no_setup:
            # Try to get an existing stack, returns a stack or None
            self.stack = self._retrieve_existing_stack(self.name)
            if not self.stack:
                self.stack = self._create_new_stack(heat_template)

        else:
            self.stack = self._create_new_stack(heat_template)

        # TODO: use Neutron to get segmentation-id
        self.get_neutron_info()

        # copy some vital stack output into server objects
        for server in self.servers:
            if server.ports:
                self.add_server_port(server)

            if server.floating_ip:
                server.public_ip = \
                    self.stack.outputs[server.floating_ip["stack_name"]]

        LOG.info("Deploying context '%s' DONE", self.name)
358
359     @staticmethod
360     def _port_net_is_existing(port_info):
361         net_flags = port_info.get('net_flags', {})
362         return net_flags.get(consts.IS_EXISTING)
363
364     @staticmethod
365     def _port_net_is_public(port_info):
366         net_flags = port_info.get('net_flags', {})
367         return net_flags.get(consts.IS_PUBLIC)
368
    def add_server_port(self, server):
        """Populate *server*'s IPs and interface dicts from stack outputs.

        Chooses public/private IPs from the server's port outputs, then
        builds an interface dict for every port that is not on a
        pre-existing network.
        """
        server_ports = server.ports.values()
        for server_port in server_ports:
            # each value is a list of port dicts; the first entry carries the
            # stack output key for the port's IP address
            port_info = server_port[0]
            port_ip = self.stack.outputs[port_info["stack_name"]]
            port_net_is_existing = self._port_net_is_existing(port_info)
            port_net_is_public = self._port_net_is_public(port_info)
            if port_net_is_existing and (port_net_is_public or
                                         len(server_ports) == 1):
                server.public_ip = port_ip
            # first port wins as private IP unless there is only one port
            if not server.private_ip or len(server_ports) == 1:
                server.private_ip = port_ip

        server.interfaces = {}
        for network_name, ports in server.ports.items():
            for port in ports:
                # port['port'] is either port name from mapping or default network_name
                if self._port_net_is_existing(port):
                    continue
                server.interfaces[port['port']] = self.make_interface_dict(network_name,
                                                                           port['port'],
                                                                           port['stack_name'],
                                                                           self.stack.outputs)
                server.override_ip(network_name, port)
393
394     def make_interface_dict(self, network_name, port, stack_name, outputs):
395         private_ip = outputs[stack_name]
396         mac_address = outputs[h_join(stack_name, "mac_address")]
397         # these are attributes of the network, not the port
398         output_subnet_cidr = outputs[h_join(self.name, network_name,
399                                             'subnet', 'cidr')]
400
401         # these are attributes of the network, not the port
402         output_subnet_gateway = outputs[h_join(self.name, network_name,
403                                                'subnet', 'gateway_ip')]
404
405         return {
406             # add default port name
407             "name": port,
408             "private_ip": private_ip,
409             "subnet_id": outputs[h_join(stack_name, "subnet_id")],
410             "subnet_cidr": output_subnet_cidr,
411             "network": str(ipaddress.ip_network(output_subnet_cidr).network_address),
412             "netmask": str(ipaddress.ip_network(output_subnet_cidr).netmask),
413             "gateway_ip": output_subnet_gateway,
414             "mac_address": mac_address,
415             "device_id": outputs[h_join(stack_name, "device_id")],
416             "network_id": outputs[h_join(stack_name, "network_id")],
417             # this should be == vld_id for NSB tests
418             "network_name": network_name,
419             # to match vnf_generic
420             "local_mac": mac_address,
421             "local_ip": private_ip,
422         }
423
424     def _delete_key_file(self):
425         try:
426             utils.remove_file(self.key_filename)
427             utils.remove_file(self.key_filename + ".pub")
428         except OSError:
429             LOG.exception("There was an error removing the key file %s",
430                           self.key_filename)
431
    def undeploy(self):
        """Undeploys the stack from the cloud.

        Honors the no-teardown flag (leaves the stack and key pair in place
        for later re-attachment) and always delegates to the base class for
        common cleanup.
        """
        if self._flags.no_teardown:
            LOG.info("Undeploying context '%s' SKIP", self.name)
            return

        if self.stack:
            LOG.info("Undeploying context '%s' START", self.name)
            self.stack.delete()
            self.stack = None
            LOG.info("Undeploying context '%s' DONE", self.name)

            # the key pair is only useful while the stack's VMs exist
            self._delete_key_file()

        super(HeatContext, self).undeploy()
447
448     @staticmethod
449     def generate_routing_table(server):
450         routes = [
451             {
452                 "network": intf["network"],
453                 "netmask": intf["netmask"],
454                 "if": name,
455                 # We have to encode a None gateway as '' for Jinja2 to YAML conversion
456                 "gateway": intf["gateway_ip"] if intf["gateway_ip"] else '',
457             }
458             for name, intf in server.interfaces.items()
459         ]
460         return routes
461
462     def _get_server(self, attr_name):
463         """lookup server info by name from context
464         attr_name: either a name for a server created by yardstick or a dict
465         with attribute name mapping when using external heat templates
466         """
467         if isinstance(attr_name, collections.Mapping):
468             node_name, cname = self.split_name(attr_name['name'])
469             if cname is None or cname != self.name:
470                 return None
471
472             # Create a dummy server instance for holding the *_ip attributes
473             server = Server(node_name, self, {})
474             server.public_ip = self.stack.outputs.get(
475                 attr_name.get("public_ip_attr", object()), None)
476
477             server.private_ip = self.stack.outputs.get(
478                 attr_name.get("private_ip_attr", object()), None)
479         else:
480             try:
481                 server = self._server_map[attr_name]
482             except KeyError:
483                 attr_name_no_suffix = attr_name.split("-")[0]
484                 server = self._server_map.get(attr_name_no_suffix, None)
485             if server is None:
486                 return None
487
488         pkey = pkg_resources.resource_string(
489             'yardstick.resources',
490             h_join('files/yardstick_key', self.name)).decode('utf-8')
491
492         result = {
493             "user": server.context.user,
494             "pkey": pkey,
495             "private_ip": server.private_ip,
496             "interfaces": server.interfaces,
497             "routing_table": self.generate_routing_table(server),
498             # empty IPv6 routing table
499             "nd_route_tbl": [],
500             # we want to save the contex name so we can generate pod.yaml
501             "name": server.name,
502         }
503         # Target server may only have private_ip
504         if server.public_ip:
505             result["ip"] = server.public_ip
506
507         return result
508
509     def _get_network(self, attr_name):
510         if not isinstance(attr_name, collections.Mapping):
511             network = self.networks.get(attr_name, None)
512
513         else:
514             # Only take the first key, value
515             key, value = next(iter(attr_name.items()), (None, None))
516             if key is None:
517                 return None
518             network_iter = (n for n in self.networks.values() if getattr(n, key) == value)
519             network = next(network_iter, None)
520
521         if network is None:
522             return None
523
524         result = {
525             "name": network.name,
526             "segmentation_id": network.segmentation_id,
527             "network_type": network.network_type,
528             "physical_network": network.physical_network,
529         }
530         return result