Merge "Add testsuite "os-odl-ovs-noha""
[yardstick.git] / yardstick / benchmark / contexts / heat.py
1 ##############################################################################
2 # Copyright (c) 2015 Ericsson AB and others.
3 #
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
9
import collections
import collections.abc
import errno
import logging
import os
from collections import OrderedDict

import ipaddress
import pkg_resources

from yardstick.benchmark.contexts.base import Context
from yardstick.benchmark.contexts.model import Network
from yardstick.benchmark.contexts.model import PlacementGroup, ServerGroup
from yardstick.benchmark.contexts.model import Server
from yardstick.benchmark.contexts.model import update_scheduler_hints
from yardstick.common import constants as consts
from yardstick.common import exceptions as y_exc
from yardstick.common import openstack_utils
from yardstick.common import utils
from yardstick.common.openstack_utils import get_shade_client
from yardstick.common.utils import source_env
from yardstick.orchestrator.heat import HeatStack
from yardstick.orchestrator.heat import HeatTemplate
from yardstick.ssh import SSH
33
LOG = logging.getLogger(__name__)

# Default time (seconds) to wait for Heat stack create/delete operations.
DEFAULT_HEAT_TIMEOUT = 3600
37
38
def join_args(sep, *args):
    """Return every positional argument concatenated with *sep* between."""
    return str.join(sep, args)
41
42
def h_join(*args):
    """Hyphen-join *args*; used throughout to build Heat resource names."""
    return str.join("-", args)
45
46
47 class HeatContext(Context):
48     """Class that represents a context in the logical model"""
49
50     __context_type__ = "Heat"
51
52     def __init__(self):
53         self.stack = None
54         self.networks = OrderedDict()
55         self.heat_timeout = None
56         self.servers = []
57         self.placement_groups = []
58         self.server_groups = []
59         self.keypair_name = None
60         self.secgroup_name = None
61         self._server_map = {}
62         self.attrs = {}
63         self._image = None
64         self._flavor = None
65         self.flavors = set()
66         self._user = None
67         self.template_file = None
68         self.heat_parameters = None
69         self.shade_client = None
70         self.heat_timeout = None
71         self.key_filename = None
72         self.shade_client = None
73         self.operator_client = None
74         self.nodes = []
75         self.controllers = []
76         self.computes = []
77         self.baremetals = []
78         super(HeatContext, self).__init__()
79
80     @staticmethod
81     def assign_external_network(networks):
82         sorted_networks = sorted(networks.items())
83         external_network = os.environ.get("EXTERNAL_NETWORK", "net04_ext")
84
85         have_external_network = any(net.get("external_network") for net in networks.values())
86         if not have_external_network:
87             # try looking for mgmt network first
88             try:
89                 networks['mgmt']["external_network"] = external_network
90             except KeyError:
91                 if sorted_networks:
92                     # otherwise assign it to first network using os.environ
93                     sorted_networks[0][1]["external_network"] = external_network
94
95         return sorted_networks
96
    def init(self, attrs):
        """Initializes itself from the supplied arguments.

        attrs is the context section of the task file. When it supplies a
        'heat_template', that template is deployed as-is; otherwise the
        resources are generated from the 'networks'/'servers' attrs below.
        """
        super(HeatContext, self).init(attrs)

        self.check_environment()
        self._user = attrs.get("user")

        # optional user-supplied template path; short-circuits generation
        self.template_file = attrs.get("heat_template")

        self.shade_client = openstack_utils.get_shade_client()
        self.operator_client = openstack_utils.get_shade_operator_client()

        # pod file is optional; presumably provided by the base class —
        # only NFVi metrics depend on it
        try:
            self.read_pod_file(attrs)
        except IOError:
            LOG.warning("No pod file specified. NVFi metrics will be disabled")

        self.heat_timeout = attrs.get("timeout", DEFAULT_HEAT_TIMEOUT)
        if self.template_file:
            # external template: only forward its parameters, skip the rest
            self.heat_parameters = attrs.get("heat_parameters")
            return

        self.keypair_name = h_join(self.name, "key")
        self.secgroup_name = h_join(self.name, "secgroup")

        self._image = attrs.get("image")

        self._flavor = attrs.get("flavor")

        self.placement_groups = [PlacementGroup(name, self, pg_attrs["policy"])
                                 for name, pg_attrs in attrs.get(
                                 "placement_groups", {}).items()]

        self.server_groups = [ServerGroup(name, self, sg_attrs["policy"])
                              for name, sg_attrs in attrs.get(
                              "server_groups", {}).items()]

        # we have to do this first, because we are injecting external_network
        # into the dict
        sorted_networks = self.assign_external_network(attrs["networks"])

        self.networks = OrderedDict(
            (name, Network(name, self, net_attrs)) for name, net_attrs in
            sorted_networks)

        # build Server objects in deterministic (sorted) order
        for name, server_attrs in sorted(attrs["servers"].items()):
            server = Server(name, self, server_attrs)
            self.servers.append(server)
            self._server_map[server.dn] = server

        self.attrs = attrs
148
    def check_environment(self):
        """Ensure OpenStack auth settings are present in the environment.

        If OS_AUTH_URL is unset, fall back to sourcing the OPENRC file.
        A missing OPENRC file is logged and re-raised.
        """
        try:
            os.environ['OS_AUTH_URL']
        except KeyError:
            try:
                source_env(consts.OPENRC)
            except IOError as e:
                # NOTE(review): EEXIST cannot result from a failed read of
                # OPENRC — this was likely meant to be errno.ENOENT. Left
                # as-is because changing it alters which errors re-raise;
                # confirm intent before fixing.
                if e.errno != errno.EEXIST:
                    LOG.error('OPENRC file not found')
                    raise
                else:
                    LOG.error('OS_AUTH_URL not found')
161
    @property
    def image(self):
        """Default Glance image name used for servers in this context."""
        return self._image
166
    @property
    def flavor(self):
        """Default Nova flavor (name or dict spec) for this context."""
        return self._flavor
171
    @property
    def user(self):
        """Login user name matching the context's image."""
        return self._user
176
    def _add_resources_to_template(self, template):
        """Add to *template* all the resources represented by this context:
        flavor, keypair, security group, networks/subnets/routers, and the
        servers (with scheduler hints derived from placement groups).
        """

        # a dict-valued flavor means "create it"; a string names an existing one
        if self.flavor:
            if isinstance(self.flavor, dict):
                flavor = self.flavor.setdefault("name", self.name + "-flavor")
                template.add_flavor(**self.flavor)
                self.flavors.add(flavor)

        template.add_keypair(self.keypair_name, self.name)
        template.add_security_group(self.secgroup_name)

        for network in self.networks.values():
            # Using existing network
            if network.is_existing():
                continue
            template.add_network(network.stack_name,
                                 network.physical_network,
                                 network.provider,
                                 network.segmentation_id,
                                 network.port_security_enabled,
                                 network.network_type)
            template.add_subnet(network.subnet_stack_name, network.stack_name,
                                network.subnet_cidr,
                                network.enable_dhcp,
                                network.gateway_ip)

            if network.router:
                template.add_router(network.router.stack_name,
                                    network.router.external_gateway_info,
                                    network.subnet_stack_name)
                template.add_router_interface(network.router.stack_if_name,
                                              network.router.stack_name,
                                              network.subnet_stack_name)

        # create a list of servers sorted by increasing no of placement groups
        list_of_servers = sorted(self.servers,
                                 key=lambda s: len(s.placement_groups))

        #
        # add servers with scheduler hints derived from placement groups
        #

        # create list of servers with availability policy
        availability_servers = []
        for server in list_of_servers:
            for pg in server.placement_groups:
                if pg.policy == "availability":
                    availability_servers.append(server)
                    break

        # record per-server flavors so they can be cleaned up later
        for server in availability_servers:
            if isinstance(server.flavor, dict):
                try:
                    self.flavors.add(server.flavor["name"])
                except KeyError:
                    # unnamed flavor dict: template derives "<stack_name>-flavor"
                    self.flavors.add(h_join(server.stack_name, "flavor"))

        # add servers with availability policy
        added_servers = []
        for server in availability_servers:
            scheduler_hints = {}
            for pg in server.placement_groups:
                update_scheduler_hints(scheduler_hints, added_servers, pg)
            # workaround for openstack nova bug, check JIRA: YARDSTICK-200
            # for details
            if len(availability_servers) == 2:
                if not scheduler_hints["different_host"]:
                    # first of the pair: no peer yet, drop the empty hint
                    scheduler_hints.pop("different_host", None)
                    server.add_to_template(template,
                                           list(self.networks.values()),
                                           scheduler_hints)
                else:
                    # second of the pair: hint must be a scalar, not a list
                    scheduler_hints["different_host"] = \
                        scheduler_hints["different_host"][0]
                    server.add_to_template(template,
                                           list(self.networks.values()),
                                           scheduler_hints)
            else:
                server.add_to_template(template,
                                       list(self.networks.values()),
                                       scheduler_hints)
            added_servers.append(server.stack_name)

        # create list of servers with affinity policy
        affinity_servers = []
        for server in list_of_servers:
            for pg in server.placement_groups:
                if pg.policy == "affinity":
                    affinity_servers.append(server)
                    break

        # add servers with affinity policy
        for server in affinity_servers:
            if server.stack_name in added_servers:
                continue
            scheduler_hints = {}
            for pg in server.placement_groups:
                update_scheduler_hints(scheduler_hints, added_servers, pg)
            server.add_to_template(template, list(self.networks.values()),
                                   scheduler_hints)
            added_servers.append(server.stack_name)

        # add server group
        for sg in self.server_groups:
            template.add_server_group(sg.name, sg.policy)

        # add remaining servers with no placement group configured
        for server in list_of_servers:
            # TODO placement_group and server_group should combine
            if not server.placement_groups:
                scheduler_hints = {}
                # affinity/anti-aff server group
                sg = server.server_group
                if sg:
                    scheduler_hints["group"] = {'get_resource': sg.name}
                server.add_to_template(template,
                                       list(self.networks.values()),
                                       scheduler_hints)
296
297     def get_neutron_info(self):
298         if not self.shade_client:
299             self.shade_client = get_shade_client()
300
301         networks = self.shade_client.list_networks()
302         for network in self.networks.values():
303             for neutron_net in (net for net in networks if net.name == network.stack_name):
304                     network.segmentation_id = neutron_net.get('provider:segmentation_id')
305                     # we already have physical_network
306                     # network.physical_network = neutron_net.get('provider:physical_network')
307                     network.network_type = neutron_net.get('provider:network_type')
308                     network.neutron_info = neutron_net
309
310     def _create_new_stack(self, heat_template):
311          try:
312              return heat_template.create(block=True,
313                                          timeout=self.heat_timeout)
314          except KeyboardInterrupt:
315              raise y_exc.StackCreationInterrupt
316          except Exception:
317              LOG.exception("stack failed")
318              # let the other failures happen, we want stack trace
319              raise
320
321     def _retrieve_existing_stack(self, stack_name):
322         stack = HeatStack(stack_name)
323         if stack.get():
324             return stack
325         else:
326             LOG.warning("Stack %s does not exist", self.name)
327             return None
328
    def deploy(self):
        """Deploys the template into a stack using the cloud.

        Generates (or reuses) the per-context SSH key, creates or retrieves
        the Heat stack, then copies vital stack outputs (IPs, interfaces)
        onto the Server objects.
        """
        LOG.info("Deploying context '%s' START", self.name)

        # per-context SSH key path under the yardstick resources tree
        self.key_filename = ''.join(
            [consts.YARDSTICK_ROOT_PATH,
             'yardstick/resources/files/yardstick_key-',
             self.name])
        # Permissions may have changed since creation; this can be fixed. If we
        # overwrite the file, we lose future access to VMs using this key.
        # As long as the file exists, even if it is unreadable, keep it intact
        if not os.path.exists(self.key_filename):
            SSH.gen_keys(self.key_filename)

        heat_template = HeatTemplate(
            self.name, template_file=self.template_file,
            heat_parameters=self.heat_parameters,
            os_cloud_config=self._flags.os_cloud_config)

        # only auto-generate resources when no external template was supplied
        if self.template_file is None:
            self._add_resources_to_template(heat_template)

        if self._flags.no_setup:
            # Try to get an existing stack, returns a stack or None
            self.stack = self._retrieve_existing_stack(self.name)
            if not self.stack:
                self.stack = self._create_new_stack(heat_template)

        else:
            self.stack = self._create_new_stack(heat_template)

        # TODO: use Neutron to get segmentation-id
        self.get_neutron_info()

        # copy some vital stack output into server objects
        for server in self.servers:
            if server.ports:
                self.add_server_port(server)

            if server.floating_ip:
                server.public_ip = \
                    self.stack.outputs[server.floating_ip["stack_name"]]

        LOG.info("Deploying context '%s' DONE", self.name)
373
374     @staticmethod
375     def _port_net_is_existing(port_info):
376         net_flags = port_info.get('net_flags', {})
377         return net_flags.get(consts.IS_EXISTING)
378
379     @staticmethod
380     def _port_net_is_public(port_info):
381         net_flags = port_info.get('net_flags', {})
382         return net_flags.get(consts.IS_PUBLIC)
383
    def add_server_port(self, server):
        """Copy port IPs from the stack outputs onto *server*.

        Sets server.public_ip/private_ip from the port outputs and builds
        server.interfaces for every port on a stack-created network.
        """
        server_ports = server.ports.values()
        for server_port in server_ports:
            # each value looks like a list of port dicts; only the first is
            # consulted for the IP selection — presumably the primary port
            port_info = server_port[0]
            port_ip = self.stack.outputs[port_info["stack_name"]]
            port_net_is_existing = self._port_net_is_existing(port_info)
            port_net_is_public = self._port_net_is_public(port_info)
            # a port on an existing public network (or the only port)
            # provides the externally reachable address
            if port_net_is_existing and (port_net_is_public or
                                         len(server_ports) == 1):
                server.public_ip = port_ip
            # first assignment wins unless there is exactly one port
            if not server.private_ip or len(server_ports) == 1:
                server.private_ip = port_ip

        server.interfaces = {}
        for network_name, ports in server.ports.items():
            for port in ports:
                # port['port'] is either port name from mapping or default network_name
                if self._port_net_is_existing(port):
                    # reused networks have no generated stack outputs to read
                    continue
                server.interfaces[port['port']] = self.make_interface_dict(network_name,
                                                                           port['port'],
                                                                           port['stack_name'],
                                                                           self.stack.outputs)
                server.override_ip(network_name, port)
408
409     def make_interface_dict(self, network_name, port, stack_name, outputs):
410         private_ip = outputs[stack_name]
411         mac_address = outputs[h_join(stack_name, "mac_address")]
412         # these are attributes of the network, not the port
413         output_subnet_cidr = outputs[h_join(self.name, network_name,
414                                             'subnet', 'cidr')]
415
416         # these are attributes of the network, not the port
417         output_subnet_gateway = outputs[h_join(self.name, network_name,
418                                                'subnet', 'gateway_ip')]
419
420         return {
421             # add default port name
422             "name": port,
423             "private_ip": private_ip,
424             "subnet_id": outputs[h_join(stack_name, "subnet_id")],
425             "subnet_cidr": output_subnet_cidr,
426             "network": str(ipaddress.ip_network(output_subnet_cidr).network_address),
427             "netmask": str(ipaddress.ip_network(output_subnet_cidr).netmask),
428             "gateway_ip": output_subnet_gateway,
429             "mac_address": mac_address,
430             "device_id": outputs[h_join(stack_name, "device_id")],
431             "network_id": outputs[h_join(stack_name, "network_id")],
432             # this should be == vld_id for NSB tests
433             "network_name": network_name,
434             # to match vnf_generic
435             "local_mac": mac_address,
436             "local_ip": private_ip,
437         }
438
439     def _delete_key_file(self):
440         try:
441             utils.remove_file(self.key_filename)
442             utils.remove_file(self.key_filename + ".pub")
443         except OSError:
444             LOG.exception("There was an error removing the key file %s",
445                           self.key_filename)
446
447     def undeploy(self):
448         """undeploys stack from cloud"""
449         if self._flags.no_teardown:
450             LOG.info("Undeploying context '%s' SKIP", self.name)
451             return
452
453         if self.stack:
454             LOG.info("Undeploying context '%s' START", self.name)
455             self.stack.delete()
456             self.stack = None
457             LOG.info("Undeploying context '%s' DONE", self.name)
458
459             self._delete_key_file()
460
461         super(HeatContext, self).undeploy()
462
463     @staticmethod
464     def generate_routing_table(server):
465         routes = [
466             {
467                 "network": intf["network"],
468                 "netmask": intf["netmask"],
469                 "if": name,
470                 # We have to encode a None gateway as '' for Jinja2 to YAML conversion
471                 "gateway": intf["gateway_ip"] if intf["gateway_ip"] else '',
472             }
473             for name, intf in server.interfaces.items()
474         ]
475         return routes
476
477     def _get_server(self, attr_name):
478         """lookup server info by name from context
479         attr_name: either a name for a server created by yardstick or a dict
480         with attribute name mapping when using external heat templates
481         """
482         if isinstance(attr_name, collections.Mapping):
483             node_name, cname = self.split_host_name(attr_name['name'])
484             if cname is None or cname != self.name:
485                 return None
486
487             # Create a dummy server instance for holding the *_ip attributes
488             server = Server(node_name, self, {})
489             server.public_ip = self.stack.outputs.get(
490                 attr_name.get("public_ip_attr", object()), None)
491
492             server.private_ip = self.stack.outputs.get(
493                 attr_name.get("private_ip_attr", object()), None)
494         else:
495             try:
496                 server = self._server_map[attr_name]
497             except KeyError:
498                 attr_name_no_suffix = attr_name.split("-")[0]
499                 server = self._server_map.get(attr_name_no_suffix, None)
500             if server is None:
501                 return None
502
503         pkey = pkg_resources.resource_string(
504             'yardstick.resources',
505             h_join('files/yardstick_key', self.name)).decode('utf-8')
506
507         result = {
508             "user": server.context.user,
509             "pkey": pkey,
510             "private_ip": server.private_ip,
511             "interfaces": server.interfaces,
512             "routing_table": self.generate_routing_table(server),
513             # empty IPv6 routing table
514             "nd_route_tbl": [],
515             # we want to save the contex name so we can generate pod.yaml
516             "name": server.name,
517         }
518         # Target server may only have private_ip
519         if server.public_ip:
520             result["ip"] = server.public_ip
521
522         return result
523
524     def _get_network(self, attr_name):
525         if not isinstance(attr_name, collections.Mapping):
526             network = self.networks.get(attr_name, None)
527
528         else:
529             # Only take the first key, value
530             key, value = next(iter(attr_name.items()), (None, None))
531             if key is None:
532                 return None
533             network_iter = (n for n in self.networks.values() if getattr(n, key) == value)
534             network = next(network_iter, None)
535
536         if network is None:
537             return None
538
539         result = {
540             "name": network.name,
541             "segmentation_id": network.segmentation_id,
542             "network_type": network.network_type,
543             "physical_network": network.physical_network,
544         }
545         return result
546
    def _get_physical_nodes(self):
        """Return the physical node list read from the pod file (may be [])."""
        return self.nodes
549
    def _get_physical_node_for_server(self, server_name):
        """Return 'node_name.context_name' for the compute hosting a server.

        server_name: fully-qualified server name ('server.context').
        Returns None when the server is not in this context, not found in
        Nova, or its hypervisor cannot be matched to a pod-file node.
        """
        node_name, ctx_name = self.split_host_name(server_name)
        if ctx_name is None or self.name != ctx_name:
            return None

        # only proceed if the server was declared in this context
        matching_nodes = [s for s in self.servers if s.name == node_name]
        if len(matching_nodes) == 0:
            return None

        # query Nova (via shade) for the live server to learn its hypervisor
        server = openstack_utils.get_server(self.shade_client,
                                            name_or_id=server_name)

        if server:
            server = server.toDict()
            list_hypervisors = self.operator_client.list_hypervisors()

            for hypervisor in list_hypervisors:
                if hypervisor.hypervisor_hostname == server['OS-EXT-SRV-ATTR:hypervisor_hostname']:
                    # match the pod-file node by the hypervisor's host IP
                    for node in self.nodes:
                        if node['ip'] == hypervisor.host_ip:
                            # NOTE(review): self._name vs self.name used
                            # elsewhere — confirm both resolve to the same
                            # context name on the base class
                            return "{}.{}".format(node['name'], self._name)

        return None