Make security group configurable - dovetail
[yardstick.git] / yardstick / benchmark / contexts / heat.py
##############################################################################
# Copyright (c) 2015 Ericsson AB and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

import collections
import logging
import os
import errno
from collections import OrderedDict

import ipaddress
import pkg_resources

from yardstick.benchmark.contexts.base import Context
from yardstick.benchmark.contexts.model import Network
from yardstick.benchmark.contexts.model import PlacementGroup, ServerGroup
from yardstick.benchmark.contexts.model import Server
from yardstick.benchmark.contexts.model import update_scheduler_hints
from yardstick.common import exceptions as y_exc
from yardstick.common.openstack_utils import get_shade_client
from yardstick.orchestrator.heat import HeatStack
from yardstick.orchestrator.heat import HeatTemplate
from yardstick.common import constants as consts
from yardstick.common import utils
from yardstick.common.utils import source_env
from yardstick.ssh import SSH
from yardstick.common import openstack_utils

LOG = logging.getLogger(__name__)

DEFAULT_HEAT_TIMEOUT = 3600


def join_args(sep, *args):
    return sep.join(args)


def h_join(*args):
    return '-'.join(args)


class HeatContext(Context):
    """Class that represents a context in the logical model"""

    __context_type__ = "Heat"

    def __init__(self):
        self.stack = None
        self.networks = OrderedDict()
        self.heat_timeout = None
        self.servers = []
        self.placement_groups = []
        self.server_groups = []
        self.keypair_name = None
        self.secgroup_name = None
        self.security_group = None
        self._server_map = {}
        self.attrs = {}
        self._image = None
        self._flavor = None
        self.flavors = set()
        self._user = None
        self.template_file = None
        self.heat_parameters = None
        self.shade_client = None
        self.key_filename = None
        self.operator_client = None
        self.nodes = []
        self.controllers = []
        self.computes = []
        self.baremetals = []
        super(HeatContext, self).__init__()

    @staticmethod
    def assign_external_network(networks):
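        """Ensure one network is marked as the external network.

        If no network in ``networks`` sets "external_network", attach the
        network named by the EXTERNAL_NETWORK environment variable (default
        "net04_ext") to the 'mgmt' network, or failing that to the first
        network sorted by name. Returns the sorted (name, attrs) items.
        """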
        sorted_networks = sorted(networks.items())
        external_network = os.environ.get("EXTERNAL_NETWORK", "net04_ext")

        have_external_network = any(net.get("external_network") for net in networks.values())
        if not have_external_network:
            # try looking for mgmt network first
            try:
                networks['mgmt']["external_network"] = external_network
            except KeyError:
                if sorted_networks:
                    # otherwise assign it to the first network, sorted by name
                    sorted_networks[0][1]["external_network"] = external_network

        return sorted_networks

    def init(self, attrs):
        """Initializes itself from the supplied arguments"""
        super(HeatContext, self).init(attrs)

        self.check_environment()
        self._user = attrs.get("user")

        self.template_file = attrs.get("heat_template")

        self.shade_client = openstack_utils.get_shade_client()
        self.operator_client = openstack_utils.get_shade_operator_client()

        try:
            self.read_pod_file(attrs)
        except IOError:
            LOG.warning("No pod file specified. NFVi metrics will be disabled")

        self.heat_timeout = attrs.get("timeout", DEFAULT_HEAT_TIMEOUT)
        if self.template_file:
            self.heat_parameters = attrs.get("heat_parameters")
            return

        self.keypair_name = h_join(self.name, "key")

        self.secgroup_name = h_join(self.name, "secgroup")

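        # 'security_group' is optional and is handed straight to
        # HeatTemplate.add_security_group() below; when it is omitted the
        # template falls back to its default rules. Illustrative context
        # YAML sketch (the exact rule schema is assumed, not defined here):
        #
        #   security_group:
        #     rules:
        #       - remote_ip_prefix: "0.0.0.0/0"
        #         protocol: tcp
        #         port_range_min: 22
        #         port_range_max: 22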
        self.security_group = attrs.get("security_group")

        self._image = attrs.get("image")

        self._flavor = attrs.get("flavor")

        self.placement_groups = [PlacementGroup(name, self, pg_attrs["policy"])
                                 for name, pg_attrs in attrs.get(
                                 "placement_groups", {}).items()]

        self.server_groups = [ServerGroup(name, self, sg_attrs["policy"])
                              for name, sg_attrs in attrs.get(
                              "server_groups", {}).items()]

        # we have to do this first, because we are injecting external_network
        # into the dict
        sorted_networks = self.assign_external_network(attrs["networks"])

        self.networks = OrderedDict(
            (name, Network(name, self, net_attrs)) for name, net_attrs in
            sorted_networks)

        for name, server_attrs in sorted(attrs["servers"].items()):
            server = Server(name, self, server_attrs)
            self.servers.append(server)
            self._server_map[server.dn] = server

        self.attrs = attrs

    def check_environment(self):
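        """Make sure OpenStack credentials are available.

        If OS_AUTH_URL is not set in the environment, try to source the
        openrc file at consts.OPENRC.
        """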
        try:
            os.environ['OS_AUTH_URL']
        except KeyError:
            try:
                source_env(consts.OPENRC)
            except IOError as e:
                if e.errno != errno.EEXIST:
                    LOG.error('OPENRC file not found')
                    raise
                else:
                    LOG.error('OS_AUTH_URL not found')

    @property
    def image(self):
        """returns application's default image name"""
        return self._image

    @property
    def flavor(self):
        """returns application's default flavor name"""
        return self._flavor

    @property
    def user(self):
        """return login user name corresponding to image"""
        return self._user

    def _add_resources_to_template(self, template):
        """add to the template the resources represented by this context"""

        if self.flavor:
            if isinstance(self.flavor, dict):
                flavor = self.flavor.setdefault("name", self.name + "-flavor")
                template.add_flavor(**self.flavor)
                self.flavors.add(flavor)

        template.add_keypair(self.keypair_name, self.name)
        template.add_security_group(self.secgroup_name, self.security_group)

        for network in self.networks.values():
            # Using existing network
            if network.is_existing():
                continue
            template.add_network(network.stack_name,
                                 network.physical_network,
                                 network.provider,
                                 network.segmentation_id,
                                 network.port_security_enabled,
                                 network.network_type)
            template.add_subnet(network.subnet_stack_name, network.stack_name,
                                network.subnet_cidr,
                                network.enable_dhcp,
                                network.gateway_ip)

            if network.router:
                template.add_router(network.router.stack_name,
                                    network.router.external_gateway_info,
                                    network.subnet_stack_name)
                template.add_router_interface(network.router.stack_if_name,
                                              network.router.stack_name,
                                              network.subnet_stack_name)

        # create a list of servers sorted by increasing number of placement groups
        list_of_servers = sorted(self.servers,
                                 key=lambda s: len(s.placement_groups))

        #
        # add servers with scheduler hints derived from placement groups
        #

        # create list of servers with availability policy
        availability_servers = []
        for server in list_of_servers:
            for pg in server.placement_groups:
                if pg.policy == "availability":
                    availability_servers.append(server)
                    break

        for server in availability_servers:
            if isinstance(server.flavor, dict):
                try:
                    self.flavors.add(server.flavor["name"])
                except KeyError:
                    self.flavors.add(h_join(server.stack_name, "flavor"))

        # add servers with availability policy
        added_servers = []
        for server in availability_servers:
            scheduler_hints = {}
            for pg in server.placement_groups:
                update_scheduler_hints(scheduler_hints, added_servers, pg)
            # workaround for openstack nova bug, check JIRA: YARDSTICK-200
            # for details
            if len(availability_servers) == 2:
                if not scheduler_hints["different_host"]:
                    scheduler_hints.pop("different_host", None)
                    server.add_to_template(template,
                                           list(self.networks.values()),
                                           scheduler_hints)
                else:
                    scheduler_hints["different_host"] = \
                        scheduler_hints["different_host"][0]
                    server.add_to_template(template,
                                           list(self.networks.values()),
                                           scheduler_hints)
            else:
                server.add_to_template(template,
                                       list(self.networks.values()),
                                       scheduler_hints)
            added_servers.append(server.stack_name)

        # create list of servers with affinity policy
        affinity_servers = []
        for server in list_of_servers:
            for pg in server.placement_groups:
                if pg.policy == "affinity":
                    affinity_servers.append(server)
                    break

        # add servers with affinity policy
        for server in affinity_servers:
            if server.stack_name in added_servers:
                continue
            scheduler_hints = {}
            for pg in server.placement_groups:
                update_scheduler_hints(scheduler_hints, added_servers, pg)
            server.add_to_template(template, list(self.networks.values()),
                                   scheduler_hints)
            added_servers.append(server.stack_name)

        # add server group
        for sg in self.server_groups:
            template.add_server_group(sg.name, sg.policy)

        # add remaining servers with no placement group configured
        for server in list_of_servers:
            # TODO: placement_group and server_group should be combined
            if not server.placement_groups:
                scheduler_hints = {}
                # affinity/anti-affinity server group
                sg = server.server_group
                if sg:
                    scheduler_hints["group"] = {'get_resource': sg.name}
                server.add_to_template(template,
                                       list(self.networks.values()),
                                       scheduler_hints)

    def get_neutron_info(self):
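        """Fill in provider network details from Neutron via shade.

        Records the segmentation id, network type and raw Neutron data on
        each context network.
        """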
        if not self.shade_client:
            self.shade_client = get_shade_client()

        networks = self.shade_client.list_networks()
        for network in self.networks.values():
            for neutron_net in (net for net in networks if net.name == network.stack_name):
                network.segmentation_id = neutron_net.get('provider:segmentation_id')
                # we already have physical_network
                # network.physical_network = neutron_net.get('provider:physical_network')
                network.network_type = neutron_net.get('provider:network_type')
                network.neutron_info = neutron_net

    def _create_new_stack(self, heat_template):
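        """Create the Heat stack, blocking until it completes or the
        heat_timeout expires."""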
        try:
            return heat_template.create(block=True,
                                        timeout=self.heat_timeout)
        except KeyboardInterrupt:
            raise y_exc.StackCreationInterrupt
        except Exception:
            LOG.exception("stack failed")
            # let the other failures happen, we want stack trace
            raise

    def _retrieve_existing_stack(self, stack_name):
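        """Return the existing Heat stack, or None if it is not found."""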
        stack = HeatStack(stack_name)
        if stack.get():
            return stack
        else:
            LOG.warning("Stack %s does not exist", stack_name)
            return None

    def deploy(self):
        """deploys template into a stack using cloud"""
        LOG.info("Deploying context '%s' START", self.name)

        self.key_filename = ''.join(
            [consts.YARDSTICK_ROOT_PATH,
             'yardstick/resources/files/yardstick_key-',
             self.name])
        # Permissions may have changed since creation; this can be fixed. If we
        # overwrite the file, we lose future access to VMs using this key.
        # As long as the file exists, even if it is unreadable, keep it intact
        if not os.path.exists(self.key_filename):
            SSH.gen_keys(self.key_filename)

        heat_template = HeatTemplate(
            self.name, template_file=self.template_file,
            heat_parameters=self.heat_parameters,
            os_cloud_config=self._flags.os_cloud_config)

        if self.template_file is None:
            self._add_resources_to_template(heat_template)

        if self._flags.no_setup:
            # Try to get an existing stack, returns a stack or None
            self.stack = self._retrieve_existing_stack(self.name)
            if not self.stack:
                self.stack = self._create_new_stack(heat_template)

        else:
            self.stack = self._create_new_stack(heat_template)

        # TODO: use Neutron to get segmentation-id
        self.get_neutron_info()

        # copy some vital stack output into server objects
        for server in self.servers:
            if server.ports:
                self.add_server_port(server)

            if server.floating_ip:
                server.public_ip = \
                    self.stack.outputs[server.floating_ip["stack_name"]]

        LOG.info("Deploying context '%s' DONE", self.name)

    @staticmethod
    def _port_net_is_existing(port_info):
        net_flags = port_info.get('net_flags', {})
        return net_flags.get(consts.IS_EXISTING)

    @staticmethod
    def _port_net_is_public(port_info):
        net_flags = port_info.get('net_flags', {})
        return net_flags.get(consts.IS_PUBLIC)

    def add_server_port(self, server):
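        """Copy port details from the stack outputs onto the server.

        Sets the server public/private IP addresses and builds the
        server.interfaces mapping.
        """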
        server_ports = server.ports.values()
        for server_port in server_ports:
            port_info = server_port[0]
            port_ip = self.stack.outputs[port_info["stack_name"]]
            port_net_is_existing = self._port_net_is_existing(port_info)
            port_net_is_public = self._port_net_is_public(port_info)
            if port_net_is_existing and (port_net_is_public or
                                         len(server_ports) == 1):
                server.public_ip = port_ip
            if not server.private_ip or len(server_ports) == 1:
                server.private_ip = port_ip

        server.interfaces = {}
        for network_name, ports in server.ports.items():
            for port in ports:
                # port['port'] is either port name from mapping or default network_name
                if self._port_net_is_existing(port):
                    continue
                server.interfaces[port['port']] = self.make_interface_dict(network_name,
                                                                           port['port'],
                                                                           port['stack_name'],
                                                                           self.stack.outputs)
                server.override_ip(network_name, port)

    def make_interface_dict(self, network_name, port, stack_name, outputs):
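        """Build one interface description from the stack outputs."""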
        private_ip = outputs[stack_name]
        mac_address = outputs[h_join(stack_name, "mac_address")]
        # these are attributes of the network, not the port
        output_subnet_cidr = outputs[h_join(self.name, network_name,
                                            'subnet', 'cidr')]

        # these are attributes of the network, not the port
        output_subnet_gateway = outputs[h_join(self.name, network_name,
                                               'subnet', 'gateway_ip')]

        return {
            # add default port name
            "name": port,
            "private_ip": private_ip,
            "subnet_id": outputs[h_join(stack_name, "subnet_id")],
            "subnet_cidr": output_subnet_cidr,
            "network": str(ipaddress.ip_network(output_subnet_cidr).network_address),
            "netmask": str(ipaddress.ip_network(output_subnet_cidr).netmask),
            "gateway_ip": output_subnet_gateway,
            "mac_address": mac_address,
            "device_id": outputs[h_join(stack_name, "device_id")],
            "network_id": outputs[h_join(stack_name, "network_id")],
            # this should be == vld_id for NSB tests
            "network_name": network_name,
            # to match vnf_generic
            "local_mac": mac_address,
            "local_ip": private_ip,
        }

    def _delete_key_file(self):
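        """Remove the SSH key pair generated for this context."""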
        try:
            utils.remove_file(self.key_filename)
            utils.remove_file(self.key_filename + ".pub")
        except OSError:
            LOG.exception("There was an error removing the key file %s",
                          self.key_filename)

    def undeploy(self):
        """undeploys stack from cloud"""
        if self._flags.no_teardown:
            LOG.info("Undeploying context '%s' SKIP", self.name)
            return

        if self.stack:
            LOG.info("Undeploying context '%s' START", self.name)
            self.stack.delete()
            self.stack = None
            LOG.info("Undeploying context '%s' DONE", self.name)

            self._delete_key_file()

        super(HeatContext, self).undeploy()

    @staticmethod
    def generate_routing_table(server):
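        """Build one routing table entry per server interface."""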
        routes = [
            {
                "network": intf["network"],
                "netmask": intf["netmask"],
                "if": name,
                # We have to encode a None gateway as '' for Jinja2 to YAML conversion
                "gateway": intf["gateway_ip"] if intf["gateway_ip"] else '',
            }
            for name, intf in server.interfaces.items()
        ]
        return routes

    def _get_server(self, attr_name):
        """lookup server info by name from context
        attr_name: either a name for a server created by yardstick or a dict
        with attribute name mapping when using external heat templates
        """
        if isinstance(attr_name, collections.Mapping):
            node_name, cname = self.split_host_name(attr_name['name'])
            if cname is None or cname != self.name:
                return None

            # Create a dummy server instance for holding the *_ip attributes
            server = Server(node_name, self, {})
            server.public_ip = self.stack.outputs.get(
                attr_name.get("public_ip_attr", object()), None)

            server.private_ip = self.stack.outputs.get(
                attr_name.get("private_ip_attr", object()), None)
        else:
            try:
                server = self._server_map[attr_name]
            except KeyError:
                attr_name_no_suffix = attr_name.split("-")[0]
                server = self._server_map.get(attr_name_no_suffix, None)
            if server is None:
                return None

        pkey = pkg_resources.resource_string(
            'yardstick.resources',
            h_join('files/yardstick_key', self.name)).decode('utf-8')

        result = {
            "user": server.context.user,
            "pkey": pkey,
            "private_ip": server.private_ip,
            "interfaces": server.interfaces,
            "routing_table": self.generate_routing_table(server),
            # empty IPv6 routing table
            "nd_route_tbl": [],
            # we want to save the context name so we can generate pod.yaml
            "name": server.name,
        }
        # Target server may only have private_ip
        if server.public_ip:
            result["ip"] = server.public_ip

        return result

    def _get_network(self, attr_name):
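        """Look up a network by name or by a single attribute match."""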
        if not isinstance(attr_name, collections.Mapping):
            network = self.networks.get(attr_name, None)

        else:
            # Only take the first key, value
            key, value = next(iter(attr_name.items()), (None, None))
            if key is None:
                return None
            network_iter = (n for n in self.networks.values() if getattr(n, key) == value)
            network = next(network_iter, None)

        if network is None:
            return None

        result = {
            "name": network.name,
            "segmentation_id": network.segmentation_id,
            "network_type": network.network_type,
            "physical_network": network.physical_network,
        }
        return result

    def _get_physical_nodes(self):
        return self.nodes

    def _get_physical_node_for_server(self, server_name):
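        """Map a deployed server to the compute node hosting it.

        Returns "<node name>.<context name>", or None if the server or its
        hypervisor cannot be matched against the pod file nodes.
        """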
        node_name, ctx_name = self.split_host_name(server_name)
        if ctx_name is None or self.name != ctx_name:
            return None

        matching_nodes = [s for s in self.servers if s.name == node_name]
        if len(matching_nodes) == 0:
            return None

        server = openstack_utils.get_server(self.shade_client,
                                            name_or_id=server_name)

        if server:
            server = server.toDict()
            list_hypervisors = self.operator_client.list_hypervisors()

            for hypervisor in list_hypervisors:
                if hypervisor.hypervisor_hostname == server['OS-EXT-SRV-ATTR:hypervisor_hostname']:
                    for node in self.nodes:
                        if node['ip'] == hypervisor.host_ip:
                            return "{}.{}".format(node['name'], self._name)

        return None