Merge "test_pktgen_dpdk_throughput: speedup unittest, mock time.sleep()"
[yardstick.git] / yardstick / benchmark / contexts / heat.py
##############################################################################
# Copyright (c) 2015 Ericsson AB and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

from __future__ import absolute_import
from __future__ import print_function

import collections
import logging
import os
import uuid
from collections import OrderedDict

import ipaddress
import paramiko
import pkg_resources

from yardstick.benchmark.contexts.base import Context
from yardstick.benchmark.contexts.model import Network
from yardstick.benchmark.contexts.model import PlacementGroup, ServerGroup
from yardstick.benchmark.contexts.model import Server
from yardstick.benchmark.contexts.model import update_scheduler_hints
from yardstick.common.openstack_utils import get_neutron_client
from yardstick.orchestrator.heat import HeatTemplate, get_short_key_uuid
from yardstick.common.constants import YARDSTICK_ROOT_PATH

LOG = logging.getLogger(__name__)

DEFAULT_HEAT_TIMEOUT = 3600


class HeatContext(Context):
    """Class that represents a context in the logical model"""

    __context_type__ = "Heat"

    def __init__(self):
        self.name = None
        self.stack = None
        self.networks = OrderedDict()
        self.servers = []
        self.placement_groups = []
        self.server_groups = []
        self.keypair_name = None
        self.secgroup_name = None
        self._server_map = {}
        self._image = None
        self._flavor = None
        self.flavors = set()
        self._user = None
        self.template_file = None
        self.heat_parameters = None
        self.neutron_client = None
        # generate a UUID to identify yardstick_key
        # the first 8 digits of the UUID will be used
        self.key_uuid = uuid.uuid4()
        self.heat_timeout = None
        self.key_filename = ''.join(
            [YARDSTICK_ROOT_PATH, 'yardstick/resources/files/yardstick_key-',
             get_short_key_uuid(self.key_uuid)])
        super(HeatContext, self).__init__()

    def assign_external_network(self, networks):
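        """build self.networks, assigning an external network if none is set

        if no network defines "external_network", the network named by the
        EXTERNAL_NETWORK environment variable (default "net04_ext") is
        assigned to the first network, sorted by name
        """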
        sorted_networks = sorted(networks.items())
        external_network = os.environ.get("EXTERNAL_NETWORK", "net04_ext")

        have_external_network = any(net.get("external_network") for net in networks.values())
        if sorted_networks and not have_external_network:
            # no external network defined, assign it to the first network
            # using os.environ
            sorted_networks[0][1]["external_network"] = external_network

        self.networks = OrderedDict((name, Network(name, self, attrs))
                                    for name, attrs in sorted_networks)

    def init(self, attrs):
        """initializes itself from the supplied arguments"""
        self.name = attrs["name"]

        self._user = attrs.get("user")

        self.template_file = attrs.get("heat_template")
        if self.template_file:
            self.heat_parameters = attrs.get("heat_parameters")
            return

        self.keypair_name = self.name + "-key"
        self.secgroup_name = self.name + "-secgroup"

        self._image = attrs.get("image")

        self._flavor = attrs.get("flavor")

        self.heat_timeout = attrs.get("timeout", DEFAULT_HEAT_TIMEOUT)

        self.placement_groups = [PlacementGroup(name, self, pgattrs["policy"])
                                 for name, pgattrs in attrs.get(
                                 "placement_groups", {}).items()]

        self.server_groups = [ServerGroup(name, self, sgattrs["policy"])
                              for name, sgattrs in attrs.get(
                              "server_groups", {}).items()]

        # we have to do this first, because we are injecting external_network
        # into the dict
        self.assign_external_network(attrs["networks"])

        for name, serverattrs in sorted(attrs["servers"].items()):
            server = Server(name, self, serverattrs)
            self.servers.append(server)
            self._server_map[server.dn] = server

        rsa_key = paramiko.RSAKey.generate(bits=2048, progress_func=None)
        rsa_key.write_private_key_file(self.key_filename)
        print("Writing %s ..." % self.key_filename)
        with open(self.key_filename + ".pub", "w") as pubkey_file:
            pubkey_file.write(
                "%s %s\n" % (rsa_key.get_name(), rsa_key.get_base64()))

    @property
    def image(self):
        """returns application's default image name"""
        return self._image

    @property
    def flavor(self):
        """returns application's default flavor name"""
        return self._flavor

    @property
    def user(self):
        """return login user name corresponding to image"""
        return self._user

    def _add_resources_to_template(self, template):
        """add to the template the resources represented by this context"""

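        # a dict-valued flavor describes a flavor to be created by the
        # stack itself; add it to the template and record its name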
        if self.flavor:
            if isinstance(self.flavor, dict):
                flavor = self.flavor.setdefault("name", self.name + "-flavor")
                template.add_flavor(**self.flavor)
                self.flavors.add(flavor)

        template.add_keypair(self.keypair_name, self.key_uuid)
        template.add_security_group(self.secgroup_name)

        for network in self.networks.values():
            template.add_network(network.stack_name,
                                 network.physical_network,
                                 network.provider,
                                 network.segmentation_id)
            template.add_subnet(network.subnet_stack_name, network.stack_name,
                                network.subnet_cidr)

            if network.router:
                template.add_router(network.router.stack_name,
                                    network.router.external_gateway_info,
                                    network.subnet_stack_name)
                template.add_router_interface(network.router.stack_if_name,
                                              network.router.stack_name,
                                              network.subnet_stack_name)

        # create a list of servers sorted by increasing number of placement
        # groups
        list_of_servers = sorted(self.servers,
                                 key=lambda s: len(s.placement_groups))

        #
        # add servers with scheduler hints derived from placement groups
        #

        # create list of servers with availability policy
        availability_servers = []
        for server in list_of_servers:
            for pg in server.placement_groups:
                if pg.policy == "availability":
                    availability_servers.append(server)
                    break

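        # record the names of inline (dict) flavors for the availability
        # servers; fall back to "<stack_name>-flavor" when no name is given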
        for server in availability_servers:
            if isinstance(server.flavor, dict):
                try:
                    self.flavors.add(server.flavor["name"])
                except KeyError:
                    self.flavors.add(server.stack_name + "-flavor")

        # add servers with availability policy
        added_servers = []
        for server in availability_servers:
            scheduler_hints = {}
            for pg in server.placement_groups:
                update_scheduler_hints(scheduler_hints, added_servers, pg)
            # workaround for openstack nova bug, check JIRA: YARDSTICK-200
            # for details
            if len(availability_servers) == 2:
                if not scheduler_hints["different_host"]:
                    scheduler_hints.pop("different_host", None)
                    server.add_to_template(template,
                                           list(self.networks.values()),
                                           scheduler_hints)
                else:
                    scheduler_hints["different_host"] = \
                        scheduler_hints["different_host"][0]
                    server.add_to_template(template,
                                           list(self.networks.values()),
                                           scheduler_hints)
            else:
                server.add_to_template(template,
                                       list(self.networks.values()),
                                       scheduler_hints)
            added_servers.append(server.stack_name)

        # create list of servers with affinity policy
        affinity_servers = []
        for server in list_of_servers:
            for pg in server.placement_groups:
                if pg.policy == "affinity":
                    affinity_servers.append(server)
                    break

        # add servers with affinity policy
        for server in affinity_servers:
            if server.stack_name in added_servers:
                continue
            scheduler_hints = {}
            for pg in server.placement_groups:
                update_scheduler_hints(scheduler_hints, added_servers, pg)
            server.add_to_template(template, list(self.networks.values()),
                                   scheduler_hints)
            added_servers.append(server.stack_name)

        # add server group
        for sg in self.server_groups:
            template.add_server_group(sg.name, sg.policy)

        # add remaining servers with no placement group configured
        for server in list_of_servers:
            # TODO placement_group and server_group should combine
            if not server.placement_groups:
                scheduler_hints = {}
                # affinity/anti-affinity server group
                sg = server.server_group
                if sg:
                    scheduler_hints["group"] = {'get_resource': sg.name}
                server.add_to_template(template,
                                       list(self.networks.values()),
                                       scheduler_hints)

    def get_neutron_info(self):
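        """match Neutron networks to the context networks by name and copy
        segmentation_id, network_type and the raw Neutron record into them
        """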
        if not self.neutron_client:
            self.neutron_client = get_neutron_client()

        networks = self.neutron_client.list_networks()
        for network in self.networks.values():
            for neutron_net in networks['networks']:
                if neutron_net['name'] == network.stack_name:
                    network.segmentation_id = neutron_net.get('provider:segmentation_id')
                    # we already have physical_network
                    # network.physical_network = neutron_net.get('provider:physical_network')
                    network.network_type = neutron_net.get('provider:network_type')
                    network.neutron_info = neutron_net

    def deploy(self):
        """deploys template into a stack using cloud"""
        print("Deploying context '%s'" % self.name)

        heat_template = HeatTemplate(self.name, self.template_file,
                                     self.heat_parameters)

        if self.template_file is None:
            self._add_resources_to_template(heat_template)

        try:
            self.stack = heat_template.create(block=True,
                                              timeout=self.heat_timeout)
        except KeyboardInterrupt:
            raise SystemExit("\nStack create interrupted")
        except Exception:
            LOG.exception("stack failed")
            # let the other failures happen, we want the stack trace
            raise

        # TODO: use Neutron to get segmentation-id
        self.get_neutron_info()

        # copy some vital stack output into server objects
        for server in self.servers:
            if server.ports:
                self.add_server_port(server)

            if server.floating_ip:
                server.public_ip = \
                    self.stack.outputs[server.floating_ip["stack_name"]]

        print("Context '%s' deployed" % self.name)

    def add_server_port(self, server):
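        """copy the server's private IP and per-network interface details
        from the stack outputs
        """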
        # TODO(hafe) can only handle one internal network for now
        port = next(iter(server.ports.values()))
        server.private_ip = self.stack.outputs[port["stack_name"]]
        server.interfaces = {}
        for network_name, port in server.ports.items():
            server.interfaces[network_name] = self.make_interface_dict(
                network_name, port['stack_name'], self.stack.outputs)

    def make_interface_dict(self, network_name, stack_name, outputs):
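        """build the interface description for one port from stack outputs"""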
        private_ip = outputs[stack_name]
        mac_addr = outputs[stack_name + "-mac_address"]
        subnet_cidr_key = "-".join([self.name, network_name, 'subnet', 'cidr'])
        gateway_key = "-".join([self.name, network_name, 'subnet', 'gateway_ip'])
        subnet_cidr = outputs[subnet_cidr_key]
        subnet_ip = ipaddress.ip_network(subnet_cidr)
        return {
            "private_ip": private_ip,
            "subnet_id": outputs[stack_name + "-subnet_id"],
            "subnet_cidr": subnet_cidr,
            "network": str(subnet_ip.network_address),
            "netmask": str(subnet_ip.netmask),
            "gateway_ip": outputs[gateway_key],
            "mac_address": mac_addr,
            "device_id": outputs[stack_name + "-device_id"],
            "network_id": outputs[stack_name + "-network_id"],
            "network_name": network_name,
            # to match vnf_generic
            "local_mac": mac_addr,
            "local_ip": private_ip,
            "vld_id": self.networks[network_name].vld_id,
        }

    def undeploy(self):
        """undeploys stack from cloud"""
        if self.stack:
            print("Undeploying context '%s'" % self.name)
            self.stack.delete()
            self.stack = None
            print("Context '%s' undeployed" % self.name)

        if os.path.exists(self.key_filename):
            try:
                os.remove(self.key_filename)
                os.remove(self.key_filename + ".pub")
            except OSError:
                LOG.exception("Key filename %s", self.key_filename)

        super(HeatContext, self).undeploy()

    @staticmethod
    def generate_routing_table(server):
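        """build a list of route dicts from the server's interfaces"""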
        routes = [
            {
                "network": intf["network"],
                "netmask": intf["netmask"],
                "if": name,
                "gateway": intf["gateway_ip"],
            }
            for name, intf in server.interfaces.items()
        ]
        return routes

    def _get_server(self, attr_name):
        """lookup server info by name from context
        attr_name: either a name for a server created by yardstick or a dict
        with attribute name mapping when using external heat templates
        """
        key_filename = pkg_resources.resource_filename(
            'yardstick.resources',
            'files/yardstick_key-' + get_short_key_uuid(self.key_uuid))

        if not isinstance(attr_name, collections.Mapping):
            server = self._server_map.get(attr_name, None)

        else:
            cname = attr_name["name"].split(".")[1]
            if cname != self.name:
                return None

            public_ip = None
            private_ip = None
            if "public_ip_attr" in attr_name:
                public_ip = self.stack.outputs[attr_name["public_ip_attr"]]
            if "private_ip_attr" in attr_name:
                private_ip = self.stack.outputs[
                    attr_name["private_ip_attr"]]

            # Create a dummy server instance for holding the *_ip attributes
            server = Server(attr_name["name"].split(".")[0], self, {})
            server.public_ip = public_ip
            server.private_ip = private_ip

        if server is None:
            return None

        result = {
            "user": server.context.user,
            "key_filename": key_filename,
            "private_ip": server.private_ip,
            "interfaces": server.interfaces,
            "routing_table": self.generate_routing_table(server),
            # empty IPv6 routing table
            "nd_route_tbl": [],
        }
        # Target server may only have private_ip
        if server.public_ip:
            result["ip"] = server.public_ip

        return result

    def _get_network(self, attr_name):
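        """lookup network info by name, or by a dict containing a vld_id"""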
        if not isinstance(attr_name, collections.Mapping):
            network = self.networks.get(attr_name, None)

        else:
            # Don't generalize too much. Just support vld_id
            vld_id = attr_name.get('vld_id')
            if vld_id is None:
                return None

            network = next((n for n in self.networks.values() if
                           getattr(n, "vld_id", None) == vld_id), None)

        if network is None:
            return None

        result = {
            "name": network.name,
            "vld_id": network.vld_id,
            "segmentation_id": network.segmentation_id,
            "network_type": network.network_type,
            "physical_network": network.physical_network,
        }
        return result