yardstick/benchmark/contexts/heat.py
##############################################################################
# Copyright (c) 2015 Ericsson AB and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

from __future__ import absolute_import
from __future__ import print_function

import collections
import logging
import os
import uuid
from collections import OrderedDict

import ipaddress
import paramiko
import pkg_resources

from yardstick.benchmark.contexts.base import Context
from yardstick.benchmark.contexts.model import Network
from yardstick.benchmark.contexts.model import PlacementGroup, ServerGroup
from yardstick.benchmark.contexts.model import Server
from yardstick.benchmark.contexts.model import update_scheduler_hints
from yardstick.orchestrator.heat import HeatTemplate, get_short_key_uuid
from yardstick.common.constants import YARDSTICK_ROOT_PATH

LOG = logging.getLogger(__name__)

DEFAULT_HEAT_TIMEOUT = 3600  # stack creation timeout, in seconds


class HeatContext(Context):
    """Class that represents a context in the logical model"""

    __context_type__ = "Heat"

    def __init__(self):
        self.name = None
        self.stack = None
        self.networks = OrderedDict()
        self.servers = []
        self.placement_groups = []
        self.server_groups = []
        self.keypair_name = None
        self.secgroup_name = None
        self._server_map = {}
        self._image = None
        self._flavor = None
        self.flavors = set()
        self._user = None
        self.template_file = None
        self.heat_parameters = None
        # generate a UUID to identify yardstick_key;
        # only the first 8 digits of the UUID are used
        self.key_uuid = uuid.uuid4()
        self.key_filename = ''.join(
            [YARDSTICK_ROOT_PATH, 'yardstick/resources/files/yardstick_key-',
             get_short_key_uuid(self.key_uuid)])
        super(HeatContext, self).__init__()

    def assign_external_network(self, networks):
        sorted_networks = sorted(networks.items())
        external_network = os.environ.get("EXTERNAL_NETWORK", "net04_ext")
        have_external_network = [(name, net)
                                 for name, net in sorted_networks if
                                 net.get("external_network")]
        # if no external network is defined, assign one to the first network
        # using the EXTERNAL_NETWORK environment variable
        if sorted_networks and not have_external_network:
            sorted_networks[0][1]["external_network"] = external_network
        return sorted_networks

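    # A minimal sketch of the behavior above (names hypothetical): given
    # networks {"mgmt": {}, "test": {}} and EXTERNAL_NETWORK unset, this
    # returns [("mgmt", {"external_network": "net04_ext"}), ("test", {})].
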
    def init(self, attrs):     # pragma: no cover
        """initializes itself from the supplied arguments"""
        self.name = attrs["name"]

        self._user = attrs.get("user")

        self.template_file = attrs.get("heat_template")
        if self.template_file:
            self.heat_parameters = attrs.get("heat_parameters")
            return

        self.keypair_name = self.name + "-key"
        self.secgroup_name = self.name + "-secgroup"

        self._image = attrs.get("image")

        self._flavor = attrs.get("flavor")

        self.heat_timeout = attrs.get("timeout", DEFAULT_HEAT_TIMEOUT)

        self.placement_groups = [PlacementGroup(name, self, pgattrs["policy"])
                                 for name, pgattrs in attrs.get(
                                     "placement_groups", {}).items()]

        self.server_groups = [ServerGroup(name, self, sgattrs["policy"])
                              for name, sgattrs in attrs.get(
                                  "server_groups", {}).items()]

        # we have to do this first, because we are injecting external_network
        # into the dict
        sorted_networks = self.assign_external_network(attrs["networks"])

        self.networks = OrderedDict(
            (name, Network(name, self, netattrs)) for name, netattrs in
            sorted_networks)

        for name, serverattrs in sorted(attrs["servers"].items()):
            server = Server(name, self, serverattrs)
            self.servers.append(server)
            self._server_map[server.dn] = server

        # generate the SSH keypair used to reach the deployed servers and
        # write the private and public halves to disk
        rsa_key = paramiko.RSAKey.generate(bits=2048, progress_func=None)
        rsa_key.write_private_key_file(self.key_filename)
        print("Writing %s ..." % self.key_filename)
        with open(self.key_filename + ".pub", "w") as pubkey_file:
            pubkey_file.write(
                "%s %s\n" % (rsa_key.get_name(), rsa_key.get_base64()))
        del rsa_key

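    # A minimal sketch of the attrs dict consumed by init() above; every name
    # and value here is hypothetical. Only "name", "networks" and "servers"
    # are required when no external "heat_template" is supplied:
    #
    #   {
    #       "name": "demo",
    #       "image": "yardstick-image",
    #       "flavor": "yardstick-flavor",
    #       "user": "ubuntu",
    #       "networks": {"test-net": {"cidr": "10.0.1.0/24"}},
    #       "servers": {"athena": {}},
    #   }
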
    @property
    def image(self):
        """returns application's default image name"""
        return self._image

    @property
    def flavor(self):
        """returns application's default flavor name"""
        return self._flavor

    @property
    def user(self):
        """return login user name corresponding to image"""
        return self._user

    def _add_resources_to_template(self, template):
        """add to the template the resources represented by this context"""

        if self.flavor:
            if isinstance(self.flavor, dict):
                flavor = self.flavor.setdefault("name", self.name + "-flavor")
                template.add_flavor(**self.flavor)
                self.flavors.add(flavor)

        template.add_keypair(self.keypair_name, self.key_uuid)
        template.add_security_group(self.secgroup_name)

        for network in self.networks.values():
            template.add_network(network.stack_name,
                                 network.physical_network,
                                 network.provider,
                                 network.segmentation_id)
            template.add_subnet(network.subnet_stack_name, network.stack_name,
                                network.subnet_cidr)

            if network.router:
                template.add_router(network.router.stack_name,
                                    network.router.external_gateway_info,
                                    network.subnet_stack_name)
                template.add_router_interface(network.router.stack_if_name,
                                              network.router.stack_name,
                                              network.subnet_stack_name)

        # create a list of servers sorted by increasing number of
        # placement groups
        list_of_servers = sorted(self.servers,
                                 key=lambda s: len(s.placement_groups))

        #
        # add servers with scheduler hints derived from placement groups
        #

        # create list of servers with availability policy
        availability_servers = []
        for server in list_of_servers:
            for pg in server.placement_groups:
                if pg.policy == "availability":
                    availability_servers.append(server)
                    break

        for server in availability_servers:
            if isinstance(server.flavor, dict):
                try:
                    self.flavors.add(server.flavor["name"])
                except KeyError:
                    self.flavors.add(server.stack_name + "-flavor")

        # add servers with availability policy
        added_servers = []
        for server in availability_servers:
            scheduler_hints = {}
            for pg in server.placement_groups:
                update_scheduler_hints(scheduler_hints, added_servers, pg)
            # workaround for an OpenStack Nova bug; see JIRA: YARDSTICK-200
            # for details
            if len(availability_servers) == 2:
                if not scheduler_hints["different_host"]:
                    scheduler_hints.pop("different_host", None)
                    server.add_to_template(template,
                                           list(self.networks.values()),
                                           scheduler_hints)
                else:
                    scheduler_hints["different_host"] = \
                        scheduler_hints["different_host"][0]
                    server.add_to_template(template,
                                           list(self.networks.values()),
                                           scheduler_hints)
            else:
                server.add_to_template(template,
                                       list(self.networks.values()),
                                       scheduler_hints)
            added_servers.append(server.stack_name)

        # create list of servers with affinity policy
        affinity_servers = []
        for server in list_of_servers:
            for pg in server.placement_groups:
                if pg.policy == "affinity":
                    affinity_servers.append(server)
                    break

        # add servers with affinity policy
        for server in affinity_servers:
            if server.stack_name in added_servers:
                continue
            scheduler_hints = {}
            for pg in server.placement_groups:
                update_scheduler_hints(scheduler_hints, added_servers, pg)
            server.add_to_template(template, list(self.networks.values()),
                                   scheduler_hints)
            added_servers.append(server.stack_name)

        # add server groups
        for sg in self.server_groups:
            template.add_server_group(sg.name, sg.policy)

        # add remaining servers with no placement group configured
        for server in list_of_servers:
            # TODO: placement_group and server_group should be combined
            if not server.placement_groups:
                scheduler_hints = {}
                # affinity/anti-affinity server group
                sg = server.server_group
                if sg:
                    scheduler_hints["group"] = {'get_resource': sg.name}
                server.add_to_template(template,
                                       list(self.networks.values()),
                                       scheduler_hints)

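    # Note on hint shapes (an illustration based on the usage above, not a
    # full spec): update_scheduler_hints() populates keys such as
    # "different_host" (availability policy) with the stack names of
    # already-added servers; the affinity policy is expected to yield an
    # analogous "same_host" hint, and servers in a server group get
    # {"group": {'get_resource': <group name>}}.
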
    def deploy(self):
        """deploys template into a stack using cloud"""
        print("Deploying context '%s'" % self.name)

        heat_template = HeatTemplate(self.name, self.template_file,
                                     self.heat_parameters)

        if self.template_file is None:
            self._add_resources_to_template(heat_template)

        try:
            self.stack = heat_template.create(block=True,
                                              timeout=self.heat_timeout)
        except KeyboardInterrupt:
            raise SystemExit("\nStack create interrupted")
        except Exception:
            LOG.exception("stack failed")
            raise
        # let other failures propagate; we want the stack trace

        # copy some vital stack output into server objects
        for server in self.servers:
            if server.ports:
                # TODO(hafe) can only handle one internal network for now
                port = next(iter(server.ports.values()))
                server.private_ip = self.stack.outputs[port["stack_name"]]
                server.interfaces = {}
                for network_name, port in server.ports.items():
                    self.make_interface_dict(network_name, port['stack_name'],
                                             server,
                                             self.stack.outputs)

            if server.floating_ip:
                server.public_ip = \
                    self.stack.outputs[server.floating_ip["stack_name"]]

        print("Context '%s' deployed" % self.name)

    def make_interface_dict(self, network_name, stack_name, server, outputs):
        server.interfaces[network_name] = {
            "private_ip": outputs[stack_name],
            "subnet_id": outputs[stack_name + "-subnet_id"],
            "subnet_cidr": outputs[
                "{}-{}-subnet-cidr".format(self.name, network_name)],
            "netmask": str(ipaddress.ip_network(
                outputs["{}-{}-subnet-cidr".format(self.name,
                                                   network_name)]).netmask),
            "gateway_ip": outputs[
                "{}-{}-subnet-gateway_ip".format(self.name, network_name)],
            "mac_address": outputs[stack_name + "-mac_address"],
            "device_id": outputs[stack_name + "-device_id"],
            "network_id": outputs[stack_name + "-network_id"],
            "network_name": network_name,
            # to match vnf_generic
            "local_mac": outputs[stack_name + "-mac_address"],
            "local_ip": outputs[stack_name],
            "vld_id": self.networks[network_name].vld_id,
        }

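    # For illustration, a resulting entry (all values hypothetical):
    #   server.interfaces["test-net"] = {
    #       "private_ip": "10.0.1.5", "subnet_cidr": "10.0.1.0/24",
    #       "netmask": "255.255.255.0", "gateway_ip": "10.0.1.1",
    #       "mac_address": "fa:16:3e:..", "network_name": "test-net", ...
    #   }
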
    def undeploy(self):
        """undeploys stack from cloud"""
        if self.stack:
            print("Undeploying context '%s'" % self.name)
            self.stack.delete()
            self.stack = None
            print("Context '%s' undeployed" % self.name)

        if os.path.exists(self.key_filename):
            try:
                os.remove(self.key_filename)
                os.remove(self.key_filename + ".pub")
            except OSError:
                LOG.exception("Error removing key file %s", self.key_filename)

        super(HeatContext, self).undeploy()

    def _get_server(self, attr_name):
        """lookup server info by name from context

        attr_name: either a name for a server created by yardstick or a dict
        with attribute name mapping when using external heat templates
        """
        key_filename = pkg_resources.resource_filename(
            'yardstick.resources',
            'files/yardstick_key-' + get_short_key_uuid(self.key_uuid))

        if isinstance(attr_name, collections.Mapping):
            cname = attr_name["name"].split(".")[1]
            if cname != self.name:
                return None

            public_ip = None
            private_ip = None
            if "public_ip_attr" in attr_name:
                public_ip = self.stack.outputs[attr_name["public_ip_attr"]]
            if "private_ip_attr" in attr_name:
                private_ip = self.stack.outputs[
                    attr_name["private_ip_attr"]]

            # Create a dummy server instance for holding the *_ip attributes
            server = Server(attr_name["name"].split(".")[0], self, {})
            server.public_ip = public_ip
            server.private_ip = private_ip
        else:
            if attr_name not in self._server_map:
                return None
            server = self._server_map[attr_name]

        if server is None:
            return None

        result = {
            "user": server.context.user,
            "key_filename": key_filename,
            "private_ip": server.private_ip,
            "interfaces": server.interfaces,
        }
        # Target server may only have private_ip
        if server.public_ip:
            result["ip"] = server.public_ip

        return result
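
# A minimal end-to-end sketch (hypothetical; assumes OpenStack credentials in
# the environment and the "demo" attrs example shown above init()):
#
#   context = HeatContext()
#   context.init(attrs)
#   context.deploy()
#   info = context._get_server("athena.demo")  # user, key_filename, ip, ...
#   context.undeploy()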