Merge "move external_network auto-assign to Heat context"
[yardstick.git] / yardstick / benchmark / contexts / heat.py
1 ##############################################################################
2 # Copyright (c) 2015 Ericsson AB and others.
3 #
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
9
10 from __future__ import absolute_import
11 from __future__ import print_function
12
13 import collections
14 import logging
15 import os
16 import sys
17 import uuid
18
19 import paramiko
20 import pkg_resources
21
22 from yardstick.benchmark.contexts.base import Context
23 from yardstick.benchmark.contexts.model import Network
24 from yardstick.benchmark.contexts.model import PlacementGroup, ServerGroup
25 from yardstick.benchmark.contexts.model import Server
26 from yardstick.benchmark.contexts.model import update_scheduler_hints
27 from yardstick.orchestrator.heat import HeatTemplate, get_short_key_uuid
28 from yardstick.definitions import YARDSTICK_ROOT_PATH
29
30 LOG = logging.getLogger(__name__)
31
32
class HeatContext(Context):
    """Context that deploys a scenario topology as an OpenStack Heat stack.

    Builds (or loads) a Heat template describing networks, routers, key
    pairs, security groups and servers, creates the stack on deploy() and
    tears it down on undeploy().
    """

    __context_type__ = "Heat"

    def __init__(self):
        # context/stack identity
        self.name = None
        self.stack = None
        # logical model objects populated by init()
        self.networks = []
        self.servers = []
        self.placement_groups = []
        self.server_groups = []
        self.keypair_name = None
        self.secgroup_name = None
        # maps server distinguished name -> Server, for _get_server() lookups
        self._server_map = {}
        # context-wide defaults applied to servers
        self._image = None
        self._flavor = None
        self._user = None
        # set when an external (user-supplied) heat template is used
        self.template_file = None
        self.heat_parameters = None
        # generate an uuid to identify yardstick_key
        # the first 8 digits of the uuid will be used
        self.key_uuid = uuid.uuid4()
        self.key_filename = ''.join(
            [YARDSTICK_ROOT_PATH, 'yardstick/resources/files/yardstick_key-',
             get_short_key_uuid(self.key_uuid)])
        super(HeatContext, self).__init__()

    def assign_external_network(self, networks):
        """Ensure at least one network is marked as the external network.

        :param networks: dict of network-name -> attribute-dict; the
            attribute dicts are modified in place.

        If no network defines "external_network", the first network in
        sorted-name order gets the name from the EXTERNAL_NETWORK
        environment variable (default "net04_ext").
        """
        sorted_networks = sorted(networks.items())
        external_network = os.environ.get("EXTERNAL_NETWORK", "net04_ext")
        have_external_network = any(net.get("external_network")
                                    for _, net in sorted_networks)
        # no external net defined, assign it to first network using os.environ
        if sorted_networks and not have_external_network:
            sorted_networks[0][1]["external_network"] = external_network

    def init(self, attrs):     # pragma: no cover
        """initializes itself from the supplied arguments

        :param attrs: context attributes from the task file; must contain
            "name", and either "heat_template" (external template mode) or
            "networks"/"servers" definitions (generated template mode).

        In generated mode this also writes a fresh 2048-bit RSA key pair
        (private key + ".pub") to self.key_filename.
        """
        self.name = attrs["name"]

        self._user = attrs.get("user")

        self.template_file = attrs.get("heat_template")
        if self.template_file:
            # external template: only parameters are needed, nothing to model
            self.heat_parameters = attrs.get("heat_parameters")
            return

        self.keypair_name = self.name + "-key"
        self.secgroup_name = self.name + "-secgroup"

        self._image = attrs.get("image")

        self._flavor = attrs.get("flavor")

        self.placement_groups = [PlacementGroup(name, self, pgattrs["policy"])
                                 for name, pgattrs in attrs.get(
                                 "placement_groups", {}).items()]

        self.server_groups = [ServerGroup(name, self, sgattrs["policy"])
                              for name, sgattrs in attrs.get(
                              "server_groups", {}).items()]

        # guarantee one network is flagged external before modeling them
        self.assign_external_network(attrs["networks"])

        self.networks = [Network(name, self, netattrs) for name, netattrs in
                         sorted(attrs["networks"].items())]

        for name, serverattrs in attrs["servers"].items():
            server = Server(name, self, serverattrs)
            self.servers.append(server)
            self._server_map[server.dn] = server

        rsa_key = paramiko.RSAKey.generate(bits=2048, progress_func=None)
        rsa_key.write_private_key_file(self.key_filename)
        print("Writing %s ..." % self.key_filename)
        with open(self.key_filename + ".pub", "w") as pubkey_file:
            pubkey_file.write(
                "%s %s\n" % (rsa_key.get_name(), rsa_key.get_base64()))
        # drop the key material from memory once it is on disk
        del rsa_key

    @property
    def image(self):
        """returns application's default image name"""
        return self._image

    @property
    def flavor(self):
        """returns application's default flavor name"""
        return self._flavor

    @property
    def user(self):
        """return login user name corresponding to image"""
        return self._user

    def _add_resources_to_template(self, template):
        """add to the template the resources represented by this context

        :param template: HeatTemplate being generated; mutated in place.
        """
        template.add_keypair(self.keypair_name, self.key_uuid)
        template.add_security_group(self.secgroup_name)

        for network in self.networks:
            template.add_network(network.stack_name)
            template.add_subnet(network.subnet_stack_name, network.stack_name,
                                network.subnet_cidr)

            if network.router:
                template.add_router(network.router.stack_name,
                                    network.router.external_gateway_info,
                                    network.subnet_stack_name)
                template.add_router_interface(network.router.stack_if_name,
                                              network.router.stack_name,
                                              network.subnet_stack_name)

        # create a list of servers sorted by increasing no of placement groups
        list_of_servers = sorted(self.servers,
                                 key=lambda s: len(s.placement_groups))

        #
        # add servers with scheduler hints derived from placement groups
        #

        # create list of servers with availability policy
        availability_servers = []
        for server in list_of_servers:
            for pg in server.placement_groups:
                if pg.policy == "availability":
                    availability_servers.append(server)
                    break

        # add servers with availability policy
        added_servers = []
        for server in availability_servers:
            scheduler_hints = {}
            for pg in server.placement_groups:
                update_scheduler_hints(scheduler_hints, added_servers, pg)
            # workaround for openstack nova bug, check JIRA: YARDSTICK-200
            # for details
            if len(availability_servers) == 2:
                # use .get(): the hint may be entirely absent (e.g. for the
                # first server of the pair, before any peer was added), and
                # indexing would raise KeyError in that case
                different_host = scheduler_hints.get("different_host")
                if different_host:
                    # nova mishandles a one-element list here; pass the bare
                    # server name instead
                    scheduler_hints["different_host"] = different_host[0]
                else:
                    # drop the empty/missing hint entirely
                    scheduler_hints.pop("different_host", None)
            server.add_to_template(template, self.networks, scheduler_hints)
            added_servers.append(server.stack_name)

        # create list of servers with affinity policy
        affinity_servers = []
        for server in list_of_servers:
            for pg in server.placement_groups:
                if pg.policy == "affinity":
                    affinity_servers.append(server)
                    break

        # add servers with affinity policy
        for server in affinity_servers:
            if server.stack_name in added_servers:
                continue
            scheduler_hints = {}
            for pg in server.placement_groups:
                update_scheduler_hints(scheduler_hints, added_servers, pg)
            server.add_to_template(template, self.networks, scheduler_hints)
            added_servers.append(server.stack_name)

        # add server group
        for sg in self.server_groups:
            template.add_server_group(sg.name, sg.policy)

        # add remaining servers with no placement group configured
        for server in list_of_servers:
            # TODO placement_group and server_group should combine
            if not server.placement_groups:
                scheduler_hints = {}
                # affinity/anti-aff server group
                sg = server.server_group
                if sg:
                    scheduler_hints["group"] = {'get_resource': sg.name}
                server.add_to_template(template,
                                       self.networks, scheduler_hints)

    def deploy(self):
        """deploys template into a stack using cloud

        Exits the process on failure or keyboard interrupt; on success,
        copies each server's private/public IPs out of the stack outputs.
        """
        print("Deploying context '%s'" % self.name)

        heat_template = HeatTemplate(self.name, self.template_file,
                                     self.heat_parameters)

        # only generate resources when no external template was supplied
        if self.template_file is None:
            self._add_resources_to_template(heat_template)

        try:
            self.stack = heat_template.create()
        except KeyboardInterrupt:
            sys.exit("\nStack create interrupted")
        except RuntimeError as err:
            sys.exit("error: failed to deploy stack: '%s'" % err.args)
        except Exception as err:
            sys.exit("error: failed to deploy stack: '%s'" % err)

        # copy some vital stack output into server objects
        for server in self.servers:
            if server.ports:
                # TODO(hafe) can only handle one internal network for now
                port = next(iter(server.ports.values()))
                server.private_ip = self.stack.outputs[port["stack_name"]]

            if server.floating_ip:
                server.public_ip = \
                    self.stack.outputs[server.floating_ip["stack_name"]]

        print("Context '%s' deployed" % self.name)

    def undeploy(self):
        """undeploys stack from cloud

        Deletes the Heat stack (if any) and best-effort removes the
        generated SSH key pair from disk.
        """
        if self.stack:
            print("Undeploying context '%s'" % self.name)
            self.stack.delete()
            self.stack = None
            print("Context '%s' undeployed" % self.name)

        if os.path.exists(self.key_filename):
            try:
                os.remove(self.key_filename)
                os.remove(self.key_filename + ".pub")
            except OSError:
                # key removal is best-effort; log and continue teardown
                LOG.exception("Key filename %s", self.key_filename)

        super(HeatContext, self).undeploy()

    def _get_server(self, attr_name):
        """lookup server info by name from context
        attr_name: either a name for a server created by yardstick or a dict
        with attribute name mapping when using external heat templates

        :returns: dict with "user", "key_filename", "private_ip" and
            optionally "ip" (public), or None when the server is unknown
            or belongs to another context.
        """
        key_filename = pkg_resources.resource_filename(
            'yardstick.resources',
            'files/yardstick_key-' + get_short_key_uuid(self.key_uuid))

        # NOTE: collections.Mapping is deprecated and removed in py3.10;
        # switch to collections.abc.Mapping once py2 support is dropped
        if isinstance(attr_name, collections.Mapping):
            cname = attr_name["name"].split(".")[1]
            if cname != self.name:
                # name refers to a server in a different context
                return None

            public_ip = None
            private_ip = None
            if "public_ip_attr" in attr_name:
                public_ip = self.stack.outputs[attr_name["public_ip_attr"]]
            if "private_ip_attr" in attr_name:
                private_ip = self.stack.outputs[
                    attr_name["private_ip_attr"]]

            # Create a dummy server instance for holding the *_ip attributes
            server = Server(attr_name["name"].split(".")[0], self, {})
            server.public_ip = public_ip
            server.private_ip = private_ip
        else:
            if attr_name not in self._server_map:
                return None
            server = self._server_map[attr_name]

        if server is None:
            return None

        result = {
            "user": server.context.user,
            "key_filename": key_filename,
            "private_ip": server.private_ip
        }
        # Target server may only have private_ip
        if server.public_ip:
            result["ip"] = server.public_ip

        return result