Merge "Split reporting_target from http_target in yardstick_verify"
yardstick/benchmark/contexts/heat.py
##############################################################################
# Copyright (c) 2015 Ericsson AB and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

from __future__ import absolute_import
from __future__ import print_function

import collections
import logging
import os
import sys
import uuid

import paramiko
import pkg_resources

from yardstick.benchmark.contexts.base import Context
from yardstick.benchmark.contexts.model import Network
from yardstick.benchmark.contexts.model import PlacementGroup, ServerGroup
from yardstick.benchmark.contexts.model import Server
from yardstick.benchmark.contexts.model import update_scheduler_hints
from yardstick.orchestrator.heat import HeatTemplate, get_short_key_uuid
from yardstick.definitions import YARDSTICK_ROOT_PATH

LOG = logging.getLogger(__name__)


class HeatContext(Context):
    """Class that represents a context in the logical model"""

    __context_type__ = "Heat"

    def __init__(self):
        self.name = None
        self.stack = None
        self.networks = []
        self.servers = []
        self.placement_groups = []
        self.server_groups = []
        self.keypair_name = None
        self.secgroup_name = None
        self._server_map = {}
        self._image = None
        self._flavor = None
        self._user = None
        self.template_file = None
        self.heat_parameters = None
        # generate a UUID to identify yardstick_key;
        # only the first 8 characters of the UUID are used
        self.key_uuid = uuid.uuid4()
        self.key_filename = ''.join(
            [YARDSTICK_ROOT_PATH, 'yardstick/resources/files/yardstick_key-',
             get_short_key_uuid(self.key_uuid)])
        super(HeatContext, self).__init__()

    def init(self, attrs):     # pragma: no cover
        """initializes itself from the supplied arguments"""
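        # Illustrative sketch (not from the original source) of the attrs
        # this method consumes; the key names match the lookups below, but
        # the concrete values are hypothetical:
        #   {"name": "demo",
        #    "user": "ubuntu",
        #    "image": "yardstick-image",
        #    "flavor": "yardstick-flavor",
        #    "placement_groups": {"pgrp1": {"policy": "availability"}},
        #    "server_groups": {"sgrp1": {"policy": "affinity"}},
        #    "networks": {"test": {...}},
        #    "servers": {"athena": {...}}}
        # Alternatively, "heat_template" / "heat_parameters" point at an
        # external Heat template, in which case the model below is skipped.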
        self.name = attrs["name"]

        self._user = attrs.get("user")

        self.template_file = attrs.get("heat_template")
        if self.template_file:
            self.heat_parameters = attrs.get("heat_parameters")
            return

        self.keypair_name = self.name + "-key"
        self.secgroup_name = self.name + "-secgroup"

        self._image = attrs.get("image")

        self._flavor = attrs.get("flavor")

        self.placement_groups = [PlacementGroup(name, self, pgattrs["policy"])
                                 for name, pgattrs in attrs.get(
                                 "placement_groups", {}).items()]

        self.server_groups = [ServerGroup(name, self, sgattrs["policy"])
                              for name, sgattrs in attrs.get(
                              "server_groups", {}).items()]

        for name, netattrs in attrs["networks"].items():
            network = Network(name, self, netattrs)
            self.networks.append(network)

        for name, serverattrs in attrs["servers"].items():
            server = Server(name, self, serverattrs)
            self.servers.append(server)
            self._server_map[server.dn] = server

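        # Generate an RSA keypair for SSH access to the deployed servers.
        # The private key is written to self.key_filename and the public
        # half to "<key_filename>.pub"; the keypair name is later registered
        # in the Heat template via template.add_keypair() in
        # _add_resources_to_template().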
        rsa_key = paramiko.RSAKey.generate(bits=2048, progress_func=None)
        rsa_key.write_private_key_file(self.key_filename)
        print("Writing %s ..." % self.key_filename)
        with open(self.key_filename + ".pub", "w") as pubkey_file:
            pubkey_file.write(
                "%s %s\n" % (rsa_key.get_name(), rsa_key.get_base64()))
        del rsa_key

    @property
    def image(self):
        """returns application's default image name"""
        return self._image

    @property
    def flavor(self):
        """returns application's default flavor name"""
        return self._flavor

    @property
    def user(self):
        """return login user name corresponding to image"""
        return self._user

    def _add_resources_to_template(self, template):
        """add to the template the resources represented by this context"""
        template.add_keypair(self.keypair_name, self.key_uuid)
        template.add_security_group(self.secgroup_name)

        for network in self.networks:
            template.add_network(network.stack_name)
            template.add_subnet(network.subnet_stack_name, network.stack_name,
                                network.subnet_cidr)

            if network.router:
                template.add_router(network.router.stack_name,
                                    network.router.external_gateway_info,
                                    network.subnet_stack_name)
                template.add_router_interface(network.router.stack_if_name,
                                              network.router.stack_name,
                                              network.subnet_stack_name)

        # create a list of servers sorted by increasing number of
        # placement groups
        list_of_servers = sorted(self.servers,
                                 key=lambda s: len(s.placement_groups))

        #
        # add servers with scheduler hints derived from placement groups
        #

        # create list of servers with availability policy
        availability_servers = []
        for server in list_of_servers:
            for pg in server.placement_groups:
                if pg.policy == "availability":
                    availability_servers.append(server)
                    break

        # add servers with availability policy
        added_servers = []
        for server in availability_servers:
            scheduler_hints = {}
            for pg in server.placement_groups:
                update_scheduler_hints(scheduler_hints, added_servers, pg)
            # workaround for an OpenStack Nova bug; see JIRA YARDSTICK-200
            # for details
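            # With exactly two availability servers, the "different_host"
            # hint is passed as a single host reference rather than a list
            # (or dropped entirely when the list is empty).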
            if len(availability_servers) == 2:
                if not scheduler_hints["different_host"]:
                    scheduler_hints.pop("different_host", None)
                    server.add_to_template(template,
                                           self.networks,
                                           scheduler_hints)
                else:
                    scheduler_hints["different_host"] = \
                        scheduler_hints["different_host"][0]
                    server.add_to_template(template,
                                           self.networks,
                                           scheduler_hints)
            else:
                server.add_to_template(template,
                                       self.networks,
                                       scheduler_hints)
            added_servers.append(server.stack_name)

        # create list of servers with affinity policy
        affinity_servers = []
        for server in list_of_servers:
            for pg in server.placement_groups:
                if pg.policy == "affinity":
                    affinity_servers.append(server)
                    break

        # add servers with affinity policy
        for server in affinity_servers:
            if server.stack_name in added_servers:
                continue
            scheduler_hints = {}
            for pg in server.placement_groups:
                update_scheduler_hints(scheduler_hints, added_servers, pg)
            server.add_to_template(template, self.networks, scheduler_hints)
            added_servers.append(server.stack_name)

        # add server group
        for sg in self.server_groups:
            template.add_server_group(sg.name, sg.policy)

        # add remaining servers with no placement group configured
        for server in list_of_servers:
            # TODO placement_group and server_group should combine
            if not server.placement_groups:
                scheduler_hints = {}
                # affinity/anti-affinity server group
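                # schedule this server into the server group resource that
                # was added to the template above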
                sg = server.server_group
                if sg:
                    scheduler_hints["group"] = {'get_resource': sg.name}
                server.add_to_template(template,
                                       self.networks, scheduler_hints)

    def deploy(self):
        """deploys the Heat template as a stack in the cloud"""
        print("Deploying context '%s'" % self.name)

        heat_template = HeatTemplate(self.name, self.template_file,
                                     self.heat_parameters)

        if self.template_file is None:
            self._add_resources_to_template(heat_template)

        try:
            self.stack = heat_template.create()
        except KeyboardInterrupt:
            sys.exit("\nStack create interrupted")
        except RuntimeError as err:
            sys.exit("error: failed to deploy stack: '%s'" % err.args)
        except Exception as err:
            sys.exit("error: failed to deploy stack: '%s'" % err)

        # copy some vital stack output into server objects
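        # self.stack.outputs maps the output names added to the template
        # (port names, floating IP names) to their resolved values.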
        for server in self.servers:
            if server.ports:
                # TODO(hafe) can only handle one internal network for now
                port = next(iter(server.ports.values()))
                server.private_ip = self.stack.outputs[port["stack_name"]]

            if server.floating_ip:
                server.public_ip = \
                    self.stack.outputs[server.floating_ip["stack_name"]]

        print("Context '%s' deployed" % self.name)

    def undeploy(self):
        """undeploys the stack from the cloud"""
        if self.stack:
            print("Undeploying context '%s'" % self.name)
            self.stack.delete()
            self.stack = None
            print("Context '%s' undeployed" % self.name)

        if os.path.exists(self.key_filename):
            try:
                os.remove(self.key_filename)
                os.remove(self.key_filename + ".pub")
            except OSError:
                LOG.exception("Error removing key file %s", self.key_filename)

    def _get_server(self, attr_name):
        """lookup server info by name from context
        attr_name: either a name for a server created by yardstick or a dict
        with attribute name mapping when using external heat templates
        """
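        # Illustrative (hypothetical) forms of attr_name:
        #   "athena.demo"  -- looked up directly in self._server_map
        #   {"name": "tg.demo",
        #    "public_ip_attr": "tg_public_ip",
        #    "private_ip_attr": "tg_private_ip"}  -- resolved from the stack
        #   outputs of an externally supplied Heat template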
        key_filename = pkg_resources.resource_filename(
            'yardstick.resources',
            'files/yardstick_key-' + get_short_key_uuid(self.key_uuid))

        if isinstance(attr_name, collections.Mapping):
            cname = attr_name["name"].split(".")[1]
            if cname != self.name:
                return None

            public_ip = None
            private_ip = None
            if "public_ip_attr" in attr_name:
                public_ip = self.stack.outputs[attr_name["public_ip_attr"]]
            if "private_ip_attr" in attr_name:
                private_ip = self.stack.outputs[
                    attr_name["private_ip_attr"]]

            # Create a dummy server instance for holding the *_ip attributes
            server = Server(attr_name["name"].split(".")[0], self, {})
            server.public_ip = public_ip
            server.private_ip = private_ip
        else:
            if attr_name not in self._server_map:
                return None
            server = self._server_map[attr_name]

        if server is None:
            return None

        result = {
            "user": server.context.user,
            "key_filename": key_filename,
            "private_ip": server.private_ip
        }
        # Target server may only have private_ip
        if server.public_ip:
            result["ip"] = server.public_ip

        return result
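
# Illustrative usage sketch (not part of the original module); the attribute
# values and server/context names below are hypothetical:
#
#   context = HeatContext()
#   context.init({"name": "demo", "user": "ubuntu",
#                 "image": "yardstick-image", "flavor": "yardstick-flavor",
#                 "networks": {"test": {}}, "servers": {"athena": {}}})
#   context.deploy()
#   athena = context._get_server("athena.demo")  # user, key_filename, ip
#   context.undeploy()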