##############################################################################
# Copyright (c) 2015 Ericsson AB and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

from __future__ import absolute_import
from __future__ import print_function

import collections
import logging
import os
import sys
import uuid

import paramiko
import pkg_resources

from yardstick.benchmark.contexts.base import Context
from yardstick.benchmark.contexts.model import Network
from yardstick.benchmark.contexts.model import PlacementGroup, ServerGroup
from yardstick.benchmark.contexts.model import Server
from yardstick.benchmark.contexts.model import update_scheduler_hints
from yardstick.orchestrator.heat import HeatTemplate, get_short_key_uuid
from yardstick.common.constants import YARDSTICK_ROOT_PATH

LOG = logging.getLogger(__name__)


class HeatContext(Context):
    """Class that represents a context in the logical model"""

    __context_type__ = "Heat"

    def __init__(self):
        self.name = None
        self.stack = None
        self.networks = []
        self.servers = []
        self.placement_groups = []
        self.server_groups = []
        self.keypair_name = None
        self.secgroup_name = None
        self._server_map = {}
        self._image = None
        self._flavor = None
        self._user = None
        self.template_file = None
        self.heat_parameters = None
        # generate a UUID to identify yardstick_key;
        # only the first 8 characters of the UUID are used
        self.key_uuid = uuid.uuid4()
        self.key_filename = ''.join(
            [YARDSTICK_ROOT_PATH, 'yardstick/resources/files/yardstick_key-',
             get_short_key_uuid(self.key_uuid)])
        super(HeatContext, self).__init__()

    def assign_external_network(self, networks):
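        """assign the external network to the first network (in name order)
        if none of the given networks is already marked as external

        The network name is taken from the EXTERNAL_NETWORK environment
        variable and defaults to "net04_ext".
        """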
        sorted_networks = sorted(networks.items())
        external_network = os.environ.get("EXTERNAL_NETWORK", "net04_ext")
        have_external_network = [(name, net)
                                 for name, net in sorted_networks if
                                 net.get("external_network")]
        # no external network defined, so assign one to the first network
        # using the value from os.environ
        if sorted_networks and not have_external_network:
            sorted_networks[0][1]["external_network"] = external_network

    def init(self, attrs):     # pragma: no cover
        """initializes itself from the supplied arguments"""
        self.name = attrs["name"]

        self._user = attrs.get("user")

        self.template_file = attrs.get("heat_template")
        if self.template_file:
            self.heat_parameters = attrs.get("heat_parameters")
            return

        self.keypair_name = self.name + "-key"
        self.secgroup_name = self.name + "-secgroup"

        self._image = attrs.get("image")

        self._flavor = attrs.get("flavor")

        self.placement_groups = [PlacementGroup(name, self, pgattrs["policy"])
                                 for name, pgattrs in attrs.get(
                                 "placement_groups", {}).items()]

        self.server_groups = [ServerGroup(name, self, sgattrs["policy"])
                              for name, sgattrs in attrs.get(
                              "server_groups", {}).items()]

        self.assign_external_network(attrs["networks"])

        self.networks = [Network(name, self, netattrs) for name, netattrs in
                         sorted(attrs["networks"].items())]

        for name, serverattrs in attrs["servers"].items():
            server = Server(name, self, serverattrs)
            self.servers.append(server)
            self._server_map[server.dn] = server

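        # generate an RSA key pair for this context: the private key file is
        # handed out by _get_server() as "key_filename" for SSH access, and
        # the same key_uuid is passed to template.add_keypair() when the
        # Heat template is built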
        rsa_key = paramiko.RSAKey.generate(bits=2048, progress_func=None)
        rsa_key.write_private_key_file(self.key_filename)
        print("Writing %s ..." % self.key_filename)
        with open(self.key_filename + ".pub", "w") as pubkey_file:
            pubkey_file.write(
                "%s %s\n" % (rsa_key.get_name(), rsa_key.get_base64()))
        del rsa_key

    @property
    def image(self):
        """returns application's default image name"""
        return self._image

    @property
    def flavor(self):
        """returns application's default flavor name"""
        return self._flavor

    @property
    def user(self):
        """return login user name corresponding to image"""
        return self._user

    def _add_resources_to_template(self, template):
        """add to the template the resources represented by this context"""
        template.add_keypair(self.keypair_name, self.key_uuid)
        template.add_security_group(self.secgroup_name)

        for network in self.networks:
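            # physical_network and provider are passed through so the network
            # can be created as a provider network (used for SR-IOV support)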
            template.add_network(network.stack_name,
                                 network.physical_network,
                                 network.provider)
            template.add_subnet(network.subnet_stack_name,
                                network.stack_name,
                                network.subnet_cidr)

            if network.router:
                template.add_router(network.router.stack_name,
                                    network.router.external_gateway_info,
                                    network.subnet_stack_name)
                template.add_router_interface(network.router.stack_if_name,
                                              network.router.stack_name,
                                              network.subnet_stack_name)

        # create a list of servers sorted by increasing number of
        # placement groups
        list_of_servers = sorted(self.servers,
                                 key=lambda s: len(s.placement_groups))

        #
        # add servers with scheduler hints derived from placement groups
        #

        # create list of servers with availability policy
        availability_servers = []
        for server in list_of_servers:
            for pg in server.placement_groups:
                if pg.policy == "availability":
                    availability_servers.append(server)
                    break

        # add servers with availability policy
        added_servers = []
        for server in availability_servers:
            scheduler_hints = {}
            for pg in server.placement_groups:
                update_scheduler_hints(scheduler_hints, added_servers, pg)
            # workaround for an OpenStack Nova bug, see JIRA: YARDSTICK-200
            # for details
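            # with exactly two availability servers, collapse the
            # "different_host" hint from a list to its single entry (or drop
            # the hint entirely when the list is empty) before adding the
            # server to the template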
            if len(availability_servers) == 2:
                if not scheduler_hints["different_host"]:
                    scheduler_hints.pop("different_host", None)
                    server.add_to_template(template,
                                           self.networks,
                                           scheduler_hints)
                else:
                    scheduler_hints["different_host"] = \
                        scheduler_hints["different_host"][0]
                    server.add_to_template(template,
                                           self.networks,
                                           scheduler_hints)
            else:
                server.add_to_template(template,
                                       self.networks,
                                       scheduler_hints)
            added_servers.append(server.stack_name)

        # create list of servers with affinity policy
        affinity_servers = []
        for server in list_of_servers:
            for pg in server.placement_groups:
                if pg.policy == "affinity":
                    affinity_servers.append(server)
                    break

        # add servers with affinity policy
        for server in affinity_servers:
            if server.stack_name in added_servers:
                continue
            scheduler_hints = {}
            for pg in server.placement_groups:
                update_scheduler_hints(scheduler_hints, added_servers, pg)
            server.add_to_template(template, self.networks, scheduler_hints)
            added_servers.append(server.stack_name)

        # add server groups
        for sg in self.server_groups:
            template.add_server_group(sg.name, sg.policy)

        # add remaining servers with no placement group configured
        for server in list_of_servers:
            # TODO: placement_group and server_group should be combined
            if not server.placement_groups:
                scheduler_hints = {}
                # affinity/anti-affinity server group
                sg = server.server_group
                if sg:
                    scheduler_hints["group"] = {'get_resource': sg.name}
                server.add_to_template(template,
                                       self.networks, scheduler_hints)

    def deploy(self):
        """deploys the template into a stack using the cloud"""
        print("Deploying context '%s'" % self.name)

        heat_template = HeatTemplate(self.name, self.template_file,
                                     self.heat_parameters)

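        # when no external heat template file was given, generate the
        # resources (keypair, security group, networks, servers) from
        # this context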
        if self.template_file is None:
            self._add_resources_to_template(heat_template)

        try:
            self.stack = heat_template.create()
        except KeyboardInterrupt:
            sys.exit("\nStack create interrupted")
        except RuntimeError as err:
            sys.exit("error: failed to deploy stack: '%s'" % err.args)
        except Exception as err:
            sys.exit("error: failed to deploy stack: '%s'" % err)

        # copy some vital stack output into server objects
        for server in self.servers:
            if server.ports:
                # TODO(hafe) can only handle one internal network for now
                port = next(iter(server.ports.values()))
                server.private_ip = self.stack.outputs[port["stack_name"]]

            if server.floating_ip:
                server.public_ip = \
                    self.stack.outputs[server.floating_ip["stack_name"]]

        print("Context '%s' deployed" % self.name)

    def undeploy(self):
        """undeploys the stack from the cloud"""
        if self.stack:
            print("Undeploying context '%s'" % self.name)
            self.stack.delete()
            self.stack = None
            print("Context '%s' undeployed" % self.name)

        if os.path.exists(self.key_filename):
            try:
                os.remove(self.key_filename)
                os.remove(self.key_filename + ".pub")
            except OSError:
                LOG.exception("Error removing key file %s", self.key_filename)

        super(HeatContext, self).undeploy()

    def _get_server(self, attr_name):
        """look up server info by name from context
        attr_name: either a name for a server created by yardstick or a dict
        with an attribute-name mapping when using external heat templates
        """
        key_filename = pkg_resources.resource_filename(
            'yardstick.resources',
            'files/yardstick_key-' + get_short_key_uuid(self.key_uuid))

        if isinstance(attr_name, collections.Mapping):
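            # a dict means the server comes from an external heat template:
            # resolve its IP addresses from the named stack outputs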
            cname = attr_name["name"].split(".")[1]
            if cname != self.name:
                return None

            public_ip = None
            private_ip = None
            if "public_ip_attr" in attr_name:
                public_ip = self.stack.outputs[attr_name["public_ip_attr"]]
            if "private_ip_attr" in attr_name:
                private_ip = self.stack.outputs[
                    attr_name["private_ip_attr"]]

            # Create a dummy server instance for holding the *_ip attributes
            server = Server(attr_name["name"].split(".")[0], self, {})
            server.public_ip = public_ip
            server.private_ip = private_ip
        else:
            if attr_name not in self._server_map:
                return None
            server = self._server_map[attr_name]

        if server is None:
            return None

        result = {
            "user": server.context.user,
            "key_filename": key_filename,
            "private_ip": server.private_ip
        }
        # Target server may only have private_ip
        if server.public_ip:
            result["ip"] = server.public_ip

        return result