yardstick/benchmark/contexts/heat.py
##############################################################################
# Copyright (c) 2015 Ericsson AB and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

import sys
import pkg_resources

from yardstick.benchmark.contexts.base import Context
from yardstick.benchmark.contexts.model import Server
from yardstick.benchmark.contexts.model import PlacementGroup
from yardstick.benchmark.contexts.model import Network
from yardstick.benchmark.contexts.model import update_scheduler_hints
from yardstick.orchestrator.heat import HeatTemplate


class HeatContext(Context):
    '''Class that represents a context in the logical model'''

    __context_type__ = "Heat"

    def __init__(self):
        self.name = None
        self.stack = None
        self.networks = []
        self.servers = []
        self.placement_groups = []
        self.keypair_name = None
        self.secgroup_name = None
        self._server_map = {}
        self._image = None
        self._flavor = None
        self._user = None
        self.template_file = None
        self.heat_parameters = None
        super(HeatContext, self).__init__()

    def init(self, attrs):
        '''initializes itself from the supplied arguments'''
        self.name = attrs["name"]

        if "user" in attrs:
            self._user = attrs["user"]

        if "heat_template" in attrs:
            self.template_file = attrs["heat_template"]
            self.heat_parameters = attrs.get("heat_parameters", None)
            return

        self.keypair_name = self.name + "-key"
        self.secgroup_name = self.name + "-secgroup"

        if "image" in attrs:
            self._image = attrs["image"]

        if "flavor" in attrs:
            self._flavor = attrs["flavor"]

        if "placement_groups" in attrs:
            for name, pgattrs in attrs["placement_groups"].items():
                pg = PlacementGroup(name, self, pgattrs["policy"])
                self.placement_groups.append(pg)

        for name, netattrs in attrs["networks"].items():
            network = Network(name, self, netattrs)
            self.networks.append(network)

        for name, serverattrs in attrs["servers"].items():
            server = Server(name, self, serverattrs)
            self.servers.append(server)
            self._server_map[server.dn] = server

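    # Illustrative only, not part of the original module: judging from the
    # keys read in init() above, the attrs dict for a generated context might
    # look roughly like the sketch below.  The per-network and per-server
    # fields ("cidr", "floating_ip") are assumptions handled by the Network
    # and Server model classes, not by this file.
    #
    #   {
    #       "name": "demo",
    #       "user": "cirros",
    #       "image": "cirros-0.3.3",
    #       "flavor": "m1.tiny",
    #       "placement_groups": {"pgrp1": {"policy": "availability"}},
    #       "networks": {"test": {"cidr": "10.0.1.0/24"}},
    #       "servers": {"athena": {"floating_ip": True}}
    #   }
    #
    # Alternatively, "heat_template" (plus optional "heat_parameters") selects
    # an externally provided template, in which case the generated resources
    # above are skipped.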
    @property
    def image(self):
        '''returns application's default image name'''
        return self._image

    @property
    def flavor(self):
        '''returns application's default flavor name'''
        return self._flavor

    @property
    def user(self):
        '''return login user name corresponding to image'''
        return self._user

    def _add_resources_to_template(self, template):
        '''add to the template the resources represented by this context'''
        template.add_keypair(self.keypair_name)
        template.add_security_group(self.secgroup_name)

        for network in self.networks:
            template.add_network(network.stack_name)
            template.add_subnet(network.subnet_stack_name, network.stack_name,
                                network.subnet_cidr)

            if network.router:
                template.add_router(network.router.stack_name,
                                    network.router.external_gateway_info,
                                    network.subnet_stack_name)
                template.add_router_interface(network.router.stack_if_name,
                                              network.router.stack_name,
                                              network.subnet_stack_name)

        # create a list of servers sorted by increasing number of
        # placement groups
        list_of_servers = sorted(self.servers,
                                 key=lambda s: len(s.placement_groups))

        #
        # add servers with scheduler hints derived from placement groups
        #

        # create list of servers with availability policy
        availability_servers = []
        for server in list_of_servers:
            for pg in server.placement_groups:
                if pg.policy == "availability":
                    availability_servers.append(server)
                    break

        # add servers with availability policy
        added_servers = []
        for server in availability_servers:
            scheduler_hints = {}
            for pg in server.placement_groups:
                update_scheduler_hints(scheduler_hints, added_servers, pg)
            # workaround for an OpenStack Nova bug, see JIRA: YARDSTICK-200
            # for details
            if len(availability_servers) == 2:
                if len(scheduler_hints["different_host"]) == 0:
                    scheduler_hints.pop("different_host", None)
                    server.add_to_template(template,
                                           self.networks,
                                           scheduler_hints)
                    added_servers.append(server.stack_name)
                else:
                    scheduler_hints["different_host"] = \
                        scheduler_hints["different_host"][0]
                    server.add_to_template(template,
                                           self.networks,
                                           scheduler_hints)
                    added_servers.append(server.stack_name)
            else:
                server.add_to_template(template,
                                       self.networks,
                                       scheduler_hints)
                added_servers.append(server.stack_name)

        # create list of servers with affinity policy
        affinity_servers = []
        for server in list_of_servers:
            for pg in server.placement_groups:
                if pg.policy == "affinity":
                    affinity_servers.append(server)
                    break

        # add servers with affinity policy
        for server in affinity_servers:
            if server.stack_name in added_servers:
                continue
            scheduler_hints = {}
            for pg in server.placement_groups:
                update_scheduler_hints(scheduler_hints, added_servers, pg)
            server.add_to_template(template, self.networks, scheduler_hints)
            added_servers.append(server.stack_name)

        # add remaining servers with no placement group configured
        for server in list_of_servers:
            if len(server.placement_groups) == 0:
                server.add_to_template(template, self.networks, {})

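    # Illustrative only, not part of the original module: the shape of the
    # scheduler_hints dict is an assumption based on the two-server
    # workaround above.  update_scheduler_hints() (imported from the model
    # module) is expected to accumulate Nova scheduler hints per policy,
    # roughly like:
    #
    #   {"different_host": ["demo-ares", "demo-hermes"]}   # availability
    #   {"same_host": ["demo-ares"]}                        # affinity
    #
    # where the list values are the stack names of servers already added to
    # the template; the server names themselves are hypothetical.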
    def deploy(self):
        '''deploys template into a stack using cloud'''
        print "Deploying context '%s'" % self.name

        heat_template = HeatTemplate(self.name, self.template_file,
                                     self.heat_parameters)

        if self.template_file is None:
            self._add_resources_to_template(heat_template)

        try:
            self.stack = heat_template.create()
        except KeyboardInterrupt:
            sys.exit("\nStack create interrupted")
        except RuntimeError as err:
            sys.exit("error: failed to deploy stack: '%s'" % err.args)
        except Exception as err:
            sys.exit("error: failed to deploy stack: '%s'" % err)

        # copy some vital stack output into server objects
        for server in self.servers:
            if len(server.ports) > 0:
                # TODO(hafe) can only handle one internal network for now
                port = server.ports.values()[0]
                server.private_ip = self.stack.outputs[port["stack_name"]]

            if server.floating_ip:
                server.public_ip = \
                    self.stack.outputs[server.floating_ip["stack_name"]]

        print "Context '%s' deployed" % self.name

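    # Illustrative only, not part of the original module: self.stack.outputs
    # is assumed to be a plain dict keyed by the stack names of the port and
    # floating-ip resources, which is what the loop above indexes into, e.g.
    #
    #   {"athena-test-port": "10.0.1.5", "athena-fip": "172.16.0.100"}
    #
    # The key naming is hypothetical; the actual names come from the Server
    # and Network model classes when the template is generated.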
    def undeploy(self):
        '''undeploys stack from cloud'''
        if self.stack:
            print "Undeploying context '%s'" % self.name
            self.stack.delete()
            self.stack = None
            print "Context '%s' undeployed" % self.name

    def _get_server(self, attr_name):
        '''lookup server info by name from context
        attr_name: either a name for a server created by yardstick or a dict
        with attribute name mapping when using external heat templates
        '''
        key_filename = pkg_resources.resource_filename(
            'yardstick.resources', 'files/yardstick_key')

        if type(attr_name) is dict:
            cname = attr_name["name"].split(".")[1]
            if cname != self.name:
                return None

            public_ip = None
            private_ip = None
            if "public_ip_attr" in attr_name:
                public_ip = self.stack.outputs[attr_name["public_ip_attr"]]
            if "private_ip_attr" in attr_name:
                private_ip = self.stack.outputs[
                    attr_name["private_ip_attr"]]

            # Create a dummy server instance for holding the *_ip attributes
            server = Server(attr_name["name"].split(".")[0], self, {})
            server.public_ip = public_ip
            server.private_ip = private_ip
        else:
            if attr_name not in self._server_map:
                return None
            server = self._server_map[attr_name]

        if server is None:
            return None

        result = {
            "user": server.context.user,
            "key_filename": key_filename,
            "private_ip": server.private_ip
        }
        # Target server may only have private_ip
        if server.public_ip:
            result["ip"] = server.public_ip

        return result
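
# Illustrative only, not part of the original module: the two lookup forms
# accepted by _get_server().  For a server created by this context the key is
# the distinguished name stored in _server_map (assumed to be of the form
# "<server>.<context>"); for an external heat template a mapping of output
# attribute names is passed instead.  All names below are hypothetical.
#
#   ctx._get_server("athena.demo")
#   ctx._get_server({
#       "name": "node1.demo",
#       "public_ip_attr": "node1_public_ip",
#       "private_ip_attr": "node1_private_ip",
#   })
#
# Both return a dict with "user", "key_filename" and "private_ip", plus "ip"
# when the server has a floating IP.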