1 ##############################################################################
2 # Copyright (c) 2015 Ericsson AB and others.
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
from __future__ import absolute_import
from __future__ import print_function

import collections
import errno
import logging
import os
import uuid
from collections import OrderedDict

import ipaddress
import pkg_resources

from yardstick.benchmark.contexts.base import Context
from yardstick.benchmark.contexts.model import Network
from yardstick.benchmark.contexts.model import PlacementGroup, ServerGroup
from yardstick.benchmark.contexts.model import Server
from yardstick.benchmark.contexts.model import update_scheduler_hints
from yardstick.common import constants as consts
from yardstick.common.openstack_utils import get_neutron_client
from yardstick.common.utils import source_env
from yardstick.orchestrator.heat import HeatTemplate, get_short_key_uuid
from yardstick.ssh import SSH
34 LOG = logging.getLogger(__name__)
36 DEFAULT_HEAT_TIMEOUT = 3600
39 def join_args(sep, *args):
47 class HeatContext(Context):
48 """Class that represents a context in the logical model"""
50 __context_type__ = "Heat"
55 self.networks = OrderedDict()
56 self.heat_timeout = None
58 self.placement_groups = []
59 self.server_groups = []
60 self.keypair_name = None
61 self.secgroup_name = None
68 self.template_file = None
69 self.heat_parameters = None
70 self.neutron_client = None
71 # generate an uuid to identify yardstick_key
72 # the first 8 digits of the uuid will be used
73 self.key_uuid = uuid.uuid4()
74 self.heat_timeout = None
75 self.key_filename = ''.join(
76 [consts.YARDSTICK_ROOT_PATH, 'yardstick/resources/files/yardstick_key-',
77 get_short_key_uuid(self.key_uuid)])
78 super(HeatContext, self).__init__()
81 def assign_external_network(networks):
82 sorted_networks = sorted(networks.items())
83 external_network = os.environ.get("EXTERNAL_NETWORK", "net04_ext")
85 have_external_network = any(net.get("external_network") for net in networks.values())
86 if not have_external_network:
87 # try looking for mgmt network first
89 networks['mgmt']["external_network"] = external_network
92 # otherwise assign it to first network using os.environ
93 sorted_networks[0][1]["external_network"] = external_network
95 return sorted_networks
97 def init(self, attrs):
98 self.check_environment()
99 """initializes itself from the supplied arguments"""
100 self.name = attrs["name"]
102 self._user = attrs.get("user")
104 self.template_file = attrs.get("heat_template")
105 if self.template_file:
106 self.heat_parameters = attrs.get("heat_parameters")
109 self.keypair_name = h_join(self.name, "key")
110 self.secgroup_name = h_join(self.name, "secgroup")
112 self._image = attrs.get("image")
114 self._flavor = attrs.get("flavor")
116 self.heat_timeout = attrs.get("timeout", DEFAULT_HEAT_TIMEOUT)
118 self.placement_groups = [PlacementGroup(name, self, pg_attrs["policy"])
119 for name, pg_attrs in attrs.get(
120 "placement_groups", {}).items()]
122 self.server_groups = [ServerGroup(name, self, sg_attrs["policy"])
123 for name, sg_attrs in attrs.get(
124 "server_groups", {}).items()]
126 # we have to do this first, because we are injecting external_network
128 sorted_networks = self.assign_external_network(attrs["networks"])
130 self.networks = OrderedDict(
131 (name, Network(name, self, net_attrs)) for name, net_attrs in
134 for name, server_attrs in sorted(attrs["servers"].items()):
135 server = Server(name, self, server_attrs)
136 self.servers.append(server)
137 self._server_map[server.dn] = server
140 SSH.gen_keys(self.key_filename)
142 def check_environment(self):
144 os.environ['OS_AUTH_URL']
147 source_env(consts.OPENRC)
149 if e.errno != errno.EEXIST:
150 LOG.error('OPENRC file not found')
153 LOG.error('OS_AUTH_URL not found')
157 """returns application's default image name"""
162 """returns application's default flavor name"""
167 """return login user name corresponding to image"""
170 def _add_resources_to_template(self, template):
171 """add to the template the resources represented by this context"""
174 if isinstance(self.flavor, dict):
175 flavor = self.flavor.setdefault("name", self.name + "-flavor")
176 template.add_flavor(**self.flavor)
177 self.flavors.add(flavor)
179 template.add_keypair(self.keypair_name, self.key_uuid)
180 template.add_security_group(self.secgroup_name)
182 for network in self.networks.values():
183 template.add_network(network.stack_name,
184 network.physical_network,
186 network.segmentation_id,
187 network.port_security_enabled,
188 network.network_type)
189 template.add_subnet(network.subnet_stack_name, network.stack_name,
195 template.add_router(network.router.stack_name,
196 network.router.external_gateway_info,
197 network.subnet_stack_name)
198 template.add_router_interface(network.router.stack_if_name,
199 network.router.stack_name,
200 network.subnet_stack_name)
202 # create a list of servers sorted by increasing no of placement groups
203 list_of_servers = sorted(self.servers,
204 key=lambda s: len(s.placement_groups))
207 # add servers with scheduler hints derived from placement groups
210 # create list of servers with availability policy
211 availability_servers = []
212 for server in list_of_servers:
213 for pg in server.placement_groups:
214 if pg.policy == "availability":
215 availability_servers.append(server)
218 for server in availability_servers:
219 if isinstance(server.flavor, dict):
221 self.flavors.add(server.flavor["name"])
223 self.flavors.add(h_join(server.stack_name, "flavor"))
225 # add servers with availability policy
227 for server in availability_servers:
229 for pg in server.placement_groups:
230 update_scheduler_hints(scheduler_hints, added_servers, pg)
231 # workaround for openstack nova bug, check JIRA: YARDSTICK-200
233 if len(availability_servers) == 2:
234 if not scheduler_hints["different_host"]:
235 scheduler_hints.pop("different_host", None)
236 server.add_to_template(template,
237 list(self.networks.values()),
240 scheduler_hints["different_host"] = \
241 scheduler_hints["different_host"][0]
242 server.add_to_template(template,
243 list(self.networks.values()),
246 server.add_to_template(template,
247 list(self.networks.values()),
249 added_servers.append(server.stack_name)
251 # create list of servers with affinity policy
252 affinity_servers = []
253 for server in list_of_servers:
254 for pg in server.placement_groups:
255 if pg.policy == "affinity":
256 affinity_servers.append(server)
259 # add servers with affinity policy
260 for server in affinity_servers:
261 if server.stack_name in added_servers:
264 for pg in server.placement_groups:
265 update_scheduler_hints(scheduler_hints, added_servers, pg)
266 server.add_to_template(template, list(self.networks.values()),
268 added_servers.append(server.stack_name)
271 for sg in self.server_groups:
272 template.add_server_group(sg.name, sg.policy)
274 # add remaining servers with no placement group configured
275 for server in list_of_servers:
276 # TODO placement_group and server_group should combine
277 if not server.placement_groups:
279 # affinity/anti-aff server group
280 sg = server.server_group
282 scheduler_hints["group"] = {'get_resource': sg.name}
283 server.add_to_template(template,
284 list(self.networks.values()),
287 def get_neutron_info(self):
288 if not self.neutron_client:
289 self.neutron_client = get_neutron_client()
291 networks = self.neutron_client.list_networks()
292 for network in self.networks.values():
293 for neutron_net in networks['networks']:
294 if neutron_net['name'] == network.stack_name:
295 network.segmentation_id = neutron_net.get('provider:segmentation_id')
296 # we already have physical_network
297 # network.physical_network = neutron_net.get('provider:physical_network')
298 network.network_type = neutron_net.get('provider:network_type')
299 network.neutron_info = neutron_net
302 """deploys template into a stack using cloud"""
303 LOG.info("Deploying context '%s' START", self.name)
305 heat_template = HeatTemplate(self.name, self.template_file,
306 self.heat_parameters)
308 if self.template_file is None:
309 self._add_resources_to_template(heat_template)
312 self.stack = heat_template.create(block=True,
313 timeout=self.heat_timeout)
314 except KeyboardInterrupt:
315 raise SystemExit("\nStack create interrupted")
317 LOG.exception("stack failed")
318 # let the other failures happen, we want stack trace
321 # TODO: use Neutron to get segmentation-id
322 self.get_neutron_info()
324 # copy some vital stack output into server objects
325 for server in self.servers:
327 self.add_server_port(server)
329 if server.floating_ip:
331 self.stack.outputs[server.floating_ip["stack_name"]]
333 LOG.info("Deploying context '%s' DONE", self.name)
335 def add_server_port(self, server):
336 # use private ip from first port in first network
338 private_port = next(iter(server.ports.values()))[0]
340 LOG.exception("Unable to find first private port in %s", server.ports)
342 server.private_ip = self.stack.outputs[private_port["stack_name"]]
343 server.interfaces = {}
344 for network_name, ports in server.ports.items():
346 # port['port'] is either port name from mapping or default network_name
347 server.interfaces[port['port']] = self.make_interface_dict(network_name,
352 def make_interface_dict(self, network_name, port, stack_name, outputs):
353 private_ip = outputs[stack_name]
354 mac_address = outputs[h_join(stack_name, "mac_address")]
355 # these are attributes of the network, not the port
356 output_subnet_cidr = outputs[h_join(self.name, network_name,
359 # these are attributes of the network, not the port
360 output_subnet_gateway = outputs[h_join(self.name, network_name,
361 'subnet', 'gateway_ip')]
364 "private_ip": private_ip,
365 "subnet_id": outputs[h_join(stack_name, "subnet_id")],
366 "subnet_cidr": output_subnet_cidr,
367 "network": str(ipaddress.ip_network(output_subnet_cidr).network_address),
368 "netmask": str(ipaddress.ip_network(output_subnet_cidr).netmask),
369 "gateway_ip": output_subnet_gateway,
370 "mac_address": mac_address,
371 "device_id": outputs[h_join(stack_name, "device_id")],
372 "network_id": outputs[h_join(stack_name, "network_id")],
373 # this should be == vld_id for NSB tests
374 "network_name": network_name,
375 # to match vnf_generic
376 "local_mac": mac_address,
377 "local_ip": private_ip,
381 """undeploys stack from cloud"""
383 LOG.info("Undeploying context '%s' START", self.name)
386 LOG.info("Undeploying context '%s' DONE", self.name)
388 if os.path.exists(self.key_filename):
390 os.remove(self.key_filename)
391 os.remove(self.key_filename + ".pub")
393 LOG.exception("Key filename %s", self.key_filename)
395 super(HeatContext, self).undeploy()
398 def generate_routing_table(server):
401 "network": intf["network"],
402 "netmask": intf["netmask"],
404 # We have to encode a None gateway as '' for Jinja2 to YAML conversion
405 "gateway": intf["gateway_ip"] if intf["gateway_ip"] else '',
407 for name, intf in server.interfaces.items()
411 def _get_server(self, attr_name):
412 """lookup server info by name from context
413 attr_name: either a name for a server created by yardstick or a dict
414 with attribute name mapping when using external heat templates
416 if isinstance(attr_name, collections.Mapping):
417 node_name, cname = self.split_name(attr_name['name'])
418 if cname is None or cname != self.name:
421 # Create a dummy server instance for holding the *_ip attributes
422 server = Server(node_name, self, {})
423 server.public_ip = self.stack.outputs.get(
424 attr_name.get("public_ip_attr", object()), None)
426 server.private_ip = self.stack.outputs.get(
427 attr_name.get("private_ip_attr", object()), None)
429 server = self._server_map.get(attr_name, None)
433 pkey = pkg_resources.resource_string(
434 'yardstick.resources',
435 h_join('files/yardstick_key', get_short_key_uuid(self.key_uuid))).decode('utf-8')
438 "user": server.context.user,
440 "private_ip": server.private_ip,
441 "interfaces": server.interfaces,
442 "routing_table": self.generate_routing_table(server),
443 # empty IPv6 routing table
445 # we want to save the contex name so we can generate pod.yaml
448 # Target server may only have private_ip
450 result["ip"] = server.public_ip
454 def _get_network(self, attr_name):
455 if not isinstance(attr_name, collections.Mapping):
456 network = self.networks.get(attr_name, None)
459 # Only take the first key, value
460 key, value = next(iter(attr_name.items()), (None, None))
463 network_iter = (n for n in self.networks.values() if getattr(n, key) == value)
464 network = next(network_iter, None)
470 "name": network.name,
471 "segmentation_id": network.segmentation_id,
472 "network_type": network.network_type,
473 "physical_network": network.physical_network,