1 ##############################################################################
2 # Copyright (c) 2015 Ericsson AB and others.
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
10 from __future__ import absolute_import
11 from __future__ import print_function
17 from collections import OrderedDict
22 from yardstick.benchmark.contexts.base import Context
23 from yardstick.benchmark.contexts.model import Network
24 from yardstick.benchmark.contexts.model import PlacementGroup, ServerGroup
25 from yardstick.benchmark.contexts.model import Server
26 from yardstick.benchmark.contexts.model import update_scheduler_hints
27 from yardstick.common import exceptions as y_exc
28 from yardstick.common.openstack_utils import get_neutron_client
29 from yardstick.orchestrator.heat import HeatStack
30 from yardstick.orchestrator.heat import HeatTemplate
31 from yardstick.common import constants as consts
32 from yardstick.common import utils
33 from yardstick.common.utils import source_env
34 from yardstick.ssh import SSH
# Module-level logger, keyed by module name per the logging convention.
LOG = logging.getLogger(__name__)

# Default time budget (in seconds) for Heat stack create/delete operations;
# can be overridden per-task via the context's "timeout" attribute.
DEFAULT_HEAT_TIMEOUT = 3600
41 def join_args(sep, *args):
class HeatContext(Context):
    """Class that represents a context in the logical model"""

    __context_type__ = "Heat"

    def __init__(self):
        # the deployed HeatStack wrapper; None until deploy() succeeds
        self.stack = None
        self.networks = OrderedDict()
        self.heat_timeout = None
        self.servers = []
        self.placement_groups = []
        self.server_groups = []
        self.keypair_name = None
        self.secgroup_name = None
        # map of server distinguished-name -> Server model, for lookups
        self._server_map = {}
        # raw attrs from the task file, kept for pod.yaml generation
        self.attrs = {}
        self._image = None
        self._flavor = None
        # set of flavor names this context creates/references
        self.flavors = set()
        self._user = None
        self.template_file = None
        self.heat_parameters = None
        # lazily created in get_neutron_info()
        self.neutron_client = None
        # NOTE: the original initialized heat_timeout twice; the duplicate
        # assignment has been removed.
        self.key_filename = None
        super(HeatContext, self).__init__()
77 def assign_external_network(networks):
78 sorted_networks = sorted(networks.items())
79 external_network = os.environ.get("EXTERNAL_NETWORK", "net04_ext")
81 have_external_network = any(net.get("external_network") for net in networks.values())
82 if not have_external_network:
83 # try looking for mgmt network first
85 networks['mgmt']["external_network"] = external_network
88 # otherwise assign it to first network using os.environ
89 sorted_networks[0][1]["external_network"] = external_network
91 return sorted_networks
93 def init(self, attrs):
94 """Initializes itself from the supplied arguments"""
95 super(HeatContext, self).init(attrs)
97 self.check_environment()
98 self._user = attrs.get("user")
100 self.template_file = attrs.get("heat_template")
102 self.heat_timeout = attrs.get("timeout", DEFAULT_HEAT_TIMEOUT)
103 if self.template_file:
104 self.heat_parameters = attrs.get("heat_parameters")
107 self.keypair_name = h_join(self.name, "key")
108 self.secgroup_name = h_join(self.name, "secgroup")
110 self._image = attrs.get("image")
112 self._flavor = attrs.get("flavor")
114 self.placement_groups = [PlacementGroup(name, self, pg_attrs["policy"])
115 for name, pg_attrs in attrs.get(
116 "placement_groups", {}).items()]
118 self.server_groups = [ServerGroup(name, self, sg_attrs["policy"])
119 for name, sg_attrs in attrs.get(
120 "server_groups", {}).items()]
122 # we have to do this first, because we are injecting external_network
124 sorted_networks = self.assign_external_network(attrs["networks"])
126 self.networks = OrderedDict(
127 (name, Network(name, self, net_attrs)) for name, net_attrs in
130 for name, server_attrs in sorted(attrs["servers"].items()):
131 server = Server(name, self, server_attrs)
132 self.servers.append(server)
133 self._server_map[server.dn] = server
137 self.key_filename = ''.join(
138 [consts.YARDSTICK_ROOT_PATH,
139 'yardstick/resources/files/yardstick_key-',
141 # Permissions may have changed since creation; this can be fixed. If we
142 # overwrite the file, we lose future access to VMs using this key.
143 # As long as the file exists, even if it is unreadable, keep it intact
144 if not os.path.exists(self.key_filename):
145 SSH.gen_keys(self.key_filename)
147 def check_environment(self):
149 os.environ['OS_AUTH_URL']
152 source_env(consts.OPENRC)
154 if e.errno != errno.EEXIST:
155 LOG.error('OPENRC file not found')
158 LOG.error('OS_AUTH_URL not found')
162 """returns application's default image name"""
167 """returns application's default flavor name"""
172 """return login user name corresponding to image"""
175 def _add_resources_to_template(self, template):
176 """add to the template the resources represented by this context"""
179 if isinstance(self.flavor, dict):
180 flavor = self.flavor.setdefault("name", self.name + "-flavor")
181 template.add_flavor(**self.flavor)
182 self.flavors.add(flavor)
184 template.add_keypair(self.keypair_name, self.name)
185 template.add_security_group(self.secgroup_name)
187 for network in self.networks.values():
188 template.add_network(network.stack_name,
189 network.physical_network,
191 network.segmentation_id,
192 network.port_security_enabled,
193 network.network_type)
194 template.add_subnet(network.subnet_stack_name, network.stack_name,
200 template.add_router(network.router.stack_name,
201 network.router.external_gateway_info,
202 network.subnet_stack_name)
203 template.add_router_interface(network.router.stack_if_name,
204 network.router.stack_name,
205 network.subnet_stack_name)
207 # create a list of servers sorted by increasing no of placement groups
208 list_of_servers = sorted(self.servers,
209 key=lambda s: len(s.placement_groups))
212 # add servers with scheduler hints derived from placement groups
215 # create list of servers with availability policy
216 availability_servers = []
217 for server in list_of_servers:
218 for pg in server.placement_groups:
219 if pg.policy == "availability":
220 availability_servers.append(server)
223 for server in availability_servers:
224 if isinstance(server.flavor, dict):
226 self.flavors.add(server.flavor["name"])
228 self.flavors.add(h_join(server.stack_name, "flavor"))
230 # add servers with availability policy
232 for server in availability_servers:
234 for pg in server.placement_groups:
235 update_scheduler_hints(scheduler_hints, added_servers, pg)
236 # workaround for openstack nova bug, check JIRA: YARDSTICK-200
238 if len(availability_servers) == 2:
239 if not scheduler_hints["different_host"]:
240 scheduler_hints.pop("different_host", None)
241 server.add_to_template(template,
242 list(self.networks.values()),
245 scheduler_hints["different_host"] = \
246 scheduler_hints["different_host"][0]
247 server.add_to_template(template,
248 list(self.networks.values()),
251 server.add_to_template(template,
252 list(self.networks.values()),
254 added_servers.append(server.stack_name)
256 # create list of servers with affinity policy
257 affinity_servers = []
258 for server in list_of_servers:
259 for pg in server.placement_groups:
260 if pg.policy == "affinity":
261 affinity_servers.append(server)
264 # add servers with affinity policy
265 for server in affinity_servers:
266 if server.stack_name in added_servers:
269 for pg in server.placement_groups:
270 update_scheduler_hints(scheduler_hints, added_servers, pg)
271 server.add_to_template(template, list(self.networks.values()),
273 added_servers.append(server.stack_name)
276 for sg in self.server_groups:
277 template.add_server_group(sg.name, sg.policy)
279 # add remaining servers with no placement group configured
280 for server in list_of_servers:
281 # TODO placement_group and server_group should combine
282 if not server.placement_groups:
284 # affinity/anti-aff server group
285 sg = server.server_group
287 scheduler_hints["group"] = {'get_resource': sg.name}
288 server.add_to_template(template,
289 list(self.networks.values()),
292 def get_neutron_info(self):
293 if not self.neutron_client:
294 self.neutron_client = get_neutron_client()
296 networks = self.neutron_client.list_networks()
297 for network in self.networks.values():
298 for neutron_net in networks['networks']:
299 if neutron_net['name'] == network.stack_name:
300 network.segmentation_id = neutron_net.get('provider:segmentation_id')
301 # we already have physical_network
302 # network.physical_network = neutron_net.get('provider:physical_network')
303 network.network_type = neutron_net.get('provider:network_type')
304 network.neutron_info = neutron_net
306 def _create_new_stack(self, heat_template):
308 return heat_template.create(block=True,
309 timeout=self.heat_timeout)
310 except KeyboardInterrupt:
311 raise y_exc.StackCreationInterrupt
313 LOG.exception("stack failed")
314 # let the other failures happen, we want stack trace
317 def _retrieve_existing_stack(self, stack_name):
318 stack = HeatStack(stack_name)
322 LOG.warning("Stack %s does not exist", self.name)
326 """deploys template into a stack using cloud"""
327 LOG.info("Deploying context '%s' START", self.name)
329 heat_template = HeatTemplate(self.name, self.template_file,
330 self.heat_parameters)
332 if self.template_file is None:
333 self._add_resources_to_template(heat_template)
335 if self._flags.no_setup:
336 # Try to get an existing stack, returns a stack or None
337 self.stack = self._retrieve_existing_stack(self.name)
339 self.stack = self._create_new_stack(heat_template)
342 self.stack = self._create_new_stack(heat_template)
344 # TODO: use Neutron to get segmentation-id
345 self.get_neutron_info()
347 # copy some vital stack output into server objects
348 for server in self.servers:
350 self.add_server_port(server)
352 if server.floating_ip:
354 self.stack.outputs[server.floating_ip["stack_name"]]
356 LOG.info("Deploying context '%s' DONE", self.name)
358 def add_server_port(self, server):
359 # use private ip from first port in first network
361 private_port = next(iter(server.ports.values()))[0]
363 LOG.exception("Unable to find first private port in %s", server.ports)
365 server.private_ip = self.stack.outputs[private_port["stack_name"]]
366 server.interfaces = {}
367 for network_name, ports in server.ports.items():
369 # port['port'] is either port name from mapping or default network_name
370 server.interfaces[port['port']] = self.make_interface_dict(network_name,
374 server.override_ip(network_name, port)
376 def make_interface_dict(self, network_name, port, stack_name, outputs):
377 private_ip = outputs[stack_name]
378 mac_address = outputs[h_join(stack_name, "mac_address")]
379 # these are attributes of the network, not the port
380 output_subnet_cidr = outputs[h_join(self.name, network_name,
383 # these are attributes of the network, not the port
384 output_subnet_gateway = outputs[h_join(self.name, network_name,
385 'subnet', 'gateway_ip')]
388 # add default port name
390 "private_ip": private_ip,
391 "subnet_id": outputs[h_join(stack_name, "subnet_id")],
392 "subnet_cidr": output_subnet_cidr,
393 "network": str(ipaddress.ip_network(output_subnet_cidr).network_address),
394 "netmask": str(ipaddress.ip_network(output_subnet_cidr).netmask),
395 "gateway_ip": output_subnet_gateway,
396 "mac_address": mac_address,
397 "device_id": outputs[h_join(stack_name, "device_id")],
398 "network_id": outputs[h_join(stack_name, "network_id")],
399 # this should be == vld_id for NSB tests
400 "network_name": network_name,
401 # to match vnf_generic
402 "local_mac": mac_address,
403 "local_ip": private_ip,
406 def _delete_key_file(self):
408 utils.remove_file(self.key_filename)
409 utils.remove_file(self.key_filename + ".pub")
411 LOG.exception("There was an error removing the key file %s",
415 """undeploys stack from cloud"""
416 if self._flags.no_teardown:
417 LOG.info("Undeploying context '%s' SKIP", self.name)
421 LOG.info("Undeploying context '%s' START", self.name)
424 LOG.info("Undeploying context '%s' DONE", self.name)
426 self._delete_key_file()
428 super(HeatContext, self).undeploy()
431 def generate_routing_table(server):
434 "network": intf["network"],
435 "netmask": intf["netmask"],
437 # We have to encode a None gateway as '' for Jinja2 to YAML conversion
438 "gateway": intf["gateway_ip"] if intf["gateway_ip"] else '',
440 for name, intf in server.interfaces.items()
444 def _get_server(self, attr_name):
445 """lookup server info by name from context
446 attr_name: either a name for a server created by yardstick or a dict
447 with attribute name mapping when using external heat templates
449 if isinstance(attr_name, collections.Mapping):
450 node_name, cname = self.split_name(attr_name['name'])
451 if cname is None or cname != self.name:
454 # Create a dummy server instance for holding the *_ip attributes
455 server = Server(node_name, self, {})
456 server.public_ip = self.stack.outputs.get(
457 attr_name.get("public_ip_attr", object()), None)
459 server.private_ip = self.stack.outputs.get(
460 attr_name.get("private_ip_attr", object()), None)
463 server = self._server_map[attr_name]
465 attr_name_no_suffix = attr_name.split("-")[0]
466 server = self._server_map.get(attr_name_no_suffix, None)
470 pkey = pkg_resources.resource_string(
471 'yardstick.resources',
472 h_join('files/yardstick_key', self.name)).decode('utf-8')
475 "user": server.context.user,
477 "private_ip": server.private_ip,
478 "interfaces": server.interfaces,
479 "routing_table": self.generate_routing_table(server),
480 # empty IPv6 routing table
482 # we want to save the contex name so we can generate pod.yaml
485 # Target server may only have private_ip
487 result["ip"] = server.public_ip
491 def _get_network(self, attr_name):
492 if not isinstance(attr_name, collections.Mapping):
493 network = self.networks.get(attr_name, None)
496 # Only take the first key, value
497 key, value = next(iter(attr_name.items()), (None, None))
500 network_iter = (n for n in self.networks.values() if getattr(n, key) == value)
501 network = next(network_iter, None)
507 "name": network.name,
508 "segmentation_id": network.segmentation_id,
509 "network_type": network.network_type,
510 "physical_network": network.physical_network,