1 ##############################################################################
2 # Copyright (c) 2015 Ericsson AB and others.
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
from __future__ import absolute_import
from __future__ import print_function

import collections
import errno
import logging
import os
from collections import OrderedDict

import ipaddress
import pkg_resources

from yardstick.benchmark.contexts.base import Context
from yardstick.benchmark.contexts.model import Network
from yardstick.benchmark.contexts.model import PlacementGroup, ServerGroup
from yardstick.benchmark.contexts.model import Server
from yardstick.benchmark.contexts.model import update_scheduler_hints
from yardstick.common import constants as consts
from yardstick.common import exceptions as y_exc
from yardstick.common import utils
from yardstick.common.openstack_utils import get_shade_client
from yardstick.common.utils import source_env
from yardstick.orchestrator.heat import HeatStack
from yardstick.orchestrator.heat import HeatTemplate
from yardstick.ssh import SSH
LOG = logging.getLogger(__name__)

# Default number of seconds to wait for a Heat stack operation to complete.
DEFAULT_HEAT_TIMEOUT = 3600
41 def join_args(sep, *args):
class HeatContext(Context):
    """Class that represents a context in the logical model.

    Builds (or reuses) an OpenStack Heat stack from the context attributes
    and exposes the deployed servers and networks to the rest of yardstick.
    """

    __context_type__ = "Heat"
56 self.networks = OrderedDict()
57 self.heat_timeout = None
59 self.placement_groups = []
60 self.server_groups = []
61 self.keypair_name = None
62 self.secgroup_name = None
69 self.template_file = None
70 self.heat_parameters = None
71 self.shade_client = None
72 self.heat_timeout = None
73 self.key_filename = None
74 super(HeatContext, self).__init__()
77 def assign_external_network(networks):
78 sorted_networks = sorted(networks.items())
79 external_network = os.environ.get("EXTERNAL_NETWORK", "net04_ext")
81 have_external_network = any(net.get("external_network") for net in networks.values())
82 if not have_external_network:
83 # try looking for mgmt network first
85 networks['mgmt']["external_network"] = external_network
88 # otherwise assign it to first network using os.environ
89 sorted_networks[0][1]["external_network"] = external_network
91 return sorted_networks
93 def init(self, attrs):
94 """Initializes itself from the supplied arguments"""
95 super(HeatContext, self).init(attrs)
97 self.check_environment()
98 self._user = attrs.get("user")
100 self.template_file = attrs.get("heat_template")
102 self.heat_timeout = attrs.get("timeout", DEFAULT_HEAT_TIMEOUT)
103 if self.template_file:
104 self.heat_parameters = attrs.get("heat_parameters")
107 self.keypair_name = h_join(self.name, "key")
108 self.secgroup_name = h_join(self.name, "secgroup")
110 self._image = attrs.get("image")
112 self._flavor = attrs.get("flavor")
114 self.placement_groups = [PlacementGroup(name, self, pg_attrs["policy"])
115 for name, pg_attrs in attrs.get(
116 "placement_groups", {}).items()]
118 self.server_groups = [ServerGroup(name, self, sg_attrs["policy"])
119 for name, sg_attrs in attrs.get(
120 "server_groups", {}).items()]
122 # we have to do this first, because we are injecting external_network
124 sorted_networks = self.assign_external_network(attrs["networks"])
126 self.networks = OrderedDict(
127 (name, Network(name, self, net_attrs)) for name, net_attrs in
130 for name, server_attrs in sorted(attrs["servers"].items()):
131 server = Server(name, self, server_attrs)
132 self.servers.append(server)
133 self._server_map[server.dn] = server
137 self.key_filename = ''.join(
138 [consts.YARDSTICK_ROOT_PATH,
139 'yardstick/resources/files/yardstick_key-',
141 # Permissions may have changed since creation; this can be fixed. If we
142 # overwrite the file, we lose future access to VMs using this key.
143 # As long as the file exists, even if it is unreadable, keep it intact
144 if not os.path.exists(self.key_filename):
145 SSH.gen_keys(self.key_filename)
147 def check_environment(self):
149 os.environ['OS_AUTH_URL']
152 source_env(consts.OPENRC)
154 if e.errno != errno.EEXIST:
155 LOG.error('OPENRC file not found')
158 LOG.error('OS_AUTH_URL not found')
162 """returns application's default image name"""
167 """returns application's default flavor name"""
172 """return login user name corresponding to image"""
175 def _add_resources_to_template(self, template):
176 """add to the template the resources represented by this context"""
179 if isinstance(self.flavor, dict):
180 flavor = self.flavor.setdefault("name", self.name + "-flavor")
181 template.add_flavor(**self.flavor)
182 self.flavors.add(flavor)
184 template.add_keypair(self.keypair_name, self.name)
185 template.add_security_group(self.secgroup_name)
187 for network in self.networks.values():
188 template.add_network(network.stack_name,
189 network.physical_network,
191 network.segmentation_id,
192 network.port_security_enabled,
193 network.network_type)
194 template.add_subnet(network.subnet_stack_name, network.stack_name,
200 template.add_router(network.router.stack_name,
201 network.router.external_gateway_info,
202 network.subnet_stack_name)
203 template.add_router_interface(network.router.stack_if_name,
204 network.router.stack_name,
205 network.subnet_stack_name)
207 # create a list of servers sorted by increasing no of placement groups
208 list_of_servers = sorted(self.servers,
209 key=lambda s: len(s.placement_groups))
212 # add servers with scheduler hints derived from placement groups
215 # create list of servers with availability policy
216 availability_servers = []
217 for server in list_of_servers:
218 for pg in server.placement_groups:
219 if pg.policy == "availability":
220 availability_servers.append(server)
223 for server in availability_servers:
224 if isinstance(server.flavor, dict):
226 self.flavors.add(server.flavor["name"])
228 self.flavors.add(h_join(server.stack_name, "flavor"))
230 # add servers with availability policy
232 for server in availability_servers:
234 for pg in server.placement_groups:
235 update_scheduler_hints(scheduler_hints, added_servers, pg)
236 # workaround for openstack nova bug, check JIRA: YARDSTICK-200
238 if len(availability_servers) == 2:
239 if not scheduler_hints["different_host"]:
240 scheduler_hints.pop("different_host", None)
241 server.add_to_template(template,
242 list(self.networks.values()),
245 scheduler_hints["different_host"] = \
246 scheduler_hints["different_host"][0]
247 server.add_to_template(template,
248 list(self.networks.values()),
251 server.add_to_template(template,
252 list(self.networks.values()),
254 added_servers.append(server.stack_name)
256 # create list of servers with affinity policy
257 affinity_servers = []
258 for server in list_of_servers:
259 for pg in server.placement_groups:
260 if pg.policy == "affinity":
261 affinity_servers.append(server)
264 # add servers with affinity policy
265 for server in affinity_servers:
266 if server.stack_name in added_servers:
269 for pg in server.placement_groups:
270 update_scheduler_hints(scheduler_hints, added_servers, pg)
271 server.add_to_template(template, list(self.networks.values()),
273 added_servers.append(server.stack_name)
276 for sg in self.server_groups:
277 template.add_server_group(sg.name, sg.policy)
279 # add remaining servers with no placement group configured
280 for server in list_of_servers:
281 # TODO placement_group and server_group should combine
282 if not server.placement_groups:
284 # affinity/anti-aff server group
285 sg = server.server_group
287 scheduler_hints["group"] = {'get_resource': sg.name}
288 server.add_to_template(template,
289 list(self.networks.values()),
292 def get_neutron_info(self):
293 if not self.shade_client:
294 self.shade_client = get_shade_client()
296 networks = self.shade_client.list_networks()
297 for network in self.networks.values():
298 for neutron_net in (net for net in networks if net.name == network.stack_name):
299 network.segmentation_id = neutron_net.get('provider:segmentation_id')
300 # we already have physical_network
301 # network.physical_network = neutron_net.get('provider:physical_network')
302 network.network_type = neutron_net.get('provider:network_type')
303 network.neutron_info = neutron_net
305 def _create_new_stack(self, heat_template):
307 return heat_template.create(block=True,
308 timeout=self.heat_timeout)
309 except KeyboardInterrupt:
310 raise y_exc.StackCreationInterrupt
312 LOG.exception("stack failed")
313 # let the other failures happen, we want stack trace
316 def _retrieve_existing_stack(self, stack_name):
317 stack = HeatStack(stack_name)
321 LOG.warning("Stack %s does not exist", self.name)
325 """deploys template into a stack using cloud"""
326 LOG.info("Deploying context '%s' START", self.name)
328 heat_template = HeatTemplate(self.name, self.template_file,
329 self.heat_parameters)
331 if self.template_file is None:
332 self._add_resources_to_template(heat_template)
334 if self._flags.no_setup:
335 # Try to get an existing stack, returns a stack or None
336 self.stack = self._retrieve_existing_stack(self.name)
338 self.stack = self._create_new_stack(heat_template)
341 self.stack = self._create_new_stack(heat_template)
343 # TODO: use Neutron to get segmentation-id
344 self.get_neutron_info()
346 # copy some vital stack output into server objects
347 for server in self.servers:
349 self.add_server_port(server)
351 if server.floating_ip:
353 self.stack.outputs[server.floating_ip["stack_name"]]
355 LOG.info("Deploying context '%s' DONE", self.name)
357 def add_server_port(self, server):
358 # use private ip from first port in first network
360 private_port = next(iter(server.ports.values()))[0]
362 LOG.exception("Unable to find first private port in %s", server.ports)
364 server.private_ip = self.stack.outputs[private_port["stack_name"]]
365 server.interfaces = {}
366 for network_name, ports in server.ports.items():
368 # port['port'] is either port name from mapping or default network_name
369 server.interfaces[port['port']] = self.make_interface_dict(network_name,
373 server.override_ip(network_name, port)
375 def make_interface_dict(self, network_name, port, stack_name, outputs):
376 private_ip = outputs[stack_name]
377 mac_address = outputs[h_join(stack_name, "mac_address")]
378 # these are attributes of the network, not the port
379 output_subnet_cidr = outputs[h_join(self.name, network_name,
382 # these are attributes of the network, not the port
383 output_subnet_gateway = outputs[h_join(self.name, network_name,
384 'subnet', 'gateway_ip')]
387 # add default port name
389 "private_ip": private_ip,
390 "subnet_id": outputs[h_join(stack_name, "subnet_id")],
391 "subnet_cidr": output_subnet_cidr,
392 "network": str(ipaddress.ip_network(output_subnet_cidr).network_address),
393 "netmask": str(ipaddress.ip_network(output_subnet_cidr).netmask),
394 "gateway_ip": output_subnet_gateway,
395 "mac_address": mac_address,
396 "device_id": outputs[h_join(stack_name, "device_id")],
397 "network_id": outputs[h_join(stack_name, "network_id")],
398 # this should be == vld_id for NSB tests
399 "network_name": network_name,
400 # to match vnf_generic
401 "local_mac": mac_address,
402 "local_ip": private_ip,
405 def _delete_key_file(self):
407 utils.remove_file(self.key_filename)
408 utils.remove_file(self.key_filename + ".pub")
410 LOG.exception("There was an error removing the key file %s",
414 """undeploys stack from cloud"""
415 if self._flags.no_teardown:
416 LOG.info("Undeploying context '%s' SKIP", self.name)
420 LOG.info("Undeploying context '%s' START", self.name)
423 LOG.info("Undeploying context '%s' DONE", self.name)
425 self._delete_key_file()
427 super(HeatContext, self).undeploy()
430 def generate_routing_table(server):
433 "network": intf["network"],
434 "netmask": intf["netmask"],
436 # We have to encode a None gateway as '' for Jinja2 to YAML conversion
437 "gateway": intf["gateway_ip"] if intf["gateway_ip"] else '',
439 for name, intf in server.interfaces.items()
443 def _get_server(self, attr_name):
444 """lookup server info by name from context
445 attr_name: either a name for a server created by yardstick or a dict
446 with attribute name mapping when using external heat templates
448 if isinstance(attr_name, collections.Mapping):
449 node_name, cname = self.split_name(attr_name['name'])
450 if cname is None or cname != self.name:
453 # Create a dummy server instance for holding the *_ip attributes
454 server = Server(node_name, self, {})
455 server.public_ip = self.stack.outputs.get(
456 attr_name.get("public_ip_attr", object()), None)
458 server.private_ip = self.stack.outputs.get(
459 attr_name.get("private_ip_attr", object()), None)
462 server = self._server_map[attr_name]
464 attr_name_no_suffix = attr_name.split("-")[0]
465 server = self._server_map.get(attr_name_no_suffix, None)
469 pkey = pkg_resources.resource_string(
470 'yardstick.resources',
471 h_join('files/yardstick_key', self.name)).decode('utf-8')
474 "user": server.context.user,
476 "private_ip": server.private_ip,
477 "interfaces": server.interfaces,
478 "routing_table": self.generate_routing_table(server),
479 # empty IPv6 routing table
481 # we want to save the contex name so we can generate pod.yaml
484 # Target server may only have private_ip
486 result["ip"] = server.public_ip
490 def _get_network(self, attr_name):
491 if not isinstance(attr_name, collections.Mapping):
492 network = self.networks.get(attr_name, None)
495 # Only take the first key, value
496 key, value = next(iter(attr_name.items()), (None, None))
499 network_iter = (n for n in self.networks.values() if getattr(n, key) == value)
500 network = next(network_iter, None)
506 "name": network.name,
507 "segmentation_id": network.segmentation_id,
508 "network_type": network.network_type,
509 "physical_network": network.physical_network,