1 ##############################################################################
2 # Copyright (c) 2015 Ericsson AB and others.
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
14 from collections import OrderedDict
19 from yardstick.benchmark import contexts
20 from yardstick.benchmark.contexts.base import Context
21 from yardstick.benchmark.contexts.model import Network
22 from yardstick.benchmark.contexts.model import PlacementGroup, ServerGroup
23 from yardstick.benchmark.contexts.model import Server
24 from yardstick.benchmark.contexts.model import update_scheduler_hints
25 from yardstick.common import exceptions as y_exc
26 from yardstick.common.openstack_utils import get_shade_client
27 from yardstick.orchestrator.heat import HeatStack
28 from yardstick.orchestrator.heat import HeatTemplate
29 from yardstick.common import constants as consts
30 from yardstick.common import utils
31 from yardstick.common.utils import source_env
32 from yardstick.ssh import SSH
33 from yardstick.common import openstack_utils
# Module-level logger for this Heat context implementation.
35 LOG = logging.getLogger(__name__)
# Default timeout in seconds for Heat stack operations (used by init() when
# the context attributes do not supply an explicit "timeout").
37 DEFAULT_HEAT_TIMEOUT = 3600
40 def join_args(sep, *args):
48 class HeatContext(Context):
49 """Class that represents a context in the logical model"""
# Context-type identifier used to register/select this class among the
# available context implementations (see yardstick.benchmark.contexts).
51 __context_type__ = contexts.CONTEXT_HEAT
# Constructor fragment -- the enclosing "def __init__(self)" line is elided
# in this view.  All per-context state is reset to empty/None here and is
# populated later by init(attrs) and deploy().
55 self.networks = OrderedDict()
56 self.heat_timeout = None
58 self.placement_groups = []
59 self.server_groups = []
60 self.keypair_name = None
61 self.secgroup_name = None
68 self.template_file = None
69 self.heat_parameters = None
70 self.shade_client = None
71 self.heat_timeout = None
72 self.key_filename = None
73 self.shade_client = None
# NOTE(review): heat_timeout (lines 56/71) and shade_client (lines 70/73)
# are each assigned None twice -- redundant but harmless; one of each pair
# could be removed.
74 self.operator_client = None
79 super(HeatContext, self).__init__()
@staticmethod
def assign_external_network(networks):
    """Ensure at least one network definition carries an external network.

    If no network in *networks* already declares "external_network", tag the
    'mgmt' network with the name taken from the EXTERNAL_NETWORK environment
    variable (default "net04_ext"); if there is no 'mgmt' network, tag the
    first network in sorted-name order instead.

    :param networks: dict mapping network name -> network attribute dict
        (attribute dicts are mutated in place)
    :returns: list of (name, attrs) pairs sorted by network name

    Fix: the visible code was missing the try/except around the 'mgmt'
    assignment, so the fallback assignment ran unconditionally and a missing
    'mgmt' key raised KeyError.  The guarded control flow is restored here,
    along with the @staticmethod decorator required because callers invoke
    this as self.assign_external_network(attrs["networks"]) with one argument.
    """
    sorted_networks = sorted(networks.items())
    external_network = os.environ.get("EXTERNAL_NETWORK", "net04_ext")

    have_external_network = any(net.get("external_network") for net in networks.values())
    if not have_external_network:
        # try looking for mgmt network first
        try:
            networks['mgmt']["external_network"] = external_network
        except KeyError:
            # otherwise assign it to first network using os.environ
            sorted_networks[0][1]["external_network"] = external_network

    return sorted_networks
98 def init(self, attrs):
99 """Initializes itself from the supplied arguments"""
100 super(HeatContext, self).init(attrs)
102 self.check_environment()
103 self._user = attrs.get("user")
105 self.template_file = attrs.get("heat_template")
# Shade clients: one regular and one with operator (admin) privileges,
# the latter used later to list hypervisors.
107 self.shade_client = openstack_utils.get_shade_client()
108 self.operator_client = openstack_utils.get_shade_operator_client()
# NOTE(review): the read_pod_file call appears to be wrapped in an elided
# try/except (source lines 109-113) that downgrades a missing pod file to a
# warning -- confirm against the full source.
111 self.read_pod_file(attrs)
# NOTE(review): "NVFi" in this log message looks like a typo for "NFVi".
113 LOG.warning("No pod file specified. NVFi metrics will be disabled")
115 self.heat_timeout = attrs.get("timeout", DEFAULT_HEAT_TIMEOUT)
# heat_parameters only make sense when an external template file is given.
116 if self.template_file:
117 self.heat_parameters = attrs.get("heat_parameters")
# Per-context resource names derived from the context name.
120 self.keypair_name = h_join(self.name, "key")
121 self.secgroup_name = h_join(self.name, "secgroup")
123 self._image = attrs.get("image")
125 self._flavor = attrs.get("flavor")
127 self.placement_groups = [PlacementGroup(name, self, pg_attrs["policy"])
128 for name, pg_attrs in attrs.get(
129 "placement_groups", {}).items()]
131 self.server_groups = [ServerGroup(name, self, sg_attrs["policy"])
132 for name, sg_attrs in attrs.get(
133 "server_groups", {}).items()]
135 # we have to do this first, because we are injecting external_network
137 sorted_networks = self.assign_external_network(attrs["networks"])
# NOTE(review): the OrderedDict generator expression is truncated here
# (source lines 141-142 elided -- presumably "sorted_networks)").
139 self.networks = OrderedDict(
140 (name, Network(name, self, net_attrs)) for name, net_attrs in
# Build Server model objects in deterministic (sorted) order and index
# them by distinguished name for later lookup in _get_server().
143 for name, server_attrs in sorted(attrs["servers"].items()):
144 server = Server(name, self, server_attrs)
145 self.servers.append(server)
146 self._server_map[server.dn] = server
# Verify an authenticated OpenStack environment is available: probe
# OS_AUTH_URL and, failing that, source the OPENRC file.  The try/except
# scaffolding (source lines 151, 153-154, 156, 159-160) is elided here.
150 def check_environment(self):
152 os.environ['OS_AUTH_URL']
155 source_env(consts.OPENRC)
# NOTE(review): comparing e.errno against errno.EEXIST for a "file not
# found" condition is suspicious -- ENOENT would be the expected errno.
# Confirm against the full source before relying on this branch.
157 if e.errno != errno.EEXIST:
158 LOG.error('OPENRC file not found')
161 LOG.error('OS_AUTH_URL not found')
# The three docstrings below belong to the image / flavor / user property
# getters; their "@property" and "def" lines (source lines 163-164, 168-169,
# 173-174) are elided in this view.
165 """returns application's default image name"""
170 """returns application's default flavor name"""
175 """return login user name corresponding to image"""
178 def _add_resources_to_template(self, template):
179 """add to the template the resources represented by this context"""
# Flavor: a dict-valued self.flavor means we must create the flavor as a
# Heat resource; record its name so servers can reference it.
182 if isinstance(self.flavor, dict):
183 flavor = self.flavor.setdefault("name", self.name + "-flavor")
184 template.add_flavor(**self.flavor)
185 self.flavors.add(flavor)
187 template.add_keypair(self.keypair_name, self.name)
188 template.add_security_group(self.secgroup_name)
# Networks: existing networks are skipped (elided "continue" around source
# line 193); new ones get network + subnet and, when configured, a router
# with an interface into the subnet.
190 for network in self.networks.values():
191 # Using existing network
192 if network.is_existing():
194 template.add_network(network.stack_name,
195 network.physical_network,
197 network.segmentation_id,
198 network.port_security_enabled,
199 network.network_type)
200 template.add_subnet(network.subnet_stack_name, network.stack_name,
206 template.add_router(network.router.stack_name,
207 network.router.external_gateway_info,
208 network.subnet_stack_name)
209 template.add_router_interface(network.router.stack_if_name,
210 network.router.stack_name,
211 network.subnet_stack_name)
213 # create a list of servers sorted by increasing no of placement groups
214 list_of_servers = sorted(self.servers,
215 key=lambda s: len(s.placement_groups))
218 # add servers with scheduler hints derived from placement groups
221 # create list of servers with availability policy
222 availability_servers = []
223 for server in list_of_servers:
224 for pg in server.placement_groups:
225 if pg.policy == "availability":
226 availability_servers.append(server)
# Register each availability server's flavor name (explicit dict name or
# the generated per-server name) so it is created only once.
229 for server in availability_servers:
230 if isinstance(server.flavor, dict):
232 self.flavors.add(server.flavor["name"])
234 self.flavors.add(h_join(server.stack_name, "flavor"))
236 # add servers with availability policy
# NOTE(review): the initialization of scheduler_hints (and added_servers)
# for this loop is elided (source lines 237/239).
238 for server in availability_servers:
240 for pg in server.placement_groups:
241 update_scheduler_hints(scheduler_hints, added_servers, pg)
242 # workaround for openstack nova bug, check JIRA: YARDSTICK-200
244 if len(availability_servers) == 2:
245 if not scheduler_hints["different_host"]:
246 scheduler_hints.pop("different_host", None)
247 server.add_to_template(template,
248 list(self.networks.values()),
# With exactly two servers, Nova needs a scalar (not a one-element list)
# for the different_host hint -- see YARDSTICK-200 above.
251 scheduler_hints["different_host"] = \
252 scheduler_hints["different_host"][0]
253 server.add_to_template(template,
254 list(self.networks.values()),
257 server.add_to_template(template,
258 list(self.networks.values()),
260 added_servers.append(server.stack_name)
262 # create list of servers with affinity policy
263 affinity_servers = []
264 for server in list_of_servers:
265 for pg in server.placement_groups:
266 if pg.policy == "affinity":
267 affinity_servers.append(server)
270 # add servers with affinity policy
271 for server in affinity_servers:
# Skip servers already emitted by the availability pass above.
272 if server.stack_name in added_servers:
275 for pg in server.placement_groups:
276 update_scheduler_hints(scheduler_hints, added_servers, pg)
277 server.add_to_template(template, list(self.networks.values()),
279 added_servers.append(server.stack_name)
# Nova server groups are independent of placement groups (see TODO below).
282 for sg in self.server_groups:
283 template.add_server_group(sg.name, sg.policy)
285 # add remaining servers with no placement group configured
286 for server in list_of_servers:
287 # TODO placement_group and server_group should combine
288 if not server.placement_groups:
290 # affinity/anti-aff server group
291 sg = server.server_group
293 scheduler_hints["group"] = {'get_resource': sg.name}
294 server.add_to_template(template,
295 list(self.networks.values()),
def get_neutron_info(self):
    """Annotate each context network with provider details from Neutron.

    Lazily creates a shade client if one is not set, then copies the
    provider segmentation id and network type from the matching Neutron
    network (matched by stack name) onto each context Network object,
    and stashes the raw Neutron record in ``neutron_info``.
    """
    if not self.shade_client:
        self.shade_client = get_shade_client()

    neutron_networks = self.shade_client.list_networks()
    for ctx_net in self.networks.values():
        matching = (n for n in neutron_networks if n.name == ctx_net.stack_name)
        for neutron_net in matching:
            ctx_net.segmentation_id = neutron_net.get('provider:segmentation_id')
            # we already have physical_network
            # network.physical_network = neutron_net.get('provider:physical_network')
            ctx_net.network_type = neutron_net.get('provider:network_type')
            ctx_net.neutron_info = neutron_net
# Create the Heat stack, blocking until completion or self.heat_timeout.
# The try/except scaffolding (source lines 312, 317, 320 -- presumably
# "try:", "except Exception:" and a re-raise) is elided in this view.
311 def _create_new_stack(self, heat_template):
313 return heat_template.create(block=True,
314 timeout=self.heat_timeout)
# A Ctrl-C during stack creation is converted to a dedicated exception so
# callers can distinguish user interruption from stack failure.
315 except KeyboardInterrupt:
316 raise y_exc.StackCreationInterrupt
318 LOG.exception("stack failed")
319 # let the other failures happen, we want stack trace
# Look up an already-deployed stack by name; the existence check and
# return statements (source lines 324-326, 328) are elided in this view.
322 def _retrieve_existing_stack(self, stack_name):
323 stack = HeatStack(stack_name)
# NOTE(review): this logs self.name rather than the stack_name parameter;
# they are usually the same (deploy passes self.name) but logging the
# parameter would be more accurate.
327 LOG.warning("Stack %s does not exist", self.name)
# deploy() fragment -- the "def deploy(self):" line is elided in this view.
331 """deploys template into a stack using cloud"""
332 LOG.info("Deploying context '%s' START", self.name)
# Per-context SSH key path under yardstick/resources/files; the final
# path component (the context name suffix) is elided (source line 337).
334 self.key_filename = ''.join(
335 [consts.YARDSTICK_ROOT_PATH,
336 'yardstick/resources/files/yardstick_key-',
338 # Permissions may have changed since creation; this can be fixed. If we
339 # overwrite the file, we lose future access to VMs using this key.
340 # As long as the file exists, even if it is unreadable, keep it intact
341 if not os.path.exists(self.key_filename):
342 SSH.gen_keys(self.key_filename)
344 heat_template = HeatTemplate(
345 self.name, template_file=self.template_file,
346 heat_parameters=self.heat_parameters,
347 os_cloud_config=self._flags.os_cloud_config)
# Without an external template file, build the template from this
# context's model (networks, servers, groups).
349 if self.template_file is None:
350 self._add_resources_to_template(heat_template)
352 if self._flags.no_setup:
353 # Try to get an existing stack, returns a stack or None
354 self.stack = self._retrieve_existing_stack(self.name)
# Fallback when no existing stack was found (the "if not self.stack:"
# guard at source line 355 is elided).
356 self.stack = self._create_new_stack(heat_template)
359 self.stack = self._create_new_stack(heat_template)
361 # TODO: use Neutron to get segmentation-id
362 self.get_neutron_info()
364 # copy some vital stack output into server objects
365 for server in self.servers:
367 self.add_server_port(server)
# Floating IPs come from stack outputs keyed by the floating IP's
# stack resource name (assignment target elided, source line 370).
369 if server.floating_ip:
371 self.stack.outputs[server.floating_ip["stack_name"]]
373 LOG.info("Deploying context '%s' DONE", self.name)
@staticmethod
def _port_net_is_existing(port_info):
    """Return the truthy flag marking this port's network as pre-existing.

    Reads the optional 'net_flags' mapping on the port description and
    returns its IS_EXISTING entry (None when absent).
    """
    return port_info.get('net_flags', {}).get(consts.IS_EXISTING)
@staticmethod
def _port_net_is_public(port_info):
    """Return the truthy flag marking this port's network as public.

    Reads the optional 'net_flags' mapping on the port description and
    returns its IS_PUBLIC entry (None when absent).
    """
    return port_info.get('net_flags', {}).get(consts.IS_PUBLIC)
# Populate a Server object's IP addresses and interfaces from the deployed
# stack's outputs.  Several lines (port-loop continuation, interface-dict
# arguments) are elided in this view.
385 def add_server_port(self, server):
386 server_ports = server.ports.values()
# First pass: derive public/private IPs from each port's stack output.
387 for server_port in server_ports:
388 port_info = server_port[0]
389 port_ip = self.stack.outputs[port_info["stack_name"]]
390 port_net_is_existing = self._port_net_is_existing(port_info)
391 port_net_is_public = self._port_net_is_public(port_info)
# An existing network that is public (or the only port) supplies the
# server's public IP.
392 if port_net_is_existing and (port_net_is_public or
393 len(server_ports) == 1):
394 server.public_ip = port_ip
395 if not server.private_ip or len(server_ports) == 1:
396 server.private_ip = port_ip
# Second pass: build the per-interface detail dicts.
398 server.interfaces = {}
399 for network_name, ports in server.ports.items():
401 # port['port'] is either port name from mapping or default network_name
# For existing networks the details cannot come from stack outputs
# (the "continue"/branch structure around source line 403 is elided).
402 if self._port_net_is_existing(port):
404 server.interfaces[port['port']] = self.make_interface_dict(network_name,
408 server.override_ip(network_name, port)
# Assemble the per-interface detail dict consumed by NSB/vnf_generic from
# the stack outputs.  The "return {" opening and a few key lines (source
# lines 415-416, 420-423, 438-439) are elided in this view.
410 def make_interface_dict(self, network_name, port, stack_name, outputs):
411 private_ip = outputs[stack_name]
412 mac_address = outputs[h_join(stack_name, "mac_address")]
413 # these are attributes of the network, not the port
414 output_subnet_cidr = outputs[h_join(self.name, network_name,
417 # these are attributes of the network, not the port
418 output_subnet_gateway = outputs[h_join(self.name, network_name,
419 'subnet', 'gateway_ip')]
422 # add default port name
424 "private_ip": private_ip,
425 "subnet_id": outputs[h_join(stack_name, "subnet_id")],
426 "subnet_cidr": output_subnet_cidr,
# Derive the network address and netmask from the subnet CIDR string.
427 "network": str(ipaddress.ip_network(output_subnet_cidr).network_address),
428 "netmask": str(ipaddress.ip_network(output_subnet_cidr).netmask),
429 "gateway_ip": output_subnet_gateway,
430 "mac_address": mac_address,
431 "device_id": outputs[h_join(stack_name, "device_id")],
432 "network_id": outputs[h_join(stack_name, "network_id")],
433 # this should be == vld_id for NSB tests
434 "network_name": network_name,
435 # to match vnf_generic
436 "local_mac": mac_address,
437 "local_ip": private_ip,
# Best-effort removal of the context's SSH keypair files; the surrounding
# try/except (source lines 441, 444) is elided in this view, and the log
# call's argument continuation (source line 446) is also missing.
440 def _delete_key_file(self):
442 utils.remove_file(self.key_filename)
443 utils.remove_file(self.key_filename + ".pub")
445 LOG.exception("There was an error removing the key file %s",
# undeploy() fragment -- the "def undeploy(self):" line is elided in this
# view, as are the early return for no_teardown and the stack deletion
# itself (source lines 452-457).
449 """undeploys stack from cloud"""
450 if self._flags.no_teardown:
451 LOG.info("Undeploying context '%s' SKIP", self.name)
455 LOG.info("Undeploying context '%s' START", self.name)
458 LOG.info("Undeploying context '%s' DONE", self.name)
# Remove the generated SSH key files after the stack is gone.
460 self._delete_key_file()
462 super(HeatContext, self).undeploy()
# Build a routing-table entry per server interface (network, netmask,
# gateway, interface name).  The @staticmethod decorator, the "return ["
# opening and the closing brackets (source lines 464, 466-467, 470, 473,
# 475-476) are elided in this view.
465 def generate_routing_table(server):
468 "network": intf["network"],
469 "netmask": intf["netmask"],
471 # We have to encode a None gateway as '' for Jinja2 to YAML conversion
472 "gateway": intf["gateway_ip"] if intf["gateway_ip"] else '',
474 for name, intf in server.interfaces.items()
478 def _get_server(self, attr_name):
479 """lookup server info by name from context
480 attr_name: either a name for a server created by yardstick or a dict
481 with attribute name mapping when using external heat templates
# NOTE(review): collections.Mapping is deprecated and removed in Python
# 3.10 -- collections.abc.Mapping should be used instead.
483 if isinstance(attr_name, collections.Mapping):
484 node_name, cname = self.split_host_name(attr_name['name'])
# Mapping lookups are only valid for this context; bail out otherwise
# (the "return None" at source lines 486-487 is elided).
485 if cname is None or cname != self.name:
488 # Create a dummy server instance for holding the *_ip attributes
489 server = Server(node_name, self, {})
# object() is used as a guaranteed-missing dict key so absent
# "*_ip_attr" entries yield None rather than a stray output.
490 server.public_ip = self.stack.outputs.get(
491 attr_name.get("public_ip_attr", object()), None)
493 server.private_ip = self.stack.outputs.get(
494 attr_name.get("private_ip_attr", object()), None)
# Plain string: exact distinguished-name lookup first, then retry with
# any "-<suffix>" stripped (the try/except around source lines 496-498
# is elided).
497 server = self._server_map[attr_name]
499 attr_name_no_suffix = attr_name.split("-")[0]
500 server = self._server_map.get(attr_name_no_suffix, None)
# Load this context's private key for the SSH connection details.
504 pkey = pkg_resources.resource_string(
505 'yardstick.resources',
506 h_join('files/yardstick_key', self.name)).decode('utf-8')
509 "user": server.context.user,
511 "private_ip": server.private_ip,
512 "interfaces": server.interfaces,
513 "routing_table": self.generate_routing_table(server),
514 # empty IPv6 routing table
516 # we want to save the context name so we can generate pod.yaml
519 # Target server may only have private_ip
521 result["ip"] = server.public_ip
# Resolve a network description either by name (string) or by a single
# {attribute: value} mapping; returns a summary dict (its "return {"
# opening and None-guards, source lines 528-529, 532-533, 536-540, are
# elided in this view).
525 def _get_network(self, attr_name):
# NOTE(review): collections.Mapping is deprecated and removed in Python
# 3.10 -- collections.abc.Mapping should be used instead.
526 if not isinstance(attr_name, collections.Mapping):
527 network = self.networks.get(attr_name, None)
530 # Only take the first key, value
531 key, value = next(iter(attr_name.items()), (None, None))
# Find the first context network whose attribute <key> equals <value>.
534 network_iter = (n for n in self.networks.values() if getattr(n, key) == value)
535 network = next(network_iter, None)
541 "name": network.name,
542 "segmentation_id": network.segmentation_id,
543 "network_type": network.network_type,
544 "physical_network": network.physical_network,
548 def _get_physical_nodes(self):
# Map a "<server>.<context>" name to the physical node hosting that VM by
# matching the Nova hypervisor hostname against the pod-file node IPs.
# Several guard "return None" lines (source lines 554-555, 558-559,
# 562-563, 566) are elided, and the method continues past the last line
# visible in this view.
551 def _get_physical_node_for_server(self, server_name):
552 node_name, ctx_name = self.split_host_name(server_name)
# Only resolve names that belong to this context.
553 if ctx_name is None or self.name != ctx_name:
556 matching_nodes = [s for s in self.servers if s.name == node_name]
557 if len(matching_nodes) == 0:
# Ask Nova (via shade) for the live server record to learn which
# hypervisor it runs on.
560 server = openstack_utils.get_server(self.shade_client,
561 name_or_id=server_name)
564 server = server.toDict()
# Listing hypervisors requires the operator (admin) client.
565 list_hypervisors = self.operator_client.list_hypervisors()
567 for hypervisor in list_hypervisors:
568 if hypervisor.hypervisor_hostname == server['OS-EXT-SRV-ATTR:hypervisor_hostname']:
569 for node in self.nodes:
570 if node['ip'] == hypervisor.host_ip:
# NOTE(review): uses self._name here while the rest of the class
# uses self.name -- confirm both exist and agree.
571 return "{}.{}".format(node['name'], self._name)