1 ##############################################################################
2 # Copyright (c) 2015 Ericsson AB and others.
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
14 from collections import OrderedDict
19 from yardstick.benchmark import contexts
20 from yardstick.benchmark.contexts.base import Context
21 from yardstick.benchmark.contexts.model import Network
22 from yardstick.benchmark.contexts.model import PlacementGroup, ServerGroup
23 from yardstick.benchmark.contexts.model import Server
24 from yardstick.benchmark.contexts.model import update_scheduler_hints
25 from yardstick.common import exceptions as y_exc
26 from yardstick.common.openstack_utils import get_shade_client
27 from yardstick.orchestrator.heat import HeatStack
28 from yardstick.orchestrator.heat import HeatTemplate
29 from yardstick.common import constants as consts
30 from yardstick.common import utils
31 from yardstick.common.utils import source_env
32 from yardstick.ssh import SSH
33 from yardstick.common import openstack_utils
# Module-level logger for this Heat-context implementation.
LOG = logging.getLogger(__name__)

# Fallback timeout for Heat stack operations, used when the context
# attributes do not supply a "timeout" value (see init() and
# _create_new_stack()).  Presumably seconds -- confirm against Heat docs.
DEFAULT_HEAT_TIMEOUT = 3600
40 def join_args(sep, *args):
48 class HeatContext(Context):
49 """Class that represents a context in the logical model"""
51 __context_type__ = contexts.CONTEXT_HEAT
# NOTE(review): the enclosing 'def __init__' line is elided from this view.
self.networks = OrderedDict()      # name -> Network, insertion-ordered
self.heat_timeout = None           # stack operation timeout, set in init()

self.placement_groups = []         # PlacementGroup objects from attrs
self.server_groups = []            # ServerGroup objects from attrs
self.keypair_name = None           # per-context Nova keypair name
self.secgroup_name = None          # per-context security group name
self.security_group = None         # user-supplied security group attrs

self.template_file = None          # external heat template path, if any
self.heat_parameters = None        # parameters for an external template
self.shade_client = None
# NOTE(review): heat_timeout and shade_client are assigned twice in this
# initializer (see above) -- the duplicates are harmless but redundant.
self.heat_timeout = None
self.key_filename = None           # path of the generated SSH key pair
self.shade_client = None
self.operator_client = None        # shade client with operator privileges
super(HeatContext, self).__init__()
@staticmethod
def assign_external_network(networks):
    """Ensure one network definition carries "external_network".

    If none of the supplied networks already declares an external
    network, the value of the EXTERNAL_NETWORK environment variable
    (default "net04_ext") is injected: into the network named 'mgmt'
    when present, otherwise into the alphabetically first network.

    :param networks: mapping of network name -> attribute dict; the
        chosen network's dict is mutated in place
    :return: the (name, attrs) items of *networks*, sorted by name
    """
    sorted_networks = sorted(networks.items())
    external_network = os.environ.get("EXTERNAL_NETWORK", "net04_ext")

    have_external_network = any(net.get("external_network")
                                for net in networks.values())
    if not have_external_network:
        # try looking for mgmt network first
        try:
            networks['mgmt']["external_network"] = external_network
        except KeyError:
            # otherwise assign it to first network using os.environ
            sorted_networks[0][1]["external_network"] = external_network

    return sorted_networks
def init(self, attrs):
    """Initializes itself from the supplied arguments"""
    super(HeatContext, self).init(attrs)

    self.check_environment()
    self._user = attrs.get("user")

    # optional user-supplied template; when set, the resource modelling
    # below (keypair, secgroup, networks, servers) is not performed
    self.template_file = attrs.get("heat_template")

    self.shade_client = openstack_utils.get_shade_client()
    self.operator_client = openstack_utils.get_shade_operator_client()

    # NOTE(review): the error handling around the pod-file read is elided
    # from this view; the warning below appears to be its fallback path
    # -- confirm against the full file.
    self.read_pod_file(attrs)
    LOG.warning("No pod file specified. NVFi metrics will be disabled")

    self.heat_timeout = attrs.get("timeout", DEFAULT_HEAT_TIMEOUT)
    if self.template_file:
        self.heat_parameters = attrs.get("heat_parameters")

    # names for the per-context keypair and security group resources
    self.keypair_name = h_join(self.name, "key")
    self.secgroup_name = h_join(self.name, "secgroup")
    self.security_group = attrs.get("security_group")

    self._image = attrs.get("image")

    self._flavor = attrs.get("flavor")

    # one PlacementGroup / ServerGroup object per declared policy entry
    self.placement_groups = [PlacementGroup(name, self, pg_attrs["policy"])
                             for name, pg_attrs in attrs.get(
                                 "placement_groups", {}).items()]

    self.server_groups = [ServerGroup(name, self, sg_attrs["policy"])
                          for name, sg_attrs in attrs.get(
                              "server_groups", {}).items()]

    # we have to do this first, because we are injecting external_network
    sorted_networks = self.assign_external_network(attrs["networks"])

    # NOTE(review): the iterable terminating this expression is elided
    # from this view (presumably sorted_networks).
    self.networks = OrderedDict(
        (name, Network(name, self, net_attrs)) for name, net_attrs in

    # build Server objects in deterministic (sorted) order and index them
    # by their distinguished name for later lookup
    for name, server_attrs in sorted(attrs["servers"].items()):
        server = Server(name, self, server_attrs)
        self.servers.append(server)
        self._server_map[server.dn] = server
def check_environment(self):
    """Verify that OpenStack credentials are available.

    Looks for OS_AUTH_URL in the environment and falls back to sourcing
    the yardstick OPENRC file.  NOTE(review): the try/except scaffolding
    around these statements is elided from this view.
    """
    # raises KeyError when unset; the (elided) handler sources OPENRC
    os.environ['OS_AUTH_URL']
    source_env(consts.OPENRC)
    # NOTE(review): 'e' is bound by an elided 'except' clause
    if e.errno != errno.EEXIST:
        LOG.error('OPENRC file not found')
    LOG.error('OS_AUTH_URL not found')
169 """returns application's default image name"""
174 """returns application's default flavor name"""
179 """return login user name corresponding to image"""
def _add_resources_to_template(self, template):
    """add to the template the resources represented by this context

    Adds, in order: the context flavor (when given as a dict), keypair,
    security group, networks (with subnet/router plumbing), and the
    servers -- availability-policy servers first, then affinity-policy
    servers, then servers without placement groups.

    NOTE(review): several statements of this method are elided from this
    view; inline notes mark gaps only where control flow visibly depends
    on them.
    """
    # context-level flavor: create it in the stack when given as a dict,
    # defaulting its name to "<context>-flavor"
    if isinstance(self.flavor, dict):
        flavor = self.flavor.setdefault("name", self.name + "-flavor")
        template.add_flavor(**self.flavor)
        self.flavors.add(flavor)

    template.add_keypair(self.keypair_name, self.name)
    template.add_security_group(self.secgroup_name, self.security_group)

    for network in self.networks.values():
        # Using existing network
        if network.is_existing():
            # NOTE(review): the branch body is elided from this view
        # NOTE(review): one argument line of this call is elided
        template.add_network(network.stack_name,
                             network.physical_network,
                             network.segmentation_id,
                             network.port_security_enabled,
                             network.network_type)
        # NOTE(review): the remaining arguments of this call are elided
        template.add_subnet(network.subnet_stack_name, network.stack_name,
        template.add_router(network.router.stack_name,
                            network.router.external_gateway_info,
                            network.subnet_stack_name)
        template.add_router_interface(network.router.stack_if_name,
                                      network.router.stack_name,
                                      network.subnet_stack_name)

    # create a list of servers sorted by increasing no of placement groups
    list_of_servers = sorted(self.servers,
                             key=lambda s: len(s.placement_groups))

    # add servers with scheduler hints derived from placement groups

    # create list of servers with availability policy
    availability_servers = []
    for server in list_of_servers:
        for pg in server.placement_groups:
            if pg.policy == "availability":
                availability_servers.append(server)

    # register each availability server's flavor name up-front
    for server in availability_servers:
        if isinstance(server.flavor, dict):
            # NOTE(review): try/except scaffolding between these two
            # statements is elided from this view
            self.flavors.add(server.flavor["name"])
            self.flavors.add(h_join(server.stack_name, "flavor"))

    # add servers with availability policy
    for server in availability_servers:
        for pg in server.placement_groups:
            update_scheduler_hints(scheduler_hints, added_servers, pg)
        # workaround for openstack nova bug, check JIRA: YARDSTICK-200
        if len(availability_servers) == 2:
            if not scheduler_hints["different_host"]:
                scheduler_hints.pop("different_host", None)
                # NOTE(review): the trailing scheduler_hints argument of
                # these add_to_template calls is elided from this view
                server.add_to_template(template,
                                       list(self.networks.values()),
            # collapse the single-entry different_host hint to a scalar
            scheduler_hints["different_host"] = \
                scheduler_hints["different_host"][0]
            server.add_to_template(template,
                                   list(self.networks.values()),
        server.add_to_template(template,
                               list(self.networks.values()),
        added_servers.append(server.stack_name)

    # create list of servers with affinity policy
    affinity_servers = []
    for server in list_of_servers:
        for pg in server.placement_groups:
            if pg.policy == "affinity":
                affinity_servers.append(server)

    # add servers with affinity policy
    for server in affinity_servers:
        # skip servers already emitted by the availability pass
        if server.stack_name in added_servers:
            # NOTE(review): the loop-skip statement is elided
        for pg in server.placement_groups:
            update_scheduler_hints(scheduler_hints, added_servers, pg)
        server.add_to_template(template, list(self.networks.values()),
        added_servers.append(server.stack_name)

    # emit declared server groups
    for sg in self.server_groups:
        template.add_server_group(sg.name, sg.policy)

    # add remaining servers with no placement group configured
    for server in list_of_servers:
        # TODO placement_group and server_group should combine
        if not server.placement_groups:
            # affinity/anti-aff server group
            sg = server.server_group
            scheduler_hints["group"] = {'get_resource': sg.name}
            server.add_to_template(template,
                                   list(self.networks.values()),
def get_neutron_info(self):
    """Annotate each context network with its Neutron provider details.

    Lazily creates a shade client when one is not already set, then, for
    every Neutron network whose name matches a context network's stack
    name, copies the provider segmentation id and network type (plus the
    raw Neutron record) onto the Network object.
    """
    if not self.shade_client:
        self.shade_client = get_shade_client()

    neutron_nets = self.shade_client.list_networks()
    for ctx_net in self.networks.values():
        matching = [n for n in neutron_nets if n.name == ctx_net.stack_name]
        for neutron_net in matching:
            ctx_net.segmentation_id = neutron_net.get('provider:segmentation_id')
            # we already have physical_network
            # network.physical_network = neutron_net.get('provider:physical_network')
            ctx_net.network_type = neutron_net.get('provider:network_type')
            ctx_net.neutron_info = neutron_net
def _create_new_stack(self, heat_template):
    """Create the Heat stack, blocking until done or heat_timeout.

    KeyboardInterrupt is translated to StackCreationInterrupt so callers
    can distinguish operator abort from stack failure; other failures
    are logged with a traceback and propagated.  NOTE(review): the
    try/except scaffolding is partially elided from this view.
    """
    return heat_template.create(block=True,
                                timeout=self.heat_timeout)
except KeyboardInterrupt:
    raise y_exc.StackCreationInterrupt
    LOG.exception("stack failed")
    # let the other failures happen, we want stack trace
def _retrieve_existing_stack(self, stack_name):
    """Return an already-deployed HeatStack wrapper for *stack_name*.

    Used by deploy() when the no_setup flag is set.  NOTE(review): the
    existence check / return statements are elided from this view; only
    the not-found warning is visible.
    """
    stack = HeatStack(stack_name)
    LOG.warning("Stack %s does not exist", self.name)
335 """deploys template into a stack using cloud"""
336 LOG.info("Deploying context '%s' START", self.name)
338 self.key_filename = ''.join(
339 [consts.YARDSTICK_ROOT_PATH,
340 'yardstick/resources/files/yardstick_key-',
342 # Permissions may have changed since creation; this can be fixed. If we
343 # overwrite the file, we lose future access to VMs using this key.
344 # As long as the file exists, even if it is unreadable, keep it intact
345 if not os.path.exists(self.key_filename):
346 SSH.gen_keys(self.key_filename)
348 heat_template = HeatTemplate(
349 self.name, template_file=self.template_file,
350 heat_parameters=self.heat_parameters,
351 os_cloud_config=self._flags.os_cloud_config)
353 if self.template_file is None:
354 self._add_resources_to_template(heat_template)
356 if self._flags.no_setup:
357 # Try to get an existing stack, returns a stack or None
358 self.stack = self._retrieve_existing_stack(self.name)
360 self.stack = self._create_new_stack(heat_template)
363 self.stack = self._create_new_stack(heat_template)
365 # TODO: use Neutron to get segmentation-id
366 self.get_neutron_info()
368 # copy some vital stack output into server objects
369 for server in self.servers:
371 self.add_server_port(server)
373 if server.floating_ip:
375 self.stack.outputs[server.floating_ip["stack_name"]]
377 LOG.info("Deploying context '%s' DONE", self.name)
@staticmethod
def _port_net_is_existing(port_info):
    """Return the IS_EXISTING flag of the port's network, if any.

    :param port_info: port description dict whose optional 'net_flags'
        sub-dict may carry consts.IS_EXISTING
    :return: the flag value, or None when absent
    """
    # @staticmethod restored: call sites invoke this as
    # self._port_net_is_existing(port_info) with a single argument.
    net_flags = port_info.get('net_flags', {})
    return net_flags.get(consts.IS_EXISTING)
@staticmethod
def _port_net_is_public(port_info):
    """Return the IS_PUBLIC flag of the port's network, if any.

    :param port_info: port description dict whose optional 'net_flags'
        sub-dict may carry consts.IS_PUBLIC
    :return: the flag value, or None when absent
    """
    # @staticmethod restored: call sites invoke this as
    # self._port_net_is_public(port_info) with a single argument.
    net_flags = port_info.get('net_flags', {})
    return net_flags.get(consts.IS_PUBLIC)
def add_server_port(self, server):
    """Copy stack-output IPs onto *server* and build its interfaces map.

    Pass 1 assigns public/private IPs from the stack outputs of each
    port; pass 2 records per-port interface details, skipping ports on
    pre-existing networks.  NOTE(review): several statements are elided
    from this view; inline notes mark the visible gaps.
    """
    server_ports = server.ports.values()
    for server_port in server_ports:
        # first entry of the port list is used for the IP lookups
        port_info = server_port[0]
        port_ip = self.stack.outputs[port_info["stack_name"]]
        port_net_is_existing = self._port_net_is_existing(port_info)
        port_net_is_public = self._port_net_is_public(port_info)
        # an existing-network port supplies the public IP when flagged
        # public or when it is the server's only port
        if port_net_is_existing and (port_net_is_public or
                                     len(server_ports) == 1):
            server.public_ip = port_ip
        if not server.private_ip or len(server_ports) == 1:
            server.private_ip = port_ip

    server.interfaces = {}
    for network_name, ports in server.ports.items():
        # NOTE(review): the inner loop binding 'port' is elided here
        # port['port'] is either port name from mapping or default network_name
        if self._port_net_is_existing(port):
            # NOTE(review): the branch body (loop skip) is elided
        # NOTE(review): the remaining arguments of this call are elided
        server.interfaces[port['port']] = self.make_interface_dict(network_name,
        server.override_ip(network_name, port)
def make_interface_dict(self, network_name, port, stack_name, outputs):
    """Build the interface-description dict for one server port.

    All values are read from the Heat stack *outputs*, keyed by
    h_join-ed names derived from *stack_name* and the context/network
    names.  NOTE(review): the dict literal's opening/closing braces and
    some continuation lines are elided from this view.
    """
    private_ip = outputs[stack_name]
    mac_address = outputs[h_join(stack_name, "mac_address")]
    # these are attributes of the network, not the port
    # NOTE(review): the continuation of this key expression is elided
    output_subnet_cidr = outputs[h_join(self.name, network_name,

    # these are attributes of the network, not the port
    output_subnet_gateway = outputs[h_join(self.name, network_name,
                                           'subnet', 'gateway_ip')]

    # add default port name
        "private_ip": private_ip,
        "subnet_id": outputs[h_join(stack_name, "subnet_id")],
        "subnet_cidr": output_subnet_cidr,
        "network": str(ipaddress.ip_network(output_subnet_cidr).network_address),
        "netmask": str(ipaddress.ip_network(output_subnet_cidr).netmask),
        "gateway_ip": output_subnet_gateway,
        "mac_address": mac_address,
        "device_id": outputs[h_join(stack_name, "device_id")],
        "network_id": outputs[h_join(stack_name, "network_id")],
        # this should be == vld_id for NSB tests
        "network_name": network_name,
        # to match vnf_generic
        "local_mac": mac_address,
        "local_ip": private_ip,
def _delete_key_file(self):
    """Best-effort removal of the generated SSH key pair files.

    Removes both the private key and its ".pub" companion; failures are
    logged, not raised.  NOTE(review): the try/except scaffolding and
    the continuation of the log call are elided from this view.
    """
    utils.remove_file(self.key_filename)
    utils.remove_file(self.key_filename + ".pub")
    LOG.exception("There was an error removing the key file %s",
453 """undeploys stack from cloud"""
454 if self._flags.no_teardown:
455 LOG.info("Undeploying context '%s' SKIP", self.name)
459 LOG.info("Undeploying context '%s' START", self.name)
462 LOG.info("Undeploying context '%s' DONE", self.name)
464 self._delete_key_file()
466 super(HeatContext, self).undeploy()
def generate_routing_table(server):
    """Build a routing-table list from *server*'s interfaces.

    One entry per interface with network/netmask/gateway taken from the
    dicts built by make_interface_dict().  NOTE(review): the enclosing
    list/dict literal delimiters are elided from this view.
    """
            "network": intf["network"],
            "netmask": intf["netmask"],
            # We have to encode a None gateway as '' for Jinja2 to YAML conversion
            "gateway": intf["gateway_ip"] if intf["gateway_ip"] else '',
        for name, intf in server.interfaces.items()
def _get_server(self, attr_name):
    """lookup server info by name from context
    attr_name: either a name for a server created by yardstick or a dict
    with attribute name mapping when using external heat templates

    NOTE(review): the docstring terminator and several statements of
    this method are elided from this view.
    """
    if isinstance(attr_name, collections.Mapping):
        node_name, cname = self.split_host_name(attr_name['name'])
        # only serve lookups addressed to this context
        if cname is None or cname != self.name:
            # NOTE(review): the branch body (early return) is elided

        # Create a dummy server instance for holding the *_ip attributes
        server = Server(node_name, self, {})
        # object() sentinel: a missing *_attr key can never match an output
        server.public_ip = self.stack.outputs.get(
            attr_name.get("public_ip_attr", object()), None)
        server.private_ip = self.stack.outputs.get(
            attr_name.get("private_ip_attr", object()), None)

        # plain-name lookup; falls back to the suffix-stripped name
        server = self._server_map[attr_name]
        attr_name_no_suffix = attr_name.split("-")[0]
        server = self._server_map.get(attr_name_no_suffix, None)

    # read the generated private key so callers can SSH into the server
    pkey = pkg_resources.resource_string(
        'yardstick.resources',
        h_join('files/yardstick_key', self.name)).decode('utf-8')

        "user": server.context.user,
        "private_ip": server.private_ip,
        "interfaces": server.interfaces,
        "routing_table": self.generate_routing_table(server),
        # empty IPv6 routing table

    # we want to save the context name so we can generate pod.yaml
    # Target server may only have private_ip
        result["ip"] = server.public_ip
def _get_network(self, attr_name):
    """Resolve a context network by name or by attribute mapping.

    A plain name is looked up directly; a mapping selects the first
    network whose attribute *key* equals *value*.  Returns a dict with
    name/segmentation_id/network_type/physical_network for the match.
    NOTE(review): branch/return scaffolding and the result-dict braces
    are elided from this view.
    """
    if not isinstance(attr_name, collections.Mapping):
        network = self.networks.get(attr_name, None)

    # Only take the first key, value
    key, value = next(iter(attr_name.items()), (None, None))
    network_iter = (n for n in self.networks.values() if getattr(n, key) == value)
    network = next(network_iter, None)

        "name": network.name,
        "segmentation_id": network.segmentation_id,
        "network_type": network.network_type,
        "physical_network": network.physical_network,
552 def _get_physical_nodes(self):
555 def _get_physical_node_for_server(self, server_name):
556 node_name, ctx_name = self.split_host_name(server_name)
557 if ctx_name is None or self.name != ctx_name:
560 matching_nodes = [s for s in self.servers if s.name == node_name]
561 if len(matching_nodes) == 0:
564 server = openstack_utils.get_server(self.shade_client,
565 name_or_id=server_name)
568 server = server.toDict()
569 list_hypervisors = self.operator_client.list_hypervisors()
571 for hypervisor in list_hypervisors:
572 if hypervisor.hypervisor_hostname == server['OS-EXT-SRV-ATTR:hypervisor_hostname']:
573 for node in self.nodes:
574 if node['ip'] == hypervisor.host_ip:
575 return "{}.{}".format(node['name'], self._name)