1 ##############################################################################
2 # Copyright (c) 2015 Ericsson AB and others.
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
import collections
import errno
import ipaddress
import functools
import logging
import os
from collections import OrderedDict

from yardstick.benchmark.contexts.base import Context
from yardstick.benchmark.contexts.model import Network
from yardstick.benchmark.contexts.model import PlacementGroup, ServerGroup
from yardstick.benchmark.contexts.model import Server
from yardstick.benchmark.contexts.model import update_scheduler_hints
from yardstick.common import constants as consts
from yardstick.common import exceptions as y_exc
from yardstick.common import openstack_utils
from yardstick.common import utils
from yardstick.common.openstack_utils import get_shade_client
from yardstick.common.utils import source_env
from yardstick.orchestrator.heat import HeatStack
from yardstick.orchestrator.heat import HeatTemplate
from yardstick.ssh import SSH
# Module-wide logger for Heat-context operations.
34 LOG = logging.getLogger(__name__)
# Default timeout for Heat stack creation, used when the context attrs do
# not supply "timeout" (see init()); presumably seconds -- TODO confirm.
36 DEFAULT_HEAT_TIMEOUT = 3600
def join_args(sep, *args):
    """Join *args* into a single string separated by *sep*.

    >>> join_args('-', 'ctx', 'key')
    'ctx-key'
    """
    return sep.join(args)


# '-'-joined names are the Heat resource naming convention used throughout
# this module (e.g. "<context>-key", "<context>-secgroup"); h_join is
# referenced by init(), make_interface_dict() and _get_server() below.
h_join = functools.partial(join_args, '-')
47 class HeatContext(Context):
48 """Class that represents a context in the logical model"""
50 __context_type__ = "Heat"
# NOTE(review): this chunk is missing several source lines (the embedded
# original numbering jumps); the assignments below belong to __init__,
# whose `def` header is not visible here.  They pre-declare instance
# state that init()/deploy() fill in later.
54 self.networks = OrderedDict()
55 self.heat_timeout = None
57 self.placement_groups = []
58 self.server_groups = []
# Names of the Heat keypair / security-group resources, built in init().
59 self.keypair_name = None
60 self.secgroup_name = None
61 self.security_group = None
68 self.template_file = None
69 self.heat_parameters = None
# NOTE(review): shade_client and heat_timeout are each initialized to
# None twice in this fragment -- redundant but harmless.
70 self.shade_client = None
71 self.heat_timeout = None
72 self.key_filename = None
73 self.shade_client = None
74 self.operator_client = None
79 super(HeatContext, self).__init__()
82 def assign_external_network(networks):
83 sorted_networks = sorted(networks.items())
84 external_network = os.environ.get("EXTERNAL_NETWORK", "net04_ext")
86 have_external_network = any(net.get("external_network") for net in networks.values())
87 if not have_external_network:
88 # try looking for mgmt network first
90 networks['mgmt']["external_network"] = external_network
93 # otherwise assign it to first network using os.environ
94 sorted_networks[0][1]["external_network"] = external_network
96 return sorted_networks
# init(): populate this context from the scenario's "context" attrs dict.
# NOTE(review): interior lines are missing from this chunk (embedded
# numbering jumps), so some control flow (e.g. the guard around
# read_pod_file and the if/else around template_file) is not visible.
98 def init(self, attrs):
99 """Initializes itself from the supplied arguments"""
100 super(HeatContext, self).init(attrs)
102 self.check_environment()
103 self._user = attrs.get("user")
105 self.template_file = attrs.get("heat_template")
107 self.shade_client = openstack_utils.get_shade_client()
108 self.operator_client = openstack_utils.get_shade_operator_client()
# The pod file supplies NFVi node info; the warning below suggests it is
# optional -- presumably guarded by a try/except on a missing line.
111 self.read_pod_file(attrs)
113 LOG.warning("No pod file specified. NVFi metrics will be disabled")
115 self.heat_timeout = attrs.get("timeout", DEFAULT_HEAT_TIMEOUT)
116 if self.template_file:
117 self.heat_parameters = attrs.get("heat_parameters")
# Heat resource names are derived from the context name via h_join.
120 self.keypair_name = h_join(self.name, "key")
122 self.secgroup_name = h_join(self.name, "secgroup")
124 self.security_group = attrs.get("security_group")
126 self._image = attrs.get("image")
128 self._flavor = attrs.get("flavor")
130 self.placement_groups = [PlacementGroup(name, self, pg_attrs["policy"])
131 for name, pg_attrs in attrs.get(
132 "placement_groups", {}).items()]
134 self.server_groups = [ServerGroup(name, self, sg_attrs["policy"])
135 for name, sg_attrs in attrs.get(
136 "server_groups", {}).items()]
138 # we have to do this first, because we are injecting external_network
140 sorted_networks = self.assign_external_network(attrs["networks"])
142 self.networks = OrderedDict(
143 (name, Network(name, self, net_attrs)) for name, net_attrs in
# Build model Server objects; _server_map indexes them by distinguished
# name (server.dn) for lookup in _get_server().
146 for name, server_attrs in sorted(attrs["servers"].items()):
147 server = Server(name, self, server_attrs)
148 self.servers.append(server)
149 self._server_map[server.dn] = server
# check_environment(): verify OpenStack credentials are available in the
# process environment, sourcing the OPENRC file when OS_AUTH_URL is not
# already exported.  NOTE(review): the try/except lines are missing from
# this chunk; the fragments below are the probe, the fallback, and the
# two error logs for the failure paths.
153 def check_environment(self):
155 os.environ['OS_AUTH_URL']
158 source_env(consts.OPENRC)
160 if e.errno != errno.EEXIST:
161 LOG.error('OPENRC file not found')
164 LOG.error('OS_AUTH_URL not found')
# NOTE(review): orphaned docstrings -- the @property/def lines they
# belong to (the image, flavor and user accessors) are missing from this
# chunk, so only the docstring text survives here.
168 """returns application's default image name"""
173 """returns application's default flavor name"""
178 """return login user name corresponding to image"""
# _add_resources_to_template(): translate this context's model objects
# (flavor, keypair, security group, networks/subnets/routers, and servers
# with placement-group/server-group scheduler hints) into Heat template
# resources.  Only used when no external heat_template file was given
# (see deploy()).  NOTE(review): many interior lines are missing from
# this chunk, so several branches below appear truncated.
181 def _add_resources_to_template(self, template):
182 """add to the template the resources represented by this context"""
185 if isinstance(self.flavor, dict):
186 flavor = self.flavor.setdefault("name", self.name + "-flavor")
187 template.add_flavor(**self.flavor)
188 self.flavors.add(flavor)
190 template.add_keypair(self.keypair_name, self.name)
191 template.add_security_group(self.secgroup_name, self.security_group)
193 for network in self.networks.values():
194 # Using existing network
195 if network.is_existing():
# NOTE(review): the body of the is_existing() branch (presumably a
# `continue`) is on a missing line.
197 template.add_network(network.stack_name,
198 network.physical_network,
200 network.segmentation_id,
201 network.port_security_enabled,
202 network.network_type)
203 template.add_subnet(network.subnet_stack_name, network.stack_name,
# Router resources are only meaningful for networks with a router model;
# the guarding condition is on a missing line.
209 template.add_router(network.router.stack_name,
210 network.router.external_gateway_info,
211 network.subnet_stack_name)
212 template.add_router_interface(network.router.stack_if_name,
213 network.router.stack_name,
214 network.subnet_stack_name)
216 # create a list of servers sorted by increasing no of placement groups
217 list_of_servers = sorted(self.servers,
218 key=lambda s: len(s.placement_groups))
221 # add servers with scheduler hints derived from placement groups
224 # create list of servers with availability policy
225 availability_servers = []
226 for server in list_of_servers:
227 for pg in server.placement_groups:
228 if pg.policy == "availability":
229 availability_servers.append(server)
# Record flavors used by availability servers so they are not duplicated.
232 for server in availability_servers:
233 if isinstance(server.flavor, dict):
235 self.flavors.add(server.flavor["name"])
237 self.flavors.add(h_join(server.stack_name, "flavor"))
239 # add servers with availability policy
241 for server in availability_servers:
243 for pg in server.placement_groups:
244 update_scheduler_hints(scheduler_hints, added_servers, pg)
245 # workaround for openstack nova bug, check JIRA: YARDSTICK-200
247 if len(availability_servers) == 2:
248 if not scheduler_hints["different_host"]:
249 scheduler_hints.pop("different_host", None)
250 server.add_to_template(template,
251 list(self.networks.values()),
# NOTE(review): the else-branch keeps only the first different_host hint
# (the nova-bug workaround); surrounding lines are missing.
254 scheduler_hints["different_host"] = \
255 scheduler_hints["different_host"][0]
256 server.add_to_template(template,
257 list(self.networks.values()),
260 server.add_to_template(template,
261 list(self.networks.values()),
263 added_servers.append(server.stack_name)
265 # create list of servers with affinity policy
266 affinity_servers = []
267 for server in list_of_servers:
268 for pg in server.placement_groups:
269 if pg.policy == "affinity":
270 affinity_servers.append(server)
273 # add servers with affinity policy
274 for server in affinity_servers:
275 if server.stack_name in added_servers:
278 for pg in server.placement_groups:
279 update_scheduler_hints(scheduler_hints, added_servers, pg)
280 server.add_to_template(template, list(self.networks.values()),
282 added_servers.append(server.stack_name)
# Declare Heat server-group resources for the configured policies.
285 for sg in self.server_groups:
286 template.add_server_group(sg.name, sg.policy)
288 # add remaining servers with no placement group configured
289 for server in list_of_servers:
290 # TODO placement_group and server_group should combine
291 if not server.placement_groups:
293 # affinity/anti-aff server group
294 sg = server.server_group
296 scheduler_hints["group"] = {'get_resource': sg.name}
297 server.add_to_template(template,
298 list(self.networks.values()),
301 def get_neutron_info(self):
302 if not self.shade_client:
303 self.shade_client = get_shade_client()
305 networks = self.shade_client.list_networks()
306 for network in self.networks.values():
307 for neutron_net in (net for net in networks if net.name == network.stack_name):
308 network.segmentation_id = neutron_net.get('provider:segmentation_id')
309 # we already have physical_network
310 # network.physical_network = neutron_net.get('provider:physical_network')
311 network.network_type = neutron_net.get('provider:network_type')
312 network.neutron_info = neutron_net
314 def _create_new_stack(self, heat_template):
316 return heat_template.create(block=True,
317 timeout=self.heat_timeout)
318 except KeyboardInterrupt:
319 raise y_exc.StackCreationInterrupt
321 LOG.exception("stack failed")
322 # let the other failures happen, we want stack trace
# _retrieve_existing_stack(): look up an already-deployed stack by name
# (used by the no_setup flag path in deploy()).  NOTE(review): the lines
# that query the stack and return it (or None) are missing from this
# chunk; only the construction and the not-found warning survive.
325 def _retrieve_existing_stack(self, stack_name):
326 stack = HeatStack(stack_name)
330 LOG.warning("Stack %s does not exist", self.name)
# deploy(): create (or, with the no_setup flag, re-attach to) the Heat
# stack for this context, generate the SSH keypair file if absent, and
# copy stack outputs onto the model servers.  NOTE(review): the
# `def deploy(self):` header and several interior lines are missing from
# this chunk (e.g. the tail of the key_filename join and the branch
# structure around the two _create_new_stack calls).
334 """deploys template into a stack using cloud"""
335 LOG.info("Deploying context '%s' START", self.name)
# Key file path is derived from the yardstick install root + context name.
337 self.key_filename = ''.join(
338 [consts.YARDSTICK_ROOT_PATH,
339 'yardstick/resources/files/yardstick_key-',
341 # Permissions may have changed since creation; this can be fixed. If we
342 # overwrite the file, we lose future access to VMs using this key.
343 # As long as the file exists, even if it is unreadable, keep it intact
344 if not os.path.exists(self.key_filename):
345 SSH.gen_keys(self.key_filename)
347 heat_template = HeatTemplate(
348 self.name, template_file=self.template_file,
349 heat_parameters=self.heat_parameters,
350 os_cloud_config=self._flags.os_cloud_config)
# Without an external template file, build the template from the model.
352 if self.template_file is None:
353 self._add_resources_to_template(heat_template)
355 if self._flags.no_setup:
356 # Try to get an existing stack, returns a stack or None
357 self.stack = self._retrieve_existing_stack(self.name)
359 self.stack = self._create_new_stack(heat_template)
362 self.stack = self._create_new_stack(heat_template)
364 # TODO: use Neutron to get segmentation-id
365 self.get_neutron_info()
367 # copy some vital stack output into server objects
368 for server in self.servers:
370 self.add_server_port(server)
372 if server.floating_ip:
374 self.stack.outputs[server.floating_ip["stack_name"]]
376 LOG.info("Deploying context '%s' DONE", self.name)
379 def _port_net_is_existing(port_info):
380 net_flags = port_info.get('net_flags', {})
381 return net_flags.get(consts.IS_EXISTING)
384 def _port_net_is_public(port_info):
385 net_flags = port_info.get('net_flags', {})
386 return net_flags.get(consts.IS_PUBLIC)
# add_server_port(): copy Heat stack outputs (port IPs and interface
# details) onto the given model server.  Sets server.public_ip /
# private_ip from ports on pre-existing networks and rebuilds
# server.interfaces via make_interface_dict().  NOTE(review): interior
# lines are missing from this chunk -- e.g. where `port` is bound in the
# second loop and the trailing arguments to make_interface_dict.
388 def add_server_port(self, server):
389 server_ports = server.ports.values()
390 for server_port in server_ports:
391 port_info = server_port[0]
392 port_ip = self.stack.outputs[port_info["stack_name"]]
393 port_net_is_existing = self._port_net_is_existing(port_info)
394 port_net_is_public = self._port_net_is_public(port_info)
395 if port_net_is_existing and (port_net_is_public or
396 len(server_ports) == 1):
397 server.public_ip = port_ip
398 if not server.private_ip or len(server_ports) == 1:
399 server.private_ip = port_ip
401 server.interfaces = {}
402 for network_name, ports in server.ports.items():
404 # port['port'] is either port name from mapping or default network_name
405 if self._port_net_is_existing(port):
407 server.interfaces[port['port']] = self.make_interface_dict(network_name,
411 server.override_ip(network_name, port)
# make_interface_dict(): assemble the per-interface info dict consumed
# by NSB / vnf_generic from the Heat stack `outputs` mapping.
# `stack_name` is the port's stack resource name; subnet cidr/gateway are
# read from network-level outputs keyed on the context + network name.
# NOTE(review): the dict literal's opening lines and the tails of the
# cidr/gateway output keys are missing from this chunk.
413 def make_interface_dict(self, network_name, port, stack_name, outputs):
414 private_ip = outputs[stack_name]
415 mac_address = outputs[h_join(stack_name, "mac_address")]
416 # these are attributes of the network, not the port
417 output_subnet_cidr = outputs[h_join(self.name, network_name,
420 # these are attributes of the network, not the port
421 output_subnet_gateway = outputs[h_join(self.name, network_name,
422 'subnet', 'gateway_ip')]
425 # add default port name
427 "private_ip": private_ip,
428 "subnet_id": outputs[h_join(stack_name, "subnet_id")],
429 "subnet_cidr": output_subnet_cidr,
430 "network": str(ipaddress.ip_network(output_subnet_cidr).network_address),
431 "netmask": str(ipaddress.ip_network(output_subnet_cidr).netmask),
432 "gateway_ip": output_subnet_gateway,
433 "mac_address": mac_address,
434 "device_id": outputs[h_join(stack_name, "device_id")],
435 "network_id": outputs[h_join(stack_name, "network_id")],
436 # this should be == vld_id for NSB tests
437 "network_name": network_name,
438 # to match vnf_generic
439 "local_mac": mac_address,
440 "local_ip": private_ip,
443 def _delete_key_file(self):
445 utils.remove_file(self.key_filename)
446 utils.remove_file(self.key_filename + ".pub")
448 LOG.exception("There was an error removing the key file %s",
# undeploy(): delete the Heat stack and the generated SSH key files,
# honoring the no_teardown flag (which logs SKIP and leaves the stack
# up).  NOTE(review): the `def undeploy(self):` header and the actual
# stack-deletion lines are missing from this chunk.
452 """undeploys stack from cloud"""
453 if self._flags.no_teardown:
454 LOG.info("Undeploying context '%s' SKIP", self.name)
458 LOG.info("Undeploying context '%s' START", self.name)
461 LOG.info("Undeploying context '%s' DONE", self.name)
463 self._delete_key_file()
465 super(HeatContext, self).undeploy()
468 def generate_routing_table(server):
471 "network": intf["network"],
472 "netmask": intf["netmask"],
474 # We have to encode a None gateway as '' for Jinja2 to YAML conversion
475 "gateway": intf["gateway_ip"] if intf["gateway_ip"] else '',
477 for name, intf in server.interfaces.items()
# _get_server(): resolve a server reference from a scenario into the
# node dict (ip/user/key/interfaces/routing_table...) used by runners.
# Accepts either a plain name for a yardstick-created server or a
# mapping with *_ip_attr keys for external heat templates.
# NOTE(review): interior lines are missing from this chunk (return-None
# guards, the result-dict opening, and the external-template else
# branches), so the flow below is only partially visible.
481 def _get_server(self, attr_name):
482 """lookup server info by name from context
483 attr_name: either a name for a server created by yardstick or a dict
484 with attribute name mapping when using external heat templates
486 if isinstance(attr_name, collections.Mapping):
487 node_name, cname = self.split_host_name(attr_name['name'])
488 if cname is None or cname != self.name:
491 # Create a dummy server instance for holding the *_ip attributes
492 server = Server(node_name, self, {})
# object() sentinels guarantee a miss (-> None) when the attr key is absent.
493 server.public_ip = self.stack.outputs.get(
494 attr_name.get("public_ip_attr", object()), None)
496 server.private_ip = self.stack.outputs.get(
497 attr_name.get("private_ip_attr", object()), None)
500 server = self._server_map[attr_name]
# Fall back to the name without its "-<suffix>" when the exact name misses.
502 attr_name_no_suffix = attr_name.split("-")[0]
503 server = self._server_map.get(attr_name_no_suffix, None)
507 pkey = pkg_resources.resource_string(
508 'yardstick.resources',
509 h_join('files/yardstick_key', self.name)).decode('utf-8')
512 "user": server.context.user,
514 "private_ip": server.private_ip,
515 "interfaces": server.interfaces,
516 "routing_table": self.generate_routing_table(server),
517 # empty IPv6 routing table
519 # we want to save the context name so we can generate pod.yaml
522 # Target server may only have private_ip
524 result["ip"] = server.public_ip
# _get_network(): resolve a network reference into a plain info dict
# (name / segmentation_id / network_type / physical_network).  A plain
# string looks up self.networks by name; a mapping searches networks by
# its first key/value as an attribute match.  NOTE(review): the
# return-None guards and the result-dict opening are on lines missing
# from this chunk.
528 def _get_network(self, attr_name):
529 if not isinstance(attr_name, collections.Mapping):
530 network = self.networks.get(attr_name, None)
533 # Only take the first key, value
534 key, value = next(iter(attr_name.items()), (None, None))
537 network_iter = (n for n in self.networks.values() if getattr(n, key) == value)
538 network = next(network_iter, None)
544 "name": network.name,
545 "segmentation_id": network.segmentation_id,
546 "network_type": network.network_type,
547 "physical_network": network.physical_network,
551 def _get_physical_nodes(self):
# _get_physical_node_for_server(): map a "node.context" server name to
# the physical compute node hosting it, returned as
# "<node_name>.<context_name>".  It matches the OpenStack server's
# hypervisor hostname against the hypervisors' host_ip entries in
# self.nodes.  NOTE(review): the early `return None` guards and some
# interior lines are missing from this chunk; note it formats with
# self._name here while other methods use self.name -- presumably both
# come from the Context base class (TODO confirm).
554 def _get_physical_node_for_server(self, server_name):
555 node_name, ctx_name = self.split_host_name(server_name)
556 if ctx_name is None or self.name != ctx_name:
559 matching_nodes = [s for s in self.servers if s.name == node_name]
560 if len(matching_nodes) == 0:
# Query the live OpenStack server to learn which hypervisor hosts it.
563 server = openstack_utils.get_server(self.shade_client,
564 name_or_id=server_name)
567 server = server.toDict()
568 list_hypervisors = self.operator_client.list_hypervisors()
570 for hypervisor in list_hypervisors:
571 if hypervisor.hypervisor_hostname == server['OS-EXT-SRV-ATTR:hypervisor_hostname']:
572 for node in self.nodes:
573 if node['ip'] == hypervisor.host_ip:
574 return "{}.{}".format(node['name'], self._name)