1 ##############################################################################
2 # Copyright (c) 2015 Ericsson AB and others.
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
from __future__ import absolute_import
from __future__ import print_function

import collections
import errno
import functools
import ipaddress
import logging
import os
import pkg_resources

from collections import OrderedDict

from yardstick.benchmark.contexts.base import Context
from yardstick.benchmark.contexts.model import Network
from yardstick.benchmark.contexts.model import PlacementGroup, ServerGroup
from yardstick.benchmark.contexts.model import Server
from yardstick.benchmark.contexts.model import update_scheduler_hints
from yardstick.common import exceptions as y_exc
from yardstick.common.openstack_utils import get_shade_client
from yardstick.orchestrator.heat import HeatStack
from yardstick.orchestrator.heat import HeatTemplate
from yardstick.common import constants as consts
from yardstick.common import utils
from yardstick.common.utils import source_env
from yardstick.ssh import SSH
LOG = logging.getLogger(__name__)

# Default number of seconds to wait for a Heat stack create/delete to finish.
DEFAULT_HEAT_TIMEOUT = 3600
def join_args(sep, *args):
    """Join *args* with the separator *sep* and return the resulting string."""
    return sep.join(args)


# Convenience partial used throughout this module to build Heat resource
# names of the form "a-b-c".
h_join = functools.partial(join_args, '-')
49 class HeatContext(Context):
50 """Class that represents a context in the logical model"""
52 __context_type__ = "Heat"
56 self.networks = OrderedDict()
57 self.heat_timeout = None
59 self.placement_groups = []
60 self.server_groups = []
61 self.keypair_name = None
62 self.secgroup_name = None
69 self.template_file = None
70 self.heat_parameters = None
71 self.shade_client = None
72 self.heat_timeout = None
73 self.key_filename = None
74 super(HeatContext, self).__init__()
77 def assign_external_network(networks):
78 sorted_networks = sorted(networks.items())
79 external_network = os.environ.get("EXTERNAL_NETWORK", "net04_ext")
81 have_external_network = any(net.get("external_network") for net in networks.values())
82 if not have_external_network:
83 # try looking for mgmt network first
85 networks['mgmt']["external_network"] = external_network
88 # otherwise assign it to first network using os.environ
89 sorted_networks[0][1]["external_network"] = external_network
91 return sorted_networks
93 def init(self, attrs):
94 """Initializes itself from the supplied arguments"""
95 super(HeatContext, self).init(attrs)
97 self.check_environment()
98 self._user = attrs.get("user")
100 self.template_file = attrs.get("heat_template")
102 self.heat_timeout = attrs.get("timeout", DEFAULT_HEAT_TIMEOUT)
103 if self.template_file:
104 self.heat_parameters = attrs.get("heat_parameters")
107 self.keypair_name = h_join(self.name, "key")
108 self.secgroup_name = h_join(self.name, "secgroup")
110 self._image = attrs.get("image")
112 self._flavor = attrs.get("flavor")
114 self.placement_groups = [PlacementGroup(name, self, pg_attrs["policy"])
115 for name, pg_attrs in attrs.get(
116 "placement_groups", {}).items()]
118 self.server_groups = [ServerGroup(name, self, sg_attrs["policy"])
119 for name, sg_attrs in attrs.get(
120 "server_groups", {}).items()]
122 # we have to do this first, because we are injecting external_network
124 sorted_networks = self.assign_external_network(attrs["networks"])
126 self.networks = OrderedDict(
127 (name, Network(name, self, net_attrs)) for name, net_attrs in
130 for name, server_attrs in sorted(attrs["servers"].items()):
131 server = Server(name, self, server_attrs)
132 self.servers.append(server)
133 self._server_map[server.dn] = server
137 def check_environment(self):
139 os.environ['OS_AUTH_URL']
142 source_env(consts.OPENRC)
144 if e.errno != errno.EEXIST:
145 LOG.error('OPENRC file not found')
148 LOG.error('OS_AUTH_URL not found')
152 """returns application's default image name"""
157 """returns application's default flavor name"""
162 """return login user name corresponding to image"""
165 def _add_resources_to_template(self, template):
166 """add to the template the resources represented by this context"""
169 if isinstance(self.flavor, dict):
170 flavor = self.flavor.setdefault("name", self.name + "-flavor")
171 template.add_flavor(**self.flavor)
172 self.flavors.add(flavor)
174 template.add_keypair(self.keypair_name, self.name)
175 template.add_security_group(self.secgroup_name)
177 for network in self.networks.values():
178 template.add_network(network.stack_name,
179 network.physical_network,
181 network.segmentation_id,
182 network.port_security_enabled,
183 network.network_type)
184 template.add_subnet(network.subnet_stack_name, network.stack_name,
190 template.add_router(network.router.stack_name,
191 network.router.external_gateway_info,
192 network.subnet_stack_name)
193 template.add_router_interface(network.router.stack_if_name,
194 network.router.stack_name,
195 network.subnet_stack_name)
197 # create a list of servers sorted by increasing no of placement groups
198 list_of_servers = sorted(self.servers,
199 key=lambda s: len(s.placement_groups))
202 # add servers with scheduler hints derived from placement groups
205 # create list of servers with availability policy
206 availability_servers = []
207 for server in list_of_servers:
208 for pg in server.placement_groups:
209 if pg.policy == "availability":
210 availability_servers.append(server)
213 for server in availability_servers:
214 if isinstance(server.flavor, dict):
216 self.flavors.add(server.flavor["name"])
218 self.flavors.add(h_join(server.stack_name, "flavor"))
220 # add servers with availability policy
222 for server in availability_servers:
224 for pg in server.placement_groups:
225 update_scheduler_hints(scheduler_hints, added_servers, pg)
226 # workaround for openstack nova bug, check JIRA: YARDSTICK-200
228 if len(availability_servers) == 2:
229 if not scheduler_hints["different_host"]:
230 scheduler_hints.pop("different_host", None)
231 server.add_to_template(template,
232 list(self.networks.values()),
235 scheduler_hints["different_host"] = \
236 scheduler_hints["different_host"][0]
237 server.add_to_template(template,
238 list(self.networks.values()),
241 server.add_to_template(template,
242 list(self.networks.values()),
244 added_servers.append(server.stack_name)
246 # create list of servers with affinity policy
247 affinity_servers = []
248 for server in list_of_servers:
249 for pg in server.placement_groups:
250 if pg.policy == "affinity":
251 affinity_servers.append(server)
254 # add servers with affinity policy
255 for server in affinity_servers:
256 if server.stack_name in added_servers:
259 for pg in server.placement_groups:
260 update_scheduler_hints(scheduler_hints, added_servers, pg)
261 server.add_to_template(template, list(self.networks.values()),
263 added_servers.append(server.stack_name)
266 for sg in self.server_groups:
267 template.add_server_group(sg.name, sg.policy)
269 # add remaining servers with no placement group configured
270 for server in list_of_servers:
271 # TODO placement_group and server_group should combine
272 if not server.placement_groups:
274 # affinity/anti-aff server group
275 sg = server.server_group
277 scheduler_hints["group"] = {'get_resource': sg.name}
278 server.add_to_template(template,
279 list(self.networks.values()),
282 def get_neutron_info(self):
283 if not self.shade_client:
284 self.shade_client = get_shade_client()
286 networks = self.shade_client.list_networks()
287 for network in self.networks.values():
288 for neutron_net in (net for net in networks if net.name == network.stack_name):
289 network.segmentation_id = neutron_net.get('provider:segmentation_id')
290 # we already have physical_network
291 # network.physical_network = neutron_net.get('provider:physical_network')
292 network.network_type = neutron_net.get('provider:network_type')
293 network.neutron_info = neutron_net
295 def _create_new_stack(self, heat_template):
297 return heat_template.create(block=True,
298 timeout=self.heat_timeout)
299 except KeyboardInterrupt:
300 raise y_exc.StackCreationInterrupt
302 LOG.exception("stack failed")
303 # let the other failures happen, we want stack trace
306 def _retrieve_existing_stack(self, stack_name):
307 stack = HeatStack(stack_name)
311 LOG.warning("Stack %s does not exist", self.name)
315 """deploys template into a stack using cloud"""
316 LOG.info("Deploying context '%s' START", self.name)
318 self.key_filename = ''.join(
319 [consts.YARDSTICK_ROOT_PATH,
320 'yardstick/resources/files/yardstick_key-',
322 # Permissions may have changed since creation; this can be fixed. If we
323 # overwrite the file, we lose future access to VMs using this key.
324 # As long as the file exists, even if it is unreadable, keep it intact
325 if not os.path.exists(self.key_filename):
326 SSH.gen_keys(self.key_filename)
328 heat_template = HeatTemplate(self.name, self.template_file,
329 self.heat_parameters)
331 if self.template_file is None:
332 self._add_resources_to_template(heat_template)
334 if self._flags.no_setup:
335 # Try to get an existing stack, returns a stack or None
336 self.stack = self._retrieve_existing_stack(self.name)
338 self.stack = self._create_new_stack(heat_template)
341 self.stack = self._create_new_stack(heat_template)
343 # TODO: use Neutron to get segmentation-id
344 self.get_neutron_info()
346 # copy some vital stack output into server objects
347 for server in self.servers:
349 self.add_server_port(server)
351 if server.floating_ip:
353 self.stack.outputs[server.floating_ip["stack_name"]]
355 LOG.info("Deploying context '%s' DONE", self.name)
357 def add_server_port(self, server):
358 # use private ip from first port in first network
360 private_port = next(iter(server.ports.values()))[0]
362 LOG.exception("Unable to find first private port in %s", server.ports)
364 server.private_ip = self.stack.outputs[private_port["stack_name"]]
365 server.interfaces = {}
366 for network_name, ports in server.ports.items():
368 # port['port'] is either port name from mapping or default network_name
369 server.interfaces[port['port']] = self.make_interface_dict(network_name,
373 server.override_ip(network_name, port)
375 def make_interface_dict(self, network_name, port, stack_name, outputs):
376 private_ip = outputs[stack_name]
377 mac_address = outputs[h_join(stack_name, "mac_address")]
378 # these are attributes of the network, not the port
379 output_subnet_cidr = outputs[h_join(self.name, network_name,
382 # these are attributes of the network, not the port
383 output_subnet_gateway = outputs[h_join(self.name, network_name,
384 'subnet', 'gateway_ip')]
387 # add default port name
389 "private_ip": private_ip,
390 "subnet_id": outputs[h_join(stack_name, "subnet_id")],
391 "subnet_cidr": output_subnet_cidr,
392 "network": str(ipaddress.ip_network(output_subnet_cidr).network_address),
393 "netmask": str(ipaddress.ip_network(output_subnet_cidr).netmask),
394 "gateway_ip": output_subnet_gateway,
395 "mac_address": mac_address,
396 "device_id": outputs[h_join(stack_name, "device_id")],
397 "network_id": outputs[h_join(stack_name, "network_id")],
398 # this should be == vld_id for NSB tests
399 "network_name": network_name,
400 # to match vnf_generic
401 "local_mac": mac_address,
402 "local_ip": private_ip,
405 def _delete_key_file(self):
407 utils.remove_file(self.key_filename)
408 utils.remove_file(self.key_filename + ".pub")
410 LOG.exception("There was an error removing the key file %s",
414 """undeploys stack from cloud"""
415 if self._flags.no_teardown:
416 LOG.info("Undeploying context '%s' SKIP", self.name)
420 LOG.info("Undeploying context '%s' START", self.name)
423 LOG.info("Undeploying context '%s' DONE", self.name)
425 self._delete_key_file()
427 super(HeatContext, self).undeploy()
430 def generate_routing_table(server):
433 "network": intf["network"],
434 "netmask": intf["netmask"],
436 # We have to encode a None gateway as '' for Jinja2 to YAML conversion
437 "gateway": intf["gateway_ip"] if intf["gateway_ip"] else '',
439 for name, intf in server.interfaces.items()
443 def _get_server(self, attr_name):
444 """lookup server info by name from context
445 attr_name: either a name for a server created by yardstick or a dict
446 with attribute name mapping when using external heat templates
448 if isinstance(attr_name, collections.Mapping):
449 node_name, cname = self.split_name(attr_name['name'])
450 if cname is None or cname != self.name:
453 # Create a dummy server instance for holding the *_ip attributes
454 server = Server(node_name, self, {})
455 server.public_ip = self.stack.outputs.get(
456 attr_name.get("public_ip_attr", object()), None)
458 server.private_ip = self.stack.outputs.get(
459 attr_name.get("private_ip_attr", object()), None)
462 server = self._server_map[attr_name]
464 attr_name_no_suffix = attr_name.split("-")[0]
465 server = self._server_map.get(attr_name_no_suffix, None)
469 pkey = pkg_resources.resource_string(
470 'yardstick.resources',
471 h_join('files/yardstick_key', self.name)).decode('utf-8')
474 "user": server.context.user,
476 "private_ip": server.private_ip,
477 "interfaces": server.interfaces,
478 "routing_table": self.generate_routing_table(server),
479 # empty IPv6 routing table
481 # we want to save the contex name so we can generate pod.yaml
484 # Target server may only have private_ip
486 result["ip"] = server.public_ip
490 def _get_network(self, attr_name):
491 if not isinstance(attr_name, collections.Mapping):
492 network = self.networks.get(attr_name, None)
495 # Only take the first key, value
496 key, value = next(iter(attr_name.items()), (None, None))
499 network_iter = (n for n in self.networks.values() if getattr(n, key) == value)
500 network = next(network_iter, None)
506 "name": network.name,
507 "segmentation_id": network.segmentation_id,
508 "network_type": network.network_type,
509 "physical_network": network.physical_network,