1 ##############################################################################
2 # Copyright (c) 2015 Ericsson AB and others.
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
10 from __future__ import absolute_import
11 from __future__ import print_function
17 from collections import OrderedDict
22 from yardstick.benchmark.contexts.base import Context
23 from yardstick.benchmark.contexts.model import Network
24 from yardstick.benchmark.contexts.model import PlacementGroup, ServerGroup
25 from yardstick.benchmark.contexts.model import Server
26 from yardstick.benchmark.contexts.model import update_scheduler_hints
27 from yardstick.common import exceptions as y_exc
28 from yardstick.common.openstack_utils import get_neutron_client
29 from yardstick.orchestrator.heat import HeatStack
30 from yardstick.orchestrator.heat import HeatTemplate
31 from yardstick.common import constants as consts
32 from yardstick.common.utils import source_env
33 from yardstick.ssh import SSH
# Module-level logger for this Heat context implementation.
35 LOG = logging.getLogger(__name__)
# Default timeout for Heat stack creation, overridable per-context via the
# "timeout" attribute in init(); presumably seconds — TODO confirm against
# HeatTemplate.create().
37 DEFAULT_HEAT_TIMEOUT = 3600
40 def join_args(sep, *args):
48 class HeatContext(Context):
49 """Class that represents a context in the logical model"""
# Type tag for this context; NOTE(review): presumably consumed by the
# Context base-class registry to select this implementation when a
# testcase declares context type "Heat" — confirm in contexts.base.
51 __context_type__ = "Heat"
# NOTE(review): fragment of HeatContext.__init__ — the "def __init__" line
# and several attribute assignments are elided in this listing. Only
# comments added below; code unchanged.
# Ordered mapping of network name -> Network model object (order matters
# for deterministic template generation).
55 self.networks = OrderedDict()
56 self.heat_timeout = None
58 self.placement_groups = []
59 self.server_groups = []
# Stack-scoped resource names; populated in init() from the context name.
60 self.keypair_name = None
61 self.secgroup_name = None
# Path to an external heat template, if the user supplies one.
68 self.template_file = None
69 self.heat_parameters = None
# Neutron client is created lazily in get_neutron_info().
70 self.neutron_client = None
# NOTE(review): heat_timeout is also assigned above (line 56) — this
# second assignment is redundant; consider removing one.
71 self.heat_timeout = None
72 self.key_filename = None
73 super(HeatContext, self).__init__()
# Ensure exactly one network carries the external (floating-IP) network.
# Mutates the per-network attribute dicts in place and returns the
# networks as a deterministically sorted list of (name, attrs) pairs.
# NOTE(review): presumably a @staticmethod — decorator line elided here.
76 def assign_external_network(networks):
# Sort once so the fallback below always picks the same first network.
77 sorted_networks = sorted(networks.items())
# External network name from the environment, with an OpenStack default.
78 external_network = os.environ.get("EXTERNAL_NETWORK", "net04_ext")
80 have_external_network = any(net.get("external_network") for net in networks.values())
81 if not have_external_network:
82 # try looking for mgmt network first
# NOTE(review): the try/except KeyError around the 'mgmt' lookup is
# elided in this listing; as shown this line would raise KeyError
# whenever no 'mgmt' network is defined — confirm against full source.
84 networks['mgmt']["external_network"] = external_network
87 # otherwise assign it to first network using os.environ
88 sorted_networks[0][1]["external_network"] = external_network
90 return sorted_networks
92 def init(self, attrs):
93 """Initializes itself from the supplied arguments"""
94 super(HeatContext, self).init(attrs)
# Fail early if OpenStack credentials are not available in the env.
96 self.check_environment()
97 self._user = attrs.get("user")
# If the user supplies an external heat template, only its parameters
# are read. NOTE(review): the early return for that branch appears to
# be elided in this listing — confirm the template_file branch skips
# the resource-model construction below.
99 self.template_file = attrs.get("heat_template")
101 self.heat_timeout = attrs.get("timeout", DEFAULT_HEAT_TIMEOUT)
102 if self.template_file:
103 self.heat_parameters = attrs.get("heat_parameters")
# Per-context resource names derived from the context name.
106 self.keypair_name = h_join(self.name, "key")
107 self.secgroup_name = h_join(self.name, "secgroup")
109 self._image = attrs.get("image")
111 self._flavor = attrs.get("flavor")
# Build model objects for optional placement groups and server groups;
# both default to empty when the attribute is absent.
113 self.placement_groups = [PlacementGroup(name, self, pg_attrs["policy"])
114 for name, pg_attrs in attrs.get(
115 "placement_groups", {}).items()]
117 self.server_groups = [ServerGroup(name, self, sg_attrs["policy"])
118 for name, sg_attrs in attrs.get(
119 "server_groups", {}).items()]
121 # we have to do this first, because we are injecting external_network
123 sorted_networks = self.assign_external_network(attrs["networks"])
# Preserve the sorted order in the context's network map.
# NOTE(review): the generator's closing "sorted_networks)" line is
# elided in this listing.
125 self.networks = OrderedDict(
126 (name, Network(name, self, net_attrs)) for name, net_attrs in
# Sort servers by name for deterministic construction order; keep both
# a list and a dn-keyed map for lookups.
129 for name, server_attrs in sorted(attrs["servers"].items()):
130 server = Server(name, self, server_attrs)
131 self.servers.append(server)
132 self._server_map[server.dn] = server
# Per-context SSH key path under the yardstick resources tree.
# NOTE(review): the final list element (the context name) is elided in
# this listing.
136 self.key_filename = ''.join(
137 [consts.YARDSTICK_ROOT_PATH,
138 'yardstick/resources/files/yardstick_key-',
140 # Permissions may have changed since creation; this can be fixed. If we
141 # overwrite the file, we lose future access to VMs using this key.
142 # As long as the file exists, even if it is unreadable, keep it intact
143 if not os.path.exists(self.key_filename):
144 SSH.gen_keys(self.key_filename)
# Verify OpenStack auth configuration is present; if OS_AUTH_URL is not
# already in the environment, source the openrc file and re-check.
# NOTE(review): the try/except scaffolding (KeyError probe on os.environ,
# IOError handling around source_env, and the re-raise) is elided in this
# listing.
146 def check_environment(self):
# Probe: raises KeyError when credentials were not exported.
148 os.environ['OS_AUTH_URL']
151 source_env(consts.OPENRC)
# NOTE(review): checking errno against EEXIST on a failed file read
# looks wrong — ENOENT ("no such file") seems intended for the
# "OPENRC file not found" path; confirm against full source.
153 if e.errno != errno.EEXIST:
154 LOG.error('OPENRC file not found')
157 LOG.error('OS_AUTH_URL not found')
161 """returns application's default image name"""
166 """returns application's default flavor name"""
171 """return login user name corresponding to image"""
174 def _add_resources_to_template(self, template):
175 """add to the template the resources represented by this context"""
# --- flavor: only added when given as a dict; plain string flavors are
# assumed to already exist in the cloud.
178 if isinstance(self.flavor, dict):
# Default the flavor name from the context name if not provided.
179 flavor = self.flavor.setdefault("name", self.name + "-flavor")
180 template.add_flavor(**self.flavor)
181 self.flavors.add(flavor)
# --- per-context keypair and security group.
183 template.add_keypair(self.keypair_name, self.name)
184 template.add_security_group(self.secgroup_name)
# --- networks, subnets and (optional) routers.
# NOTE(review): several argument lines of add_network/add_subnet and the
# "if network.router:" guard appear to be elided in this listing.
186 for network in self.networks.values():
187 template.add_network(network.stack_name,
188 network.physical_network,
190 network.segmentation_id,
191 network.port_security_enabled,
192 network.network_type)
193 template.add_subnet(network.subnet_stack_name, network.stack_name,
199 template.add_router(network.router.stack_name,
200 network.router.external_gateway_info,
201 network.subnet_stack_name)
202 template.add_router_interface(network.router.stack_if_name,
203 network.router.stack_name,
204 network.subnet_stack_name)
206 # create a list of servers sorted by increasing no of placement groups
207 list_of_servers = sorted(self.servers,
208 key=lambda s: len(s.placement_groups))
211 # add servers with scheduler hints derived from placement groups
214 # create list of servers with availability policy
215 availability_servers = []
216 for server in list_of_servers:
217 for pg in server.placement_groups:
218 if pg.policy == "availability":
219 availability_servers.append(server)
# Record the flavor each availability server will use (named flavor from
# a dict, otherwise a per-server derived flavor name).
222 for server in availability_servers:
223 if isinstance(server.flavor, dict):
225 self.flavors.add(server.flavor["name"])
227 self.flavors.add(h_join(server.stack_name, "flavor"))
229 # add servers with availability policy
# NOTE(review): the initialisation of added_servers/scheduler_hints and
# the branch structure around the three add_to_template calls below are
# elided in this listing — the visible lines are alternatives from
# different branches, not sequential calls.
231 for server in availability_servers:
233 for pg in server.placement_groups:
234 update_scheduler_hints(scheduler_hints, added_servers, pg)
235 # workaround for openstack nova bug, check JIRA: YARDSTICK-200
237 if len(availability_servers) == 2:
238 if not scheduler_hints["different_host"]:
239 scheduler_hints.pop("different_host", None)
240 server.add_to_template(template,
241 list(self.networks.values()),
# Nova rejects a single-element list here; unwrap to a scalar hint.
244 scheduler_hints["different_host"] = \
245 scheduler_hints["different_host"][0]
246 server.add_to_template(template,
247 list(self.networks.values()),
250 server.add_to_template(template,
251 list(self.networks.values()),
253 added_servers.append(server.stack_name)
255 # create list of servers with affinity policy
256 affinity_servers = []
257 for server in list_of_servers:
258 for pg in server.placement_groups:
259 if pg.policy == "affinity":
260 affinity_servers.append(server)
263 # add servers with affinity policy
# Skip servers already templated in the availability pass above.
264 for server in affinity_servers:
265 if server.stack_name in added_servers:
268 for pg in server.placement_groups:
269 update_scheduler_hints(scheduler_hints, added_servers, pg)
270 server.add_to_template(template, list(self.networks.values()),
272 added_servers.append(server.stack_name)
# --- nova server groups declared on the context.
275 for sg in self.server_groups:
276 template.add_server_group(sg.name, sg.policy)
278 # add remaining servers with no placement group configured
279 for server in list_of_servers:
280 # TODO placement_group and server_group should combine
281 if not server.placement_groups:
283 # affinity/anti-aff server group
284 sg = server.server_group
# Reference the heat-created server group resource as the hint.
286 scheduler_hints["group"] = {'get_resource': sg.name}
287 server.add_to_template(template,
288 list(self.networks.values()),
# Enrich the context's Network models with provider attributes fetched
# from Neutron (segmentation id, network type, raw neutron record).
291 def get_neutron_info(self):
# Lazily create the neutron client on first use.
292 if not self.neutron_client:
293 self.neutron_client = get_neutron_client()
295 networks = self.neutron_client.list_networks()
# Match each model network to its neutron counterpart by stack name.
# NOTE(review): O(n*m) scan — fine for the handful of networks a
# context defines.
296 for network in self.networks.values():
297 for neutron_net in networks['networks']:
298 if neutron_net['name'] == network.stack_name:
299 network.segmentation_id = neutron_net.get('provider:segmentation_id')
300 # we already have physical_network
301 # network.physical_network = neutron_net.get('provider:physical_network')
302 network.network_type = neutron_net.get('provider:network_type')
303 network.neutron_info = neutron_net
# Create the stack from the template, blocking until complete or the
# configured timeout expires.
# NOTE(review): the "try:" line and the broad "except Exception:" header
# around the create call are elided in this listing.
305 def _create_new_stack(self, heat_template):
307 return heat_template.create(block=True,
308 timeout=self.heat_timeout)
# Translate Ctrl-C into a domain exception so callers can distinguish
# a user interrupt from a stack failure.
309 except KeyboardInterrupt:
310 raise y_exc.StackCreationInterrupt
# Log with traceback, then re-raise (see comment below) so the caller
# still sees the original failure.
312 LOG.exception("stack failed")
313 # let the other failures happen, we want stack trace
# Look up an already-deployed stack by name; used by the no_setup flag
# path in deploy().
# NOTE(review): the lines that check whether the stack exists and return
# it (or None) are elided in this listing; only the not-found warning is
# visible.
316 def _retrieve_existing_stack(self, stack_name):
317 stack = HeatStack(stack_name)
# NOTE(review): the warning reports self.name while the lookup used
# stack_name — confirm these are always identical, otherwise the log
# message can be misleading.
321 LOG.warning("Stack %s does not exist", self.name)
325 """deploys template into a stack using cloud"""
326 LOG.info("Deploying context '%s' START", self.name)
328 heat_template = HeatTemplate(self.name, self.template_file,
329 self.heat_parameters)
331 if self.template_file is None:
332 self._add_resources_to_template(heat_template)
334 if self._flags.no_setup:
335 # Try to get an existing stack, returns a stack or None
336 self.stack = self._retrieve_existing_stack(self.name)
338 self.stack = self._create_new_stack(heat_template)
341 self.stack = self._create_new_stack(heat_template)
343 # TODO: use Neutron to get segmentation-id
344 self.get_neutron_info()
346 # copy some vital stack output into server objects
347 for server in self.servers:
349 self.add_server_port(server)
351 if server.floating_ip:
353 self.stack.outputs[server.floating_ip["stack_name"]]
355 LOG.info("Deploying context '%s' DONE", self.name)
# Populate a Server model with addressing data taken from the deployed
# stack's outputs: private IP from its first port, plus one interface
# dict per port.
357 def add_server_port(self, server):
358 # use private ip from first port in first network
# NOTE(review): the try/except (presumably StopIteration/KeyError)
# around this lookup is elided in this listing — the LOG.exception line
# below is its handler.
360 private_port = next(iter(server.ports.values()))[0]
362 LOG.exception("Unable to find first private port in %s", server.ports)
364 server.private_ip = self.stack.outputs[private_port["stack_name"]]
365 server.interfaces = {}
# NOTE(review): the inner "for port in ports:" loop header and the
# remaining make_interface_dict arguments are elided in this listing.
366 for network_name, ports in server.ports.items():
368 # port['port'] is either port name from mapping or default network_name
369 server.interfaces[port['port']] = self.make_interface_dict(network_name,
# Allow per-network IP overrides configured on the server.
373 server.override_ip(network_name, port)
# Build the per-interface description dict consumed by test runners
# (vnf_generic-compatible keys), from heat stack outputs.
# outputs: the stack's output map; stack_name: the port's output key
# prefix; network_name: the model network this port belongs to.
375 def make_interface_dict(self, network_name, port, stack_name, outputs):
376 private_ip = outputs[stack_name]
377 mac_address = outputs[h_join(stack_name, "mac_address")]
378 # these are attributes of the network, not the port
# NOTE(review): the closing "'subnet', 'cidr')]" of this lookup is
# elided in this listing.
379 output_subnet_cidr = outputs[h_join(self.name, network_name,
382 # these are attributes of the network, not the port
383 output_subnet_gateway = outputs[h_join(self.name, network_name,
384 'subnet', 'gateway_ip')]
387 # add default port name
# NOTE(review): the "return {" opening of this dict literal is elided
# in this listing.
389 "private_ip": private_ip,
390 "subnet_id": outputs[h_join(stack_name, "subnet_id")],
391 "subnet_cidr": output_subnet_cidr,
# Derive network address and netmask from the subnet CIDR string.
392 "network": str(ipaddress.ip_network(output_subnet_cidr).network_address),
393 "netmask": str(ipaddress.ip_network(output_subnet_cidr).netmask),
394 "gateway_ip": output_subnet_gateway,
395 "mac_address": mac_address,
396 "device_id": outputs[h_join(stack_name, "device_id")],
397 "network_id": outputs[h_join(stack_name, "network_id")],
398 # this should be == vld_id for NSB tests
399 "network_name": network_name,
400 # to match vnf_generic
401 "local_mac": mac_address,
402 "local_ip": private_ip,
406 """undeploys stack from cloud"""
407 if self._flags.no_teardown:
408 LOG.info("Undeploying context '%s' SKIP", self.name)
412 LOG.info("Undeploying context '%s' START", self.name)
415 LOG.info("Undeploying context '%s' DONE", self.name)
417 if os.path.exists(self.key_filename):
419 os.remove(self.key_filename)
420 os.remove(self.key_filename + ".pub")
422 LOG.exception("Key filename %s", self.key_filename)
424 super(HeatContext, self).undeploy()
# Build a routing-table list (one dict per interface) for pod.yaml /
# Jinja2 consumption. NOTE(review): presumably a @staticmethod — the
# decorator, the "return [" opening, the "if_name" entry and the closing
# bracket of the comprehension are elided in this listing.
427 def generate_routing_table(server):
430 "network": intf["network"],
431 "netmask": intf["netmask"],
433 # We have to encode a None gateway as '' for Jinja2 to YAML conversion
434 "gateway": intf["gateway_ip"] if intf["gateway_ip"] else '',
436 for name, intf in server.interfaces.items()
440 def _get_server(self, attr_name):
441 """lookup server info by name from context
442 attr_name: either a name for a server created by yardstick or a dict
443 with attribute name mapping when using external heat templates
# NOTE(review): the closing quotes of this docstring are elided in this
# listing.
# --- dict form: external heat template; resolve IPs via named stack
# output attributes.
445 if isinstance(attr_name, collections.Mapping):
446 node_name, cname = self.split_name(attr_name['name'])
# Name must be scoped to this context, otherwise not ours.
447 if cname is None or cname != self.name:
450 # Create a dummy server instance for holding the *_ip attributes
451 server = Server(node_name, self, {})
# object() is a guaranteed-missing sentinel key, so absent attrs
# resolve to None instead of raising.
452 server.public_ip = self.stack.outputs.get(
453 attr_name.get("public_ip_attr", object()), None)
455 server.private_ip = self.stack.outputs.get(
456 attr_name.get("private_ip_attr", object()), None)
# --- string form: direct lookup, with a fallback that strips a "-N"
# duplicate-suffix before retrying. NOTE(review): the try/except
# KeyError structure around these lookups is elided in this listing.
459 server = self._server_map[attr_name]
461 attr_name_no_suffix = attr_name.split("-")[0]
462 server = self._server_map.get(attr_name_no_suffix, None)
# Load this context's private key for SSH access to the server.
466 pkey = pkg_resources.resource_string(
467 'yardstick.resources',
468 h_join('files/yardstick_key', self.name)).decode('utf-8')
# NOTE(review): the "result = {" opening and several entries of this
# dict are elided in this listing.
471 "user": server.context.user,
473 "private_ip": server.private_ip,
474 "interfaces": server.interfaces,
475 "routing_table": self.generate_routing_table(server),
476 # empty IPv6 routing table
478 # we want to save the context name so we can generate pod.yaml
481 # Target server may only have private_ip
483 result["ip"] = server.public_ip
487 def _get_network(self, attr_name):
488 if not isinstance(attr_name, collections.Mapping):
489 network = self.networks.get(attr_name, None)
492 # Only take the first key, value
493 key, value = next(iter(attr_name.items()), (None, None))
496 network_iter = (n for n in self.networks.values() if getattr(n, key) == value)
497 network = next(network_iter, None)
503 "name": network.name,
504 "segmentation_id": network.segmentation_id,
505 "network_type": network.network_type,
506 "physical_network": network.physical_network,