1 ##############################################################################
2 # Copyright (c) 2015 Ericsson AB and others.
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
10 from __future__ import absolute_import
11 from __future__ import print_function
18 from collections import OrderedDict
23 from yardstick.benchmark.contexts.base import Context
24 from yardstick.benchmark.contexts.model import Network
25 from yardstick.benchmark.contexts.model import PlacementGroup, ServerGroup
26 from yardstick.benchmark.contexts.model import Server
27 from yardstick.benchmark.contexts.model import update_scheduler_hints
28 from yardstick.common.openstack_utils import get_neutron_client
29 from yardstick.orchestrator.heat import HeatTemplate, get_short_key_uuid
30 from yardstick.common import constants as consts
31 from yardstick.common.utils import source_env
32 from yardstick.ssh import SSH
# Module-level logger and the default timeout (seconds) used when waiting
# for Heat stack creation to complete.
34 LOG = logging.getLogger(__name__)
36 DEFAULT_HEAT_TIMEOUT = 3600
39 def join_args(sep, *args):
# Context implementation backed by OpenStack Heat: it builds a Heat template
# from scenario attributes and manages the resulting stack's lifecycle.
47 class HeatContext(Context):
48 """Class that represents a context in the logical model"""
# Type tag used by the Context base class to select this implementation.
50 __context_type__ = "Heat"
# Interior of __init__ (the def line is elided in this listing): initialize
# the mutable state holding networks, placement/server groups, naming, and
# the SSH key material for this context.
55 self.networks = OrderedDict()
56 self.heat_timeout = None
58 self.placement_groups = []
59 self.server_groups = []
60 self.keypair_name = None
61 self.secgroup_name = None
68 self.template_file = None
69 self.heat_parameters = None
70 self.neutron_client = None
71 # generate an uuid to identify yardstick_key
72 # the first 8 digits of the uuid will be used
73 self.key_uuid = uuid.uuid4()
# NOTE(review): heat_timeout is assigned None a second time here (already set
# above); one of the two assignments is redundant.
74 self.heat_timeout = None
# Private key path derived from the short form of key_uuid; the matching
# keypair is generated later in init() via SSH.gen_keys().
75 self.key_filename = ''.join(
76 [consts.YARDSTICK_ROOT_PATH, 'yardstick/resources/files/yardstick_key-',
77 get_short_key_uuid(self.key_uuid)])
78 super(HeatContext, self).__init__()
# Ensure exactly one network carries the "external_network" attribute.
# If no network declares one, prefer the 'mgmt' network, otherwise fall back
# to the first network in sorted order. Returns the sorted (name, attrs)
# pairs so callers get a deterministic network ordering.
# NOTE(review): the try/except lines guarding the 'mgmt' lookup are elided in
# this listing — the fallback assignment presumably runs on KeyError; confirm.
81 def assign_external_network(networks):
82 sorted_networks = sorted(networks.items())
# EXTERNAL_NETWORK env var overrides the default external net name.
83 external_network = os.environ.get("EXTERNAL_NETWORK", "net04_ext")
85 have_external_network = any(net.get("external_network") for net in networks.values())
86 if not have_external_network:
87 # try looking for mgmt network first
89 networks['mgmt']["external_network"] = external_network
92 # otherwise assign it to first network using os.environ
93 sorted_networks[0][1]["external_network"] = external_network
95 return sorted_networks
97 def init(self, attrs):
98 """initializes itself from the supplied arguments"""
99 self.check_environment()
100 self.name = attrs["name"]
102 self._user = attrs.get("user")
# An external template file bypasses template generation; its parameters
# are taken verbatim from the scenario attributes.
104 self.template_file = attrs.get("heat_template")
106 self.heat_timeout = attrs.get("timeout", DEFAULT_HEAT_TIMEOUT)
107 if self.template_file:
108 self.heat_parameters = attrs.get("heat_parameters")
# Per-context resource names derived from the context name.
111 self.keypair_name = h_join(self.name, "key")
112 self.secgroup_name = h_join(self.name, "secgroup")
114 self._image = attrs.get("image")
116 self._flavor = attrs.get("flavor")
# Build model objects for placement groups and server groups (both default
# to empty when not present in the scenario attributes).
118 self.placement_groups = [PlacementGroup(name, self, pg_attrs["policy"])
119 for name, pg_attrs in attrs.get(
120 "placement_groups", {}).items()]
122 self.server_groups = [ServerGroup(name, self, sg_attrs["policy"])
123 for name, sg_attrs in attrs.get(
124 "server_groups", {}).items()]
126 # we have to do this first, because we are injecting external_network
128 sorted_networks = self.assign_external_network(attrs["networks"])
130 self.networks = OrderedDict(
131 (name, Network(name, self, net_attrs)) for name, net_attrs in
# Servers are created in sorted-name order and indexed by distinguished
# name (dn) for later lookup in _get_server().
134 for name, server_attrs in sorted(attrs["servers"].items()):
135 server = Server(name, self, server_attrs)
136 self.servers.append(server)
137 self._server_map[server.dn] = server
# Generate the SSH keypair files at the path computed in __init__.
140 SSH.gen_keys(self.key_filename)
# Verify OpenStack credentials are available: probe OS_AUTH_URL and, when
# missing, try sourcing the OPENRC file before giving up.
# NOTE(review): the try/except structure is elided in this listing; the
# errno.EEXIST comparison below looks suspicious for a missing-file check
# (ENOENT would be expected) — confirm against the full source.
142 def check_environment(self):
144 os.environ['OS_AUTH_URL']
147 source_env(consts.OPENRC)
149 if e.errno != errno.EEXIST:
150 LOG.error('OPENRC file not found')
153 LOG.error('OS_AUTH_URL not found')
# Docstrings of three property accessors (image, flavor, user); the
# surrounding def lines are elided in this listing — presumably simple
# @property wrappers over _image/_flavor/_user; confirm against full source.
157 """returns application's default image name"""
162 """returns application's default flavor name"""
167 """return login user name corresponding to image"""
170 def _add_resources_to_template(self, template):
171 """add to the template the resources represented by this context"""
# A dict-valued context flavor means we must create the flavor in the
# template; ensure it has a name and track it in self.flavors.
174 if isinstance(self.flavor, dict):
175 flavor = self.flavor.setdefault("name", self.name + "-flavor")
176 template.add_flavor(**self.flavor)
177 self.flavors.add(flavor)
179 template.add_keypair(self.keypair_name, self.key_uuid)
180 template.add_security_group(self.secgroup_name)
# Emit network/subnet/router resources for every non-existing network.
182 for network in self.networks.values():
183 # Using existing network
184 if network.is_existing():
186 template.add_network(network.stack_name,
187 network.physical_network,
189 network.segmentation_id,
190 network.port_security_enabled,
191 network.network_type)
192 template.add_subnet(network.subnet_stack_name, network.stack_name,
198 template.add_router(network.router.stack_name,
199 network.router.external_gateway_info,
200 network.subnet_stack_name)
201 template.add_router_interface(network.router.stack_if_name,
202 network.router.stack_name,
203 network.subnet_stack_name)
205 # create a list of servers sorted by increasing no of placement groups
206 list_of_servers = sorted(self.servers,
207 key=lambda s: len(s.placement_groups))
210 # add servers with scheduler hints derived from placement groups
213 # create list of servers with availability policy
214 availability_servers = []
215 for server in list_of_servers:
216 for pg in server.placement_groups:
217 if pg.policy == "availability":
218 availability_servers.append(server)
# Track flavor names (dict-declared or derived from stack name) for the
# availability servers before adding them to the template.
221 for server in availability_servers:
222 if isinstance(server.flavor, dict):
224 self.flavors.add(server.flavor["name"])
226 self.flavors.add(h_join(server.stack_name, "flavor"))
228 # add servers with availability policy
230 for server in availability_servers:
232 for pg in server.placement_groups:
233 update_scheduler_hints(scheduler_hints, added_servers, pg)
234 # workaround for openstack nova bug, check JIRA: YARDSTICK-200
# With exactly two availability servers, a different_host hint list is
# either dropped (when empty) or collapsed to its single element.
236 if len(availability_servers) == 2:
237 if not scheduler_hints["different_host"]:
238 scheduler_hints.pop("different_host", None)
239 server.add_to_template(template,
240 list(self.networks.values()),
243 scheduler_hints["different_host"] = \
244 scheduler_hints["different_host"][0]
245 server.add_to_template(template,
246 list(self.networks.values()),
249 server.add_to_template(template,
250 list(self.networks.values()),
252 added_servers.append(server.stack_name)
254 # create list of servers with affinity policy
255 affinity_servers = []
256 for server in list_of_servers:
257 for pg in server.placement_groups:
258 if pg.policy == "affinity":
259 affinity_servers.append(server)
262 # add servers with affinity policy
# Skip servers already emitted during the availability pass.
263 for server in affinity_servers:
264 if server.stack_name in added_servers:
267 for pg in server.placement_groups:
268 update_scheduler_hints(scheduler_hints, added_servers, pg)
269 server.add_to_template(template, list(self.networks.values()),
271 added_servers.append(server.stack_name)
# Nova server groups are independent of placement groups.
274 for sg in self.server_groups:
275 template.add_server_group(sg.name, sg.policy)
277 # add remaining servers with no placement group configured
278 for server in list_of_servers:
279 # TODO placement_group and server_group should combine
280 if not server.placement_groups:
282 # affinity/anti-aff server group
283 sg = server.server_group
285 scheduler_hints["group"] = {'get_resource': sg.name}
286 server.add_to_template(template,
287 list(self.networks.values()),
def get_neutron_info(self):
    """Copy provider attributes from Neutron onto the context networks.

    Lazily instantiates the Neutron client, lists the Neutron networks
    once per call, and for every context network whose stack name matches
    a Neutron network records the provider segmentation id, the provider
    network type and the raw Neutron payload on the network object.
    """
    if not self.neutron_client:
        self.neutron_client = get_neutron_client()

    listing = self.neutron_client.list_networks()
    for ctx_net in self.networks.values():
        for neutron_net in listing['networks']:
            if neutron_net['name'] != ctx_net.stack_name:
                continue
            ctx_net.segmentation_id = neutron_net.get('provider:segmentation_id')
            # physical_network is already populated on the context network,
            # so it is deliberately not overwritten here
            ctx_net.network_type = neutron_net.get('provider:network_type')
            ctx_net.neutron_info = neutron_net
# Body of deploy() (the def line is elided in this listing): render or load
# the Heat template, create the stack, then propagate stack outputs onto
# the server objects.
305 """deploys template into a stack using cloud"""
306 LOG.info("Deploying context '%s' START", self.name)
308 heat_template = HeatTemplate(self.name, self.template_file,
309 self.heat_parameters)
# Only generate resources when no external template file was supplied.
311 if self.template_file is None:
312 self._add_resources_to_template(heat_template)
# Blocking create with the configured timeout; Ctrl-C aborts cleanly.
315 self.stack = heat_template.create(block=True,
316 timeout=self.heat_timeout)
317 except KeyboardInterrupt:
318 raise SystemExit("\nStack create interrupted")
320 LOG.exception("stack failed")
321 # let the other failures happen, we want stack trace
324 # TODO: use Neutron to get segmentation-id
325 self.get_neutron_info()
327 # copy some vital stack output into server objects
328 for server in self.servers:
330 self.add_server_port(server)
332 if server.floating_ip:
334 self.stack.outputs[server.floating_ip["stack_name"]]
336 LOG.info("Deploying context '%s' DONE", self.name)
def _port_net_is_existing(port_info):
    """Return the IS_EXISTING net flag stored on *port_info*.

    Reads the optional ``net_flags`` mapping and returns the value kept
    under ``consts.IS_EXISTING`` (``None`` when either is absent).
    """
    return port_info.get('net_flags', {}).get(consts.IS_EXISTING)
def _port_net_is_public(port_info):
    """Return the IS_PUBLIC net flag stored on *port_info*.

    Reads the optional ``net_flags`` mapping and returns the value kept
    under ``consts.IS_PUBLIC`` (``None`` when either is absent).
    """
    return port_info.get('net_flags', {}).get(consts.IS_PUBLIC)
# Populate server.public_ip/private_ip from stack outputs and build the
# server.interfaces mapping for each port.
348 def add_server_port(self, server):
349 server_ports = server.ports.values()
350 for server_port in server_ports:
# Each entry is a list of port dicts; the first one drives IP selection.
351 port_info = server_port[0]
352 port_ip = self.stack.outputs[port_info["stack_name"]]
353 port_net_is_existing = self._port_net_is_existing(port_info)
354 port_net_is_public = self._port_net_is_public(port_info)
# On existing networks the port IP doubles as the public IP when the net
# is public or the server has a single port.
355 if port_net_is_existing and (port_net_is_public or
356 len(server_ports) == 1):
357 server.public_ip = port_ip
358 if not server.private_ip or len(server_ports) == 1:
359 server.private_ip = port_ip
361 server.interfaces = {}
362 for network_name, ports in server.ports.items():
364 # port['port'] is either port name from mapping or default network_name
# Existing networks get a full interface dict built from stack outputs;
# the branch handling non-existing networks is elided in this listing.
365 if self._port_net_is_existing(port):
367 server.interfaces[port['port']] = self.make_interface_dict(network_name,
371 server.override_ip(network_name, port)
# Assemble the per-interface attribute dict consumed by scenarios/NSB from
# the Heat stack *outputs*, keyed by h_join()-composed output names.
373 def make_interface_dict(self, network_name, port, stack_name, outputs):
374 private_ip = outputs[stack_name]
375 mac_address = outputs[h_join(stack_name, "mac_address")]
376 # these are attributes of the network, not the port
377 output_subnet_cidr = outputs[h_join(self.name, network_name,
380 # these are attributes of the network, not the port
381 output_subnet_gateway = outputs[h_join(self.name, network_name,
382 'subnet', 'gateway_ip')]
385 # add default port name
387 "private_ip": private_ip,
388 "subnet_id": outputs[h_join(stack_name, "subnet_id")],
389 "subnet_cidr": output_subnet_cidr,
# Derive network address and netmask from the subnet CIDR.
390 "network": str(ipaddress.ip_network(output_subnet_cidr).network_address),
391 "netmask": str(ipaddress.ip_network(output_subnet_cidr).netmask),
392 "gateway_ip": output_subnet_gateway,
393 "mac_address": mac_address,
394 "device_id": outputs[h_join(stack_name, "device_id")],
395 "network_id": outputs[h_join(stack_name, "network_id")],
396 # this should be == vld_id for NSB tests
397 "network_name": network_name,
398 # to match vnf_generic
399 "local_mac": mac_address,
400 "local_ip": private_ip,
# Body of undeploy() (the def line is elided in this listing): delete the
# stack, then best-effort remove the generated SSH key pair files.
404 """undeploys stack from cloud"""
406 LOG.info("Undeploying context '%s' START", self.name)
409 LOG.info("Undeploying context '%s' DONE", self.name)
411 if os.path.exists(self.key_filename):
413 os.remove(self.key_filename)
414 os.remove(self.key_filename + ".pub")
# Key removal failures are logged but do not abort the undeploy.
416 LOG.exception("Key filename %s", self.key_filename)
418 super(HeatContext, self).undeploy()
# Build a routing-table list (one dict per interface) from the server's
# interfaces; the comprehension's opening/closing lines are elided in this
# listing.
421 def generate_routing_table(server):
424 "network": intf["network"],
425 "netmask": intf["netmask"],
427 # We have to encode a None gateway as '' for Jinja2 to YAML conversion
428 "gateway": intf["gateway_ip"] if intf["gateway_ip"] else '',
430 for name, intf in server.interfaces.items()
434 def _get_server(self, attr_name):
435 """lookup server info by name from context
436 attr_name: either a name for a server created by yardstick or a dict
437 with attribute name mapping when using external heat templates
# NOTE(review): collections.Mapping is removed in Python 3.10+; should be
# collections.abc.Mapping — confirm against the project's Python baseline.
439 if isinstance(attr_name, collections.Mapping):
440 node_name, cname = self.split_name(attr_name['name'])
# Mapping lookups only resolve servers belonging to this context.
441 if cname is None or cname != self.name:
444 # Create a dummy server instance for holding the *_ip attributes
445 server = Server(node_name, self, {})
# object() sentinels guarantee a miss (-> None) when no attr is mapped.
446 server.public_ip = self.stack.outputs.get(
447 attr_name.get("public_ip_attr", object()), None)
449 server.private_ip = self.stack.outputs.get(
450 attr_name.get("private_ip_attr", object()), None)
# Plain-string lookup: resolve by distinguished name in the server map.
452 server = self._server_map.get(attr_name, None)
# Load the generated private key so callers can SSH into the server.
456 pkey = pkg_resources.resource_string(
457 'yardstick.resources',
458 h_join('files/yardstick_key', get_short_key_uuid(self.key_uuid))).decode('utf-8')
461 "user": server.context.user,
463 "private_ip": server.private_ip,
464 "interfaces": server.interfaces,
465 "routing_table": self.generate_routing_table(server),
466 # empty IPv6 routing table
468 # we want to save the context name so we can generate pod.yaml
471 # Target server may only have private_ip
473 result["ip"] = server.public_ip
# Resolve a context network either by plain name or by a single-entry
# {attribute: value} mapping, returning a dict of its key attributes
# (the return statement lies beyond this listing).
477 def _get_network(self, attr_name):
# NOTE(review): collections.Mapping is removed in Python 3.10+; should be
# collections.abc.Mapping — confirm against the project's Python baseline.
478 if not isinstance(attr_name, collections.Mapping):
479 network = self.networks.get(attr_name, None)
482 # Only take the first key, value
483 key, value = next(iter(attr_name.items()), (None, None))
# Linear scan for the first network whose attribute matches the value.
486 network_iter = (n for n in self.networks.values() if getattr(n, key) == value)
487 network = next(network_iter, None)
493 "name": network.name,
494 "segmentation_id": network.segmentation_id,
495 "network_type": network.network_type,
496 "physical_network": network.physical_network,