1 ##############################################################################
2 # Copyright (c) 2015 Ericsson AB and others.
4 # All rights reserved. This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 # http://www.apache.org/licenses/LICENSE-2.0
8 ##############################################################################
10 from __future__ import absolute_import
11 from __future__ import print_function
18 from collections import OrderedDict
23 from yardstick.benchmark.contexts.base import Context
24 from yardstick.benchmark.contexts.model import Network
25 from yardstick.benchmark.contexts.model import PlacementGroup, ServerGroup
26 from yardstick.benchmark.contexts.model import Server
27 from yardstick.benchmark.contexts.model import update_scheduler_hints
28 from yardstick.common.openstack_utils import get_neutron_client
29 from yardstick.orchestrator.heat import HeatTemplate, get_short_key_uuid
30 from yardstick.common import constants as consts
31 from yardstick.common.utils import source_env
32 from yardstick.ssh import SSH
# Module-level logger named after this module, per logging convention.
LOG = logging.getLogger(__name__)

# Default timeout in seconds for Heat stack creation; overridable per
# context via the "timeout" attribute handled in HeatContext.init().
DEFAULT_HEAT_TIMEOUT = 3600
39 def join_args(sep, *args):
47 class HeatContext(Context):
48 """Class that represents a context in the logical model"""
50 __context_type__ = "Heat"
54 self.networks = OrderedDict()
55 self.heat_timeout = None
57 self.placement_groups = []
58 self.server_groups = []
59 self.keypair_name = None
60 self.secgroup_name = None
67 self.template_file = None
68 self.heat_parameters = None
69 self.neutron_client = None
70 # generate an uuid to identify yardstick_key
71 # the first 8 digits of the uuid will be used
72 self.key_uuid = uuid.uuid4()
73 self.heat_timeout = None
74 self.key_filename = ''.join(
75 [consts.YARDSTICK_ROOT_PATH, 'yardstick/resources/files/yardstick_key-',
76 get_short_key_uuid(self.key_uuid)])
77 super(HeatContext, self).__init__()
    def assign_external_network(networks):
        """Ensure one network in *networks* carries the external network name.

        The external network name is read from the EXTERNAL_NETWORK
        environment variable (default "net04_ext").  If no network already
        has an "external_network" attribute, it is assigned to the 'mgmt'
        network, falling back to the first network in sorted-name order.

        :param networks: dict mapping network name -> attribute dict
        :returns: list of (name, attrs) tuples sorted by network name
        """
        sorted_networks = sorted(networks.items())
        external_network = os.environ.get("EXTERNAL_NETWORK", "net04_ext")

        have_external_network = any(net.get("external_network") for net in networks.values())
        if not have_external_network:
            # try looking for mgmt network first
            # NOTE(review): the try/except guarding a missing 'mgmt' key
            # appears elided in this view -- confirm against full source.
            networks['mgmt']["external_network"] = external_network
            # otherwise assign it to first network using os.environ
            sorted_networks[0][1]["external_network"] = external_network

        return sorted_networks
    def init(self, attrs):
        """Initializes itself from the supplied arguments"""
        super(HeatContext, self).init(attrs)

        # fail fast when no usable OpenStack credentials are present
        self.check_environment()
        self._user = attrs.get("user")

        # external-template mode: the user supplies a ready-made template
        self.template_file = attrs.get("heat_template")

        self.heat_timeout = attrs.get("timeout", DEFAULT_HEAT_TIMEOUT)
        if self.template_file:
            self.heat_parameters = attrs.get("heat_parameters")

        # resource names are namespaced with the context name
        self.keypair_name = h_join(self.name, "key")
        self.secgroup_name = h_join(self.name, "secgroup")

        self._image = attrs.get("image")

        self._flavor = attrs.get("flavor")

        # placement groups drive scheduler hints (affinity/availability)
        self.placement_groups = [PlacementGroup(name, self, pg_attrs["policy"])
                                 for name, pg_attrs in attrs.get(
                                     "placement_groups", {}).items()]

        self.server_groups = [ServerGroup(name, self, sg_attrs["policy"])
                              for name, sg_attrs in attrs.get(
                                  "server_groups", {}).items()]

        # we have to do this first, because we are injecting external_network
        sorted_networks = self.assign_external_network(attrs["networks"])

        self.networks = OrderedDict(
            (name, Network(name, self, net_attrs)) for name, net_attrs in
            # NOTE(review): this generator appears truncated in this view
            # (missing its closing over sorted_networks) -- confirm source.
        for name, server_attrs in sorted(attrs["servers"].items()):
            server = Server(name, self, server_attrs)
            self.servers.append(server)
            # _server_map indexes servers by their distinguished name
            self._server_map[server.dn] = server

        # generate the SSH keypair backing keypair_name
        SSH.gen_keys(self.key_filename)
    def check_environment(self):
        """Verify OpenStack credentials are available.

        Checks for OS_AUTH_URL in the environment and, failing that,
        attempts to source the openrc file; logs an error when neither
        source of credentials is usable.

        NOTE(review): the try/except scaffolding around these checks
        (including the binding of ``e``) appears elided in this view --
        confirm against the full source file.
        """
        os.environ['OS_AUTH_URL']
        source_env(consts.OPENRC)
        if e.errno != errno.EEXIST:
            LOG.error('OPENRC file not found')
        LOG.error('OS_AUTH_URL not found')
156 """returns application's default image name"""
161 """returns application's default flavor name"""
166 """return login user name corresponding to image"""
    def _add_resources_to_template(self, template):
        """add to the template the resources represented by this context

        Adds, in order: an optional custom flavor, the context keypair and
        security group, each network with its subnet/router/router-interface,
        then the servers grouped by placement policy so scheduler hints can
        be attached (availability first, then affinity, then ungrouped).

        NOTE(review): several multi-line constructs in this view appear
        elided (add_subnet argument tail, add_to_template call tails, the
        scheduler_hints/added_servers initialisation, else-branches) --
        confirm against the full source file before relying on structure.
        """
        # a dict-valued flavor means "create this flavor inside the stack"
        if isinstance(self.flavor, dict):
            flavor = self.flavor.setdefault("name", self.name + "-flavor")
            template.add_flavor(**self.flavor)
            self.flavors.add(flavor)
        template.add_keypair(self.keypair_name, self.key_uuid)
        template.add_security_group(self.secgroup_name)
        for network in self.networks.values():
            template.add_network(network.stack_name,
                                 network.physical_network,
                                 network.segmentation_id,
                                 network.port_security_enabled,
                                 network.network_type)
            template.add_subnet(network.subnet_stack_name, network.stack_name,
            template.add_router(network.router.stack_name,
                                network.router.external_gateway_info,
                                network.subnet_stack_name)
            template.add_router_interface(network.router.stack_if_name,
                                          network.router.stack_name,
                                          network.subnet_stack_name)
        # create a list of servers sorted by increasing no of placement groups
        list_of_servers = sorted(self.servers,
                                 key=lambda s: len(s.placement_groups))
        # add servers with scheduler hints derived from placement groups
        # create list of servers with availability policy
        availability_servers = []
        for server in list_of_servers:
            for pg in server.placement_groups:
                if pg.policy == "availability":
                    availability_servers.append(server)
        # register flavors referenced by the availability servers
        for server in availability_servers:
            if isinstance(server.flavor, dict):
                self.flavors.add(server.flavor["name"])
                self.flavors.add(h_join(server.stack_name, "flavor"))
        # add servers with availability policy
        for server in availability_servers:
            for pg in server.placement_groups:
                update_scheduler_hints(scheduler_hints, added_servers, pg)
            # workaround for openstack nova bug, check JIRA: YARDSTICK-200
            if len(availability_servers) == 2:
                if not scheduler_hints["different_host"]:
                    scheduler_hints.pop("different_host", None)
                    server.add_to_template(template,
                                           list(self.networks.values()),
                    scheduler_hints["different_host"] = \
                        scheduler_hints["different_host"][0]
                    server.add_to_template(template,
                                           list(self.networks.values()),
            server.add_to_template(template,
                                   list(self.networks.values()),
            added_servers.append(server.stack_name)
        # create list of servers with affinity policy
        affinity_servers = []
        for server in list_of_servers:
            for pg in server.placement_groups:
                if pg.policy == "affinity":
                    affinity_servers.append(server)
        # add servers with affinity policy
        for server in affinity_servers:
            # skip servers already emitted by the availability pass
            if server.stack_name in added_servers:
            for pg in server.placement_groups:
                update_scheduler_hints(scheduler_hints, added_servers, pg)
            server.add_to_template(template, list(self.networks.values()),
            added_servers.append(server.stack_name)
        # emit Nova server-group resources for configured server groups
        for sg in self.server_groups:
            template.add_server_group(sg.name, sg.policy)
        # add remaining servers with no placement group configured
        for server in list_of_servers:
            # TODO placement_group and server_group should combine
            if not server.placement_groups:
                # affinity/anti-aff server group
                sg = server.server_group
                scheduler_hints["group"] = {'get_resource': sg.name}
                server.add_to_template(template,
                                       list(self.networks.values()),
286 def get_neutron_info(self):
287 if not self.neutron_client:
288 self.neutron_client = get_neutron_client()
290 networks = self.neutron_client.list_networks()
291 for network in self.networks.values():
292 for neutron_net in networks['networks']:
293 if neutron_net['name'] == network.stack_name:
294 network.segmentation_id = neutron_net.get('provider:segmentation_id')
295 # we already have physical_network
296 # network.physical_network = neutron_net.get('provider:physical_network')
297 network.network_type = neutron_net.get('provider:network_type')
298 network.neutron_info = neutron_net
301 """deploys template into a stack using cloud"""
302 LOG.info("Deploying context '%s' START", self.name)
304 heat_template = HeatTemplate(self.name, self.template_file,
305 self.heat_parameters)
307 if self.template_file is None:
308 self._add_resources_to_template(heat_template)
311 self.stack = heat_template.create(block=True,
312 timeout=self.heat_timeout)
313 except KeyboardInterrupt:
314 raise SystemExit("\nStack create interrupted")
316 LOG.exception("stack failed")
317 # let the other failures happen, we want stack trace
320 # TODO: use Neutron to get segmentation-id
321 self.get_neutron_info()
323 # copy some vital stack output into server objects
324 for server in self.servers:
326 self.add_server_port(server)
328 if server.floating_ip:
330 self.stack.outputs[server.floating_ip["stack_name"]]
332 LOG.info("Deploying context '%s' DONE", self.name)
    def add_server_port(self, server):
        """Populate a server's private IP and interface map from stack outputs.

        NOTE(review): the try/except around the first-port lookup, the tail
        of the make_interface_dict() call, and the inner per-port loop
        appear elided in this view -- confirm against the full source.
        """
        # use private ip from first port in first network
        private_port = next(iter(server.ports.values()))[0]
        # presumably inside an elided except clause for the lookup above
        LOG.exception("Unable to find first private port in %s", server.ports)
        server.private_ip = self.stack.outputs[private_port["stack_name"]]
        server.interfaces = {}
        for network_name, ports in server.ports.items():
            # port['port'] is either port name from mapping or default network_name
            server.interfaces[port['port']] = self.make_interface_dict(network_name,
            server.override_ip(network_name, port)
    def make_interface_dict(self, network_name, port, stack_name, outputs):
        """Build the interface-description dict for one server port.

        Pulls the port's IP/MAC/subnet details out of the Heat stack
        *outputs* (keyed by h_join()-style output names) and derives the
        network address and netmask from the subnet CIDR.

        NOTE(review): the subscript tail closing output_subnet_cidr and
        the ``return {`` opener of the result dict appear elided in this
        view -- confirm against the full source file.
        """
        private_ip = outputs[stack_name]
        mac_address = outputs[h_join(stack_name, "mac_address")]
        # these are attributes of the network, not the port
        output_subnet_cidr = outputs[h_join(self.name, network_name,
        # these are attributes of the network, not the port
        output_subnet_gateway = outputs[h_join(self.name, network_name,
                                               'subnet', 'gateway_ip')]
        # add default port name
            "private_ip": private_ip,
            "subnet_id": outputs[h_join(stack_name, "subnet_id")],
            "subnet_cidr": output_subnet_cidr,
            # network/netmask are derived from the CIDR via ipaddress
            "network": str(ipaddress.ip_network(output_subnet_cidr).network_address),
            "netmask": str(ipaddress.ip_network(output_subnet_cidr).netmask),
            "gateway_ip": output_subnet_gateway,
            "mac_address": mac_address,
            "device_id": outputs[h_join(stack_name, "device_id")],
            "network_id": outputs[h_join(stack_name, "network_id")],
            # this should be == vld_id for NSB tests
            "network_name": network_name,
            # to match vnf_generic
            "local_mac": mac_address,
            "local_ip": private_ip,
383 """undeploys stack from cloud"""
385 LOG.info("Undeploying context '%s' START", self.name)
388 LOG.info("Undeploying context '%s' DONE", self.name)
390 if os.path.exists(self.key_filename):
392 os.remove(self.key_filename)
393 os.remove(self.key_filename + ".pub")
395 LOG.exception("Key filename %s", self.key_filename)
397 super(HeatContext, self).undeploy()
    def generate_routing_table(server):
        """Build a routing-table entry list from a server's interfaces.

        Each entry carries the interface's network, netmask and gateway.
        NOTE(review): the opener/closer of the comprehension below appear
        elided in this view -- confirm against the full source file.
        """
                "network": intf["network"],
                "netmask": intf["netmask"],
                # We have to encode a None gateway as '' for Jinja2 to YAML conversion
                "gateway": intf["gateway_ip"] if intf["gateway_ip"] else '',
            for name, intf in server.interfaces.items()
    def _get_server(self, attr_name):
        """lookup server info by name from context

        attr_name: either a name for a server created by yardstick or a dict
        with attribute name mapping when using external heat templates

        NOTE(review): several lines appear elided in this view (the early
        return on context-name mismatch, the else-branch structure, the
        ``result = {`` opener) -- confirm against the full source file.
        """
        if isinstance(attr_name, collections.Mapping):
            # external-template path: attr_name maps output attribute names
            node_name, cname = self.split_name(attr_name['name'])
            if cname is None or cname != self.name:
            # Create a dummy server instance for holding the *_ip attributes
            server = Server(node_name, self, {})
            # object() sentinel ensures a missing attr key never matches
            server.public_ip = self.stack.outputs.get(
                attr_name.get("public_ip_attr", object()), None)
            server.private_ip = self.stack.outputs.get(
                attr_name.get("private_ip_attr", object()), None)
            server = self._server_map.get(attr_name, None)
        # load the generated private key that matches this context's keypair
        pkey = pkg_resources.resource_string(
            'yardstick.resources',
            h_join('files/yardstick_key', get_short_key_uuid(self.key_uuid))).decode('utf-8')
            "user": server.context.user,
            "private_ip": server.private_ip,
            "interfaces": server.interfaces,
            "routing_table": self.generate_routing_table(server),
            # empty IPv6 routing table
        # we want to save the context name so we can generate pod.yaml
        # Target server may only have private_ip
            result["ip"] = server.public_ip
456 def _get_network(self, attr_name):
457 if not isinstance(attr_name, collections.Mapping):
458 network = self.networks.get(attr_name, None)
461 # Only take the first key, value
462 key, value = next(iter(attr_name.items()), (None, None))
465 network_iter = (n for n in self.networks.values() if getattr(n, key) == value)
466 network = next(network_iter, None)
472 "name": network.name,
473 "segmentation_id": network.segmentation_id,
474 "network_type": network.network_type,
475 "physical_network": network.physical_network,