# Copyright (c) 2016-2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import io
import collections
import logging
import os
import re
import time

from yardstick import ssh
from yardstick.benchmark import contexts
from yardstick.benchmark.contexts import base
from yardstick.benchmark.contexts.standalone import model
from yardstick.common import exceptions
from yardstick.common import utils as common_utils
from yardstick.network_services import utils
from yardstick.network_services.utils import get_nsb_option


LOG = logging.getLogger(__name__)

MAIN_BRIDGE = 'br0'


class OvsDpdkContext(base.Context):
    """ This class handles OVS standalone nodes - VM running on Non-Managed NFVi
    Configuration: ovs_dpdk
    """

    __context_type__ = contexts.CONTEXT_STANDALONEOVSDPDK

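    # Each supported OVS release is mapped to the DPDK series it was validated
    # against; check_ovs_dpdk_env() compares only the major.minor part of the
    # DPDK version.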
    SUPPORTED_OVS_TO_DPDK_MAP = {
        '2.6.0': '16.07.1',
        '2.6.1': '16.07.2',
        '2.7.0': '16.11.1',
        '2.7.1': '16.11.2',
        '2.7.2': '16.11.3',
        '2.8.0': '17.05.2',
        '2.8.1': '17.05.2'
    }

    DEFAULT_OVS = '2.6.0'
    CMD_TIMEOUT = 30
    DEFAULT_USER_PATH = '/usr/local'

    def __init__(self):
        self.file_path = None
        self.sriov = []
        self.first_run = True
        self.dpdk_devbind = os.path.join(get_nsb_option('bin_path'),
                                         'dpdk-devbind.py')
        self.vm_names = []
        self.nfvi_host = []
        self.nodes = []
        self.networks = {}
        self.attrs = {}
        self.vm_flavor = None
        self.servers = None
        self.helper = model.StandaloneContextHelper()
        self.vnf_node = model.Server()
        self.ovs_properties = {}
        self.wait_for_vswitchd = 10
        super(OvsDpdkContext, self).__init__()

    def get_dpdk_socket_mem_size(self, socket_id):
        """Get the size of OVS DPDK socket memory (MB)"""
        ram = self.ovs_properties.get("ram", {})
        return ram.get('socket_%d' % (socket_id), 2048)

    def init(self, attrs):
        """initializes itself from the supplied arguments"""
        super(OvsDpdkContext, self).init(attrs)

        self.file_path = attrs.get("file", "pod.yaml")

        self.nodes, self.nfvi_host, self.host_mgmt = \
            self.helper.parse_pod_file(self.file_path, 'OvsDpdk')

        self.attrs = attrs
        self.vm_flavor = attrs.get('flavor', {})
        self.servers = attrs.get('servers', {})
        self.vm_deploy = attrs.get("vm_deploy", True)
        self.ovs_properties = attrs.get('ovs_properties', {})
        # add optional static network definition
        self.networks = attrs.get("networks", {})

        LOG.debug("Nodes: %r", self.nodes)
        LOG.debug("NFVi Node: %r", self.nfvi_host)
        LOG.debug("Networks: %r", self.networks)

    def setup_ovs(self):
        """Initialize OVS-DPDK"""
        vpath = self.ovs_properties.get('vpath', self.DEFAULT_USER_PATH)
        create_from = os.path.join(vpath, 'etc/openvswitch/conf.db')
        create_to = os.path.join(vpath, 'share/openvswitch/vswitch.ovsschema')

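        # Stop any running OVS processes, recreate the OVSDB from its schema,
        # load the vfio-pci driver and open up /dev/vfio, then (below) bind
        # every physical port to vfio-pci.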
        cmd_list = [
            'killall -r "ovs.*" -q | true',
            'mkdir -p {0}/etc/openvswitch'.format(vpath),
            'mkdir -p {0}/var/run/openvswitch'.format(vpath),
            'rm {0}/etc/openvswitch/conf.db | true'.format(vpath),
            'ovsdb-tool create {0} {1}'.format(create_from, create_to),
            'modprobe vfio-pci',
            'chmod a+x /dev/vfio',
            'chmod 0666 /dev/vfio/*',
        ]

        bind_cmd = '%s --force -b vfio-pci {port}' % self.dpdk_devbind
        for port in self.networks.values():
            cmd_list.append(bind_cmd.format(port=port.get('phy_port')))

        for cmd in cmd_list:
            LOG.info(cmd)
            exit_status, _, stderr = self.connection.execute(
                cmd, timeout=self.CMD_TIMEOUT)
            if exit_status:
                raise exceptions.OVSSetupError(command=cmd, error=stderr)

    def start_ovs_serverswitch(self):
        vpath = self.ovs_properties.get("vpath")
        pmd_nums = int(self.ovs_properties.get("pmd_threads", 2))
        ovs_sock_path = '/var/run/openvswitch/db.sock'
        log_path = '/var/log/openvswitch/ovs-vswitchd.log'

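        # Default PMD mask: 'pmd_threads' consecutive bits shifted left by one
        # so that core 0 is left free; an explicit pmd_cpu_mask overrides it.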
        pmd_cpu_mask = self.ovs_properties.get("pmd_cpu_mask", '')
        pmd_mask = hex(sum(2 ** num for num in range(pmd_nums)) << 1)
        if pmd_cpu_mask:
            pmd_mask = pmd_cpu_mask

        ovs_other_config = "ovs-vsctl {0}set Open_vSwitch . other_config:{1}"
        detach_cmd = "ovs-vswitchd unix:{0}{1} --pidfile --detach --log-file={2}"

        lcore_mask = self.ovs_properties.get("lcore_mask", '')
        if lcore_mask:
            lcore_mask = ovs_other_config.format("--no-wait ", "dpdk-lcore-mask='%s'" % lcore_mask)

        max_idle = self.ovs_properties.get("max_idle", '')
        if max_idle:
            max_idle = ovs_other_config.format("", "max-idle=%s" % max_idle)

        cmd_list = [
            "mkdir -p /usr/local/var/run/openvswitch",
            "mkdir -p {}".format(os.path.dirname(log_path)),
            ("ovsdb-server --remote=punix:/{0}/{1} --remote=ptcp:6640"
             " --pidfile --detach").format(vpath, ovs_sock_path),
            ovs_other_config.format("--no-wait ", "dpdk-init=true"),
            ovs_other_config.format("--no-wait ", "dpdk-socket-mem='%d,%d'" % (
                self.get_dpdk_socket_mem_size(0),
                self.get_dpdk_socket_mem_size(1))),
            lcore_mask,
            detach_cmd.format(vpath, ovs_sock_path, log_path),
            ovs_other_config.format("", "pmd-cpu-mask=%s" % pmd_mask),
            max_idle,
        ]

        for cmd in cmd_list:
            LOG.info(cmd)
            self.connection.execute(cmd)
        time.sleep(self.wait_for_vswitchd)

    def setup_ovs_bridge_add_flows(self):
        dpdk_args = ""
        vpath = self.ovs_properties.get("vpath", "/usr/local")
        version = self.ovs_properties.get('version', {})
        ovs_ver = [int(x) for x in version.get('ovs', self.DEFAULT_OVS).split('.')]
        ovs_add_port = ('ovs-vsctl add-port {br} {port} -- '
                        'set Interface {port} type={type_}{dpdk_args}'
                        '{dpdk_rxq}{pmd_rx_aff}')
        chmod_vpath = 'chmod 0777 {0}/var/run/openvswitch/dpdkvhostuser*'

        cmd_list = [
            'ovs-vsctl --if-exists del-br {0}'.format(MAIN_BRIDGE),
            'rm -rf {0}/var/run/openvswitch/dpdkvhostuser*'.format(vpath),
            'ovs-vsctl add-br {0} -- set bridge {0} datapath_type=netdev'.
            format(MAIN_BRIDGE)
        ]
        dpdk_rxq = ""
        queues = self.ovs_properties.get("queues")
        if queues:
            dpdk_rxq = " options:n_rxq={queue}".format(queue=queues)

        # Sort the networks by port_num so the dpdk ports are added in order
        # (dpdk0, dpdk1, ...)
        ordered_network = collections.OrderedDict(
            sorted(self.networks.items(), key=lambda t: t[1].get('port_num', 0)))
        pmd_rx_aff_ports = self.ovs_properties.get("dpdk_pmd-rxq-affinity", {})
        for index, vnf in enumerate(ordered_network.values()):
            if ovs_ver >= [2, 7, 0]:
                dpdk_args = " options:dpdk-devargs=%s" % vnf.get("phy_port")
            affinity = pmd_rx_aff_ports.get(vnf.get("port_num", -1), "")
            if affinity:
                pmd_rx_aff = ' other_config:pmd-rxq-affinity=' \
                             '"{affinity}"'.format(affinity=affinity)
            else:
                pmd_rx_aff = ""
            cmd_list.append(ovs_add_port.format(
                br=MAIN_BRIDGE, port='dpdk%s' % vnf.get("port_num", 0),
                type_='dpdk', dpdk_args=dpdk_args, dpdk_rxq=dpdk_rxq,
                pmd_rx_aff=pmd_rx_aff))

        # Two separate loops are needed so that all the dpdk ports are added
        # before the vhost ports.
        pmd_rx_aff_ports = self.ovs_properties.get("vhost_pmd-rxq-affinity",
                                                   {})
        for index, _ in enumerate(ordered_network):
            affinity = pmd_rx_aff_ports.get(index)
            if affinity:
                pmd_rx_aff = ' other_config:pmd-rxq-affinity=' \
                             '"{affinity}"'.format(affinity=affinity)
            else:
                pmd_rx_aff = ""
            cmd_list.append(ovs_add_port.format(
                br=MAIN_BRIDGE, port='dpdkvhostuser%s' % index,
                type_='dpdkvhostuser', dpdk_args="", dpdk_rxq=dpdk_rxq,
                pmd_rx_aff=pmd_rx_aff))

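        # The flows assume OpenFlow port numbers follow the order in which the
        # ports were added: dpdk ports are 1..N and vhostuser ports are
        # N+1..2N, so each flow pair cross-connects dpdkX with dpdkvhostuserX
        # in both directions.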
        ovs_flow = ("ovs-ofctl add-flow {0} in_port=%s,action=output:%s".
                    format(MAIN_BRIDGE))
        network_count = len(ordered_network) + 1
        for in_port, out_port in zip(range(1, network_count),
                                     range(network_count, network_count * 2)):
            cmd_list.append(ovs_flow % (in_port, out_port))
            cmd_list.append(ovs_flow % (out_port, in_port))

        cmd_list.append(chmod_vpath.format(vpath))

        for cmd in cmd_list:
            LOG.info(cmd)
            exit_status, _, stderr = self.connection.execute(
                cmd, timeout=self.CMD_TIMEOUT)
            if exit_status:
                raise exceptions.OVSSetupError(command=cmd, error=stderr)

    def _check_hugepages(self):
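        # Read /proc/meminfo from the NFVi host and fail early if hugepages
        # are not configured or none are left free.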
        meminfo = io.BytesIO()
        self.connection.get_file_obj('/proc/meminfo', meminfo)
        regex = re.compile(r"HugePages_Total:\s+(?P<hp_total>\d+)[\n\r]"
                           r"HugePages_Free:\s+(?P<hp_free>\d+)")
        match = regex.search(meminfo.getvalue().decode('utf-8'))
        if not match:
            raise exceptions.OVSHugepagesInfoError()
        if int(match.group('hp_total')) == 0:
            raise exceptions.OVSHugepagesNotConfigured()
        if int(match.group('hp_free')) == 0:
            raise exceptions.OVSHugepagesZeroFree(
                total_hugepages=int(match.group('hp_total')))

    def cleanup_ovs_dpdk_env(self):
        self.connection.execute(
            'ovs-vsctl --if-exists del-br {0}'.format(MAIN_BRIDGE))
        self.connection.execute("pkill -9 ovs")

    def check_ovs_dpdk_env(self):
        self.cleanup_ovs_dpdk_env()

        version = self.ovs_properties.get("version", {})
        ovs_ver = version.get("ovs", self.DEFAULT_OVS)
        dpdk_ver = version.get("dpdk", "16.07.2").split('.')

        supported_version = self.SUPPORTED_OVS_TO_DPDK_MAP.get(ovs_ver, None)
        if supported_version is None or supported_version.split('.')[:2] != dpdk_ver[:2]:
            raise exceptions.OVSUnsupportedVersion(
                ovs_version=ovs_ver,
                ovs_to_dpdk_map=self.SUPPORTED_OVS_TO_DPDK_MAP)

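        # If the requested OVS version is not already installed on the host,
        # deploy it via model.OvsDeploy.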
        status = self.connection.execute("ovs-vsctl -V | grep -i '%s'" % ovs_ver)[0]
        if status:
            deploy = model.OvsDeploy(self.connection,
                                     utils.get_nsb_option("bin_path"),
                                     self.ovs_properties)
            deploy.ovs_deploy()

    def deploy(self):
        """Deploy the OVS-DPDK environment and the VNF VMs"""

        # Todo: NFVi deploy (sriov, vswitch, ovs etc) based on the config.
        if not self.vm_deploy:
            return

        self.connection = ssh.SSH.from_node(self.host_mgmt)

        # Check the dpdk/ovs version; install if not present
        self.check_ovs_dpdk_env()
        model.StandaloneContextHelper.install_req_libs(self.connection)
        self.networks = model.StandaloneContextHelper.get_nic_details(
            self.connection, self.networks, self.dpdk_devbind)

        self.setup_ovs()
        self.start_ovs_serverswitch()
        self.setup_ovs_bridge_add_flows()
        self.nodes = self.setup_ovs_dpdk_context()
        LOG.debug("Waiting for VM to come up...")
        self.nodes = model.StandaloneContextHelper.wait_for_vnfs_to_start(
            self.connection, self.servers, self.nodes)

    def undeploy(self):

        if not self.vm_deploy:
            return

        # Cleanup the ovs installation...
        self.cleanup_ovs_dpdk_env()

        # Bind nics back to kernel
        bind_cmd = "{dpdk_devbind} --force -b {driver} {port}"
        for port in self.networks.values():
            vpci = port.get("phy_port")
            phy_driver = port.get("driver")
            self.connection.execute(bind_cmd.format(
                dpdk_devbind=self.dpdk_devbind, driver=phy_driver, port=vpci))

        # Todo: NFVi undeploy (sriov, vswitch, ovs etc) based on the config.
        for vm in self.vm_names:
            model.Libvirt.check_if_vm_exists_and_delete(vm, self.connection)

    def _get_physical_nodes(self):
        return self.nfvi_host

    def _get_physical_node_for_server(self, server_name):
        node_name, ctx_name = self.split_host_name(server_name)
        if ctx_name is None or self.name != ctx_name:
            return None

        matching_nodes = [s for s in self.servers if s == node_name]
        if len(matching_nodes) == 0:
            return None

        # self.nfvi_host always contains only one host
        return "{}.{}".format(self.nfvi_host[0]["name"], self._name)

    def _get_server(self, attr_name):
        """lookup server info by name from context

        Keyword arguments:
        attr_name -- A name for a server listed in nodes config file
        """
        node_name, name = self.split_host_name(attr_name)
        if name is None or self.name != name:
            return None

        matching_nodes = (n for n in self.nodes if n["name"] == node_name)
        try:
            # A clone is created in order to avoid affecting the
            # original one.
            node = dict(next(matching_nodes))
        except StopIteration:
            return None

        try:
            duplicate = next(matching_nodes)
        except StopIteration:
            pass
        else:
            raise ValueError("Duplicate nodes!!! Nodes: %s %s" % (node, duplicate))

        node["name"] = attr_name
        return node

    def _get_network(self, attr_name):
        if not isinstance(attr_name, collections.Mapping):
            network = self.networks.get(attr_name)

        else:
            # Don't generalize too much. Just support vld_id
            vld_id = attr_name.get('vld_id', {})
            # for standalone context networks are dicts
            iter1 = (n for n in self.networks.values() if n.get('vld_id') == vld_id)
            network = next(iter1, None)

        if network is None:
            return None

        result = {
            # name is required
            "name": network["name"],
            "vld_id": network.get("vld_id"),
            "segmentation_id": network.get("segmentation_id"),
            "network_type": network.get("network_type"),
            "physical_network": network.get("physical_network"),
        }
        return result

    def configure_nics_for_ovs_dpdk(self):
        portlist = collections.OrderedDict(self.networks)
        for key in portlist:
            mac = model.StandaloneContextHelper.get_mac_address()
            portlist[key].update({'mac': mac})
        self.networks = portlist
        LOG.info("Ports %s", self.networks)

    def _enable_interfaces(self, index, vfs, xml_str):
        vpath = self.ovs_properties.get("vpath", "/usr/local")
        queue = self.ovs_properties.get("queues", 1)
        vf = self.networks[vfs[0]]
        port_num = vf.get('port_num', 0)
        vpci = utils.PciAddress(vf['vpci'].strip())
        # Generate the vpci for the interfaces
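        # The guest PCI slot is derived from the VM index and the port number
        # so that each interface gets a unique, predictable address.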
        slot = index + port_num + 10
        vf['vpci'] = \
            "{}:{}:{:02x}.{}".format(vpci.domain, vpci.bus, slot, vpci.function)
        return model.Libvirt.add_ovs_interface(
            vpath, port_num, vf['vpci'], vf['mac'], xml_str, queue)

    def setup_ovs_dpdk_context(self):
        nodes = []

        self.configure_nics_for_ovs_dpdk()

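        # Reserve enough hugepages for every VM's RAM plus both OVS-DPDK
        # socket-mem pools; the MB total is converted to kB for
        # setup_hugepages().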
        hp_total_mb = int(self.vm_flavor.get('ram', '4096')) * len(self.servers)
        common_utils.setup_hugepages(self.connection, (hp_total_mb + \
                                     self.get_dpdk_socket_mem_size(0) + \
                                     self.get_dpdk_socket_mem_size(1)) * 1024)

        self._check_hugepages()

        for index, (key, vnf) in enumerate(collections.OrderedDict(
                self.servers).items()):
            cfg = '/tmp/vm_ovs_%d.xml' % index
            vm_name = "vm-%d" % index
            cdrom_img = "/var/lib/libvirt/images/cdrom-%d.img" % index

            # 1. Check and delete VM if already exists
            model.Libvirt.check_if_vm_exists_and_delete(vm_name,
                                                        self.connection)
            xml_str, mac = model.Libvirt.build_vm_xml(
                self.connection, self.vm_flavor, vm_name, index, cdrom_img)

            # 2. Add an OVS vhostuser interface to the VM XML for every
            #    non-mgmt network port
            for vfs in [vfs for vfs_name, vfs in vnf["network_ports"].items()
                        if vfs_name != 'mgmt']:
                xml_str = self._enable_interfaces(index, vfs, xml_str)

            # copy xml to target...
            model.Libvirt.write_file(cfg, xml_str)
            self.connection.put(cfg, cfg)

            node = self.vnf_node.generate_vnf_instance(self.vm_flavor,
                                                       self.networks,
                                                       self.host_mgmt.get('ip'),
                                                       key, vnf, mac)
            # Generate public/private keys if password or private key file is not provided
            node = model.StandaloneContextHelper.check_update_key(self.connection,
                                                                  node,
                                                                  vm_name,
                                                                  self.name,
                                                                  cdrom_img,
                                                                  mac)

            # store vnf node details
            nodes.append(node)

            # NOTE: launch through libvirt
            LOG.info("virsh create ...")
            model.Libvirt.virsh_create_vm(self.connection, cfg)

            self.vm_names.append(vm_name)

        return nodes