Merge "Setup hugepages on SA host(sriov, ovs_dpdk)"
[yardstick.git] / yardstick/benchmark/contexts/standalone/ovs_dpdk.py
# Copyright (c) 2016-2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import collections
import io
import logging
import os
import re
import time

from yardstick import ssh
from yardstick.benchmark import contexts
from yardstick.benchmark.contexts import base
from yardstick.benchmark.contexts.standalone import model
from yardstick.common import exceptions
from yardstick.common import utils as common_utils
from yardstick.network_services import utils
from yardstick.network_services.utils import get_nsb_option


LOG = logging.getLogger(__name__)

MAIN_BRIDGE = 'br0'


class OvsDpdkContext(base.Context):
    """Handle OVS-DPDK standalone nodes (VMs on a non-managed NFVi host).

    Configuration: ovs_dpdk
    """

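    # Illustrative context definition consumed by init() below. This is only a
    # sketch: the values are examples, and the exact "type" string is the one
    # registered for contexts.CONTEXT_STANDALONEOVSDPDK.
    #
    #   context:
    #     type: StandaloneOvsDpdk
    #     file: /etc/yardstick/nodes/standalone/pod_ovs.yaml
    #     vm_deploy: True
    #     ovs_properties:
    #       version: {ovs: 2.7.0, dpdk: 16.11.1}
    #       pmd_threads: 2
    #       ram: {socket_0: 2048, socket_1: 2048}
    #       queues: 4
    #     flavor: {...}       # VM flavor (ram, ...)
    #     servers: {...}      # one entry per guest VM
    #     networks: {...}     # optional static network definition
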
    __context_type__ = contexts.CONTEXT_STANDALONEOVSDPDK

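    # Each supported OVS release is pinned to the DPDK release it was validated
    # against; check_ovs_dpdk_env() rejects any OVS/DPDK pair whose major.minor
    # versions do not match this map.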
    SUPPORTED_OVS_TO_DPDK_MAP = {
        '2.6.0': '16.07.1',
        '2.6.1': '16.07.2',
        '2.7.0': '16.11.1',
        '2.7.1': '16.11.2',
        '2.7.2': '16.11.3',
        '2.8.0': '17.05.2',
        '2.8.1': '17.05.2'
    }

    DEFAULT_OVS = '2.6.0'
    CMD_TIMEOUT = 30
    DEFAULT_USER_PATH = '/usr/local'

    def __init__(self):
        self.file_path = None
        self.sriov = []
        self.first_run = True
        self.dpdk_devbind = os.path.join(get_nsb_option('bin_path'),
                                         'dpdk-devbind.py')
        self.vm_names = []
        self.nfvi_host = []
        self.nodes = []
        self.networks = {}
        self.attrs = {}
        self.vm_flavor = None
        self.servers = None
        self.helper = model.StandaloneContextHelper()
        self.vnf_node = model.Server()
        self.ovs_properties = {}
        self.wait_for_vswitchd = 10
        super(OvsDpdkContext, self).__init__()

    def init(self, attrs):
        """initializes itself from the supplied arguments"""
        super(OvsDpdkContext, self).init(attrs)

        self.file_path = attrs.get("file", "pod.yaml")

        self.nodes, self.nfvi_host, self.host_mgmt = \
            self.helper.parse_pod_file(self.file_path, 'OvsDpdk')

        self.attrs = attrs
        self.vm_flavor = attrs.get('flavor', {})
        self.servers = attrs.get('servers', {})
        self.vm_deploy = attrs.get("vm_deploy", True)
        self.ovs_properties = attrs.get('ovs_properties', {})
        # add optional static network definition
        self.networks = attrs.get("networks", {})

        LOG.debug("Nodes: %r", self.nodes)
        LOG.debug("NFVi Node: %r", self.nfvi_host)
        LOG.debug("Networks: %r", self.networks)

    def setup_ovs(self):
        """Initialize OVS-DPDK"""
        vpath = self.ovs_properties.get('vpath', self.DEFAULT_USER_PATH)
        create_from = os.path.join(vpath, 'etc/openvswitch/conf.db')
        create_to = os.path.join(vpath, 'share/openvswitch/vswitch.ovsschema')

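        # Kill any running OVS processes, recreate conf.db from the packaged
        # schema, load the vfio-pci driver and open up /dev/vfio so DPDK can
        # take over the NICs, then bind every configured physical port to
        # vfio-pci.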
        cmd_list = [
            'killall -r "ovs.*" -q | true',
            'mkdir -p {0}/etc/openvswitch'.format(vpath),
            'mkdir -p {0}/var/run/openvswitch'.format(vpath),
            'rm {0}/etc/openvswitch/conf.db | true'.format(vpath),
            'ovsdb-tool create {0} {1}'.format(create_from, create_to),
            'modprobe vfio-pci',
            'chmod a+x /dev/vfio',
            'chmod 0666 /dev/vfio/*',
        ]

        bind_cmd = '%s --force -b vfio-pci {port}' % self.dpdk_devbind
        for port in self.networks.values():
            cmd_list.append(bind_cmd.format(port=port.get('phy_port')))

        for cmd in cmd_list:
            LOG.info(cmd)
            exit_status, _, stderr = self.connection.execute(
                cmd, timeout=self.CMD_TIMEOUT)
            if exit_status:
                raise exceptions.OVSSetupError(command=cmd, error=stderr)

    def start_ovs_serverswitch(self):
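        """Start ovsdb-server and a DPDK-enabled ovs-vswitchd on the host."""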
        vpath = self.ovs_properties.get("vpath", self.DEFAULT_USER_PATH)
        pmd_nums = int(self.ovs_properties.get("pmd_threads", 2))
        ovs_sock_path = '/var/run/openvswitch/db.sock'
        log_path = '/var/log/openvswitch/ovs-vswitchd.log'

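        # Default pmd-cpu-mask: one bit per PMD thread starting at core 1
        # (core 0 is left free); an explicit pmd_cpu_mask property overrides it.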
        pmd_cpu_mask = self.ovs_properties.get("pmd_cpu_mask", '')
        pmd_mask = hex(sum(2 ** num for num in range(pmd_nums)) << 1)
        if pmd_cpu_mask:
            pmd_mask = pmd_cpu_mask

        socket0 = self.ovs_properties.get("ram", {}).get("socket_0", "2048")
        socket1 = self.ovs_properties.get("ram", {}).get("socket_1", "2048")

        ovs_other_config = "ovs-vsctl {0}set Open_vSwitch . other_config:{1}"
        detach_cmd = "ovs-vswitchd unix:{0}{1} --pidfile --detach --log-file={2}"

        lcore_mask = self.ovs_properties.get("lcore_mask", '')
        if lcore_mask:
            lcore_mask = ovs_other_config.format("--no-wait ", "dpdk-lcore-mask='%s'" % lcore_mask)

        max_idle = self.ovs_properties.get("max_idle", '')
        if max_idle:
            max_idle = ovs_other_config.format("", "max-idle=%s" % max_idle)

        cmd_list = [
            "mkdir -p /usr/local/var/run/openvswitch",
            "mkdir -p {}".format(os.path.dirname(log_path)),
            ("ovsdb-server --remote=punix:/{0}/{1} --remote=ptcp:6640"
             " --pidfile --detach").format(vpath, ovs_sock_path),
            ovs_other_config.format("--no-wait ", "dpdk-init=true"),
            ovs_other_config.format("--no-wait ", "dpdk-socket-mem='%s,%s'" % (socket0, socket1)),
            lcore_mask,
            detach_cmd.format(vpath, ovs_sock_path, log_path),
            ovs_other_config.format("", "pmd-cpu-mask=%s" % pmd_mask),
            max_idle,
        ]

        for cmd in cmd_list:
            LOG.info(cmd)
            self.connection.execute(cmd)
        time.sleep(self.wait_for_vswitchd)

    def setup_ovs_bridge_add_flows(self):
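        """Create the main netdev bridge and cross-connect its ports.

        One dpdk port is added per physical NIC and one dpdkvhostuser port
        per VM interface; OpenFlow rules then forward traffic between each
        dpdk/dpdkvhostuser pair.
        """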
        dpdk_args = ""
        dpdk_list = []
        vpath = self.ovs_properties.get("vpath", self.DEFAULT_USER_PATH)
        version = self.ovs_properties.get('version', {})
        ovs_ver = [int(x) for x in version.get('ovs', self.DEFAULT_OVS).split('.')]
        ovs_add_port = ('ovs-vsctl add-port {br} {port} -- '
                        'set Interface {port} type={type_}{dpdk_args}{dpdk_rxq}')
        chmod_vpath = 'chmod 0777 {0}/var/run/openvswitch/dpdkvhostuser*'

        cmd_list = [
            'ovs-vsctl --if-exists del-br {0}'.format(MAIN_BRIDGE),
            'rm -rf {0}/var/run/openvswitch/dpdkvhostuser*'.format(vpath),
            'ovs-vsctl add-br {0} -- set bridge {0} datapath_type=netdev'.
            format(MAIN_BRIDGE)
        ]
        dpdk_rxq = ""
        queues = self.ovs_properties.get("queues")
        if queues:
            dpdk_rxq = " options:n_rxq={queue}".format(queue=queues)

        ordered_network = collections.OrderedDict(self.networks)
        for vnf in ordered_network.values():
            if ovs_ver >= [2, 7, 0]:
                dpdk_args = " options:dpdk-devargs=%s" % vnf.get("phy_port")
            dpdk_list.append(ovs_add_port.format(
                br=MAIN_BRIDGE, port='dpdk%s' % vnf.get("port_num", 0),
                type_='dpdk', dpdk_args=dpdk_args, dpdk_rxq=dpdk_rxq))

        # Sort the list so the dpdk ports are added in order (dpdk0, dpdk1, ...).
        dpdk_list.sort()
        cmd_list.extend(dpdk_list)

        # A second loop keeps the dpdk ports grouped before the vhost-user ports.
        for index, _ in enumerate(ordered_network):
            cmd_list.append(ovs_add_port.format(
                br=MAIN_BRIDGE, port='dpdkvhostuser%s' % index,
                type_='dpdkvhostuser', dpdk_args="", dpdk_rxq=""))

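        # Assuming OVS assigns ofport numbers in the order the ports were added
        # (1..N for the dpdk ports, N+1..2N for the vhost-user ports), the
        # flows below cross-connect each dpdk port with its corresponding
        # vhost-user port in both directions.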
        ovs_flow = ("ovs-ofctl add-flow {0} in_port=%s,action=output:%s".
                    format(MAIN_BRIDGE))
        network_count = len(ordered_network) + 1
        for in_port, out_port in zip(range(1, network_count),
                                     range(network_count, network_count * 2)):
            cmd_list.append(ovs_flow % (in_port, out_port))
            cmd_list.append(ovs_flow % (out_port, in_port))

        cmd_list.append(chmod_vpath.format(vpath))

        for cmd in cmd_list:
            LOG.info(cmd)
            exit_status, _, stderr = self.connection.execute(
                cmd, timeout=self.CMD_TIMEOUT)
            if exit_status:
                raise exceptions.OVSSetupError(command=cmd, error=stderr)

    def _check_hugepages(self):
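        """Read /proc/meminfo on the host and validate hugepage availability.

        Raises if the hugepage counters cannot be parsed, if no hugepages are
        configured, or if none are left free.
        """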
        meminfo = io.BytesIO()
        self.connection.get_file_obj('/proc/meminfo', meminfo)
        regex = re.compile(r"HugePages_Total:\s+(?P<hp_total>\d+)[\n\r]"
                           r"HugePages_Free:\s+(?P<hp_free>\d+)")
        match = regex.search(meminfo.getvalue().decode('utf-8'))
        if not match:
            raise exceptions.OVSHugepagesInfoError()
        if int(match.group('hp_total')) == 0:
            raise exceptions.OVSHugepagesNotConfigured()
        if int(match.group('hp_free')) == 0:
            raise exceptions.OVSHugepagesZeroFree(
                total_hugepages=int(match.group('hp_total')))

    def cleanup_ovs_dpdk_env(self):
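        """Delete the main bridge and kill any running OVS processes."""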
        self.connection.execute(
            'ovs-vsctl --if-exists del-br {0}'.format(MAIN_BRIDGE))
        self.connection.execute("pkill -9 ovs")

    def check_ovs_dpdk_env(self):
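        """Validate the requested OVS/DPDK pairing and install OVS if needed.

        If the host does not already report the requested OVS version,
        model.OvsDeploy is used to install it.
        """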
        self.cleanup_ovs_dpdk_env()

        version = self.ovs_properties.get("version", {})
        ovs_ver = version.get("ovs", self.DEFAULT_OVS)
        dpdk_ver = version.get("dpdk", "16.07.2").split('.')

        supported_version = self.SUPPORTED_OVS_TO_DPDK_MAP.get(ovs_ver, None)
        if supported_version is None or supported_version.split('.')[:2] != dpdk_ver[:2]:
            raise exceptions.OVSUnsupportedVersion(
                ovs_version=ovs_ver,
                ovs_to_dpdk_map=self.SUPPORTED_OVS_TO_DPDK_MAP)

        status = self.connection.execute("ovs-vsctl -V | grep -i '%s'" % ovs_ver)[0]
        if status:
            deploy = model.OvsDeploy(self.connection,
                                     utils.get_nsb_option("bin_path"),
                                     self.ovs_properties)
            deploy.ovs_deploy()

    def deploy(self):
        """Deploy the OVS-DPDK environment and the VMs.

        Skipped entirely when vm_deploy is False.
        """

        # Todo: NFVi deploy (sriov, vswitch, ovs etc) based on the config.
        if not self.vm_deploy:
            return

        self.connection = ssh.SSH.from_node(self.host_mgmt)

        # Check the OVS/DPDK version; install OVS-DPDK if not already present.
        self.check_ovs_dpdk_env()
        model.StandaloneContextHelper.install_req_libs(self.connection)
        self.networks = model.StandaloneContextHelper.get_nic_details(
            self.connection, self.networks, self.dpdk_devbind)

        self.setup_ovs()
        self.start_ovs_serverswitch()
        self.setup_ovs_bridge_add_flows()
        self.nodes = self.setup_ovs_dpdk_context()
        LOG.debug("Waiting for VM to come up...")
        self.nodes = model.StandaloneContextHelper.wait_for_vnfs_to_start(
            self.connection, self.servers, self.nodes)

    def undeploy(self):
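        """Tear down the OVS environment, rebind the NICs and delete the VMs."""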

        if not self.vm_deploy:
            return

        # Clean up the OVS installation.
        self.cleanup_ovs_dpdk_env()

        # Bind the NICs back to their kernel drivers.
        bind_cmd = "{dpdk_devbind} --force -b {driver} {port}"
        for port in self.networks.values():
            vpci = port.get("phy_port")
            phy_driver = port.get("driver")
            self.connection.execute(bind_cmd.format(
                dpdk_devbind=self.dpdk_devbind, driver=phy_driver, port=vpci))

        # Todo: NFVi undeploy (sriov, vswitch, ovs etc) based on the config.
        for vm in self.vm_names:
            model.Libvirt.check_if_vm_exists_and_delete(vm, self.connection)

    def _get_physical_nodes(self):
        return self.nfvi_host

    def _get_physical_node_for_server(self, server_name):
        node_name, ctx_name = self.split_host_name(server_name)
        if ctx_name is None or self.name != ctx_name:
            return None

        matching_nodes = [s for s in self.servers if s == node_name]
        if len(matching_nodes) == 0:
            return None

        # self.nfvi_host always contains only one host.
        return "{}.{}".format(self.nfvi_host[0]["name"], self._name)

    def _get_server(self, attr_name):
        """Look up server info by name from the context.

        Keyword arguments:
        attr_name -- A name for a server listed in nodes config file
        """
        node_name, name = self.split_host_name(attr_name)
        if name is None or self.name != name:
            return None

        matching_nodes = (n for n in self.nodes if n["name"] == node_name)
        try:
            # A clone is created in order to avoid affecting the
            # original one.
            node = dict(next(matching_nodes))
        except StopIteration:
            return None

        try:
            duplicate = next(matching_nodes)
        except StopIteration:
            pass
        else:
            raise ValueError("Duplicate nodes!!! Nodes: %s %s" % (node, duplicate))

        node["name"] = attr_name
        return node

    def _get_network(self, attr_name):
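        """Look up a network by name, or by a mapping carrying a 'vld_id'."""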
        if not isinstance(attr_name, collections.Mapping):
            network = self.networks.get(attr_name)
        else:
            # Don't generalize too much; just support vld_id.
            vld_id = attr_name.get('vld_id', {})
            # For standalone contexts, networks are dicts.
            iter1 = (n for n in self.networks.values() if n.get('vld_id') == vld_id)
            network = next(iter1, None)

        if network is None:
            return None

        result = {
            # name is required
            "name": network["name"],
            "vld_id": network.get("vld_id"),
            "segmentation_id": network.get("segmentation_id"),
            "network_type": network.get("network_type"),
            "physical_network": network.get("physical_network"),
        }
        return result

    def configure_nics_for_ovs_dpdk(self):
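        """Assign a locally generated MAC address to every configured port."""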
        portlist = collections.OrderedDict(self.networks)
        for key in portlist:
            mac = model.StandaloneContextHelper.get_mac_address()
            portlist[key].update({'mac': mac})
        self.networks = portlist
        LOG.info("Ports %s", self.networks)

    def _enable_interfaces(self, index, vfs, xml_str):
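        """Assign the interface a unique guest PCI slot and attach it to OVS.

        The vPCI address is rewritten using the VM index and port number, and
        the matching vhost-user interface definition is appended to the domain
        XML via model.Libvirt.add_ovs_interface().
        """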
        vpath = self.ovs_properties.get("vpath", self.DEFAULT_USER_PATH)
        queue = self.ovs_properties.get("queues", 1)
        vf = self.networks[vfs[0]]
        port_num = vf.get('port_num', 0)
        vpci = utils.PciAddress(vf['vpci'].strip())
        # Generate the vpci for the interfaces
        slot = index + port_num + 10
        vf['vpci'] = \
            "{}:{}:{:02x}.{}".format(vpci.domain, vpci.bus, slot, vpci.function)
        return model.Libvirt.add_ovs_interface(
            vpath, port_num, vf['vpci'], vf['mac'], xml_str, queue)

    def setup_ovs_dpdk_context(self):
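        """Create and boot one libvirt VM per configured server.

        Hugepages are reserved to back the VMs, each domain XML is built and
        wired to the OVS vhost-user ports, and the resulting VNF node records
        are returned.
        """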
        nodes = []

        self.configure_nics_for_ovs_dpdk()

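        # Reserve enough hugepage memory to back every VM: flavor RAM (in MB)
        # per server, converted to kB for setup_hugepages().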
        hp_total_mb = int(self.vm_flavor.get('ram', '4096')) * len(self.servers)
        common_utils.setup_hugepages(self.connection, hp_total_mb * 1024)

        self._check_hugepages()

        for index, (key, vnf) in enumerate(collections.OrderedDict(
                self.servers).items()):
            cfg = '/tmp/vm_ovs_%d.xml' % index
            vm_name = "vm-%d" % index
            cdrom_img = "/var/lib/libvirt/images/cdrom-%d.img" % index

            # 1: Check for the VM and delete it if it already exists.
            model.Libvirt.check_if_vm_exists_and_delete(vm_name,
                                                        self.connection)
            xml_str, mac = model.Libvirt.build_vm_xml(
                self.connection, self.vm_flavor, vm_name, index, cdrom_img)

            # 2: Add an OVS vhost-user interface for every non-mgmt network port.
            for vfs in [vfs for vfs_name, vfs in vnf["network_ports"].items()
                        if vfs_name != 'mgmt']:
                xml_str = self._enable_interfaces(index, vfs, xml_str)

            # 3: Copy the domain XML to the target host.
            model.Libvirt.write_file(cfg, xml_str)
            self.connection.put(cfg, cfg)

            node = self.vnf_node.generate_vnf_instance(self.vm_flavor,
                                                       self.networks,
                                                       self.host_mgmt.get('ip'),
                                                       key, vnf, mac)
            # Generate public/private keys if password or private key file is not provided
            node = model.StandaloneContextHelper.check_update_key(self.connection,
                                                                  node,
                                                                  vm_name,
                                                                  self.name,
                                                                  cdrom_img,
                                                                  mac)

            # Store the VNF node details.
            nodes.append(node)

            # NOTE: launch through libvirt
            LOG.info("virsh create ...")
            model.Libvirt.virsh_create_vm(self.connection, cfg)

            self.vm_names.append(vm_name)

        return nodes