Merge "Cleanup OpenStack utils test cases"
yardstick/benchmark/contexts/standalone/ovs_dpdk.py
# Copyright (c) 2016-2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import io
import collections
import logging
import os
import re
import time

from yardstick import ssh
from yardstick.network_services.utils import get_nsb_option
from yardstick.benchmark.contexts.base import Context
from yardstick.benchmark.contexts.standalone import model
from yardstick.common import exceptions
from yardstick.network_services import utils


LOG = logging.getLogger(__name__)

MAIN_BRIDGE = 'br0'


class OvsDpdkContext(Context):
    """ This class handles OVS standalone nodes - VM running on Non-Managed NFVi
    Configuration: ovs_dpdk
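
    Illustrative context definition (a sketch only; the field names are the ones
    read by init() and the other methods below, the values are examples):

        context:
          type: StandaloneOvsDpdk
          file: pod.yaml
          vm_deploy: True
          flavor: {...}
          servers: {...}
          networks: {...}
          ovs_properties:
            version: {ovs: 2.7.0, dpdk: 16.11.1}
            pmd_threads: 2
            ram: {socket_0: 2048, socket_1: 2048}
            queues: 1
            vpath: /usr/local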
    """

    __context_type__ = "StandaloneOvsDpdk"

    SUPPORTED_OVS_TO_DPDK_MAP = {
        '2.6.0': '16.07.1',
        '2.6.1': '16.07.2',
        '2.7.0': '16.11.1',
        '2.7.1': '16.11.2',
        '2.7.2': '16.11.3',
        '2.8.0': '17.05.2'
    }

    DEFAULT_OVS = '2.6.0'
    CMD_TIMEOUT = 30
    DEFAULT_USER_PATH = '/usr/local'

    def __init__(self):
        self.file_path = None
        self.sriov = []
        self.first_run = True
        self.dpdk_devbind = os.path.join(get_nsb_option('bin_path'),
                                         'dpdk-devbind.py')
        self.vm_names = []
        self.nfvi_host = []
        self.nodes = []
        self.networks = {}
        self.attrs = {}
        self.vm_flavor = None
        self.servers = None
        self.helper = model.StandaloneContextHelper()
        self.vnf_node = model.Server()
        self.ovs_properties = {}
        self.wait_for_vswitchd = 10
        super(OvsDpdkContext, self).__init__()

    def init(self, attrs):
        """initializes itself from the supplied arguments"""
        super(OvsDpdkContext, self).init(attrs)

        self.file_path = attrs.get("file", "pod.yaml")

        self.nodes, self.nfvi_host, self.host_mgmt = \
            self.helper.parse_pod_file(self.file_path, 'OvsDpdk')

        self.attrs = attrs
        self.vm_flavor = attrs.get('flavor', {})
        self.servers = attrs.get('servers', {})
        self.vm_deploy = attrs.get("vm_deploy", True)
        self.ovs_properties = attrs.get('ovs_properties', {})
        # add optional static network definition
        self.networks = attrs.get("networks", {})

        LOG.debug("Nodes: %r", self.nodes)
        LOG.debug("NFVi Node: %r", self.nfvi_host)
        LOG.debug("Networks: %r", self.networks)

    def setup_ovs(self):
        """Initialize OVS-DPDK"""
        vpath = self.ovs_properties.get('vpath', self.DEFAULT_USER_PATH)
        create_from = os.path.join(vpath, 'etc/openvswitch/conf.db')
        create_to = os.path.join(vpath, 'share/openvswitch/vswitch.ovsschema')

        cmd_list = [
            'killall -r "ovs.*" -q | true',
            'mkdir -p {0}/etc/openvswitch'.format(vpath),
            'mkdir -p {0}/var/run/openvswitch'.format(vpath),
            'rm {0}/etc/openvswitch/conf.db | true'.format(vpath),
            'ovsdb-tool create {0} {1}'.format(create_from, create_to),
            'modprobe vfio-pci',
            'chmod a+x /dev/vfio',
            'chmod 0666 /dev/vfio/*',
        ]

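        # Bind every configured physical port to vfio-pci via dpdk-devbind; the
        # devbind path is interpolated here, the PCI address per port below.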
        bind_cmd = '%s --force -b vfio-pci {port}' % self.dpdk_devbind
        for port in self.networks.values():
            cmd_list.append(bind_cmd.format(port=port.get('phy_port')))

        for cmd in cmd_list:
            LOG.info(cmd)
            exit_status, _, stderr = self.connection.execute(
                cmd, timeout=self.CMD_TIMEOUT)
            if exit_status:
                raise exceptions.OVSSetupError(command=cmd, error=stderr)

    def start_ovs_serverswitch(self):
        vpath = self.ovs_properties.get("vpath")
        pmd_nums = int(self.ovs_properties.get("pmd_threads", 2))
        ovs_sock_path = '/var/run/openvswitch/db.sock'
        log_path = '/var/log/openvswitch/ovs-vswitchd.log'

        pmd_cpu_mask = self.ovs_properties.get("pmd_cpu_mask", '')
        pmd_mask = hex(sum(2 ** num for num in range(pmd_nums)) << 1)
        if pmd_cpu_mask:
            pmd_mask = pmd_cpu_mask
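        # e.g. the default pmd_threads=2 yields a mask of 0x6 (CPUs 1-2); an
        # explicit pmd_cpu_mask from ovs_properties always takes precedence.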

        socket0 = self.ovs_properties.get("ram", {}).get("socket_0", "2048")
        socket1 = self.ovs_properties.get("ram", {}).get("socket_1", "2048")

        ovs_other_config = "ovs-vsctl {0}set Open_vSwitch . other_config:{1}"
        detach_cmd = "ovs-vswitchd unix:{0}{1} --pidfile --detach --log-file={2}"

        lcore_mask = self.ovs_properties.get("lcore_mask", '')
        if lcore_mask:
            lcore_mask = ovs_other_config.format("--no-wait ", "dpdk-lcore-mask='%s'" % lcore_mask)

        cmd_list = [
            "mkdir -p /usr/local/var/run/openvswitch",
            "mkdir -p {}".format(os.path.dirname(log_path)),
            "ovsdb-server --remote=punix:/{0}/{1}  --pidfile --detach".format(
                vpath, ovs_sock_path),
            ovs_other_config.format("--no-wait ", "dpdk-init=true"),
            ovs_other_config.format("--no-wait ", "dpdk-socket-mem='%s,%s'" % (socket0, socket1)),
            lcore_mask,
            detach_cmd.format(vpath, ovs_sock_path, log_path),
            ovs_other_config.format("", "pmd-cpu-mask=%s" % pmd_mask),
        ]
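        # With the defaults this expands to commands such as
        # "ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-socket-mem='2048,2048'";
        # the DPDK init options are applied before ovs-vswitchd is detached,
        # pmd-cpu-mask afterwards.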

        for cmd in cmd_list:
            LOG.info(cmd)
            self.connection.execute(cmd)
        time.sleep(self.wait_for_vswitchd)

    def setup_ovs_bridge_add_flows(self):
        dpdk_args = ""
        dpdk_list = []
        vpath = self.ovs_properties.get("vpath", "/usr/local")
        version = self.ovs_properties.get('version', {})
        ovs_ver = [int(x) for x in version.get('ovs', self.DEFAULT_OVS).split('.')]
        ovs_add_port = ('ovs-vsctl add-port {br} {port} -- '
                        'set Interface {port} type={type_}{dpdk_args}')
        ovs_add_queue = 'ovs-vsctl set Interface {port} options:n_rxq={queue}'
        chmod_vpath = 'chmod 0777 {0}/var/run/openvswitch/dpdkvhostuser*'

        cmd_list = [
            'ovs-vsctl --if-exists del-br {0}'.format(MAIN_BRIDGE),
            'rm -rf {0}/var/run/openvswitch/dpdkvhostuser*'.format(vpath),
            'ovs-vsctl add-br {0} -- set bridge {0} datapath_type=netdev'.
            format(MAIN_BRIDGE)
        ]

        ordered_network = collections.OrderedDict(self.networks)
        for index, vnf in enumerate(ordered_network.values()):
            if ovs_ver >= [2, 7, 0]:
                dpdk_args = " options:dpdk-devargs=%s" % vnf.get("phy_port")
            dpdk_list.append(ovs_add_port.format(
                br=MAIN_BRIDGE, port='dpdk%s' % vnf.get("port_num", 0),
                type_='dpdk', dpdk_args=dpdk_args))
            dpdk_list.append(ovs_add_queue.format(
                port='dpdk%s' % vnf.get("port_num", 0),
                queue=self.ovs_properties.get("queues", 1)))

        # Sort so the dpdk ports are added in order (dpdk0, dpdk1, ...)
        dpdk_list.sort()
        cmd_list.extend(dpdk_list)

        # Two separate loops so all dpdk ports are added before the vhost-user ports.
        for index, _ in enumerate(ordered_network):
            cmd_list.append(ovs_add_port.format(
                br=MAIN_BRIDGE, port='dpdkvhostuser%s' % index,
                type_='dpdkvhostuser', dpdk_args=""))

        ovs_flow = ("ovs-ofctl add-flow {0} in_port=%s,action=output:%s".
                    format(MAIN_BRIDGE))
        network_count = len(ordered_network) + 1
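        # Example: with two networks the dpdk ports become OVS ports 1-2 and the
        # vhost-user ports 3-4 (assuming OVS numbers them in the order they were
        # added above), so the flows below cross-connect 1<->3 and 2<->4.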
        for in_port, out_port in zip(range(1, network_count),
                                     range(network_count, network_count * 2)):
            cmd_list.append(ovs_flow % (in_port, out_port))
            cmd_list.append(ovs_flow % (out_port, in_port))

        cmd_list.append(chmod_vpath.format(vpath))

        for cmd in cmd_list:
            LOG.info(cmd)
            exit_status, _, stderr = self.connection.execute(
                cmd, timeout=self.CMD_TIMEOUT)
            if exit_status:
                raise exceptions.OVSSetupError(command=cmd, error=stderr)

    def _check_hugepages(self):
        meminfo = io.BytesIO()
        self.connection.get_file_obj('/proc/meminfo', meminfo)
        regex = re.compile(r"HugePages_Total:\s+(?P<hp_total>\d+)[\n\r]"
                           r"HugePages_Free:\s+(?P<hp_free>\d+)")
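        # The pattern matches two consecutive /proc/meminfo lines such as:
        #   HugePages_Total:    8192
        #   HugePages_Free:     6144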
        match = regex.search(meminfo.getvalue().decode('utf-8'))
        if not match:
            raise exceptions.OVSHugepagesInfoError()
        if int(match.group('hp_total')) == 0:
            raise exceptions.OVSHugepagesNotConfigured()
        if int(match.group('hp_free')) == 0:
            raise exceptions.OVSHugepagesZeroFree(
                total_hugepages=int(match.group('hp_total')))

    def cleanup_ovs_dpdk_env(self):
        self.connection.execute(
            'ovs-vsctl --if-exists del-br {0}'.format(MAIN_BRIDGE))
        self.connection.execute("pkill -9 ovs")

    def check_ovs_dpdk_env(self):
        self.cleanup_ovs_dpdk_env()
        self._check_hugepages()

        version = self.ovs_properties.get("version", {})
        ovs_ver = version.get("ovs", self.DEFAULT_OVS)
        dpdk_ver = version.get("dpdk", "16.07.2").split('.')

        supported_version = self.SUPPORTED_OVS_TO_DPDK_MAP.get(ovs_ver, None)
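        # Only major.minor are compared, so e.g. OVS 2.6.0 is accepted together
        # with any DPDK 16.07.x build.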
        if supported_version is None or supported_version.split('.')[:2] != dpdk_ver[:2]:
            raise exceptions.OVSUnsupportedVersion(
                ovs_version=ovs_ver,
                ovs_to_dpdk_map=self.SUPPORTED_OVS_TO_DPDK_MAP)

        status = self.connection.execute("ovs-vsctl -V | grep -i '%s'" % ovs_ver)[0]
        if status:
            deploy = model.OvsDeploy(self.connection,
                                     utils.get_nsb_option("bin_path"),
                                     self.ovs_properties)
            deploy.ovs_deploy()

    def deploy(self):
        """Deploy the OVS-DPDK environment and the VMs (no-op when vm_deploy is False)"""

        # Todo: NFVi deploy (sriov, vswitch, ovs etc) based on the config.
        if not self.vm_deploy:
            return

        self.connection = ssh.SSH.from_node(self.host_mgmt)

        # Check the dpdk/ovs versions; install if not present
        self.check_ovs_dpdk_env()
        model.StandaloneContextHelper.install_req_libs(self.connection)
        self.networks = model.StandaloneContextHelper.get_nic_details(
            self.connection, self.networks, self.dpdk_devbind)

        self.setup_ovs()
        self.start_ovs_serverswitch()
        self.setup_ovs_bridge_add_flows()
        self.nodes = self.setup_ovs_dpdk_context()
        LOG.debug("Waiting for VM to come up...")
        self.nodes = model.StandaloneContextHelper.wait_for_vnfs_to_start(
            self.connection, self.servers, self.nodes)

    def undeploy(self):

        if not self.vm_deploy:
            return

        # Cleanup the ovs installation...
        self.cleanup_ovs_dpdk_env()

        # Bind nics back to kernel
        bind_cmd = "{dpdk_devbind} --force -b {driver} {port}"
        for port in self.networks.values():
            vpci = port.get("phy_port")
            phy_driver = port.get("driver")
            self.connection.execute(bind_cmd.format(
                dpdk_devbind=self.dpdk_devbind, driver=phy_driver, port=vpci))

        # Todo: NFVi undeploy (sriov, vswitch, ovs etc) based on the config.
        for vm in self.vm_names:
            model.Libvirt.check_if_vm_exists_and_delete(vm, self.connection)

    def _get_physical_nodes(self):
        return self.nfvi_host

    def _get_physical_node_for_server(self, server_name):
        node_name, ctx_name = self.split_host_name(server_name)
        if ctx_name is None or self.name != ctx_name:
            return None

        matching_nodes = [s for s in self.servers if s == node_name]
        if len(matching_nodes) == 0:
            return None

        # self.nfvi_host always contains only one host
        return "{}.{}".format(self.nfvi_host[0]["name"], self._name)

    def _get_server(self, attr_name):
        """lookup server info by name from context

        Keyword arguments:
        attr_name -- A name for a server listed in nodes config file
        """
        node_name, name = self.split_host_name(attr_name)
        if name is None or self.name != name:
            return None

        matching_nodes = (n for n in self.nodes if n["name"] == node_name)
        try:
            # A clone is created in order to avoid affecting the
            # original one.
            node = dict(next(matching_nodes))
        except StopIteration:
            return None

        try:
            duplicate = next(matching_nodes)
        except StopIteration:
            pass
        else:
            raise ValueError("Duplicate nodes!!! Nodes: %s %s" % (node, duplicate))

        node["name"] = attr_name
        return node

    def _get_network(self, attr_name):
        if not isinstance(attr_name, collections.Mapping):
            network = self.networks.get(attr_name)

        else:
            # Don't generalize too much; just support vld_id
            vld_id = attr_name.get('vld_id', {})
            # for standalone context networks are dicts
            iter1 = (n for n in self.networks.values() if n.get('vld_id') == vld_id)
            network = next(iter1, None)

        if network is None:
            return None

        result = {
            # name is required
            "name": network["name"],
            "vld_id": network.get("vld_id"),
            "segmentation_id": network.get("segmentation_id"),
            "network_type": network.get("network_type"),
            "physical_network": network.get("physical_network"),
        }
        return result

    def configure_nics_for_ovs_dpdk(self):
        portlist = collections.OrderedDict(self.networks)
        for key in portlist:
            mac = model.StandaloneContextHelper.get_mac_address()
            portlist[key].update({'mac': mac})
        self.networks = portlist
        LOG.info("Ports %s", self.networks)

    def _enable_interfaces(self, index, vfs, xml_str):
        vpath = self.ovs_properties.get("vpath", "/usr/local")
        vf = self.networks[vfs[0]]
        port_num = vf.get('port_num', 0)
        vpci = utils.PciAddress(vf['vpci'].strip())
        # Generate the vpci for the interfaces
        slot = index + port_num + 10
        vf['vpci'] = \
            "{}:{}:{:02x}.{}".format(vpci.domain, vpci.bus, slot, vpci.function)
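        # e.g. the first VM's first port (index 0, port_num 0) lands on slot 0x0a,
        # keeping the domain, bus and function of the original host vpci.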
        return model.Libvirt.add_ovs_interface(
            vpath, port_num, vf['vpci'], vf['mac'], xml_str)

    def setup_ovs_dpdk_context(self):
        nodes = []

        self.configure_nics_for_ovs_dpdk()

        for index, (key, vnf) in enumerate(collections.OrderedDict(
                self.servers).items()):
            cfg = '/tmp/vm_ovs_%d.xml' % index
            vm_name = "vm_%d" % index

            # 1. Delete the VM if it already exists
            model.Libvirt.check_if_vm_exists_and_delete(vm_name,
                                                        self.connection)
            xml_str, mac = model.Libvirt.build_vm_xml(
                self.connection, self.vm_flavor, vm_name, index)

            # 2. Add the OVS vhost-user interfaces for this server to the VM XML
            for vfs in [vfs for vfs_name, vfs in vnf["network_ports"].items()
                        if vfs_name != 'mgmt']:
                xml_str = self._enable_interfaces(index, vfs, xml_str)

            # copy xml to target...
            model.Libvirt.write_file(cfg, xml_str)
            self.connection.put(cfg, cfg)

            # NOTE: launch through libvirt
            LOG.info("virsh create ...")
            model.Libvirt.virsh_create_vm(self.connection, cfg)

            self.vm_names.append(vm_name)

            # build vnf node details
            nodes.append(self.vnf_node.generate_vnf_instance(self.vm_flavor,
                                                             self.networks,
                                                             self.host_mgmt.get('ip'),
                                                             key, vnf, mac))

        return nodes