Merge "Resolve NameError in test_utils.py"
[yardstick.git] / yardstick / benchmark / contexts / standalone / ovs_dpdk.py
index e990e46..e6a6f99 100644 (file)
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from __future__ import absolute_import
-import os
-import logging
+import io
 import collections
+import logging
+import os
+import re
 import time
 
-from collections import OrderedDict
-
 from yardstick import ssh
 from yardstick.network_services.utils import get_nsb_option
-from yardstick.network_services.utils import provision_tool
 from yardstick.benchmark.contexts.base import Context
-from yardstick.benchmark.contexts.standalone.model import Libvirt
-from yardstick.benchmark.contexts.standalone.model import StandaloneContextHelper
-from yardstick.benchmark.contexts.standalone.model import Server
-from yardstick.benchmark.contexts.standalone.model import OvsDeploy
-from yardstick.network_services.utils import PciAddress
+from yardstick.benchmark.contexts.standalone import model
+from yardstick.common import exceptions
+from yardstick.network_services import utils
+
 
 LOG = logging.getLogger(__name__)
 
+MAIN_BRIDGE = 'br0'
+
 
 class OvsDpdkContext(Context):
     """ This class handles OVS standalone nodes - VM running on Non-Managed NFVi
@@ -50,32 +49,32 @@ class OvsDpdkContext(Context):
     }
 
     DEFAULT_OVS = '2.6.0'
-
-    PKILL_TEMPLATE = "pkill %s %s"
+    CMD_TIMEOUT = 30
+    DEFAULT_USER_PATH = '/usr/local'
 
     def __init__(self):
         self.file_path = None
         self.sriov = []
         self.first_run = True
-        self.dpdk_nic_bind = ""
+        self.dpdk_devbind = os.path.join(get_nsb_option('bin_path'),
+                                         'dpdk-devbind.py')
         self.vm_names = []
-        self.name = None
         self.nfvi_host = []
         self.nodes = []
         self.networks = {}
         self.attrs = {}
         self.vm_flavor = None
         self.servers = None
-        self.helper = StandaloneContextHelper()
-        self.vnf_node = Server()
+        self.helper = model.StandaloneContextHelper()
+        self.vnf_node = model.Server()
         self.ovs_properties = {}
         self.wait_for_vswitchd = 10
         super(OvsDpdkContext, self).__init__()
 
     def init(self, attrs):
         """initializes itself from the supplied arguments"""
+        super(OvsDpdkContext, self).init(attrs)
 
-        self.name = attrs["name"]
         self.file_path = attrs.get("file", "pod.yaml")
 
         self.nodes, self.nfvi_host, self.host_mgmt = \
@@ -94,34 +93,32 @@ class OvsDpdkContext(Context):
         LOG.debug("Networks: %r", self.networks)
 
     def setup_ovs(self):
-        vpath = self.ovs_properties.get("vpath", "/usr/local")
-        xargs_kill_cmd = self.PKILL_TEMPLATE % ('-9', 'ovs')
-
+        """Initialize OVS-DPDK"""
+        vpath = self.ovs_properties.get('vpath', self.DEFAULT_USER_PATH)
         create_from = os.path.join(vpath, 'etc/openvswitch/conf.db')
         create_to = os.path.join(vpath, 'share/openvswitch/vswitch.ovsschema')
 
         cmd_list = [
-            "chmod 0666 /dev/vfio/*",
-            "chmod a+x /dev/vfio",
-            "pkill -9 ovs",
-            xargs_kill_cmd,
-            "killall -r 'ovs*'",
-            "mkdir -p {0}/etc/openvswitch".format(vpath),
-            "mkdir -p {0}/var/run/openvswitch".format(vpath),
-            "rm {0}/etc/openvswitch/conf.db".format(vpath),
-            "ovsdb-tool create {0} {1}".format(create_from, create_to),
-            "modprobe vfio-pci",
-            "chmod a+x /dev/vfio",
-            "chmod 0666 /dev/vfio/*",
+            'killall -r "ovs.*" -q | true',
+            'mkdir -p {0}/etc/openvswitch'.format(vpath),
+            'mkdir -p {0}/var/run/openvswitch'.format(vpath),
+            'rm {0}/etc/openvswitch/conf.db | true'.format(vpath),
+            'ovsdb-tool create {0} {1}'.format(create_from, create_to),
+            'modprobe vfio-pci',
+            'chmod a+x /dev/vfio',
+            'chmod 0666 /dev/vfio/*',
         ]
+
+        bind_cmd = '%s --force -b vfio-pci {port}' % self.dpdk_devbind
+        for port in self.networks.values():
+            cmd_list.append(bind_cmd.format(port=port.get('phy_port')))
+
         for cmd in cmd_list:
-            self.connection.execute(cmd)
-        bind_cmd = "{dpdk_nic_bind} --force -b {driver} {port}"
-        phy_driver = "vfio-pci"
-        for key, port in self.networks.items():
-            vpci = port.get("phy_port")
-            self.connection.execute(bind_cmd.format(dpdk_nic_bind=self.dpdk_nic_bind,
-                                                    driver=phy_driver, port=vpci))
+            LOG.info(cmd)
+            exit_status, _, stderr = self.connection.execute(
+                cmd, timeout=self.CMD_TIMEOUT)
+            if exit_status:
+                raise exceptions.OVSSetupError(command=cmd, error=stderr)
 
     def start_ovs_serverswitch(self):
         vpath = self.ovs_properties.get("vpath")
@@ -129,19 +126,29 @@ class OvsDpdkContext(Context):
         ovs_sock_path = '/var/run/openvswitch/db.sock'
         log_path = '/var/log/openvswitch/ovs-vswitchd.log'
 
+        pmd_cpu_mask = self.ovs_properties.get("pmd_cpu_mask", '')
         pmd_mask = hex(sum(2 ** num for num in range(pmd_nums)) << 1)
+        if pmd_cpu_mask:
+            pmd_mask = pmd_cpu_mask
+
         socket0 = self.ovs_properties.get("ram", {}).get("socket_0", "2048")
         socket1 = self.ovs_properties.get("ram", {}).get("socket_1", "2048")
 
         ovs_other_config = "ovs-vsctl {0}set Open_vSwitch . other_config:{1}"
         detach_cmd = "ovs-vswitchd unix:{0}{1} --pidfile --detach --log-file={2}"
 
+        lcore_mask = self.ovs_properties.get("lcore_mask", '')
+        if lcore_mask:
+            lcore_mask = ovs_other_config.format("--no-wait ", "dpdk-lcore-mask='%s'" % lcore_mask)
+
         cmd_list = [
             "mkdir -p /usr/local/var/run/openvswitch",
+            "mkdir -p {}".format(os.path.dirname(log_path)),
             "ovsdb-server --remote=punix:/{0}/{1}  --pidfile --detach".format(vpath,
                                                                               ovs_sock_path),
             ovs_other_config.format("--no-wait ", "dpdk-init=true"),
             ovs_other_config.format("--no-wait ", "dpdk-socket-mem='%s,%s'" % (socket0, socket1)),
+            lcore_mask,
             detach_cmd.format(vpath, ovs_sock_path, log_path),
             ovs_other_config.format("", "pmd-cpu-mask=%s" % pmd_mask),
         ]
@@ -157,56 +164,78 @@ class OvsDpdkContext(Context):
         vpath = self.ovs_properties.get("vpath", "/usr/local")
         version = self.ovs_properties.get('version', {})
         ovs_ver = [int(x) for x in version.get('ovs', self.DEFAULT_OVS).split('.')]
-        ovs_add_port = \
-            "ovs-vsctl add-port {br} {port} -- set Interface {port} type={type_}{dpdk_args}"
-        ovs_add_queue = "ovs-vsctl set Interface {port} options:n_rxq={queue}"
-        chmod_vpath = "chmod 0777 {0}/var/run/openvswitch/dpdkvhostuser*"
-
-        cmd_dpdk_list = [
-            "ovs-vsctl del-br br0",
-            "rm -rf {0}/var/run/openvswitch/dpdkvhostuser*".format(vpath),
-            "ovs-vsctl add-br br0 -- set bridge br0 datapath_type=netdev",
+        ovs_add_port = ('ovs-vsctl add-port {br} {port} -- '
+                        'set Interface {port} type={type_}{dpdk_args}')
+        ovs_add_queue = 'ovs-vsctl set Interface {port} options:n_rxq={queue}'
+        chmod_vpath = 'chmod 0777 {0}/var/run/openvswitch/dpdkvhostuser*'
+
+        cmd_list = [
+            'ovs-vsctl --if-exists del-br {0}'.format(MAIN_BRIDGE),
+            'rm -rf {0}/var/run/openvswitch/dpdkvhostuser*'.format(vpath),
+            'ovs-vsctl add-br {0} -- set bridge {0} datapath_type=netdev'.
+            format(MAIN_BRIDGE)
         ]
 
-        ordered_network = OrderedDict(self.networks)
-        for index, (key, vnf) in enumerate(ordered_network.items()):
+        ordered_network = collections.OrderedDict(self.networks)
+        for index, vnf in enumerate(ordered_network.values()):
             if ovs_ver >= [2, 7, 0]:
                 dpdk_args = " options:dpdk-devargs=%s" % vnf.get("phy_port")
-            dpdk_list.append(ovs_add_port.format(br='br0', port='dpdk%s' % vnf.get("port_num", 0),
-                                                 type_='dpdk', dpdk_args=dpdk_args))
-            dpdk_list.append(ovs_add_queue.format(port='dpdk%s' % vnf.get("port_num", 0),
-                                                  queue=self.ovs_properties.get("queues", 1)))
+            dpdk_list.append(ovs_add_port.format(
+                br=MAIN_BRIDGE, port='dpdk%s' % vnf.get("port_num", 0),
+                type_='dpdk', dpdk_args=dpdk_args))
+            dpdk_list.append(ovs_add_queue.format(
+                port='dpdk%s' % vnf.get("port_num", 0),
+                queue=self.ovs_properties.get("queues", 1)))
 
         # Sorting the array to make sure we execute dpdk0... in the order
         list.sort(dpdk_list)
-        cmd_dpdk_list.extend(dpdk_list)
+        cmd_list.extend(dpdk_list)
 
         # Need to do two for loop to maintain the dpdk/vhost ports.
         for index, _ in enumerate(ordered_network):
-            cmd_dpdk_list.append(ovs_add_port.format(br='br0', port='dpdkvhostuser%s' % index,
-                                                     type_='dpdkvhostuser', dpdk_args=""))
-
-        for cmd in cmd_dpdk_list:
-            LOG.info(cmd)
-            self.connection.execute(cmd)
-
-        # Fixme: add flows code
-        ovs_flow = "ovs-ofctl add-flow br0 in_port=%s,action=output:%s"
+            cmd_list.append(ovs_add_port.format(
+                br=MAIN_BRIDGE, port='dpdkvhostuser%s' % index,
+                type_='dpdkvhostuser', dpdk_args=""))
 
+        ovs_flow = ("ovs-ofctl add-flow {0} in_port=%s,action=output:%s".
+                    format(MAIN_BRIDGE))
         network_count = len(ordered_network) + 1
         for in_port, out_port in zip(range(1, network_count),
                                      range(network_count, network_count * 2)):
-            self.connection.execute(ovs_flow % (in_port, out_port))
-            self.connection.execute(ovs_flow % (out_port, in_port))
+            cmd_list.append(ovs_flow % (in_port, out_port))
+            cmd_list.append(ovs_flow % (out_port, in_port))
+
+        cmd_list.append(chmod_vpath.format(vpath))
 
-        self.connection.execute(chmod_vpath.format(vpath))
+        for cmd in cmd_list:
+            LOG.info(cmd)
+            exit_status, _, stderr = self.connection.execute(
+                cmd, timeout=self.CMD_TIMEOUT)
+            if exit_status:
+                raise exceptions.OVSSetupError(command=cmd, error=stderr)
+
+    def _check_hugepages(self):
+        meminfo = io.BytesIO()
+        self.connection.get_file_obj('/proc/meminfo', meminfo)
+        regex = re.compile(r"HugePages_Total:\s+(?P<hp_total>\d+)[\n\r]"
+                           r"HugePages_Free:\s+(?P<hp_free>\d+)")
+        match = regex.search(meminfo.getvalue().decode('utf-8'))
+        if not match:
+            raise exceptions.OVSHugepagesInfoError()
+        if int(match.group('hp_total')) == 0:
+            raise exceptions.OVSHugepagesNotConfigured()
+        if int(match.group('hp_free')) == 0:
+            raise exceptions.OVSHugepagesZeroFree(
+                total_hugepages=int(match.group('hp_total')))
 
     def cleanup_ovs_dpdk_env(self):
-        self.connection.execute("ovs-vsctl del-br br0")
+        self.connection.execute(
+            'ovs-vsctl --if-exists del-br {0}'.format(MAIN_BRIDGE))
         self.connection.execute("pkill -9 ovs")
 
     def check_ovs_dpdk_env(self):
         self.cleanup_ovs_dpdk_env()
+        self._check_hugepages()
 
         version = self.ovs_properties.get("version", {})
         ovs_ver = version.get("ovs", self.DEFAULT_OVS)
@@ -214,13 +243,15 @@ class OvsDpdkContext(Context):
 
         supported_version = self.SUPPORTED_OVS_TO_DPDK_MAP.get(ovs_ver, None)
         if supported_version is None or supported_version.split('.')[:2] != dpdk_ver[:2]:
-            raise Exception("Unsupported ovs '{}'. Please check the config...".format(ovs_ver))
+            raise exceptions.OVSUnsupportedVersion(
+                ovs_version=ovs_ver,
+                ovs_to_dpdk_map=self.SUPPORTED_OVS_TO_DPDK_MAP)
 
         status = self.connection.execute("ovs-vsctl -V | grep -i '%s'" % ovs_ver)[0]
         if status:
-            deploy = OvsDeploy(self.connection,
-                               get_nsb_option("bin_path"),
-                               self.ovs_properties)
+            deploy = model.OvsDeploy(self.connection,
+                                     utils.get_nsb_option("bin_path"),
+                                     self.ovs_properties)
             deploy.ovs_deploy()
 
     def deploy(self):
@@ -231,26 +262,21 @@ class OvsDpdkContext(Context):
             return
 
         self.connection = ssh.SSH.from_node(self.host_mgmt)
-        self.dpdk_nic_bind = provision_tool(
-            self.connection,
-            os.path.join(get_nsb_option("bin_path"), "dpdk-devbind.py"))
 
         # Check dpdk/ovs version, if not present install
         self.check_ovs_dpdk_env()
         #    Todo: NFVi deploy (sriov, vswitch, ovs etc) based on the config.
-        StandaloneContextHelper.install_req_libs(self.connection)
-        self.networks = StandaloneContextHelper.get_nic_details(self.connection,
-                                                                self.networks,
-                                                                self.dpdk_nic_bind)
+        model.StandaloneContextHelper.install_req_libs(self.connection)
+        self.networks = model.StandaloneContextHelper.get_nic_details(
+            self.connection, self.networks, self.dpdk_devbind)
 
         self.setup_ovs()
         self.start_ovs_serverswitch()
         self.setup_ovs_bridge_add_flows()
         self.nodes = self.setup_ovs_dpdk_context()
         LOG.debug("Waiting for VM to come up...")
-        self.nodes = StandaloneContextHelper.wait_for_vnfs_to_start(self.connection,
-                                                                    self.servers,
-                                                                    self.nodes)
+        self.nodes = model.StandaloneContextHelper.wait_for_vnfs_to_start(
+            self.connection, self.servers, self.nodes)
 
     def undeploy(self):
 
@@ -261,16 +287,31 @@ class OvsDpdkContext(Context):
         self.cleanup_ovs_dpdk_env()
 
         # Bind nics back to kernel
-        bind_cmd = "{dpdk_nic_bind} --force -b {driver} {port}"
-        for key, port in self.networks.items():
+        bind_cmd = "{dpdk_devbind} --force -b {driver} {port}"
+        for port in self.networks.values():
             vpci = port.get("phy_port")
             phy_driver = port.get("driver")
-            self.connection.execute(bind_cmd.format(dpdk_nic_bind=self.dpdk_nic_bind,
-                                                    driver=phy_driver, port=vpci))
+            self.connection.execute(bind_cmd.format(
+                dpdk_devbind=self.dpdk_devbind, driver=phy_driver, port=vpci))
 
         # Todo: NFVi undeploy (sriov, vswitch, ovs etc) based on the config.
         for vm in self.vm_names:
-            Libvirt.check_if_vm_exists_and_delete(vm, self.connection)
+            model.Libvirt.check_if_vm_exists_and_delete(vm, self.connection)
+
+    def _get_physical_nodes(self):
+        return self.nfvi_host
+
+    def _get_physical_node_for_server(self, server_name):
+        node_name, ctx_name = self.split_host_name(server_name)
+        if ctx_name is None or self.name != ctx_name:
+            return None
+
+        matching_nodes = [s for s in self.servers if s == node_name]
+        if len(matching_nodes) == 0:
+            return None
+
+        # self.nfvi_host always contain only one host
+        return "{}.{}".format(self.nfvi_host[0]["name"], self._name)
 
     def _get_server(self, attr_name):
         """lookup server info by name from context
@@ -278,7 +319,7 @@ class OvsDpdkContext(Context):
         Keyword arguments:
         attr_name -- A name for a server listed in nodes config file
         """
-        node_name, name = self.split_name(attr_name)
+        node_name, name = self.split_host_name(attr_name)
         if name is None or self.name != name:
             return None
 
@@ -295,8 +336,7 @@ class OvsDpdkContext(Context):
         except StopIteration:
             pass
         else:
-            raise ValueError("Duplicate nodes!!! Nodes: %s %s",
-                             (node, duplicate))
+            raise ValueError("Duplicate nodes!!! Nodes: %s %s" % (node, duplicate))
 
         node["name"] = attr_name
         return node
@@ -326,49 +366,53 @@ class OvsDpdkContext(Context):
         return result
 
     def configure_nics_for_ovs_dpdk(self):
-        portlist = OrderedDict(self.networks)
-        for key, ports in portlist.items():
-            mac = StandaloneContextHelper.get_mac_address()
+        portlist = collections.OrderedDict(self.networks)
+        for key in portlist:
+            mac = model.StandaloneContextHelper.get_mac_address()
             portlist[key].update({'mac': mac})
         self.networks = portlist
-        LOG.info("Ports %s" % self.networks)
+        LOG.info("Ports %s", self.networks)
 
-    def _enable_interfaces(self, index, vfs, cfg):
+    def _enable_interfaces(self, index, vfs, xml_str):
         vpath = self.ovs_properties.get("vpath", "/usr/local")
         vf = self.networks[vfs[0]]
         port_num = vf.get('port_num', 0)
-        vpci = PciAddress.parse_address(vf['vpci'].strip(), multi_line=True)
+        vpci = utils.PciAddress(vf['vpci'].strip())
         # Generate the vpci for the interfaces
         slot = index + port_num + 10
         vf['vpci'] = \
             "{}:{}:{:02x}.{}".format(vpci.domain, vpci.bus, slot, vpci.function)
-        Libvirt.add_ovs_interface(vpath, port_num, vf['vpci'], vf['mac'], str(cfg))
+        return model.Libvirt.add_ovs_interface(
+            vpath, port_num, vf['vpci'], vf['mac'], xml_str)
 
     def setup_ovs_dpdk_context(self):
         nodes = []
 
         self.configure_nics_for_ovs_dpdk()
 
-        for index, (key, vnf) in enumerate(OrderedDict(self.servers).items()):
+        for index, (key, vnf) in enumerate(collections.OrderedDict(
+                self.servers).items()):
             cfg = '/tmp/vm_ovs_%d.xml' % index
             vm_name = "vm_%d" % index
 
             # 1. Check and delete VM if already exists
-            Libvirt.check_if_vm_exists_and_delete(vm_name, self.connection)
+            model.Libvirt.check_if_vm_exists_and_delete(vm_name,
+                                                        self.connection)
+            xml_str, mac = model.Libvirt.build_vm_xml(
+                self.connection, self.vm_flavor, vm_name, index)
 
-            vcpu, mac = Libvirt.build_vm_xml(self.connection, self.vm_flavor, cfg, vm_name, index)
             # 2: Cleanup already available VMs
-            for idx, (vkey, vfs) in enumerate(OrderedDict(vnf["network_ports"]).items()):
-                if vkey == "mgmt":
-                    continue
-                self._enable_interfaces(index, vfs, cfg)
+            for vfs in [vfs for vfs_name, vfs in vnf["network_ports"].items()
+                        if vfs_name != 'mgmt']:
+                xml_str = self._enable_interfaces(index, vfs, xml_str)
 
             # copy xml to target...
+            model.Libvirt.write_file(cfg, xml_str)
             self.connection.put(cfg, cfg)
 
-            #    FIXME: launch through libvirt
+            # NOTE: launch through libvirt
             LOG.info("virsh create ...")
-            Libvirt.virsh_create_vm(self.connection, cfg)
+            model.Libvirt.virsh_create_vm(self.connection, cfg)
 
             self.vm_names.append(vm_name)