NSB: Fix standalone.model.Libvirt SR-IOV modeling
[yardstick.git] / yardstick / benchmark / contexts / standalone / model.py
1 # Copyright (c) 2016-2017 Intel Corporation
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 #      http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from __future__ import absolute_import
16 import os
17 import re
18 import time
19 import uuid
20 import random
21 import logging
22 import errno
23
24 from netaddr import IPNetwork
25 import xml.etree.ElementTree as ET
26
27 from yardstick import ssh
28 from yardstick.common.constants import YARDSTICK_ROOT_PATH
29 from yardstick.common.yaml_loader import yaml_load
30 from yardstick.network_services.utils import PciAddress
31 from yardstick.network_services.helpers.cpu import CpuSysCores
32 from yardstick.common.utils import write_file
33
34 LOG = logging.getLogger(__name__)
35
# Libvirt domain XML skeleton used to boot the standalone test VMs.  The
# {placeholders} are filled in by Libvirt.build_vm_xml(); extra <interface>
# nodes (SR-IOV hostdev / OVS-DPDK vhostuser) are appended afterwards with
# ElementTree by Libvirt.add_sriov_interfaces()/add_ovs_interface().
VM_TEMPLATE = """
<domain type="kvm">
 <name>{vm_name}</name>
  <uuid>{random_uuid}</uuid>
  <memory unit="MB">{memory}</memory>
  <currentMemory unit="MB">{memory}</currentMemory>
  <memoryBacking>
    <hugepages />
  </memoryBacking>
  <vcpu cpuset='{cpuset}'>{vcpu}</vcpu>
  <os>
    <type arch="x86_64" machine="pc-i440fx-utopic">hvm</type>
    <boot dev="hd" />
  </os>
  <features>
    <acpi />
    <apic />
    <pae />
  </features>
  <cpu mode='host-passthrough'>
    <topology cores="{cpu}" sockets="{socket}" threads="{threads}" />
    <numa>
       <cell id='0' cpus='{numa_cpus}' memory='{memory}' unit='MB' memAccess='shared'/>
    </numa>
  </cpu>
  <clock offset="utc">
    <timer name="rtc" tickpolicy="catchup" />
    <timer name="pit" tickpolicy="delay" />
    <timer name="hpet" present="no" />
  </clock>
  <on_poweroff>destroy</on_poweroff>
  <on_reboot>restart</on_reboot>
  <on_crash>restart</on_crash>
  <devices>
    <emulator>/usr/bin/kvm-spice</emulator>
    <disk device="disk" type="file">
      <driver name="qemu" type="qcow2" />
      <source file="{vm_image}"/>
      <target bus="virtio" dev="vda" />
    </disk>
    <graphics autoport="yes" listen="0.0.0.0" port="-1" type="vnc" />
    <interface type="bridge">
      <mac address='{mac_addr}'/>
      <source bridge="br-int" />
      <model type='virtio'/>
    </interface>
   </devices>
</domain>
"""
# Seconds slept between polls while waiting for a VM to answer on its
# management network (see StandaloneContextHelper.get_mgmt_ip).
WAIT_FOR_BOOT = 30
86
87
class Libvirt(object):
    """ This class handles all the libvirt updates to launch VM
    """

    @staticmethod
    def check_if_vm_exists_and_delete(vm_name, connection):
        """Destroy the domain *vm_name* if it is currently running.

        :param vm_name: libvirt domain name
        :param connection: ssh connection to the hypervisor host
        """
        cmd_template = "virsh list --name | grep -i %s"
        # grep returns status 0 when the name matches a running domain.
        status = connection.execute(cmd_template % vm_name)[0]
        if status == 0:
            LOG.info("VM '%s' is already present... destroying", vm_name)
            connection.execute("virsh destroy %s" % vm_name)

    @staticmethod
    def virsh_create_vm(connection, cfg):
        """Start a transient domain from the XML file *cfg* (remote path)."""
        err = connection.execute("virsh create %s" % cfg)[0]
        LOG.info("VM create status: %s", err)

    @staticmethod
    def virsh_destroy_vm(vm_name, connection):
        """Forcefully stop the domain *vm_name*."""
        connection.execute("virsh destroy %s" % vm_name)

    @staticmethod
    def _add_interface_address(interface, pci_address):
        """Add a PCI 'address' XML node

        <address type='pci' domain='0x0000' bus='0x00' slot='0x08'
         function='0x0'/>

        Reference: https://software.intel.com/en-us/articles/
                   configure-sr-iov-network-virtual-functions-in-linux-kvm

        :param interface: XML node to attach the <address> element to
        :param pci_address: object exposing domain/bus/slot/function strings
        :return: the created <address> element
        """
        vm_pci = ET.SubElement(interface, 'address')
        vm_pci.set('type', 'pci')
        vm_pci.set('domain', '0x{}'.format(pci_address.domain))
        vm_pci.set('bus', '0x{}'.format(pci_address.bus))
        vm_pci.set('slot', '0x{}'.format(pci_address.slot))
        vm_pci.set('function', '0x{}'.format(pci_address.function))
        return vm_pci

    @classmethod
    def add_ovs_interface(cls, vpath, port_num, vpci, vports_mac, xml):
        """Add a DPDK OVS 'interface' XML node in 'devices' node

        <devices>
            <interface type='vhostuser'>
                <mac address='00:00:00:00:00:01'/>
                <source type='unix' path='/usr/local/var/run/openvswitch/
                 dpdkvhostuser0' mode='client'/>
                <model type='virtio'/>
                <driver queues='4'>
                    <host mrg_rxbuf='off'/>
                </driver>
                <address type='pci' domain='0x0000' bus='0x00' slot='0x03'
                 function='0x0'/>
            </interface>
            ...
        </devices>

        Reference: http://docs.openvswitch.org/en/latest/topics/dpdk/
                   vhost-user/
        """
        vhost_path = ('{0}/var/run/openvswitch/dpdkvhostuser{1}'.
                      format(vpath, port_num))
        root = ET.parse(xml)
        pci_address = PciAddress(vpci.strip())
        device = root.find('devices')

        interface = ET.SubElement(device, 'interface')
        interface.set('type', 'vhostuser')
        mac = ET.SubElement(interface, 'mac')
        mac.set('address', vports_mac)

        source = ET.SubElement(interface, 'source')
        source.set('type', 'unix')
        source.set('path', vhost_path)
        source.set('mode', 'client')

        model = ET.SubElement(interface, 'model')
        model.set('type', 'virtio')

        driver = ET.SubElement(interface, 'driver')
        driver.set('queues', '4')

        host = ET.SubElement(driver, 'host')
        host.set('mrg_rxbuf', 'off')

        cls._add_interface_address(interface, pci_address)

        root.write(xml)

    @classmethod
    def add_sriov_interfaces(cls, vm_pci, vf_pci, vf_mac, xml):
        """Add a SR-IOV 'interface' XML node in 'devices' node

        <devices>
           <interface type='hostdev' managed='yes'>
             <source>
               <address type='pci' domain='0x0000' bus='0x00' slot='0x03'
                function='0x0'/>
             </source>
             <mac address='52:54:00:6d:90:02'>
             <address type='pci' domain='0x0000' bus='0x02' slot='0x04'
              function='0x1'/>
           </interface>
           ...
         </devices>

        Reference: https://access.redhat.com/documentation/en-us/
            red_hat_enterprise_linux/6/html/
            virtualization_host_configuration_and_guest_installation_guide/
            sect-virtualization_host_configuration_and_guest_installation_guide
            -sr_iov-how_sr_iov_libvirt_works
        """
        root = ET.parse(xml)
        device = root.find('devices')

        interface = ET.SubElement(device, 'interface')
        interface.set('managed', 'yes')
        interface.set('type', 'hostdev')

        mac = ET.SubElement(interface, 'mac')
        mac.set('address', vf_mac)

        # <source><address .../></source> points at the host VF PCI address.
        source = ET.SubElement(interface, 'source')
        addr = ET.SubElement(source, 'address')
        pci_address = PciAddress(vf_pci.strip())
        cls._add_interface_address(addr, pci_address)

        # The second <address> (directly under <interface>) is where the
        # device shows up inside the guest.
        pci_vm_address = PciAddress(vm_pci.strip())
        cls._add_interface_address(interface, pci_vm_address)

        root.write(xml)

    @staticmethod
    def create_snapshot_qemu(connection, index, vm_image):
        """Create a qcow2 snapshot backed by *vm_image* and return its path.

        :param index: per-VM index used to name the snapshot image
        """
        image = "/var/lib/libvirt/images/%s.qcow2" % index
        # -f: don't emit an error the first time, when no old snapshot exists
        connection.execute("rm -f %s" % image)
        qemu_template = "qemu-img create -f qcow2 -o backing_file=%s %s"
        connection.execute(qemu_template % (vm_image, image))

        return image

    @classmethod
    def build_vm_xml(cls, connection, flavor, cfg, vm_name, index):
        """Render VM_TEMPLATE for one VM and write it to local file *cfg*.

        :param flavor: dict with 'ram', 'images', 'hw_socket' and
                       'extra_specs' (hw:cpu_cores/sockets/threads)
        :return: [vcpu count, generated mac address]
        """
        memory = flavor.get('ram', '4096')
        extra_spec = flavor.get('extra_specs', {})
        cpu = extra_spec.get('hw:cpu_cores', '2')
        socket = extra_spec.get('hw:cpu_sockets', '1')
        threads = extra_spec.get('hw:cpu_threads', '2')
        vcpu = int(cpu) * int(threads)
        numa_cpus = '0-%s' % (vcpu - 1)
        hw_socket = flavor.get('hw_socket', '0')
        # Use cls rather than hard-coding Libvirt so subclasses can override.
        cpuset = cls.pin_vcpu_for_perf(connection, hw_socket)

        mac = StandaloneContextHelper.get_mac_address(0x00)
        image = cls.create_snapshot_qemu(connection, index,
                                         flavor.get("images", None))
        vm_xml = VM_TEMPLATE.format(
            vm_name=vm_name,
            random_uuid=uuid.uuid4(),
            mac_addr=mac,
            memory=memory, vcpu=vcpu, cpu=cpu,
            numa_cpus=numa_cpus,
            socket=socket, threads=threads,
            vm_image=image, cpuset=cpuset)

        write_file(cfg, vm_xml)

        return [vcpu, mac]

    @staticmethod
    def update_interrupts_hugepages_perf(connection):
        """Tune the host for passthrough: allow unsafe assigned interrupts
        and disable transparent hugepages."""
        connection.execute("echo 1 > /sys/module/kvm/parameters/allow_unsafe_assigned_interrupts")
        connection.execute("echo never > /sys/kernel/mm/transparent_hugepage/enabled")

    @classmethod
    def pin_vcpu_for_perf(cls, connection, socket='0'):
        """Return a cpuset string covering all CPUs of NUMA node *socket*.

        With hyper-threading the sibling threads are appended, e.g.
        "0-13,28-41"; without it only the physical cores, e.g. "0-13".

        Bug fix: the previous version always joined with a comma, producing
        an invalid trailing-comma cpuset ("0-13,") on non-HT hosts.
        """
        sys_obj = CpuSysCores(connection)
        soc_cpu = sys_obj.get_core_socket()
        sys_cpu = int(soc_cpu["cores_per_socket"])
        cores = "%s-%s" % (soc_cpu[socket][0], soc_cpu[socket][sys_cpu - 1])
        if int(soc_cpu["thread_per_core"]) > 1:
            threads = "%s-%s" % (soc_cpu[socket][sys_cpu], soc_cpu[socket][-1])
            return "%s,%s" % (cores, threads)
        return cores
277
278
class StandaloneContextHelper(object):
    """ This class handles all the common code for standalone
    """
    def __init__(self):
        # Path of the pod file; set by parse_pod_file().
        self.file_path = None
        super(StandaloneContextHelper, self).__init__()

    @staticmethod
    def install_req_libs(connection, extra_pkgs=None):
        """Install the packages the standalone host needs, if missing.

        :param connection: ssh connection to the host
        :param extra_pkgs: optional list of additional package names

        Bug fix: the previous signature used a mutable default argument
        (extra_pkgs=[]); None is the safe, call-compatible form.
        """
        pkgs = ["qemu-kvm", "libvirt-bin", "bridge-utils", "numactl", "fping"]
        pkgs.extend(extra_pkgs or [])
        cmd_template = "dpkg-query -W --showformat='${Status}\\n' \"%s\"|grep 'ok installed'"
        for pkg in pkgs:
            # Non-zero grep status means the package is not installed yet.
            if connection.execute(cmd_template % pkg)[0]:
                connection.execute("apt-get update")
                connection.execute("apt-get -y install %s" % pkg)

    @staticmethod
    def get_kernel_module(connection, pci, driver):
        """Return *driver*, or look it up with lspci when it is falsy."""
        if not driver:
            out = connection.execute("lspci -k -s %s" % pci)[1]
            driver = out.split("Kernel modules:").pop().strip()
        return driver

    @classmethod
    def get_nic_details(cls, connection, networks, dpdk_nic_bind):
        """Bind every non-mgmt port to its kernel driver and record the
        resulting interface name and driver in *networks*.

        :return: the updated *networks* mapping
        """
        for key, ports in networks.items():
            if key == "mgmt":
                continue

            phy_ports = ports['phy_port']
            phy_driver = ports.get('phy_driver', None)
            driver = cls.get_kernel_module(connection, phy_ports, phy_driver)

            # Make sure that ports are bound to kernel drivers e.g. i40e/ixgbe
            bind_cmd = "{dpdk_nic_bind} --force -b {driver} {port}"
            lshw_cmd = "lshw -c network -businfo | grep '{port}'"
            link_show_cmd = "ip -s link show {interface}"

            cmd = bind_cmd.format(dpdk_nic_bind=dpdk_nic_bind,
                                  driver=driver, port=ports['phy_port'])
            connection.execute(cmd)

            out = connection.execute(lshw_cmd.format(port=phy_ports))[1]
            interface = out.split()[1]

            connection.execute(link_show_cmd.format(interface=interface))

            ports.update({
                'interface': str(interface),
                'driver': driver
            })
        LOG.info(networks)

        return networks

    @staticmethod
    def get_virtual_devices(connection, pci):
        """Return {pf_pci: vf_pci} for the first VF of physical function
        *pci*, or an empty dict when no VF is found."""
        cmd = "cat /sys/bus/pci/devices/{0}/virtfn0/uevent"
        output = connection.execute(cmd.format(pci))[1]

        pattern = "PCI_SLOT_NAME=({})".format(PciAddress.PCI_PATTERN_STR)
        m = re.search(pattern, output, re.MULTILINE)

        pf_vfs = {}
        if m:
            pf_vfs = {pci: m.group(1).rstrip()}

        LOG.info("pf_vfs:\n%s", pf_vfs)

        return pf_vfs

    def read_config_file(self):
        """Read from config file"""

        with open(self.file_path) as stream:
            LOG.info("Parsing pod file: %s", self.file_path)
            cfg = yaml_load(stream)
        return cfg

    def parse_pod_file(self, file_path, nfvi_role='Sriov'):
        """Split the pod file into VNF nodes and NFVi host(s).

        :raises ValueError: when no node carries *nfvi_role*
        :return: [vnf nodes, nfvi hosts, mgmt dict of the first nfvi host]
        """
        self.file_path = file_path
        nodes = []
        nfvi_host = []
        try:
            cfg = self.read_config_file()
        except IOError as io_error:
            if io_error.errno != errno.ENOENT:
                raise
            # Fall back to a path relative to the yardstick root.
            self.file_path = os.path.join(YARDSTICK_ROOT_PATH, file_path)
            cfg = self.read_config_file()

        nodes.extend([node for node in cfg["nodes"] if str(node["role"]) != nfvi_role])
        nfvi_host.extend([node for node in cfg["nodes"] if str(node["role"]) == nfvi_role])
        if not nfvi_host:
            # Bug fix: 'raise("...")' raised a TypeError (str is not an
            # exception); raise a proper exception type instead.
            raise ValueError("Node role is other than SRIOV")

        host_mgmt = {'user': nfvi_host[0]['user'],
                     'ip': str(IPNetwork(nfvi_host[0]['ip']).ip),
                     'password': nfvi_host[0]['password'],
                     'ssh_port': nfvi_host[0].get('ssh_port', 22),
                     'key_filename': nfvi_host[0].get('key_filename')}

        return [nodes, nfvi_host, host_mgmt]

    @staticmethod
    def get_mac_address(end=0x7f):
        """Return a random locally-administered MAC '52:54:00:xx:xx:xx';
        the fourth octet is capped at *end*."""
        mac = [0x52, 0x54, 0x00,
               random.randint(0x00, end),
               random.randint(0x00, 0xff),
               random.randint(0x00, 0xff)]
        mac_address = ':'.join('%02x' % x for x in mac)
        return mac_address

    @staticmethod
    def get_mgmt_ip(connection, mac, cidr, node):
        """Poll the ARP table (after fping sweeps of *cidr*) until *mac*
        shows up, then wait for ssh; give up after 10 attempts.

        :return: the management IP string, or None on timeout
        """
        mgmtip = None
        times = 10
        while not mgmtip and times:
            connection.execute("fping -c 1 -g %s > /dev/null 2>&1" % cidr)
            out = connection.execute("ip neighbor | grep '%s'" % mac)[1]
            LOG.info("fping -c 1 -g %s > /dev/null 2>&1", cidr)
            if out.strip():
                mgmtip = str(out.split(" ")[0]).strip()
                client = ssh.SSH.from_node(node, overrides={"ip": mgmtip})
                client.wait()
                break

            time.sleep(WAIT_FOR_BOOT)  # FixMe: How to find if VM is booted?
            times = times - 1
        return mgmtip

    @classmethod
    def wait_for_vnfs_to_start(cls, connection, servers, nodes):
        """Fill in node['ip'] for every VNF once it is reachable."""
        for node in nodes:
            vnf = servers[node["name"]]
            mgmtip = vnf["network_ports"]["mgmt"]["cidr"]
            ip = cls.get_mgmt_ip(connection, node["mac"], mgmtip, node)
            if ip:
                node["ip"] = ip
        return nodes
420
421
class Server(object):
    """ This class handles geting vnf nodes
    """

    @staticmethod
    def build_vnf_interfaces(vnf, ports):
        """Build the interface description map for one VNF.

        :param vnf: server definition with a 'network_ports' mapping
        :param ports: per-network port info (vpci/driver/mac/cidr)
        :return: (mgmt_ip or None, interfaces dict keyed by network name)
        """
        interfaces = {}
        # Bug fix: initialize so a VNF without a 'mgmt' port no longer
        # raises UnboundLocalError at the return statement.
        mgmtip = None
        index = 0

        for key, vfs in vnf["network_ports"].items():
            if key == "mgmt":
                mgmtip = str(IPNetwork(vfs['cidr']).ip)
                continue

            vf = ports[vfs[0]]
            ip = IPNetwork(vf['cidr'])
            interfaces.update({
                key: {
                    'vpci': vf['vpci'],
                    # VNFs inside the guest see the VF driver variant.
                    'driver': "%svf" % vf['driver'],
                    'local_mac': vf['mac'],
                    'dpdk_port_num': index,
                    'local_ip': str(ip.ip),
                    'netmask': str(ip.netmask)
                    },
            })
            index += 1

        return mgmtip, interfaces

    @classmethod
    def generate_vnf_instance(cls, flavor, ports, ip, key, vnf, mac):
        """Build the node dict yardstick uses to reach one VNF instance.

        Optional 'key_filename'/'password' entries are copied from the
        flavor only when present.
        """
        mgmtip, interfaces = cls.build_vnf_interfaces(vnf, ports)

        result = {
            "ip": mgmtip,
            "mac": mac,
            "host": ip,
            "user": flavor.get('user', 'root'),
            "interfaces": interfaces,
            "routing_table": [],
            # empty IPv6 routing table
            "nd_route_tbl": [],
            "name": key, "role": key
        }

        try:
            result['key_filename'] = flavor['key_filename']
        except KeyError:
            pass

        try:
            result['password'] = flavor['password']
        except KeyError:
            pass
        LOG.info(result)
        return result
479
480
class OvsDeploy(object):
    """ This class handles deploy of ovs dpdk
    Configuration: ovs_dpdk
    """

    OVS_DEPLOY_SCRIPT = "ovs_deploy.bash"

    def __init__(self, connection, bin_path, ovs_properties):
        # Remote ssh connection, remote bin directory and the 'ovs_dpdk'
        # properties from the context configuration.
        self.connection = connection
        self.bin_path = bin_path
        self.ovs_properties = ovs_properties

    def prerequisite(self):
        # Build-time dependencies for compiling OVS/DPDK from source.
        required_pkgs = ["git", "build-essential", "pkg-config", "automake",
                         "autotools-dev", "libltdl-dev", "cmake", "libnuma-dev",
                         "libpcap-dev"]
        StandaloneContextHelper.install_req_libs(self.connection, required_pkgs)

    def ovs_deploy(self):
        # Locate the deploy script shipped with yardstick; silently skip
        # when it is not present (same behavior as before).
        local_script = os.path.join(YARDSTICK_ROOT_PATH,
                                    "yardstick/resources/scripts/install/",
                                    self.OVS_DEPLOY_SCRIPT)
        if not os.path.isfile(local_script):
            return

        self.prerequisite()
        remote_ovs_deploy = os.path.join(self.bin_path, self.OVS_DEPLOY_SCRIPT)
        LOG.info(remote_ovs_deploy)
        self.connection.put(local_script, remote_ovs_deploy)

        http_proxy = os.environ.get('http_proxy', '')
        version_info = self.ovs_properties.get("version", {})
        ovs = version_info.get("ovs", "2.6.0")
        dpdk = version_info.get("dpdk", "16.11.1")

        cmd = "sudo -E %s --ovs='%s' --dpdk='%s' -p='%s'" % (remote_ovs_deploy,
                                                             ovs, dpdk, http_proxy)
        self.connection.execute(cmd)