Merge "Remove references to "dpdk_nic_bind" utility"
[yardstick.git] / yardstick / benchmark / contexts / standalone / model.py
# Copyright (c) 2016-2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
import os
import re
import time
import uuid
import random
import logging
import errno

from netaddr import IPNetwork
import xml.etree.ElementTree as ET

from yardstick import ssh
from yardstick.common.constants import YARDSTICK_ROOT_PATH
from yardstick.common.yaml_loader import yaml_load
from yardstick.network_services.utils import PciAddress
from yardstick.network_services.helpers.cpu import CpuSysCores
from yardstick.common.utils import write_file

LOG = logging.getLogger(__name__)

VM_TEMPLATE = """
<domain type="kvm">
  <name>{vm_name}</name>
  <uuid>{random_uuid}</uuid>
  <memory unit="MB">{memory}</memory>
  <currentMemory unit="MB">{memory}</currentMemory>
  <memoryBacking>
    <hugepages />
  </memoryBacking>
  <vcpu cpuset='{cpuset}'>{vcpu}</vcpu>
 {cputune}
  <os>
    <type arch="x86_64" machine="pc-i440fx-utopic">hvm</type>
    <boot dev="hd" />
  </os>
  <features>
    <acpi />
    <apic />
    <pae />
  </features>
  <cpu mode='host-passthrough'>
    <topology cores="{cpu}" sockets="{socket}" threads="{threads}" />
    <numa>
       <cell id='0' cpus='{numa_cpus}' memory='{memory}' unit='MB' memAccess='shared'/>
    </numa>
  </cpu>
  <clock offset="utc">
    <timer name="rtc" tickpolicy="catchup" />
    <timer name="pit" tickpolicy="delay" />
    <timer name="hpet" present="no" />
  </clock>
  <on_poweroff>destroy</on_poweroff>
  <on_reboot>restart</on_reboot>
  <on_crash>restart</on_crash>
  <devices>
    <emulator>/usr/bin/kvm-spice</emulator>
    <disk device="disk" type="file">
      <driver name="qemu" type="qcow2" />
      <source file="{vm_image}"/>
      <target bus="virtio" dev="vda" />
    </disk>
    <graphics autoport="yes" listen="0.0.0.0" port="-1" type="vnc" />
    <interface type="bridge">
      <mac address='{mac_addr}'/>
      <source bridge="br-int" />
      <model type='virtio'/>
    </interface>
    <serial type='pty'>
      <target port='0'/>
    </serial>
    <console type='pty'>
      <target type='serial' port='0'/>
    </console>
  </devices>
</domain>
"""
WAIT_FOR_BOOT = 30


class Libvirt(object):
    """ This class handles all the libvirt updates to launch a VM
    """

    @staticmethod
    def check_if_vm_exists_and_delete(vm_name, connection):
        cmd_template = "virsh list --name | grep -i %s"
        status = connection.execute(cmd_template % vm_name)[0]
        if status == 0:
            LOG.info("VM '%s' is already present... destroying", vm_name)
            connection.execute("virsh destroy %s" % vm_name)

    @staticmethod
    def virsh_create_vm(connection, cfg):
        err = connection.execute("virsh create %s" % cfg)[0]
        LOG.info("VM create status: %s", err)

    @staticmethod
    def virsh_destroy_vm(vm_name, connection):
        connection.execute("virsh destroy %s" % vm_name)

    @staticmethod
    def _add_interface_address(interface, pci_address):
        """Add a PCI 'address' XML node

        <address type='pci' domain='0x0000' bus='0x00' slot='0x08'
         function='0x0'/>

        Reference: https://software.intel.com/en-us/articles/
                   configure-sr-iov-network-virtual-functions-in-linux-kvm
        """
        vm_pci = ET.SubElement(interface, 'address')
        vm_pci.set('type', 'pci')
        vm_pci.set('domain', '0x{}'.format(pci_address.domain))
        vm_pci.set('bus', '0x{}'.format(pci_address.bus))
        vm_pci.set('slot', '0x{}'.format(pci_address.slot))
        vm_pci.set('function', '0x{}'.format(pci_address.function))
        return vm_pci

    @classmethod
    def add_ovs_interface(cls, vpath, port_num, vpci, vports_mac, xml):
        """Add a DPDK OVS 'interface' XML node in 'devices' node

        <devices>
            <interface type='vhostuser'>
                <mac address='00:00:00:00:00:01'/>
                <source type='unix' path='/usr/local/var/run/openvswitch/
                 dpdkvhostuser0' mode='client'/>
                <model type='virtio'/>
                <driver queues='4'>
                    <host mrg_rxbuf='off'/>
                </driver>
                <address type='pci' domain='0x0000' bus='0x00' slot='0x03'
                 function='0x0'/>
            </interface>
            ...
        </devices>

        Reference: http://docs.openvswitch.org/en/latest/topics/dpdk/
                   vhost-user/
        """

        vhost_path = ('{0}/var/run/openvswitch/dpdkvhostuser{1}'.
                      format(vpath, port_num))
        root = ET.parse(xml)
        pci_address = PciAddress(vpci.strip())
        device = root.find('devices')

        interface = ET.SubElement(device, 'interface')
        interface.set('type', 'vhostuser')
        mac = ET.SubElement(interface, 'mac')
        mac.set('address', vports_mac)

        source = ET.SubElement(interface, 'source')
        source.set('type', 'unix')
        source.set('path', vhost_path)
        source.set('mode', 'client')

        model = ET.SubElement(interface, 'model')
        model.set('type', 'virtio')

        driver = ET.SubElement(interface, 'driver')
        driver.set('queues', '4')

        host = ET.SubElement(driver, 'host')
        host.set('mrg_rxbuf', 'off')

        cls._add_interface_address(interface, pci_address)

        root.write(xml)

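    # Illustrative usage sketch; the vpath, PCI address, MAC and XML path
    # below are placeholders, not values taken from a real deployment:
    #
    #     Libvirt.add_ovs_interface(
    #         vpath='/usr/local', port_num=0, vpci='0000:00:04.0',
    #         vports_mac='00:00:00:00:00:01', xml='/tmp/vm_ovs_0.xml')
    #
    # The call parses the domain XML file, appends a 'vhostuser' <interface>
    # under <devices> pointing at
    # /usr/local/var/run/openvswitch/dpdkvhostuser0, and rewrites the file
    # in place.
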
    @classmethod
    def add_sriov_interfaces(cls, vm_pci, vf_pci, vf_mac, xml):
        """Add a SR-IOV 'interface' XML node in 'devices' node

        <devices>
           <interface type='hostdev' managed='yes'>
             <source>
               <address type='pci' domain='0x0000' bus='0x00' slot='0x03'
                function='0x0'/>
             </source>
             <mac address='52:54:00:6d:90:02'/>
             <address type='pci' domain='0x0000' bus='0x02' slot='0x04'
              function='0x1'/>
           </interface>
           ...
         </devices>

        Reference: https://access.redhat.com/documentation/en-us/
            red_hat_enterprise_linux/6/html/
            virtualization_host_configuration_and_guest_installation_guide/
            sect-virtualization_host_configuration_and_guest_installation_guide
            -sr_iov-how_sr_iov_libvirt_works
        """

        root = ET.parse(xml)
        device = root.find('devices')

        interface = ET.SubElement(device, 'interface')
        interface.set('managed', 'yes')
        interface.set('type', 'hostdev')

        mac = ET.SubElement(interface, 'mac')
        mac.set('address', vf_mac)

        source = ET.SubElement(interface, 'source')
        pci_address = PciAddress(vf_pci.strip())
        cls._add_interface_address(source, pci_address)

        pci_vm_address = PciAddress(vm_pci.strip())
        cls._add_interface_address(interface, pci_vm_address)

        root.write(xml)

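    # Illustrative sketch; the VF/VM PCI addresses, MAC and XML path are
    # placeholders:
    #
    #     Libvirt.add_sriov_interfaces(
    #         vm_pci='0000:00:07.0', vf_pci='0000:06:02.0',
    #         vf_mac='52:54:00:6d:90:02', xml='/tmp/vm_sriov_0.xml')
    #
    # The VF at vf_pci is passed through as a 'hostdev' interface and shows
    # up inside the guest at the PCI address given by vm_pci.
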
    @staticmethod
    def create_snapshot_qemu(connection, index, vm_image):
        # build snapshot image
        image = "/var/lib/libvirt/images/%s.qcow2" % index
        connection.execute("rm %s" % image)
        qemu_template = "qemu-img create -f qcow2 -o backing_file=%s %s"
        connection.execute(qemu_template % (vm_image, image))

        return image

    @classmethod
    def build_vm_xml(cls, connection, flavor, cfg, vm_name, index):
        memory = flavor.get('ram', '4096')
        extra_spec = flavor.get('extra_specs', {})
        cpu = extra_spec.get('hw:cpu_cores', '2')
        socket = extra_spec.get('hw:cpu_sockets', '1')
        threads = extra_spec.get('hw:cpu_threads', '2')
        vcpu = int(cpu) * int(threads)
        numa_cpus = '0-%s' % (vcpu - 1)
        hw_socket = flavor.get('hw_socket', '0')
        cpuset = Libvirt.pin_vcpu_for_perf(connection, hw_socket)

        cputune = extra_spec.get('cputune', '')
        mac = StandaloneContextHelper.get_mac_address(0x00)
        image = cls.create_snapshot_qemu(connection, index,
                                         flavor.get("images", None))
        vm_xml = VM_TEMPLATE.format(
            vm_name=vm_name,
            random_uuid=uuid.uuid4(),
            mac_addr=mac,
            memory=memory, vcpu=vcpu, cpu=cpu,
            numa_cpus=numa_cpus,
            socket=socket, threads=threads,
            vm_image=image, cpuset=cpuset, cputune=cputune)

        write_file(cfg, vm_xml)

        return [vcpu, mac]

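    # A minimal flavor dictionary accepted by build_vm_xml(); only the key
    # names come from the code above, every value is just an example and
    # 'conn' is assumed to be an established ssh connection:
    #
    #     flavor = {
    #         'ram': '4096',
    #         'images': '/var/lib/libvirt/images/yardstick-image.img',
    #         'hw_socket': '0',
    #         'extra_specs': {
    #             'hw:cpu_cores': '2',
    #             'hw:cpu_sockets': '1',
    #             'hw:cpu_threads': '2',
    #         },
    #     }
    #     vcpu, mac = Libvirt.build_vm_xml(conn, flavor, '/tmp/vm_0.xml',
    #                                      'vm_0', 0)
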
    @staticmethod
    def update_interrupts_hugepages_perf(connection):
        connection.execute("echo 1 > /sys/module/kvm/parameters/allow_unsafe_assigned_interrupts")
        connection.execute("echo never > /sys/kernel/mm/transparent_hugepage/enabled")

    @classmethod
    def pin_vcpu_for_perf(cls, connection, socket='0'):
        threads = ""
        sys_obj = CpuSysCores(connection)
        soc_cpu = sys_obj.get_core_socket()
        sys_cpu = int(soc_cpu["cores_per_socket"])
        socket = str(socket)
        cores = "%s-%s" % (soc_cpu[socket][0], soc_cpu[socket][sys_cpu - 1])
        if int(soc_cpu["thread_per_core"]) > 1:
            threads = "%s-%s" % (soc_cpu[socket][sys_cpu], soc_cpu[socket][-1])
        # Avoid a trailing comma in the cpuset when the host has no
        # hyper-threading (threads stays empty).
        cpuset = "%s,%s" % (cores, threads) if threads else cores
        return cpuset


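# A typical VM launch sequence built from the Libvirt helpers above (sketch
# only; 'conn', 'flavor' and the file names are assumptions for the example):
#
#     Libvirt.check_if_vm_exists_and_delete('vm_0', conn)
#     vcpu, mac = Libvirt.build_vm_xml(conn, flavor, '/tmp/vm_0.xml', 'vm_0', 0)
#     Libvirt.update_interrupts_hugepages_perf(conn)
#     Libvirt.virsh_create_vm(conn, '/tmp/vm_0.xml')
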
class StandaloneContextHelper(object):
    """ This class handles all the common code for standalone contexts
    """
    def __init__(self):
        self.file_path = None
        super(StandaloneContextHelper, self).__init__()

    @staticmethod
    def install_req_libs(connection, extra_pkgs=None):
        extra_pkgs = extra_pkgs or []
        pkgs = ["qemu-kvm", "libvirt-bin", "bridge-utils", "numactl", "fping"]
        pkgs.extend(extra_pkgs)
        cmd_template = "dpkg-query -W --showformat='${Status}\\n' \"%s\"|grep 'ok installed'"
        for pkg in pkgs:
            if connection.execute(cmd_template % pkg)[0]:
                connection.execute("apt-get update")
                connection.execute("apt-get -y install %s" % pkg)

    @staticmethod
    def get_kernel_module(connection, pci, driver):
        if not driver:
            out = connection.execute("lspci -k -s %s" % pci)[1]
            driver = out.split("Kernel modules:").pop().strip()
        return driver

    @classmethod
    def get_nic_details(cls, connection, networks, dpdk_devbind):
        for key, ports in networks.items():
            if key == "mgmt":
                continue

            phy_ports = ports['phy_port']
            phy_driver = ports.get('phy_driver', None)
            driver = cls.get_kernel_module(connection, phy_ports, phy_driver)

            # Make sure that ports are bound to kernel drivers e.g. i40e/ixgbe
            bind_cmd = "{dpdk_devbind} --force -b {driver} {port}"
            lshw_cmd = "lshw -c network -businfo | grep '{port}'"
            link_show_cmd = "ip -s link show {interface}"

            cmd = bind_cmd.format(dpdk_devbind=dpdk_devbind,
                                  driver=driver, port=ports['phy_port'])
            connection.execute(cmd)

            out = connection.execute(lshw_cmd.format(port=phy_ports))[1]
            interface = out.split()[1]

            connection.execute(link_show_cmd.format(interface=interface))

            ports.update({
                'interface': str(interface),
                'driver': driver
            })
        LOG.info(networks)

        return networks

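    # Shape of the 'networks' argument before and after get_nic_details();
    # the PCI addresses, driver name and devbind path are placeholders:
    #
    #     networks = {
    #         'mgmt': {'cidr': '10.10.10.10/24'},
    #         'xe0': {'phy_port': '0000:06:00.0', 'phy_driver': 'i40e'},
    #     }
    #     networks = StandaloneContextHelper.get_nic_details(
    #         conn, networks, '/opt/nsb_bin/dpdk-devbind.py')
    #
    # On return every non-mgmt entry additionally carries the kernel
    # 'interface' name and the bound 'driver'.
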
    @staticmethod
    def get_virtual_devices(connection, pci):
        cmd = "cat /sys/bus/pci/devices/{0}/virtfn0/uevent"
        output = connection.execute(cmd.format(pci))[1]

        pattern = "PCI_SLOT_NAME=({})".format(PciAddress.PCI_PATTERN_STR)
        m = re.search(pattern, output, re.MULTILINE)

        pf_vfs = {}
        if m:
            pf_vfs = {pci: m.group(1).rstrip()}

        LOG.info("pf_vfs:\n%s", pf_vfs)

        return pf_vfs

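    # Illustrative result, mapping the PF to its first VF; both addresses
    # below are made up for the example:
    #
    #     StandaloneContextHelper.get_virtual_devices(conn, '0000:06:00.0')
    #     # -> {'0000:06:00.0': '0000:06:02.0'}
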
    def read_config_file(self):
        """Read from config file"""

        with open(self.file_path) as stream:
            LOG.info("Parsing pod file: %s", self.file_path)
            cfg = yaml_load(stream)
        return cfg

    def parse_pod_file(self, file_path, nfvi_role='Sriov'):
        self.file_path = file_path
        nodes = []
        nfvi_host = []
        try:
            cfg = self.read_config_file()
        except IOError as io_error:
            if io_error.errno != errno.ENOENT:
                raise
            self.file_path = os.path.join(YARDSTICK_ROOT_PATH, file_path)
            cfg = self.read_config_file()

        nodes.extend([node for node in cfg["nodes"] if str(node["role"]) != nfvi_role])
        nfvi_host.extend([node for node in cfg["nodes"] if str(node["role"]) == nfvi_role])
        if not nfvi_host:
            raise ValueError("Node role is other than SRIOV")

        host_mgmt = {'user': nfvi_host[0]['user'],
                     'ip': str(IPNetwork(nfvi_host[0]['ip']).ip),
                     'password': nfvi_host[0]['password'],
                     'ssh_port': nfvi_host[0].get('ssh_port', 22),
                     'key_filename': nfvi_host[0].get('key_filename')}

        return [nodes, nfvi_host, host_mgmt]

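    # Sketch of the pod file entries parse_pod_file() expects; names,
    # addresses and credentials are placeholders:
    #
    #     nodes:
    #       - name: trafficgen_1
    #         role: TrafficGen
    #         ip: 10.10.10.10
    #         user: root
    #         password: password
    #       - name: sriov
    #         role: Sriov
    #         ip: 10.10.10.11/24
    #         user: root
    #         password: password
    #
    # Nodes whose role matches nfvi_role become the NFVi host(s); all other
    # nodes are returned unchanged, together with the mgmt credentials of
    # the first NFVi host.
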
    @staticmethod
    def get_mac_address(end=0x7f):
        mac = [0x52, 0x54, 0x00,
               random.randint(0x00, end),
               random.randint(0x00, 0xff),
               random.randint(0x00, 0xff)]
        mac_address = ':'.join('%02x' % x for x in mac)
        return mac_address

    @staticmethod
    def get_mgmt_ip(connection, mac, cidr, node):
        mgmtip = None
        times = 10
        while not mgmtip and times:
            connection.execute("fping -c 1 -g %s > /dev/null 2>&1" % cidr)
            out = connection.execute("ip neighbor | grep '%s'" % mac)[1]
            LOG.info("fping -c 1 -g %s > /dev/null 2>&1", cidr)
            if out.strip():
                mgmtip = str(out.split(" ")[0]).strip()
                client = ssh.SSH.from_node(node, overrides={"ip": mgmtip})
                client.wait()
                break

            time.sleep(WAIT_FOR_BOOT)  # FixMe: How to find if VM is booted?
            times = times - 1
        return mgmtip

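    # The management IP of a freshly booted VM is discovered by sweeping the
    # mgmt CIDR with fping and then looking the VM's MAC address up in the
    # host's neighbour table. Sketch (MAC, CIDR and 'node' are placeholders):
    #
    #     ip = StandaloneContextHelper.get_mgmt_ip(
    #         conn, '52:54:00:00:00:01', '10.10.10.1/24', node)
    #
    # get_mgmt_ip() retries up to 10 times, sleeping WAIT_FOR_BOOT seconds
    # between attempts, and returns None if the VM never shows up.
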
    @classmethod
    def wait_for_vnfs_to_start(cls, connection, servers, nodes):
        for node in nodes:
            vnf = servers[node["name"]]
            mgmtip = vnf["network_ports"]["mgmt"]["cidr"]
            ip = cls.get_mgmt_ip(connection, node["mac"], mgmtip, node)
            if ip:
                node["ip"] = ip
        return nodes


class Server(object):
    """ This class handles getting the VNF nodes
    """

    @staticmethod
    def build_vnf_interfaces(vnf, ports):
        interfaces = {}
        index = 0

        for key, vfs in vnf["network_ports"].items():
            if key == "mgmt":
                mgmtip = str(IPNetwork(vfs['cidr']).ip)
                continue

            vf = ports[vfs[0]]
            ip = IPNetwork(vf['cidr'])
            interfaces.update({
                key: {
                    'vpci': vf['vpci'],
                    'driver': "%svf" % vf['driver'],
                    'local_mac': vf['mac'],
                    'dpdk_port_num': index,
                    'local_ip': str(ip.ip),
                    'netmask': str(ip.netmask)
                    },
            })
            index = index + 1

        return mgmtip, interfaces

    @classmethod
    def generate_vnf_instance(cls, flavor, ports, ip, key, vnf, mac):
        mgmtip, interfaces = cls.build_vnf_interfaces(vnf, ports)

        result = {
            "ip": mgmtip,
            "mac": mac,
            "host": ip,
            "user": flavor.get('user', 'root'),
            "interfaces": interfaces,
            "routing_table": [],
            # empty IPv6 routing table
            "nd_route_tbl": [],
            "name": key, "role": key
        }

        try:
            result['key_filename'] = flavor['key_filename']
        except KeyError:
            pass

        try:
            result['password'] = flavor['password']
        except KeyError:
            pass
        LOG.info(result)
        return result


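# Sketch of the per-server configuration consumed by Server; the key names
# come from build_vnf_interfaces(), the values are placeholders:
#
#     vnf = {
#         'network_ports': {
#             'mgmt': {'cidr': '10.10.10.10/24'},
#             'xe0': ['uplink_0'],
#             'xe1': ['downlink_0'],
#         },
#     }
#     ports = {
#         'uplink_0': {'vpci': '0000:00:07.0', 'driver': 'i40e',
#                      'mac': '52:54:00:00:00:01', 'cidr': '172.16.100.10/24'},
#         'downlink_0': {'vpci': '0000:00:08.0', 'driver': 'i40e',
#                        'mac': '52:54:00:00:00:02', 'cidr': '172.16.40.10/24'},
#     }
#     mgmt_ip, interfaces = Server.build_vnf_interfaces(vnf, ports)
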
class OvsDeploy(object):
    """ This class handles the deployment of OVS with DPDK
    Configuration: ovs_dpdk
    """

    OVS_DEPLOY_SCRIPT = "ovs_deploy.bash"

    def __init__(self, connection, bin_path, ovs_properties):
        self.connection = connection
        self.bin_path = bin_path
        self.ovs_properties = ovs_properties

    def prerequisite(self):
        pkgs = ["git", "build-essential", "pkg-config", "automake",
                "autotools-dev", "libltdl-dev", "cmake", "libnuma-dev",
                "libpcap-dev"]
        StandaloneContextHelper.install_req_libs(self.connection, pkgs)

    def ovs_deploy(self):
        ovs_deploy = os.path.join(YARDSTICK_ROOT_PATH,
                                  "yardstick/resources/scripts/install/",
                                  self.OVS_DEPLOY_SCRIPT)
        if os.path.isfile(ovs_deploy):
            self.prerequisite()
            remote_ovs_deploy = os.path.join(self.bin_path, self.OVS_DEPLOY_SCRIPT)
            LOG.info(remote_ovs_deploy)
            self.connection.put(ovs_deploy, remote_ovs_deploy)

            http_proxy = os.environ.get('http_proxy', '')
            ovs_details = self.ovs_properties.get("version", {})
            ovs = ovs_details.get("ovs", "2.6.0")
            dpdk = ovs_details.get("dpdk", "16.11.1")

            cmd = "sudo -E %s --ovs='%s' --dpdk='%s' -p='%s'" % (remote_ovs_deploy,
                                                                 ovs, dpdk, http_proxy)
            self.connection.execute(cmd)
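
# Illustrative sketch of deploying OVS/DPDK on the NFVi host; 'conn', the bin
# path and the property values are placeholders (the versions shown are the
# defaults used above):
#
#     ovs_properties = {'version': {'ovs': '2.6.0', 'dpdk': '16.11.1'}}
#     deployer = OvsDeploy(conn, '/opt/nsb_bin', ovs_properties)
#     deployer.ovs_deploy()
#
# ovs_deploy() copies ovs_deploy.bash to the host and runs it with the
# requested OVS and DPDK versions (and the local http_proxy, if set).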