Merge "Remove "flake8" and "pep8" from test-requirements"
[yardstick.git] / yardstick / benchmark / contexts / standalone / model.py
1 # Copyright (c) 2016-2017 Intel Corporation
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 #      http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from __future__ import absolute_import
16 import os
17 import re
18 import time
19 import uuid
20 import random
21 import logging
22 import errno
23
24 from netaddr import IPNetwork
25 import xml.etree.ElementTree as ET
26
27 from yardstick import ssh
28 from yardstick.common.constants import YARDSTICK_ROOT_PATH
29 from yardstick.common.yaml_loader import yaml_load
30 from yardstick.network_services.utils import PciAddress
31 from yardstick.network_services.helpers.cpu import CpuSysCores
32 from yardstick.common.utils import write_file
33
34 LOG = logging.getLogger(__name__)
35
# Libvirt domain XML template rendered by Libvirt.build_vm_xml() via
# str.format().  Placeholders: vm_name, random_uuid, memory (MB), vcpu,
# cpuset, cputune, cpu (cores), socket, threads, numa_cpus, vm_image,
# mac_addr.  The bridge interface attaches the guest to "br-int" for
# management traffic; data-plane interfaces (SR-IOV / vhost-user) are
# injected later into the <devices> node by add_sriov_interfaces() /
# add_ovs_interface().
VM_TEMPLATE = """
<domain type="kvm">
 <name>{vm_name}</name>
  <uuid>{random_uuid}</uuid>
  <memory unit="MB">{memory}</memory>
  <currentMemory unit="MB">{memory}</currentMemory>
  <memoryBacking>
    <hugepages />
  </memoryBacking>
  <vcpu cpuset='{cpuset}'>{vcpu}</vcpu>
 {cputune}
  <os>
    <type arch="x86_64" machine="pc-i440fx-utopic">hvm</type>
    <boot dev="hd" />
  </os>
  <features>
    <acpi />
    <apic />
    <pae />
  </features>
  <cpu mode='host-passthrough'>
    <topology cores="{cpu}" sockets="{socket}" threads="{threads}" />
    <numa>
       <cell id='0' cpus='{numa_cpus}' memory='{memory}' unit='MB' memAccess='shared'/>
    </numa>
  </cpu>
  <clock offset="utc">
    <timer name="rtc" tickpolicy="catchup" />
    <timer name="pit" tickpolicy="delay" />
    <timer name="hpet" present="no" />
  </clock>
  <on_poweroff>destroy</on_poweroff>
  <on_reboot>restart</on_reboot>
  <on_crash>restart</on_crash>
  <devices>
    <emulator>/usr/bin/kvm-spice</emulator>
    <disk device="disk" type="file">
      <driver name="qemu" type="qcow2" />
      <source file="{vm_image}"/>
      <target bus="virtio" dev="vda" />
    </disk>
    <graphics autoport="yes" listen="0.0.0.0" port="-1" type="vnc" />
    <interface type="bridge">
      <mac address='{mac_addr}'/>
      <source bridge="br-int" />
      <model type='virtio'/>
    </interface>
   </devices>
</domain>
"""
# Seconds to sleep between reachability probes while waiting for a VM to
# boot (see StandaloneContextHelper.get_mgmt_ip()).
WAIT_FOR_BOOT = 30
87
88
class Libvirt(object):
    """ This class handles all the libvirt updates to launch VM

    Every method runs its commands through an ssh-like ``connection``
    object exposing ``execute(cmd) -> (status, stdout, stderr)``;
    nothing is executed on the local host.
    """

    @staticmethod
    def check_if_vm_exists_and_delete(vm_name, connection):
        """Destroy the domain ``vm_name`` on the remote host if present.

        NOTE(review): ``grep -i`` is a case-insensitive *substring*
        match, so another domain whose name merely contains ``vm_name``
        would also trigger the destroy -- confirm callers always use
        unique, full VM names.
        """
        cmd_template = "virsh list --name | grep -i %s"
        status = connection.execute(cmd_template % vm_name)[0]
        if status == 0:
            LOG.info("VM '%s' is already present... destroying", vm_name)
            connection.execute("virsh destroy %s" % vm_name)

    @staticmethod
    def virsh_create_vm(connection, cfg):
        """Start a transient domain from the XML definition file ``cfg``."""
        err = connection.execute("virsh create %s" % cfg)[0]
        LOG.info("VM create status: %s", err)

    @staticmethod
    def virsh_destroy_vm(vm_name, connection):
        """Forcefully stop (destroy) the named domain.

        The command status is deliberately ignored (best effort).
        """
        connection.execute("virsh destroy %s" % vm_name)

    @staticmethod
    def _add_interface_address(interface, pci_address):
        """Add a PCI 'address' XML node

        <address type='pci' domain='0x0000' bus='0x00' slot='0x08'
         function='0x0'/>

        Reference: https://software.intel.com/en-us/articles/
                   configure-sr-iov-network-virtual-functions-in-linux-kvm
        """
        vm_pci = ET.SubElement(interface, 'address')
        vm_pci.set('type', 'pci')
        vm_pci.set('domain', '0x{}'.format(pci_address.domain))
        vm_pci.set('bus', '0x{}'.format(pci_address.bus))
        vm_pci.set('slot', '0x{}'.format(pci_address.slot))
        vm_pci.set('function', '0x{}'.format(pci_address.function))
        return vm_pci

    @classmethod
    def add_ovs_interface(cls, vpath, port_num, vpci, vports_mac, xml):
        """Add a DPDK OVS 'interface' XML node in 'devices' node

        <devices>
            <interface type='vhostuser'>
                <mac address='00:00:00:00:00:01'/>
                <source type='unix' path='/usr/local/var/run/openvswitch/
                 dpdkvhostuser0' mode='client'/>
                <model type='virtio'/>
                <driver queues='4'>
                    <host mrg_rxbuf='off'/>
                </driver>
                <address type='pci' domain='0x0000' bus='0x00' slot='0x03'
                 function='0x0'/>
            </interface>
            ...
        </devices>

        Reference: http://docs.openvswitch.org/en/latest/topics/dpdk/
                   vhost-user/
        """

        vhost_path = ('{0}/var/run/openvswitch/dpdkvhostuser{1}'.
                      format(vpath, port_num))
        root = ET.parse(xml)
        pci_address = PciAddress(vpci.strip())
        device = root.find('devices')

        interface = ET.SubElement(device, 'interface')
        interface.set('type', 'vhostuser')
        mac = ET.SubElement(interface, 'mac')
        mac.set('address', vports_mac)

        source = ET.SubElement(interface, 'source')
        source.set('type', 'unix')
        source.set('path', vhost_path)
        source.set('mode', 'client')

        model = ET.SubElement(interface, 'model')
        model.set('type', 'virtio')

        driver = ET.SubElement(interface, 'driver')
        driver.set('queues', '4')

        host = ET.SubElement(driver, 'host')
        host.set('mrg_rxbuf', 'off')

        # guest-side PCI address of the new vhost-user NIC
        cls._add_interface_address(interface, pci_address)

        root.write(xml)

    @classmethod
    def add_sriov_interfaces(cls, vm_pci, vf_pci, vf_mac, xml):
        """Add a SR-IOV 'interface' XML node in 'devices' node

        <devices>
           <interface type='hostdev' managed='yes'>
             <source>
               <address type='pci' domain='0x0000' bus='0x00' slot='0x03'
                function='0x0'/>
             </source>
             <mac address='52:54:00:6d:90:02'>
             <address type='pci' domain='0x0000' bus='0x02' slot='0x04'
              function='0x1'/>
           </interface>
           ...
         </devices>

        Reference: https://access.redhat.com/documentation/en-us/
            red_hat_enterprise_linux/6/html/
            virtualization_host_configuration_and_guest_installation_guide/
            sect-virtualization_host_configuration_and_guest_installation_guide
            -sr_iov-how_sr_iov_libvirt_works
        """

        root = ET.parse(xml)
        device = root.find('devices')

        interface = ET.SubElement(device, 'interface')
        interface.set('managed', 'yes')
        interface.set('type', 'hostdev')

        mac = ET.SubElement(interface, 'mac')
        mac.set('address', vf_mac)

        # host-side address of the virtual function being passed through
        source = ET.SubElement(interface, 'source')
        addr = ET.SubElement(source, 'address')
        pci_address = PciAddress(vf_pci.strip())
        cls._add_interface_address(addr, pci_address)

        # guest-side PCI address the VF appears at inside the VM
        pci_vm_address = PciAddress(vm_pci.strip())
        cls._add_interface_address(interface, pci_vm_address)

        root.write(xml)

    @staticmethod
    def create_snapshot_qemu(connection, index, vm_image):
        """Create a copy-on-write snapshot image backed by ``vm_image``.

        :param index: VM index, used to name the snapshot file.
        :returns: remote path of the snapshot image.
        """
        image = "/var/lib/libvirt/images/%s.qcow2" % index
        # remove any stale snapshot first; a failing rm is harmless
        connection.execute("rm %s" % image)
        qemu_template = "qemu-img create -f qcow2 -o backing_file=%s %s"
        connection.execute(qemu_template % (vm_image, image))

        return image

    @classmethod
    def build_vm_xml(cls, connection, flavor, cfg, vm_name, index):
        """Render VM_TEMPLATE from ``flavor`` and write it to ``cfg``.

        :returns: [vcpu count, generated MAC address of the mgmt NIC].
        """
        memory = flavor.get('ram', '4096')
        extra_spec = flavor.get('extra_specs', {})
        cpu = extra_spec.get('hw:cpu_cores', '2')
        socket = extra_spec.get('hw:cpu_sockets', '1')
        threads = extra_spec.get('hw:cpu_threads', '2')
        vcpu = int(cpu) * int(threads)
        numa_cpus = '0-%s' % (vcpu - 1)
        hw_socket = flavor.get('hw_socket', '0')
        cpuset = Libvirt.pin_vcpu_for_perf(connection, hw_socket)

        cputune = extra_spec.get('cputune', '')
        mac = StandaloneContextHelper.get_mac_address(0x00)
        image = cls.create_snapshot_qemu(connection, index,
                                         flavor.get("images", None))
        vm_xml = VM_TEMPLATE.format(
            vm_name=vm_name,
            random_uuid=uuid.uuid4(),
            mac_addr=mac,
            memory=memory, vcpu=vcpu, cpu=cpu,
            numa_cpus=numa_cpus,
            socket=socket, threads=threads,
            vm_image=image, cpuset=cpuset, cputune=cputune)

        write_file(cfg, vm_xml)

        return [vcpu, mac]

    @staticmethod
    def update_interrupts_hugepages_perf(connection):
        """Tune the host for VM performance: allow unsafe assigned
        interrupts for device passthrough and disable transparent
        hugepages (explicit hugepages are used via <memoryBacking>)."""
        connection.execute("echo 1 > /sys/module/kvm/parameters/allow_unsafe_assigned_interrupts")
        connection.execute("echo never > /sys/kernel/mm/transparent_hugepage/enabled")

    @classmethod
    def pin_vcpu_for_perf(cls, connection, socket='0'):
        """Build a libvirt cpuset string covering all cores (and their
        hyper-thread siblings, when present) of the given NUMA socket.

        Returns e.g. "2-15,30-43" with hyper-threading enabled, or
        "2-15" without.  (The previous implementation appended a
        dangling trailing comma -- "2-15," -- when thread_per_core was
        1, which is not a valid cpuset.)
        """
        sys_obj = CpuSysCores(connection)
        soc_cpu = sys_obj.get_core_socket()
        sys_cpu = int(soc_cpu["cores_per_socket"])
        socket = str(socket)
        cores = "%s-%s" % (soc_cpu[socket][0], soc_cpu[socket][sys_cpu - 1])
        if int(soc_cpu["thread_per_core"]) > 1:
            # sibling threads occupy the second half of the socket's CPU list
            threads = "%s-%s" % (soc_cpu[socket][sys_cpu], soc_cpu[socket][-1])
            return "%s,%s" % (cores, threads)
        return cores
280
281
class StandaloneContextHelper(object):
    """ This class handles all the common code for standalone
    """
    def __init__(self):
        # path of the pod file consumed by read_config_file()
        self.file_path = None
        super(StandaloneContextHelper, self).__init__()

    @staticmethod
    def install_req_libs(connection, extra_pkgs=None):
        """Install the base (plus any extra) Debian packages if missing.

        ``extra_pkgs`` uses a ``None`` sentinel instead of a mutable
        ``[]`` default; passing a list remains backward compatible.
        """
        pkgs = ["qemu-kvm", "libvirt-bin", "bridge-utils", "numactl", "fping"]
        pkgs.extend(extra_pkgs or [])
        cmd_template = "dpkg-query -W --showformat='${Status}\\n' \"%s\"|grep 'ok installed'"
        for pkg in pkgs:
            # non-zero status means the package is not installed yet
            if connection.execute(cmd_template % pkg)[0]:
                connection.execute("apt-get update")
                connection.execute("apt-get -y install %s" % pkg)

    @staticmethod
    def get_kernel_module(connection, pci, driver):
        """Return ``driver`` as-is, or look up the kernel module bound to
        the PCI device via ``lspci -k`` when no driver was configured."""
        if not driver:
            out = connection.execute("lspci -k -s %s" % pci)[1]
            driver = out.split("Kernel modules:").pop().strip()
        return driver

    @classmethod
    def get_nic_details(cls, connection, networks, dpdk_nic_bind):
        """Bind each non-mgmt port to its kernel driver and record the
        resulting interface name and driver back into ``networks``."""
        for key, ports in networks.items():
            if key == "mgmt":
                continue

            phy_ports = ports['phy_port']
            phy_driver = ports.get('phy_driver', None)
            driver = cls.get_kernel_module(connection, phy_ports, phy_driver)

            # Make sure that ports are bound to kernel drivers e.g. i40e/ixgbe
            bind_cmd = "{dpdk_nic_bind} --force -b {driver} {port}"
            lshw_cmd = "lshw -c network -businfo | grep '{port}'"
            link_show_cmd = "ip -s link show {interface}"

            cmd = bind_cmd.format(dpdk_nic_bind=dpdk_nic_bind,
                                  driver=driver, port=ports['phy_port'])
            connection.execute(cmd)

            out = connection.execute(lshw_cmd.format(port=phy_ports))[1]
            interface = out.split()[1]

            connection.execute(link_show_cmd.format(interface=interface))

            ports.update({
                'interface': str(interface),
                'driver': driver
            })
        LOG.info(networks)

        return networks

    @staticmethod
    def get_virtual_devices(connection, pci):
        """Return {pf_pci: vf0_pci} for the first virtual function of the
        physical function at ``pci`` (empty dict when none exists)."""
        cmd = "cat /sys/bus/pci/devices/{0}/virtfn0/uevent"
        output = connection.execute(cmd.format(pci))[1]

        pattern = "PCI_SLOT_NAME=({})".format(PciAddress.PCI_PATTERN_STR)
        m = re.search(pattern, output, re.MULTILINE)

        pf_vfs = {}
        if m:
            pf_vfs = {pci: m.group(1).rstrip()}

        LOG.info("pf_vfs:\n%s", pf_vfs)

        return pf_vfs

    def read_config_file(self):
        """Read from config file"""

        with open(self.file_path) as stream:
            LOG.info("Parsing pod file: %s", self.file_path)
            cfg = yaml_load(stream)
        return cfg

    def parse_pod_file(self, file_path, nfvi_role='Sriov'):
        """Split the pod file into regular nodes and NFVi hosts.

        :returns: [nodes, nfvi_host, host_mgmt] where host_mgmt holds the
            ssh credentials of the first NFVi host.
        :raises ValueError: when no node carries ``nfvi_role``.
        """
        self.file_path = file_path
        nodes = []
        nfvi_host = []
        try:
            cfg = self.read_config_file()
        except IOError as io_error:
            if io_error.errno != errno.ENOENT:
                raise
            # fall back to a path relative to the yardstick source tree
            self.file_path = os.path.join(YARDSTICK_ROOT_PATH, file_path)
            cfg = self.read_config_file()

        nodes.extend([node for node in cfg["nodes"] if str(node["role"]) != nfvi_role])
        nfvi_host.extend([node for node in cfg["nodes"] if str(node["role"]) == nfvi_role])
        if not nfvi_host:
            # original code raised a bare string, which is itself a
            # TypeError; raise a proper exception instead
            raise ValueError("Node role is other than SRIOV")

        host_mgmt = {'user': nfvi_host[0]['user'],
                     'ip': str(IPNetwork(nfvi_host[0]['ip']).ip),
                     'password': nfvi_host[0]['password'],
                     'ssh_port': nfvi_host[0].get('ssh_port', 22),
                     'key_filename': nfvi_host[0].get('key_filename')}

        return [nodes, nfvi_host, host_mgmt]

    @staticmethod
    def get_mac_address(end=0x7f):
        """Generate a random MAC in the KVM-reserved 52:54:00 OUI range;
        ``end`` caps the fourth octet."""
        mac = [0x52, 0x54, 0x00,
               random.randint(0x00, end),
               random.randint(0x00, 0xff),
               random.randint(0x00, 0xff)]
        mac_address = ':'.join('%02x' % x for x in mac)
        return mac_address

    @staticmethod
    def get_mgmt_ip(connection, mac, cidr, node):
        """Poll the ARP/neighbour table until the VM with ``mac`` shows an
        IP in ``cidr``; verify it is ssh-reachable before returning.

        Returns None after 10 unsuccessful attempts.
        """
        mgmtip = None
        times = 10
        while not mgmtip and times:
            # fping the whole subnet so the VM's ARP entry gets populated
            connection.execute("fping -c 1 -g %s > /dev/null 2>&1" % cidr)
            out = connection.execute("ip neighbor | grep '%s'" % mac)[1]
            LOG.info("fping -c 1 -g %s > /dev/null 2>&1", cidr)
            if out.strip():
                mgmtip = str(out.split(" ")[0]).strip()
                client = ssh.SSH.from_node(node, overrides={"ip": mgmtip})
                client.wait()
                break

            time.sleep(WAIT_FOR_BOOT)  # FixMe: How to find if VM is booted?
            times = times - 1
        return mgmtip

    @classmethod
    def wait_for_vnfs_to_start(cls, connection, servers, nodes):
        """Fill in each node's management IP once its VNF VM is up."""
        for node in nodes:
            vnf = servers[node["name"]]
            mgmtip = vnf["network_ports"]["mgmt"]["cidr"]
            ip = cls.get_mgmt_ip(connection, node["mac"], mgmtip, node)
            if ip:
                node["ip"] = ip
        return nodes
423
424
class Server(object):
    """ This class handles getting vnf nodes
    """

    @staticmethod
    def build_vnf_interfaces(vnf, ports):
        """Map a VNF's network_ports onto the host port details.

        :param vnf: server definition with a "network_ports" mapping.
        :param ports: per-port details (vpci, driver, mac, cidr).
        :returns: (mgmt_ip, interfaces); mgmt_ip is None when the VNF
            defines no "mgmt" port (the original code raised
            UnboundLocalError in that case).
        """
        # initialize so a VNF without a mgmt port no longer crashes
        mgmtip = None
        interfaces = {}
        index = 0

        for key, vfs in vnf["network_ports"].items():
            if key == "mgmt":
                mgmtip = str(IPNetwork(vfs['cidr']).ip)
                continue

            vf = ports[vfs[0]]
            ip = IPNetwork(vf['cidr'])
            interfaces[key] = {
                'vpci': vf['vpci'],
                # the guest binds the VF variant of the driver, e.g. "i40evf"
                'driver': "%svf" % vf['driver'],
                'local_mac': vf['mac'],
                'dpdk_port_num': index,
                'local_ip': str(ip.ip),
                'netmask': str(ip.netmask),
            }
            index += 1

        return mgmtip, interfaces

    @classmethod
    def generate_vnf_instance(cls, flavor, ports, ip, key, vnf, mac):
        """Build the node dict yardstick uses to reach a VNF VM over ssh."""
        mgmtip, interfaces = cls.build_vnf_interfaces(vnf, ports)

        result = {
            "ip": mgmtip,
            "mac": mac,
            "host": ip,
            "user": flavor.get('user', 'root'),
            "interfaces": interfaces,
            "routing_table": [],
            # empty IPv6 routing table
            "nd_route_tbl": [],
            "name": key, "role": key
        }

        # ssh credentials are optional in the flavor definition
        try:
            result['key_filename'] = flavor['key_filename']
        except KeyError:
            pass

        try:
            result['password'] = flavor['password']
        except KeyError:
            pass
        LOG.info(result)
        return result
482
483
class OvsDeploy(object):
    """Deploy OVS with DPDK on a remote node.

    Configuration: ovs_dpdk
    """

    # name of the install script, both locally and on the target
    OVS_DEPLOY_SCRIPT = "ovs_deploy.bash"

    def __init__(self, connection, bin_path, ovs_properties):
        self.connection = connection
        self.bin_path = bin_path
        self.ovs_properties = ovs_properties

    def prerequisite(self):
        """Install the build tool-chain needed to compile OVS and DPDK."""
        build_pkgs = ["git", "build-essential", "pkg-config", "automake",
                      "autotools-dev", "libltdl-dev", "cmake", "libnuma-dev",
                      "libpcap-dev"]
        StandaloneContextHelper.install_req_libs(self.connection, build_pkgs)

    def ovs_deploy(self):
        """Copy the deploy script to the node and run it with the
        configured OVS/DPDK versions (no-op if the script is missing)."""
        local_script = os.path.join(YARDSTICK_ROOT_PATH,
                                    "yardstick/resources/scripts/install/",
                                    self.OVS_DEPLOY_SCRIPT)
        if not os.path.isfile(local_script):
            return

        self.prerequisite()
        remote_script = os.path.join(self.bin_path, self.OVS_DEPLOY_SCRIPT)
        LOG.info(remote_script)
        self.connection.put(local_script, remote_script)

        # propagate the caller's proxy so the script can fetch sources
        proxy = os.environ.get('http_proxy', '')
        versions = self.ovs_properties.get("version", {})
        ovs_ver = versions.get("ovs", "2.6.0")
        dpdk_ver = versions.get("dpdk", "16.11.1")

        cmd = "sudo -E %s --ovs='%s' --dpdk='%s' -p='%s'" % (
            remote_script, ovs_ver, dpdk_ver, proxy)
        self.connection.execute(cmd)