# Copyright (c) 2016-2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
import os
import re
import time
import uuid
import random
import logging
import errno

from netaddr import IPNetwork
import xml.etree.ElementTree as ET

from yardstick import ssh
from yardstick.common.constants import YARDSTICK_ROOT_PATH
from yardstick.common.yaml_loader import yaml_load
from yardstick.network_services.utils import PciAddress
from yardstick.network_services.helpers.cpu import CpuSysCores
from yardstick.common.utils import write_file

LOG = logging.getLogger(__name__)

VM_TEMPLATE = """
<domain type="kvm">
  <name>{vm_name}</name>
  <uuid>{random_uuid}</uuid>
  <memory unit="MB">{memory}</memory>
  <currentMemory unit="MB">{memory}</currentMemory>
  <memoryBacking>
    <hugepages />
  </memoryBacking>
  <vcpu cpuset='{cpuset}'>{vcpu}</vcpu>
  <os>
    <type arch="x86_64" machine="pc-i440fx-utopic">hvm</type>
    <boot dev="hd" />
  </os>
  <features>
    <acpi />
    <apic />
    <pae />
  </features>
  <cpu mode='host-passthrough'>
    <topology cores="{cpu}" sockets="{socket}" threads="{threads}" />
    <numa>
       <cell id='0' cpus='{numa_cpus}' memory='{memory}' unit='MB' memAccess='shared'/>
    </numa>
  </cpu>
  <clock offset="utc">
    <timer name="rtc" tickpolicy="catchup" />
    <timer name="pit" tickpolicy="delay" />
    <timer name="hpet" present="no" />
  </clock>
  <on_poweroff>destroy</on_poweroff>
  <on_reboot>restart</on_reboot>
  <on_crash>restart</on_crash>
  <devices>
    <emulator>/usr/bin/kvm-spice</emulator>
    <disk device="disk" type="file">
      <driver name="qemu" type="qcow2" />
      <source file="{vm_image}"/>
      <target bus="virtio" dev="vda" />
    </disk>
    <graphics autoport="yes" listen="0.0.0.0" port="-1" type="vnc" />
    <interface type="bridge">
      <mac address='{mac_addr}'/>
      <source bridge="br-int" />
      <model type='virtio'/>
    </interface>
    <serial type='pty'>
      <target port='0'/>
    </serial>
    <console type='pty'>
      <target type='serial' port='0'/>
    </console>
  </devices>
</domain>
"""
WAIT_FOR_BOOT = 30


class Libvirt(object):
    """ This class handles all the libvirt operations needed to launch a VM
    """

    @staticmethod
    def check_if_vm_exists_and_delete(vm_name, connection):
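        """Destroy any running VM that matches the given name.

        'virsh list --name' lists the running domains; a zero grep status
        means a domain with this name already exists, so it is destroyed
        before a new one is created.
        """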
        cmd_template = "virsh list --name | grep -i %s"
        status = connection.execute(cmd_template % vm_name)[0]
        if status == 0:
            LOG.info("VM '%s' is already present... destroying", vm_name)
            connection.execute("virsh destroy %s" % vm_name)

    @staticmethod
    def virsh_create_vm(connection, cfg):
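        """Start a transient domain from the domain XML file 'cfg'.

        The logged value is the exit status of 'virsh create'.
        """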
        err = connection.execute("virsh create %s" % cfg)[0]
        LOG.info("VM create status: %s", err)

    @staticmethod
    def virsh_destroy_vm(vm_name, connection):
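        """Forcefully stop the named domain with 'virsh destroy'."""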
        connection.execute("virsh destroy %s" % vm_name)

    @staticmethod
    def _add_interface_address(interface, pci_address):
        """Add a PCI 'address' XML node

        <address type='pci' domain='0x0000' bus='0x00' slot='0x08'
         function='0x0'/>

        Reference: https://software.intel.com/en-us/articles/
                   configure-sr-iov-network-virtual-functions-in-linux-kvm
        """
        vm_pci = ET.SubElement(interface, 'address')
        vm_pci.set('type', 'pci')
        vm_pci.set('domain', '0x{}'.format(pci_address.domain))
        vm_pci.set('bus', '0x{}'.format(pci_address.bus))
        vm_pci.set('slot', '0x{}'.format(pci_address.slot))
        vm_pci.set('function', '0x{}'.format(pci_address.function))
        return vm_pci

    @classmethod
    def add_ovs_interface(cls, vpath, port_num, vpci, vports_mac, xml):
        """Add a DPDK OVS 'interface' XML node to the 'devices' node

        <devices>
            <interface type='vhostuser'>
                <mac address='00:00:00:00:00:01'/>
                <source type='unix' path='/usr/local/var/run/openvswitch/
                 dpdkvhostuser0' mode='client'/>
                <model type='virtio'/>
                <driver queues='4'>
                    <host mrg_rxbuf='off'/>
                </driver>
                <address type='pci' domain='0x0000' bus='0x00' slot='0x03'
                 function='0x0'/>
            </interface>
            ...
        </devices>

        Reference: http://docs.openvswitch.org/en/latest/topics/dpdk/
                   vhost-user/
        """

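        # Unix socket that OVS creates for the dpdkvhostuser port; the VM
        # attaches to it in client mode.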
        vhost_path = ('{0}/var/run/openvswitch/dpdkvhostuser{1}'.
                      format(vpath, port_num))
        root = ET.parse(xml)
        pci_address = PciAddress(vpci.strip())
        device = root.find('devices')

        interface = ET.SubElement(device, 'interface')
        interface.set('type', 'vhostuser')
        mac = ET.SubElement(interface, 'mac')
        mac.set('address', vports_mac)

        source = ET.SubElement(interface, 'source')
        source.set('type', 'unix')
        source.set('path', vhost_path)
        source.set('mode', 'client')

        model = ET.SubElement(interface, 'model')
        model.set('type', 'virtio')

        driver = ET.SubElement(interface, 'driver')
        driver.set('queues', '4')

        host = ET.SubElement(driver, 'host')
        host.set('mrg_rxbuf', 'off')

        cls._add_interface_address(interface, pci_address)

        root.write(xml)

    @classmethod
    def add_sriov_interfaces(cls, vm_pci, vf_pci, vf_mac, xml):
        """Add an SR-IOV 'interface' XML node to the 'devices' node

        <devices>
           <interface type='hostdev' managed='yes'>
             <source>
               <address type='pci' domain='0x0000' bus='0x00' slot='0x03'
                function='0x0'/>
             </source>
             <mac address='52:54:00:6d:90:02'/>
             <address type='pci' domain='0x0000' bus='0x02' slot='0x04'
              function='0x1'/>
           </interface>
           ...
        </devices>

        Reference: https://access.redhat.com/documentation/en-us/
            red_hat_enterprise_linux/6/html/
            virtualization_host_configuration_and_guest_installation_guide/
            sect-virtualization_host_configuration_and_guest_installation_guide
            -sr_iov-how_sr_iov_libvirt_works
        """

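        # 'source/address' identifies the host VF to pass through, while the
        # second 'address' is the PCI slot the device gets inside the guest.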
        root = ET.parse(xml)
        device = root.find('devices')

        interface = ET.SubElement(device, 'interface')
        interface.set('managed', 'yes')
        interface.set('type', 'hostdev')

        mac = ET.SubElement(interface, 'mac')
        mac.set('address', vf_mac)

        source = ET.SubElement(interface, 'source')
        addr = ET.SubElement(source, 'address')
        pci_address = PciAddress(vf_pci.strip())
        cls._add_interface_address(addr, pci_address)

        pci_vm_address = PciAddress(vm_pci.strip())
        cls._add_interface_address(interface, pci_vm_address)

        root.write(xml)

    @staticmethod
    def create_snapshot_qemu(connection, index, vm_image):
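        """Create a per-VM qcow2 snapshot backed by the base image.

        Any stale snapshot with the same index is removed first; the new
        image only stores the deltas against 'vm_image'.
        """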
        # build snapshot image
        image = "/var/lib/libvirt/images/%s.qcow2" % index
        connection.execute("rm %s" % image)
        qemu_template = "qemu-img create -f qcow2 -o backing_file=%s %s"
        connection.execute(qemu_template % (vm_image, image))

        return image

    @classmethod
    def build_vm_xml(cls, connection, flavor, cfg, vm_name, index):
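        """Render the libvirt domain XML for a VM and write it to 'cfg'.

        Memory and CPU topology come from the flavor, e.g.
        flavor = {'ram': '4096',
                  'extra_specs': {'hw:cpu_cores': '2', 'hw:cpu_threads': '2'}}
        (illustrative values). Returns the vCPU count and the generated MAC
        address.
        """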
        memory = flavor.get('ram', '4096')
        extra_spec = flavor.get('extra_specs', {})
        cpu = extra_spec.get('hw:cpu_cores', '2')
        socket = extra_spec.get('hw:cpu_sockets', '1')
        threads = extra_spec.get('hw:cpu_threads', '2')
        vcpu = int(cpu) * int(threads)
        numa_cpus = '0-%s' % (vcpu - 1)
        hw_socket = flavor.get('hw_socket', '0')
        cpuset = Libvirt.pin_vcpu_for_perf(connection, hw_socket)

        mac = StandaloneContextHelper.get_mac_address(0x00)
        image = cls.create_snapshot_qemu(connection, index,
                                         flavor.get("images", None))
        vm_xml = VM_TEMPLATE.format(
            vm_name=vm_name,
            random_uuid=uuid.uuid4(),
            mac_addr=mac,
            memory=memory, vcpu=vcpu, cpu=cpu,
            numa_cpus=numa_cpus,
            socket=socket, threads=threads,
            vm_image=image, cpuset=cpuset)

        write_file(cfg, vm_xml)

        return [vcpu, mac]

    @staticmethod
    def update_interrupts_hugepages_perf(connection):
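        """Host-side tuning for VM performance.

        Enables KVM's allow_unsafe_assigned_interrupts parameter and
        disables transparent hugepages, since the guest XML requests
        hugepage-backed memory.
        """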
        connection.execute("echo 1 > /sys/module/kvm/parameters/allow_unsafe_assigned_interrupts")
        connection.execute("echo never > /sys/kernel/mm/transparent_hugepage/enabled")

    @classmethod
    def pin_vcpu_for_perf(cls, connection, socket='0'):
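        """Build a cpuset string covering every core of the given socket,
        plus the hyper-thread siblings when the host has them,
        e.g. '1-17,19-35' (illustrative).
        """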
        threads = ""
        sys_obj = CpuSysCores(connection)
        soc_cpu = sys_obj.get_core_socket()
        sys_cpu = int(soc_cpu["cores_per_socket"])
        cores = "%s-%s" % (soc_cpu[socket][0], soc_cpu[socket][sys_cpu - 1])
        if int(soc_cpu["thread_per_core"]) > 1:
            threads = "%s-%s" % (soc_cpu[socket][sys_cpu], soc_cpu[socket][-1])
        # avoid a trailing comma when the host has no hyper-threading
        cpuset = "%s,%s" % (cores, threads) if threads else cores
        return cpuset


class StandaloneContextHelper(object):
    """ This class holds the code common to the standalone contexts
    """
    def __init__(self):
        self.file_path = None
        super(StandaloneContextHelper, self).__init__()

    @staticmethod
    def install_req_libs(connection, extra_pkgs=None):
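        """Install the host packages required for standalone deployments
        (qemu-kvm, libvirt-bin, bridge-utils, numactl, fping) plus any
        extras, skipping packages that dpkg reports as already installed.
        """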
        extra_pkgs = extra_pkgs or []
        pkgs = ["qemu-kvm", "libvirt-bin", "bridge-utils", "numactl", "fping"]
        pkgs.extend(extra_pkgs)
        cmd_template = "dpkg-query -W --showformat='${Status}\\n' \"%s\"|grep 'ok installed'"
        for pkg in pkgs:
            if connection.execute(cmd_template % pkg)[0]:
                connection.execute("apt-get update")
                connection.execute("apt-get -y install %s" % pkg)

    @staticmethod
    def get_kernel_module(connection, pci, driver):
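        """Return the kernel driver for a PCI device; when no driver is
        given in the config, read it from 'lspci -k'.
        """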
        if not driver:
            out = connection.execute("lspci -k -s %s" % pci)[1]
            driver = out.split("Kernel modules:").pop().strip()
        return driver

    @classmethod
    def get_nic_details(cls, connection, networks, dpdk_nic_bind):
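        """Bind every non-mgmt physical port back to its kernel driver and
        record the resulting interface name and driver in 'networks'.
        """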
        for key, ports in networks.items():
            if key == "mgmt":
                continue

            phy_ports = ports['phy_port']
            phy_driver = ports.get('phy_driver', None)
            driver = cls.get_kernel_module(connection, phy_ports, phy_driver)

            # Make sure that ports are bound to kernel drivers e.g. i40e/ixgbe
            bind_cmd = "{dpdk_nic_bind} --force -b {driver} {port}"
            lshw_cmd = "lshw -c network -businfo | grep '{port}'"
            link_show_cmd = "ip -s link show {interface}"

            cmd = bind_cmd.format(dpdk_nic_bind=dpdk_nic_bind,
                                  driver=driver, port=ports['phy_port'])
            connection.execute(cmd)

            out = connection.execute(lshw_cmd.format(port=phy_ports))[1]
            interface = out.split()[1]

            connection.execute(link_show_cmd.format(interface=interface))

            ports.update({
                'interface': str(interface),
                'driver': driver
            })
        LOG.info(networks)

        return networks

    @staticmethod
    def get_virtual_devices(connection, pci):
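        """Return a {PF PCI address: first VF PCI address} mapping, parsed
        from the VF's sysfs uevent file.
        """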
        cmd = "cat /sys/bus/pci/devices/{0}/virtfn0/uevent"
        output = connection.execute(cmd.format(pci))[1]

        pattern = "PCI_SLOT_NAME=({})".format(PciAddress.PCI_PATTERN_STR)
        m = re.search(pattern, output, re.MULTILINE)

        pf_vfs = {}
        if m:
            pf_vfs = {pci: m.group(1).rstrip()}

        LOG.info("pf_vfs:\n%s", pf_vfs)

        return pf_vfs

    def read_config_file(self):
        """Read from config file"""

        with open(self.file_path) as stream:
            LOG.info("Parsing pod file: %s", self.file_path)
            cfg = yaml_load(stream)
        return cfg

    def parse_pod_file(self, file_path, nfvi_role='Sriov'):
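        """Parse the pod file and split its nodes by role.

        Returns [nodes, nfvi_host, host_mgmt]: the nodes whose role differs
        from 'nfvi_role', the matching NFVi host(s), and the management
        credentials of the first NFVi host.
        """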
        self.file_path = file_path
        nodes = []
        nfvi_host = []
        try:
            cfg = self.read_config_file()
        except IOError as io_error:
            if io_error.errno != errno.ENOENT:
                raise
            self.file_path = os.path.join(YARDSTICK_ROOT_PATH, file_path)
            cfg = self.read_config_file()

        nodes.extend([node for node in cfg["nodes"] if str(node["role"]) != nfvi_role])
        nfvi_host.extend([node for node in cfg["nodes"] if str(node["role"]) == nfvi_role])
        if not nfvi_host:
            raise ValueError("No node with role '%s' found in the pod file" % nfvi_role)

        host_mgmt = {'user': nfvi_host[0]['user'],
                     'ip': str(IPNetwork(nfvi_host[0]['ip']).ip),
                     'password': nfvi_host[0]['password'],
                     'ssh_port': nfvi_host[0].get('ssh_port', 22),
                     'key_filename': nfvi_host[0].get('key_filename')}

        return [nodes, nfvi_host, host_mgmt]

    @staticmethod
    def get_mac_address(end=0x7f):
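        """Generate a random MAC in the locally administered
        52:54:00:xx:xx:xx range; 'end' caps the first random octet.
        """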
        mac = [0x52, 0x54, 0x00,
               random.randint(0x00, end),
               random.randint(0x00, 0xff),
               random.randint(0x00, 0xff)]
        mac_address = ':'.join('%02x' % x for x in mac)
        return mac_address

    @staticmethod
    def get_mgmt_ip(connection, mac, cidr, node):
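        """Find the management IP of a freshly booted VM.

        Ping-sweeps the CIDR, looks the VM's MAC up in the host's neighbour
        table and waits until SSH to the discovered IP succeeds, retrying a
        limited number of times.
        """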
        mgmtip = None
        times = 10
        while not mgmtip and times:
            connection.execute("fping -c 1 -g %s > /dev/null 2>&1" % cidr)
            out = connection.execute("ip neighbor | grep '%s'" % mac)[1]
            LOG.info("fping -c 1 -g %s > /dev/null 2>&1", cidr)
            if out.strip():
                mgmtip = str(out.split(" ")[0]).strip()
                client = ssh.SSH.from_node(node, overrides={"ip": mgmtip})
                client.wait()
                break

            time.sleep(WAIT_FOR_BOOT)  # FixMe: How to find if VM is booted?
            times = times - 1
        return mgmtip

    @classmethod
    def wait_for_vnfs_to_start(cls, connection, servers, nodes):
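        """Wait for every VNF VM to come up and store its management IP in
        the matching node entry.
        """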
        for node in nodes:
            vnf = servers[node["name"]]
            mgmtip = vnf["network_ports"]["mgmt"]["cidr"]
            ip = cls.get_mgmt_ip(connection, node["mac"], mgmtip, node)
            if ip:
                node["ip"] = ip
        return nodes


class Server(object):
    """ This class handles getting the VNF nodes
    """

    @staticmethod
    def build_vnf_interfaces(vnf, ports):
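        """Build the per-port interface dictionary for a VNF.

        The 'mgmt' entry only supplies the management IP; every other port
        contributes its vpci, VF driver ('<driver>vf'), MAC, IP/netmask and
        a DPDK port number.
        """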
        interfaces = {}
        index = 0

        for key, vfs in vnf["network_ports"].items():
            if key == "mgmt":
                mgmtip = str(IPNetwork(vfs['cidr']).ip)
                continue

            vf = ports[vfs[0]]
            ip = IPNetwork(vf['cidr'])
            interfaces.update({
                key: {
                    'vpci': vf['vpci'],
                    'driver': "%svf" % vf['driver'],
                    'local_mac': vf['mac'],
                    'dpdk_port_num': index,
                    'local_ip': str(ip.ip),
                    'netmask': str(ip.netmask)
                    },
            })
            index = index + 1

        return mgmtip, interfaces

    @classmethod
    def generate_vnf_instance(cls, flavor, ports, ip, key, vnf, mac):
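        """Assemble the node dictionary for one VNF instance: management IP,
        MAC, host, user, interfaces, empty routing tables and, when present
        in the flavor, key_filename/password.
        """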
        mgmtip, interfaces = cls.build_vnf_interfaces(vnf, ports)

        result = {
            "ip": mgmtip,
            "mac": mac,
            "host": ip,
            "user": flavor.get('user', 'root'),
            "interfaces": interfaces,
            "routing_table": [],
            # empty IPv6 routing table
            "nd_route_tbl": [],
            "name": key, "role": key
        }

        try:
            result['key_filename'] = flavor['key_filename']
        except KeyError:
            pass

        try:
            result['password'] = flavor['password']
        except KeyError:
            pass
        LOG.info(result)
        return result


class OvsDeploy(object):
    """ This class handles the deployment of OVS with DPDK
    Configuration: ovs_dpdk
    """

    OVS_DEPLOY_SCRIPT = "ovs_deploy.bash"

    def __init__(self, connection, bin_path, ovs_properties):
        self.connection = connection
        self.bin_path = bin_path
        self.ovs_properties = ovs_properties

    def prerequisite(self):
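        """Install the packages needed to build OVS and DPDK on the host."""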
        pkgs = ["git", "build-essential", "pkg-config", "automake",
                "autotools-dev", "libltdl-dev", "cmake", "libnuma-dev",
                "libpcap-dev"]
        StandaloneContextHelper.install_req_libs(self.connection, pkgs)

    def ovs_deploy(self):
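        """Copy ovs_deploy.bash to the target and run it with the requested
        OVS/DPDK versions and the local http_proxy, if any.
        """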
        ovs_deploy = os.path.join(YARDSTICK_ROOT_PATH,
                                  "yardstick/resources/scripts/install/",
                                  self.OVS_DEPLOY_SCRIPT)
        if os.path.isfile(ovs_deploy):
            self.prerequisite()
            remote_ovs_deploy = os.path.join(self.bin_path, self.OVS_DEPLOY_SCRIPT)
            LOG.info(remote_ovs_deploy)
            self.connection.put(ovs_deploy, remote_ovs_deploy)

            http_proxy = os.environ.get('http_proxy', '')
            ovs_details = self.ovs_properties.get("version", {})
            ovs = ovs_details.get("ovs", "2.6.0")
            dpdk = ovs_details.get("dpdk", "16.11.1")

            cmd = "sudo -E %s --ovs='%s' --dpdk='%s' -p='%s'" % (remote_ovs_deploy,
                                                                 ovs, dpdk, http_proxy)
            self.connection.execute(cmd)