0d58e91b0d1e307f041944abe45b264d8a8ea463
[yardstick.git] / yardstick / benchmark / contexts / standalone / model.py
1 # Copyright (c) 2016-2017 Intel Corporation
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 #      http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from __future__ import absolute_import
16 import os
17 import re
18 import time
19 import uuid
20 import random
21 import logging
22 import errno
23
24 from netaddr import IPNetwork
25 import xml.etree.ElementTree as ET
26
27 from yardstick import ssh
28 from yardstick.common.constants import YARDSTICK_ROOT_PATH
29 from yardstick.common.yaml_loader import yaml_load
30 from yardstick.network_services.utils import PciAddress
31 from yardstick.network_services.helpers.cpu import CpuSysCores
32 from yardstick.common.utils import write_file
33
34 LOG = logging.getLogger(__name__)
35
# Libvirt domain XML template used to boot guest VMs on the standalone host.
# The format placeholders are filled in by Libvirt.build_vm_xml():
#   vm_name, random_uuid, memory (MB), vcpu, cpuset, cputune, cpu (cores),
#   socket, threads, numa_cpus, vm_image, mac_addr.
# The bridge interface on "br-int" provides the VM's management network.
VM_TEMPLATE = """
<domain type="kvm">
 <name>{vm_name}</name>
  <uuid>{random_uuid}</uuid>
  <memory unit="MB">{memory}</memory>
  <currentMemory unit="MB">{memory}</currentMemory>
  <memoryBacking>
    <hugepages />
  </memoryBacking>
  <vcpu cpuset='{cpuset}'>{vcpu}</vcpu>
 {cputune}
  <os>
    <type arch="x86_64" machine="pc-i440fx-utopic">hvm</type>
    <boot dev="hd" />
  </os>
  <features>
    <acpi />
    <apic />
    <pae />
  </features>
  <cpu mode='host-passthrough'>
    <topology cores="{cpu}" sockets="{socket}" threads="{threads}" />
    <numa>
       <cell id='0' cpus='{numa_cpus}' memory='{memory}' unit='MB' memAccess='shared'/>
    </numa>
  </cpu>
  <clock offset="utc">
    <timer name="rtc" tickpolicy="catchup" />
    <timer name="pit" tickpolicy="delay" />
    <timer name="hpet" present="no" />
  </clock>
  <on_poweroff>destroy</on_poweroff>
  <on_reboot>restart</on_reboot>
  <on_crash>restart</on_crash>
  <devices>
    <emulator>/usr/bin/kvm-spice</emulator>
    <disk device="disk" type="file">
      <driver name="qemu" type="qcow2" />
      <source file="{vm_image}"/>
      <target bus="virtio" dev="vda" />
    </disk>
    <graphics autoport="yes" listen="0.0.0.0" port="-1" type="vnc" />
    <interface type="bridge">
      <mac address='{mac_addr}'/>
      <source bridge="br-int" />
      <model type='virtio'/>
    </interface>
   </devices>
</domain>
"""
# Seconds to sleep between retries while waiting for a VM to boot and
# acquire its management IP (see StandaloneContextHelper.get_mgmt_ip).
WAIT_FOR_BOOT = 30
87
88
class Libvirt(object):
    """ This class handles all the libvirt updates to lauch VM
    """

    @staticmethod
    def check_if_vm_exists_and_delete(vm_name, connection):
        """Destroy the VM named *vm_name* on the remote host if it is running.

        :param vm_name: libvirt domain name to look for
        :param connection: ssh connection to the host running libvirt
        """
        cmd_template = "virsh list --name | grep -i %s"
        # exit status 0 means grep found a matching (running) domain
        status = connection.execute(cmd_template % vm_name)[0]
        if status == 0:
            LOG.info("VM '%s' is already present... destroying", vm_name)
            connection.execute("virsh destroy %s" % vm_name)

    @staticmethod
    def virsh_create_vm(connection, cfg):
        """Create (start) a transient domain from the XML file *cfg*.

        :param connection: ssh connection to the host running libvirt
        :param cfg: path (on the remote host) of the domain XML file
        """
        err = connection.execute("virsh create %s" % cfg)[0]
        LOG.info("VM create status: %s", err)

    @staticmethod
    def virsh_destroy_vm(vm_name, connection):
        """Forcefully stop the domain *vm_name* on the remote host."""
        connection.execute("virsh destroy %s" % vm_name)

    @staticmethod
    def _add_interface_address(interface, pci_address):
        """Add a PCI 'address' XML node

        <address type='pci' domain='0x0000' bus='0x00' slot='0x08'
         function='0x0'/>

        Reference: https://software.intel.com/en-us/articles/
                 configure-sr-iov-network-virtual-functions-in-linux-kvm

        :param interface: XML element the address node is appended to
        :param pci_address: PciAddress object with domain/bus/slot/function
        :return: the newly created 'address' sub-element
        """
        vm_pci = ET.SubElement(interface, 'address')
        vm_pci.set('type', 'pci')
        vm_pci.set('domain', '0x{}'.format(pci_address.domain))
        vm_pci.set('bus', '0x{}'.format(pci_address.bus))
        vm_pci.set('slot', '0x{}'.format(pci_address.slot))
        vm_pci.set('function', '0x{}'.format(pci_address.function))
        return vm_pci

    @classmethod
    def add_ovs_interface(cls, vpath, port_num, vpci, vports_mac, xml):
        """Add a DPDK OVS 'interface' XML node in 'devices' node

        <devices>
            <interface type='vhostuser'>
                <mac address='00:00:00:00:00:01'/>
                <source type='unix' path='/usr/local/var/run/openvswitch/
                 dpdkvhostuser0' mode='client'/>
                <model type='virtio'/>
                <driver queues='4'>
                    <host mrg_rxbuf='off'/>
                </driver>
                <address type='pci' domain='0x0000' bus='0x00' slot='0x03'
                 function='0x0'/>
            </interface>
            ...
        </devices>

        Reference: http://docs.openvswitch.org/en/latest/topics/dpdk/
                   vhost-user/

        :param vpath: OVS install prefix on the host (vhost sockets live
            under <vpath>/var/run/openvswitch)
        :param port_num: index used to pick the dpdkvhostuser<N> socket
        :param vpci: guest PCI address string for the new interface
        :param vports_mac: MAC address to assign to the interface
        :param xml: path of the domain XML file, rewritten in place
        """

        vhost_path = ('{0}/var/run/openvswitch/dpdkvhostuser{1}'.
                      format(vpath, port_num))
        root = ET.parse(xml)
        pci_address = PciAddress(vpci.strip())
        device = root.find('devices')

        interface = ET.SubElement(device, 'interface')
        interface.set('type', 'vhostuser')
        mac = ET.SubElement(interface, 'mac')
        mac.set('address', vports_mac)

        source = ET.SubElement(interface, 'source')
        source.set('type', 'unix')
        source.set('path', vhost_path)
        source.set('mode', 'client')

        model = ET.SubElement(interface, 'model')
        model.set('type', 'virtio')

        driver = ET.SubElement(interface, 'driver')
        driver.set('queues', '4')

        host = ET.SubElement(driver, 'host')
        host.set('mrg_rxbuf', 'off')

        cls._add_interface_address(interface, pci_address)

        root.write(xml)

    @classmethod
    def add_sriov_interfaces(cls, vm_pci, vf_pci, vf_mac, xml):
        """Add a SR-IOV 'interface' XML node in 'devices' node

        <devices>
           <interface type='hostdev' managed='yes'>
             <source>
               <address type='pci' domain='0x0000' bus='0x00' slot='0x03'
                function='0x0'/>
             </source>
             <mac address='52:54:00:6d:90:02'>
             <address type='pci' domain='0x0000' bus='0x02' slot='0x04'
              function='0x1'/>
           </interface>
           ...
         </devices>

        Reference: https://access.redhat.com/documentation/en-us/
            red_hat_enterprise_linux/6/html/
            virtualization_host_configuration_and_guest_installation_guide/
            sect-virtualization_host_configuration_and_guest_installation_guide
            -sr_iov-how_sr_iov_libvirt_works

        :param vm_pci: PCI address the device will have inside the guest
        :param vf_pci: host PCI address of the virtual function to pass in
        :param vf_mac: MAC address to assign to the VF
        :param xml: path of the domain XML file, rewritten in place
        """

        root = ET.parse(xml)
        device = root.find('devices')

        interface = ET.SubElement(device, 'interface')
        interface.set('managed', 'yes')
        interface.set('type', 'hostdev')

        mac = ET.SubElement(interface, 'mac')
        mac.set('address', vf_mac)

        # host-side address of the VF goes under <source>
        source = ET.SubElement(interface, 'source')
        pci_address = PciAddress(vf_pci.strip())
        cls._add_interface_address(source, pci_address)

        # guest-side address is attached directly to the interface node
        pci_vm_address = PciAddress(vm_pci.strip())
        cls._add_interface_address(interface, pci_vm_address)

        root.write(xml)

    @staticmethod
    def create_snapshot_qemu(connection, index, vm_image):
        """Create a qcow2 copy-on-write snapshot backed by *vm_image*.

        :param connection: ssh connection to the host
        :param index: per-VM index used to name the snapshot file
        :param vm_image: path of the backing image on the host
        :return: path of the snapshot image on the host
        """
        # build snapshot image
        image = "/var/lib/libvirt/images/%s.qcow2" % index
        connection.execute("rm %s" % image)
        qemu_template = "qemu-img create -f qcow2 -o backing_file=%s %s"
        connection.execute(qemu_template % (vm_image, image))

        return image

    @classmethod
    def build_vm_xml(cls, connection, flavor, cfg, vm_name, index):
        """Render VM_TEMPLATE from *flavor* and write it to file *cfg*.

        :param connection: ssh connection to the host
        :param flavor: dict with 'ram', 'images', 'hw_socket' and an
            'extra_specs' sub-dict (hw:cpu_cores/sockets/threads, cputune)
        :param cfg: local path where the rendered XML is written
        :param vm_name: libvirt domain name
        :param index: per-VM index used for the snapshot image name
        :return: [vcpu_count, generated_mac_address]
        """
        memory = flavor.get('ram', '4096')
        extra_spec = flavor.get('extra_specs', {})
        cpu = extra_spec.get('hw:cpu_cores', '2')
        socket = extra_spec.get('hw:cpu_sockets', '1')
        threads = extra_spec.get('hw:cpu_threads', '2')
        vcpu = int(cpu) * int(threads)
        numa_cpus = '0-%s' % (vcpu - 1)
        hw_socket = flavor.get('hw_socket', '0')
        cpuset = Libvirt.pin_vcpu_for_perf(connection, hw_socket)

        cputune = extra_spec.get('cputune', '')
        # end=0x00 keeps the fourth octet fixed at 00
        mac = StandaloneContextHelper.get_mac_address(0x00)
        image = cls.create_snapshot_qemu(connection, index,
                                         flavor.get("images", None))
        vm_xml = VM_TEMPLATE.format(
            vm_name=vm_name,
            random_uuid=uuid.uuid4(),
            mac_addr=mac,
            memory=memory, vcpu=vcpu, cpu=cpu,
            numa_cpus=numa_cpus,
            socket=socket, threads=threads,
            vm_image=image, cpuset=cpuset, cputune=cputune)

        write_file(cfg, vm_xml)

        return [vcpu, mac]

    @staticmethod
    def update_interrupts_hugepages_perf(connection):
        """Tune the host for VM performance.

        Allows unsafe assigned interrupts for KVM device passthrough and
        disables transparent hugepages (explicit hugepages are used via
        the domain's <memoryBacking> element).
        """
        connection.execute("echo 1 > /sys/module/kvm/parameters/allow_unsafe_assigned_interrupts")
        connection.execute("echo never > /sys/kernel/mm/transparent_hugepage/enabled")

    @classmethod
    def pin_vcpu_for_perf(cls, connection, socket='0'):
        """Build a cpuset string of the physical cores (and their HT
        siblings, if any) belonging to NUMA *socket* on the host.

        :param connection: ssh connection to the host
        :param socket: NUMA socket id (int or str)
        :return: cpuset string such as "1-14,29-42" suitable for the
            <vcpu cpuset='...'> attribute
        """
        threads = ""
        sys_obj = CpuSysCores(connection)
        soc_cpu = sys_obj.get_core_socket()
        sys_cpu = int(soc_cpu["cores_per_socket"])
        socket = str(socket)
        # first range: the physical cores of this socket
        cores = "%s-%s" % (soc_cpu[socket][0], soc_cpu[socket][sys_cpu - 1])
        # second range: hyper-thread siblings, only when HT is enabled
        if int(soc_cpu["thread_per_core"]) > 1:
            threads = "%s-%s" % (soc_cpu[socket][sys_cpu], soc_cpu[socket][-1])
        cpuset = "%s,%s" % (cores, threads)
        return cpuset
279
280
class StandaloneContextHelper(object):
    """ This class handles all the common code for standalone
    """
    def __init__(self):
        # path of the pod file, set by parse_pod_file()
        self.file_path = None
        super(StandaloneContextHelper, self).__init__()

    @staticmethod
    def install_req_libs(connection, extra_pkgs=None):
        """Install required Debian packages on the host if missing.

        :param connection: ssh connection to the host
        :param extra_pkgs: optional list of additional packages to install
        """
        # NOTE: avoid a mutable default argument; a shared list would
        # accumulate packages across calls.
        pkgs = ["qemu-kvm", "libvirt-bin", "bridge-utils", "numactl", "fping"]
        pkgs.extend(extra_pkgs or [])
        cmd_template = "dpkg-query -W --showformat='${Status}\\n' \"%s\"|grep 'ok installed'"
        for pkg in pkgs:
            # non-zero exit status means the package is not installed
            if connection.execute(cmd_template % pkg)[0]:
                connection.execute("apt-get update")
                connection.execute("apt-get -y install %s" % pkg)

    @staticmethod
    def get_kernel_module(connection, pci, driver):
        """Return *driver*, or detect the kernel module for *pci* via lspci."""
        if not driver:
            out = connection.execute("lspci -k -s %s" % pci)[1]
            driver = out.split("Kernel modules:").pop().strip()
        return driver

    @classmethod
    def get_nic_details(cls, connection, networks, dpdk_nic_bind):
        """Bind ports to kernel drivers and record interface name/driver.

        :param connection: ssh connection to the host
        :param networks: dict of network name -> port config; the 'mgmt'
            entry is skipped, every other entry is updated in place with
            'interface' and 'driver' keys
        :param dpdk_nic_bind: path of the dpdk bind tool on the host
        :return: the updated *networks* dict
        """
        for key, ports in networks.items():
            if key == "mgmt":
                continue

            phy_ports = ports['phy_port']
            phy_driver = ports.get('phy_driver', None)
            driver = cls.get_kernel_module(connection, phy_ports, phy_driver)

            # Make sure that ports are bound to kernel drivers e.g. i40e/ixgbe
            bind_cmd = "{dpdk_nic_bind} --force -b {driver} {port}"
            lshw_cmd = "lshw -c network -businfo | grep '{port}'"
            link_show_cmd = "ip -s link show {interface}"

            cmd = bind_cmd.format(dpdk_nic_bind=dpdk_nic_bind,
                                  driver=driver, port=ports['phy_port'])
            connection.execute(cmd)

            # second column of lshw businfo output is the interface name
            out = connection.execute(lshw_cmd.format(port=phy_ports))[1]
            interface = out.split()[1]

            connection.execute(link_show_cmd.format(interface=interface))

            ports.update({
                'interface': str(interface),
                'driver': driver
            })
        LOG.info(networks)

        return networks

    @staticmethod
    def get_virtual_devices(connection, pci):
        """Return a {pf_pci: vf_pci} mapping for the first VF of *pci*.

        Reads virtfn0/uevent under sysfs; returns an empty dict when the
        device has no virtual functions.
        """
        cmd = "cat /sys/bus/pci/devices/{0}/virtfn0/uevent"
        output = connection.execute(cmd.format(pci))[1]

        pattern = "PCI_SLOT_NAME=({})".format(PciAddress.PCI_PATTERN_STR)
        m = re.search(pattern, output, re.MULTILINE)

        pf_vfs = {}
        if m:
            pf_vfs = {pci: m.group(1).rstrip()}

        LOG.info("pf_vfs:\n%s", pf_vfs)

        return pf_vfs

    def read_config_file(self):
        """Read from config file"""

        with open(self.file_path) as stream:
            LOG.info("Parsing pod file: %s", self.file_path)
            cfg = yaml_load(stream)
        return cfg

    def parse_pod_file(self, file_path, nfvi_role='Sriov'):
        """Parse a pod file and split its nodes by role.

        :param file_path: pod file path, absolute or relative to
            YARDSTICK_ROOT_PATH
        :param nfvi_role: role string identifying the NFVi host node
        :return: [other_nodes, nfvi_hosts, mgmt_dict_of_first_nfvi_host]
        :raises Exception: when no node carries *nfvi_role*
        """
        self.file_path = file_path
        nodes = []
        nfvi_host = []
        try:
            cfg = self.read_config_file()
        except IOError as io_error:
            if io_error.errno != errno.ENOENT:
                raise
            # fall back to a path relative to the yardstick root
            self.file_path = os.path.join(YARDSTICK_ROOT_PATH, file_path)
            cfg = self.read_config_file()

        nodes.extend([node for node in cfg["nodes"] if str(node["role"]) != nfvi_role])
        nfvi_host.extend([node for node in cfg["nodes"] if str(node["role"]) == nfvi_role])
        if not nfvi_host:
            # BUGFIX: raising a bare string is a TypeError in Python 3;
            # raise a real exception instead.
            raise Exception("Node role is other than SRIOV")

        host_mgmt = {'user': nfvi_host[0]['user'],
                     'ip': str(IPNetwork(nfvi_host[0]['ip']).ip),
                     'password': nfvi_host[0]['password'],
                     'ssh_port': nfvi_host[0].get('ssh_port', 22),
                     'key_filename': nfvi_host[0].get('key_filename')}

        return [nodes, nfvi_host, host_mgmt]

    @staticmethod
    def get_mac_address(end=0x7f):
        """Generate a random locally-administered MAC in the 52:54:00 range.

        :param end: inclusive upper bound for the fourth octet
        :return: MAC address string "52:54:00:xx:xx:xx"
        """
        mac = [0x52, 0x54, 0x00,
               random.randint(0x00, end),
               random.randint(0x00, 0xff),
               random.randint(0x00, 0xff)]
        mac_address = ':'.join('%02x' % x for x in mac)
        return mac_address

    @staticmethod
    def get_mgmt_ip(connection, mac, cidr, node):
        """Poll the host's neighbor table until *mac* shows up with an IP.

        Pings the whole *cidr* to populate the ARP cache, then greps
        'ip neighbor' for *mac*. Retries up to 10 times, sleeping
        WAIT_FOR_BOOT seconds between attempts.

        :return: the management IP string, or None when never found
        """
        mgmtip = None
        times = 10
        while not mgmtip and times:
            connection.execute("fping -c 1 -g %s > /dev/null 2>&1" % cidr)
            out = connection.execute("ip neighbor | grep '%s'" % mac)[1]
            LOG.info("fping -c 1 -g %s > /dev/null 2>&1", cidr)
            if out.strip():
                mgmtip = str(out.split(" ")[0]).strip()
                # make sure ssh is actually reachable before returning
                client = ssh.SSH.from_node(node, overrides={"ip": mgmtip})
                client.wait()
                break

            time.sleep(WAIT_FOR_BOOT)  # FixMe: How to find if VM is booted?
            times = times - 1
        return mgmtip

    @classmethod
    def wait_for_vnfs_to_start(cls, connection, servers, nodes):
        """Fill in each node's 'ip' once its VNF VM is reachable.

        :param servers: dict of server name -> server config (provides the
            mgmt cidr per node)
        :param nodes: list of node dicts, updated in place
        :return: the updated *nodes* list
        """
        for node in nodes:
            vnf = servers[node["name"]]
            mgmtip = vnf["network_ports"]["mgmt"]["cidr"]
            ip = cls.get_mgmt_ip(connection, node["mac"], mgmtip, node)
            if ip:
                node["ip"] = ip
        return nodes
422
423
class Server(object):
    """ This class handles geting vnf nodes
    """

    @staticmethod
    def build_vnf_interfaces(vnf, ports):
        """Build the interface map for a VNF from its network ports.

        :param vnf: server config dict with a 'network_ports' mapping;
            the 'mgmt' entry supplies the management cidr, every other
            entry names a port in *ports*
        :param ports: dict of port name -> port details (vpci, cidr,
            driver, mac)
        :return: (mgmt_ip, interfaces) — mgmt_ip is None when no 'mgmt'
            port is configured
        """
        interfaces = {}
        # BUGFIX: initialize so a config without a 'mgmt' port returns
        # None instead of raising UnboundLocalError.
        mgmtip = None
        index = 0

        for key, vfs in vnf["network_ports"].items():
            if key == "mgmt":
                mgmtip = str(IPNetwork(vfs['cidr']).ip)
                continue

            vf = ports[vfs[0]]
            ip = IPNetwork(vf['cidr'])
            interfaces[key] = {
                'vpci': vf['vpci'],
                # guest side uses the VF variant of the host driver
                'driver': "%svf" % vf['driver'],
                'local_mac': vf['mac'],
                'dpdk_port_num': index,
                'local_ip': str(ip.ip),
                'netmask': str(ip.netmask),
            }
            index += 1

        return mgmtip, interfaces

    @classmethod
    def generate_vnf_instance(cls, flavor, ports, ip, key, vnf, mac):
        """Assemble the node description dict for one VNF instance.

        :param flavor: flavor dict; may carry 'user', 'key_filename',
            'password'
        :param ports: port details passed through to build_vnf_interfaces
        :param ip: host IP the VNF runs on
        :param key: node name, also used as the role
        :param vnf: server config dict
        :param mac: management MAC address of the VM
        :return: node dict consumable by the rest of yardstick
        """
        mgmtip, interfaces = cls.build_vnf_interfaces(vnf, ports)

        result = {
            "ip": mgmtip,
            "mac": mac,
            "host": ip,
            "user": flavor.get('user', 'root'),
            "interfaces": interfaces,
            "routing_table": [],
            # empty IPv6 routing table
            "nd_route_tbl": [],
            "name": key, "role": key
        }

        # credentials are optional; copy them only when present
        try:
            result['key_filename'] = flavor['key_filename']
        except KeyError:
            pass

        try:
            result['password'] = flavor['password']
        except KeyError:
            pass
        LOG.info(result)
        return result
481
482
class OvsDeploy(object):
    """Deploy OVS with DPDK on a remote host.

    Configuration: ovs_dpdk
    """

    # name of the install script shipped with yardstick
    OVS_DEPLOY_SCRIPT = "ovs_deploy.bash"

    def __init__(self, connection, bin_path, ovs_properties):
        self.connection = connection
        self.bin_path = bin_path
        self.ovs_properties = ovs_properties

    def prerequisite(self):
        """Install the build dependencies needed to compile OVS/DPDK."""
        required = ["git", "build-essential", "pkg-config", "automake",
                    "autotools-dev", "libltdl-dev", "cmake", "libnuma-dev",
                    "libpcap-dev"]
        StandaloneContextHelper.install_req_libs(self.connection, required)

    def ovs_deploy(self):
        """Copy the deploy script to the host and run it with the
        configured OVS/DPDK versions (defaults: 2.6.0 / 16.11.1)."""
        local_script = os.path.join(YARDSTICK_ROOT_PATH,
                                    "yardstick/resources/scripts/install/",
                                    self.OVS_DEPLOY_SCRIPT)
        if not os.path.isfile(local_script):
            return

        self.prerequisite()
        remote_ovs_deploy = os.path.join(self.bin_path, self.OVS_DEPLOY_SCRIPT)
        LOG.info(remote_ovs_deploy)
        self.connection.put(local_script, remote_ovs_deploy)

        # pass the proxy through so the script can fetch sources
        proxy = os.environ.get('http_proxy', '')
        versions = self.ovs_properties.get("version", {})
        ovs_version = versions.get("ovs", "2.6.0")
        dpdk_version = versions.get("dpdk", "16.11.1")

        cmd = "sudo -E %s --ovs='%s' --dpdk='%s' -p='%s'" % (
            remote_ovs_deploy, ovs_version, dpdk_version, proxy)
        self.connection.execute(cmd)
518             self.connection.execute(cmd)