1 # Copyright (c) 2016-2017 Intel Corporation
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
7 # http://www.apache.org/licenses/LICENSE-2.0
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
23 from netaddr import IPNetwork
24 import xml.etree.ElementTree as ET
26 from yardstick import ssh
27 from yardstick.common import constants
28 from yardstick.common import exceptions
29 from yardstick.common import yaml_loader
30 from yardstick.network_services.utils import PciAddress
31 from yardstick.network_services.helpers.cpu import CpuSysCores
# Module-level logger for the standalone-context helper utilities.
LOG = logging.getLogger(__name__)
38 <name>{vm_name}</name>
39 <uuid>{random_uuid}</uuid>
40 <memory unit="MB">{memory}</memory>
41 <currentMemory unit="MB">{memory}</currentMemory>
45 <vcpu cpuset='{cpuset}'>{vcpu}</vcpu>
48 <type arch="x86_64" machine="pc-i440fx-xenial">hvm</type>
56 <cpu mode='host-passthrough'>
57 <topology cores="{cpu}" sockets="{socket}" threads="{threads}" />
59 <cell id='0' cpus='{numa_cpus}' memory='{memory}' unit='MB' memAccess='shared'/>
63 <timer name="rtc" tickpolicy="catchup" />
64 <timer name="pit" tickpolicy="delay" />
65 <timer name="hpet" present="no" />
67 <on_poweroff>destroy</on_poweroff>
68 <on_reboot>restart</on_reboot>
69 <on_crash>restart</on_crash>
71 <emulator>/usr/bin/kvm-spice</emulator>
72 <disk device="disk" type="file">
73 <driver name="qemu" type="qcow2" />
74 <source file="{vm_image}"/>
75 <target bus="virtio" dev="vda" />
77 <graphics autoport="yes" listen="0.0.0.0" port="-1" type="vnc" />
78 <interface type="bridge">
79 <mac address='{mac_addr}'/>
80 <source bridge="br-int" />
81 <model type='virtio'/>
87 <target type='serial' port='0'/>
93 USER_DATA_TEMPLATE = """
94 cat > {user_file} <<EOF
96 preserve_hostname: false
103 NETWORK_DATA_TEMPLATE = """
104 cat > {network_file} <<EOF
110 macaddress: {mac_address}
class Libvirt(object):
    """ This class handles all the libvirt updates to launch VM

    All helpers shell out through an SSH ``connection`` object and drive
    ``virsh``/``qemu-img`` on the remote host, or manipulate the domain XML
    with ``xml.etree.ElementTree``.
    """
    # NOTE(review): several lines of this class appear to be elided in this
    # copy (decorator lines, 'if status:' guards, 'return' statements,
    # signature continuations). Comments below flag each suspect spot —
    # confirm against the canonical source before relying on behavior.

    def check_if_vm_exists_and_delete(vm_name, connection):
        # Destroy a leftover VM with the same name before (re)creating it.
        # NOTE(review): no 'self'/'cls' parameter — a @staticmethod decorator
        # line is presumably elided above this def.
        cmd_template = "virsh list --name | grep -i %s"
        status = connection.execute(cmd_template % vm_name)[0]
        # NOTE(review): a conditional on 'status' (grep matched the VM name)
        # appears elided here; as written the destroy runs unconditionally.
        LOG.info("VM '%s' is already present... destroying", vm_name)
        connection.execute("virsh destroy %s" % vm_name)

    def virsh_create_vm(connection, cfg):
        """Create (start) a transient libvirt domain from XML file ``cfg``."""
        LOG.info('VM create, XML config: %s', cfg)
        status, _, error = connection.execute('virsh create %s' % cfg)
        # NOTE(review): an 'if status:' guard appears elided — raising
        # unconditionally would fail even on success.
        raise exceptions.LibvirtCreateError(error=error)

    def virsh_destroy_vm(vm_name, connection):
        """Destroy (power off) domain ``vm_name``; failure is only warned."""
        LOG.info('VM destroy, VM name: %s', vm_name)
        status, _, error = connection.execute('virsh destroy %s' % vm_name)
        # NOTE(review): an 'if status:' guard appears elided here.
        LOG.warning('Error destroying VM %s. Error: %s', vm_name, error)

    def _add_interface_address(interface, pci_address):
        """Add a PCI 'address' XML node

        <address type='pci' domain='0x0000' bus='0x00' slot='0x08'
         function='0x0'/>

        Reference: https://software.intel.com/en-us/articles/
        configure-sr-iov-network-virtual-functions-in-linux-kvm
        """
        vm_pci = ET.SubElement(interface, 'address')
        vm_pci.set('type', 'pci')
        # PciAddress fields are hex strings without the '0x' prefix, so the
        # prefix is added here.
        vm_pci.set('domain', '0x{}'.format(pci_address.domain))
        vm_pci.set('bus', '0x{}'.format(pci_address.bus))
        vm_pci.set('slot', '0x{}'.format(pci_address.slot))
        vm_pci.set('function', '0x{}'.format(pci_address.function))

    def add_ovs_interface(cls, vpath, port_num, vpci, vports_mac, xml_str,
        # NOTE(review): the continuation of this signature appears elided —
        # a 'queues' parameter is referenced in the body below.
        """Add a DPDK OVS 'interface' XML node in 'devices' node

        <interface type='vhostuser'>
         <mac address='00:00:00:00:00:01'/>
         <source type='unix' path='/usr/local/var/run/openvswitch/
          dpdkvhostuser0' mode='client'/>
         <model type='virtio'/>
          <host mrg_rxbuf='off'/>
         <address type='pci' domain='0x0000' bus='0x00' slot='0x03'

        Reference: http://docs.openvswitch.org/en/latest/topics/dpdk/
        """
        # vhost-user socket path follows OVS naming: dpdkvhostuser<port_num>.
        vhost_path = ('{0}/var/run/openvswitch/dpdkvhostuser{1}'.
                      format(vpath, port_num))
        root = ET.fromstring(xml_str)
        pci_address = PciAddress(vpci.strip())
        device = root.find('devices')

        interface = ET.SubElement(device, 'interface')
        interface.set('type', 'vhostuser')
        mac = ET.SubElement(interface, 'mac')
        mac.set('address', vports_mac)

        # OVS acts as the server; the VM connects as vhost-user client.
        source = ET.SubElement(interface, 'source')
        source.set('type', 'unix')
        source.set('path', vhost_path)
        source.set('mode', 'client')

        model = ET.SubElement(interface, 'model')
        model.set('type', 'virtio')

        driver = ET.SubElement(interface, 'driver')
        driver.set('queues', str(queues))

        # Disable mergeable RX buffers for predictable DPDK performance.
        host = ET.SubElement(driver, 'host')
        host.set('mrg_rxbuf', 'off')

        cls._add_interface_address(interface, pci_address)

        return ET.tostring(root)

    def add_sriov_interfaces(cls, vm_pci, vf_pci, vf_mac, xml_str):
        """Add a SR-IOV 'interface' XML node in 'devices' node

        <interface type='hostdev' managed='yes'>
         <address type='pci' domain='0x0000' bus='0x00' slot='0x03'
         <mac address='52:54:00:6d:90:02'>
         <address type='pci' domain='0x0000' bus='0x02' slot='0x04'

        Reference: https://access.redhat.com/documentation/en-us/
        red_hat_enterprise_linux/6/html/
        virtualization_host_configuration_and_guest_installation_guide/
        sect-virtualization_host_configuration_and_guest_installation_guide
        -sr_iov-how_sr_iov_libvirt_works
        """
        root = ET.fromstring(xml_str)
        device = root.find('devices')

        interface = ET.SubElement(device, 'interface')
        interface.set('managed', 'yes')
        interface.set('type', 'hostdev')

        mac = ET.SubElement(interface, 'mac')
        mac.set('address', vf_mac)

        # The host VF (vf_pci) is the <source> of the hostdev interface...
        source = ET.SubElement(interface, 'source')
        pci_address = PciAddress(vf_pci.strip())
        cls._add_interface_address(source, pci_address)

        # ...and vm_pci is the address the device gets inside the guest.
        pci_vm_address = PciAddress(vm_pci.strip())
        cls._add_interface_address(interface, pci_vm_address)

        return ET.tostring(root)

    def create_snapshot_qemu(connection, index, base_image):
        """Create the snapshot image for a VM using a base image

        :param connection: SSH connection to the remote host
        :param index: index of the VM to be spawn
        :param base_image: path of the VM base image in the remote host
        :return: snapshot image path
        """
        vm_image = '/var/lib/libvirt/images/%s.qcow2' % index
        # Remove any stale snapshot from a previous run (best effort).
        connection.execute('rm -- "%s"' % vm_image)
        status, _, _ = connection.execute('test -r %s' % base_image)
        # NOTE(review): an 'if status:' guard appears elided — the local
        # readability check and upload should only happen when the base
        # image is missing on the remote host.
        if not os.access(base_image, os.R_OK):
            raise exceptions.LibvirtQemuImageBaseImageNotPresent(
                vm_image=vm_image, base_image=base_image)
        # NOTE(ralonsoh): done in two steps to avoid root permission
        # problems when writing to the final location directly.
        LOG.info('Copy %s from execution host to remote host', base_image)
        file_name = os.path.basename(os.path.normpath(base_image))
        connection.put_file(base_image, '/tmp/%s' % file_name)
        status, _, error = connection.execute(
            'mv -- "/tmp/%s" "%s"' % (file_name, base_image))
        # NOTE(review): an 'if status:' guard appears elided before this
        # raise.
        raise exceptions.LibvirtQemuImageCreateError(
            vm_image=vm_image, base_image=base_image, error=error)

        # Create a qcow2 snapshot backed by the base image (copy-on-write).
        LOG.info('Convert image %s to %s', base_image, vm_image)
        qemu_cmd = ('qemu-img create -f qcow2 -o backing_file=%s %s' %
                    (base_image, vm_image))
        status, _, error = connection.execute(qemu_cmd)
        # NOTE(review): an 'if status:' guard and the final
        # 'return vm_image' appear elided in this copy.
        raise exceptions.LibvirtQemuImageCreateError(
            vm_image=vm_image, base_image=base_image, error=error)

    def build_vm_xml(cls, connection, flavor, vm_name, index, cdrom_img):
        """Build the XML from the configuration parameters"""
        memory = flavor.get('ram', '4096')
        extra_spec = flavor.get('extra_specs', {})
        cpu = extra_spec.get('hw:cpu_cores', '2')
        socket = extra_spec.get('hw:cpu_sockets', '1')
        threads = extra_spec.get('hw:cpu_threads', '2')
        # Total vCPUs = cores * threads; sockets is not multiplied in here.
        vcpu = int(cpu) * int(threads)
        numa_cpus = '0-%s' % (vcpu - 1)
        hw_socket = flavor.get('hw_socket', '0')
        cpuset = Libvirt.pin_vcpu_for_perf(connection, hw_socket)

        cputune = extra_spec.get('cputune', '')
        mac = StandaloneContextHelper.get_mac_address(0x00)
        image = cls.create_snapshot_qemu(connection, index,
                                         flavor.get("images", None))
        vm_xml = VM_TEMPLATE.format(
            random_uuid=uuid.uuid4(),
            memory=memory, vcpu=vcpu, cpu=cpu,
            socket=socket, threads=threads,
            vm_image=image, cpuset=cpuset, cputune=cputune)
        # NOTE(review): the vm_name/numa_cpus/mac_addr format kwargs and the
        # final 'return' appear elided in this copy ('mac' and 'numa_cpus'
        # are otherwise unused).

        vm_xml = Libvirt.add_cdrom(cdrom_img, vm_xml)

    def update_interrupts_hugepages_perf(connection):
        # Allow unsafe assigned interrupts (needed by some device
        # passthrough setups) and disable transparent hugepages for
        # predictable performance. Both are best-effort: status is ignored.
        connection.execute("echo 1 > /sys/module/kvm/parameters/allow_unsafe_assigned_interrupts")
        connection.execute("echo never > /sys/kernel/mm/transparent_hugepage/enabled")

    def pin_vcpu_for_perf(cls, connection, socket='0'):
        # Build a cpuset string pinning the VM to the cores (plus their
        # hyper-threaded siblings, when present) of the requested host socket.
        sys_obj = CpuSysCores(connection)
        soc_cpu = sys_obj.get_core_socket()
        sys_cpu = int(soc_cpu["cores_per_socket"])
        # Physical cores: first .. last entry of the first cores_per_socket
        # CPUs listed for this socket.
        cores = "%s-%s" % (soc_cpu[socket][0], soc_cpu[socket][sys_cpu - 1])
        if int(soc_cpu["thread_per_core"]) > 1:
            # HT siblings occupy the second half of the socket's CPU list.
            threads = "%s-%s" % (soc_cpu[socket][sys_cpu], soc_cpu[socket][-1])
            cpuset = "%s,%s" % (cores, threads)
        # NOTE(review): the non-hyperthreaded else-branch and the
        # 'return cpuset' appear elided in this copy.

    def write_file(cls, file_name, xml_str):
        """Dump a XML string to a file"""
        root = ET.fromstring(xml_str)
        et = ET.ElementTree(element=root)
        et.write(file_name, encoding='utf-8', method='xml')

    def add_cdrom(cls, file_path, xml_str):
        """Add a CD-ROM disk XML node in 'devices' node

        <disk type='file' device='cdrom'>
          <driver name='qemu' type='raw'/>
          <source file='/var/lib/libvirt/images/data.img'/>
        """
        root = ET.fromstring(xml_str)
        device = root.find('devices')

        disk = ET.SubElement(device, 'disk')
        disk.set('type', 'file')
        disk.set('device', 'cdrom')

        driver = ET.SubElement(disk, 'driver')
        driver.set('name', 'qemu')
        driver.set('type', 'raw')

        source = ET.SubElement(disk, 'source')
        source.set('file', file_path)

        target = ET.SubElement(disk, 'target')
        target.set('dev', 'hdb')

        # Mark the CD-ROM device read-only.
        ET.SubElement(disk, 'readonly')
        return ET.tostring(root)

    def gen_cdrom_image(connection, file_path, vm_name, vm_user, key_filename, mac, ip):
        """Generate ISO image for CD-ROM """
        # cloud-init 'users' entry: user name plus authorized public key;
        # non-root users additionally get passwordless sudo.
        user_config = [" - name: {user_name}",
                       " ssh_authorized_keys:",
        # NOTE(review): the key line and the closing bracket of this list
        # appear elided in this copy.
        if vm_user != "root":
            user_config.append(" sudo: ALL=(ALL) NOPASSWD:ALL")

        # NoCloud datasource files staged in /tmp on the remote host.
        meta_data = "/tmp/meta-data"
        user_data = "/tmp/user-data"
        network_data = "/tmp/network-config"
        # Read the public half of the generated key pair.
        with open(".".join([key_filename, "pub"]), "r") as pub_key_file:
            pub_key_str = pub_key_file.read().rstrip()
        user_conf = os.linesep.join(user_config).format(pub_key_str=pub_key_str, user_name=vm_user)

        # NOTE(review): the opener of this command list (e.g. 'cmd_lst = [')
        # appears elided, as do continuations of several entries below.
            "touch %s" % meta_data,
            USER_DATA_TEMPLATE.format(user_file=user_data, host=vm_name, user_config=user_conf),
            NETWORK_DATA_TEMPLATE.format(network_file=network_data, mac_address=mac,
            "genisoimage -output {0} -volid cidata -joliet -r {1} {2} {3}".format(file_path,
            "rm {0} {1} {2}".format(meta_data, user_data, network_data),
        # NOTE(review): a 'for cmd in ...:' loop opener appears elided here.
        status, _, error = connection.execute(cmd)
        # NOTE(review): an 'if status:' guard appears elided before this
        # raise.
        raise exceptions.LibvirtQemuImageCreateError(error=error)
class StandaloneContextHelper(object):
    """ This class handles all the common code for standalone
    contexts: host package setup, NIC discovery and VM reachability helpers.
    """
    # NOTE(review): decorator lines (@staticmethod/@classmethod) and several
    # interior lines appear elided throughout this class in this copy; the
    # suspect spots are flagged with NOTE(review) comments below.

    # NOTE(review): the 'def __init__(self):' line appears elided above
    # these two statements.
        self.file_path = None
        super(StandaloneContextHelper, self).__init__()

    def install_req_libs(connection, extra_pkgs=None):
        # Install the required host packages (apt-based distro assumed),
        # skipping packages dpkg already reports as installed.
        extra_pkgs = extra_pkgs or []
        pkgs = ["qemu-kvm", "libvirt-bin", "bridge-utils", "numactl", "fping", "genisoimage"]
        pkgs.extend(extra_pkgs)
        cmd_template = "dpkg-query -W --showformat='${Status}\\n' \"%s\"|grep 'ok installed'"
        # NOTE(review): a 'for pkg in pkgs:' loop opener appears elided
        # above this check.
        if connection.execute(cmd_template % pkg)[0]:
            connection.execute("apt-get update")
            connection.execute("apt-get -y install %s" % pkg)

    def get_kernel_module(connection, pci, driver):
        # Resolve the kernel module for a PCI device via 'lspci -k'.
        # NOTE(review): a guard (likely 'if not driver:') appears elided —
        # as shown, the caller-supplied driver is always overwritten.
        out = connection.execute("lspci -k -s %s" % pci)[1]
        driver = out.split("Kernel modules:").pop().strip()
        # NOTE(review): 'return driver' appears elided.

    def get_nic_details(cls, connection, networks, dpdk_devbind):
        # For each configured physical port: (re)bind it to its kernel
        # driver and collect interface name details.
        for key, ports in networks.items():
            phy_ports = ports['phy_port']
            phy_driver = ports.get('phy_driver', None)
            driver = cls.get_kernel_module(connection, phy_ports, phy_driver)

            # Make sure that ports are bound to kernel drivers e.g. i40e/ixgbe
            bind_cmd = "{dpdk_devbind} --force -b {driver} {port}"
            lshw_cmd = "lshw -c network -businfo | grep '{port}'"
            link_show_cmd = "ip -s link show {interface}"

            cmd = bind_cmd.format(dpdk_devbind=dpdk_devbind,
                                  driver=driver, port=ports['phy_port'])
            connection.execute(cmd)

            # Second whitespace-separated lshw column is the interface name.
            out = connection.execute(lshw_cmd.format(port=phy_ports))[1]
            interface = out.split()[1]

            connection.execute(link_show_cmd.format(interface=interface))
            # NOTE(review): the dict-update call wrapping the entries below
            # (and its remaining keys) appears elided.
                'interface': str(interface),
        # NOTE(review): a 'return networks' appears elided.

    def get_virtual_devices(connection, pci):
        # Read the uevent of the first VF (virtfn0) under the given PF.
        cmd = "cat /sys/bus/pci/devices/{0}/virtfn0/uevent"
        output = connection.execute(cmd.format(pci))[1]

        pattern = "PCI_SLOT_NAME=({})".format(PciAddress.PCI_PATTERN_STR)
        m = re.search(pattern, output, re.MULTILINE)
        # NOTE(review): an 'if m:' guard appears elided — m.group(1) would
        # raise AttributeError on no match.
        pf_vfs = {pci: m.group(1).rstrip()}

        LOG.info("pf_vfs:\n%s", pf_vfs)
        # NOTE(review): 'return pf_vfs' appears elided.

    def parse_pod_file(self, file_path, nfvi_role='Sriov'):
        # Parse a pod YAML file, splitting nodes into the NFVi host (the
        # node matching nfvi_role) and the rest.
        self.file_path = file_path
        # NOTE(review): initialisation of 'nodes'/'nfvi_host' and the 'try:'
        # opener appear elided in this copy.
            cfg = yaml_loader.read_yaml_file(self.file_path)
        except IOError as io_error:
            if io_error.errno != errno.ENOENT:
                # NOTE(review): the re-raise appears elided here.
            # Fall back to the path relative to the yardstick root.
            self.file_path = os.path.join(constants.YARDSTICK_ROOT_PATH,
            # NOTE(review): the second os.path.join argument appears elided.
            cfg = yaml_loader.read_yaml_file(self.file_path)

        nodes.extend([node for node in cfg["nodes"] if str(node["role"]) != nfvi_role])
        nfvi_host.extend([node for node in cfg["nodes"] if str(node["role"]) == nfvi_role])
        # NOTE(review): an 'if not nfvi_host:' guard appears elided. Also,
        # calling a string ('raise("...")') is invalid — this line would
        # raise TypeError, not a meaningful exception; it should raise a
        # proper Exception subclass.
        raise("Node role is other than SRIOV")

        # Management access details for the NFVi host.
        host_mgmt = {'user': nfvi_host[0]['user'],
                     'ip': str(IPNetwork(nfvi_host[0]['ip']).ip),
                     'password': nfvi_host[0]['password'],
                     'ssh_port': nfvi_host[0].get('ssh_port', 22),
                     'key_filename': nfvi_host[0].get('key_filename')}

        return [nodes, nfvi_host, host_mgmt]

    def get_mac_address(end=0x7f):
        # Locally-administered QEMU/KVM OUI 52:54:00 plus three random
        # bytes; 'end' caps the first random byte.
        mac = [0x52, 0x54, 0x00,
               random.randint(0x00, end),
               random.randint(0x00, 0xff),
               random.randint(0x00, 0xff)]
        mac_address = ':'.join('%02x' % x for x in mac)
        # NOTE(review): 'return mac_address' appears elided.

    def get_mgmt_ip(connection, mac, cidr, node):
        # Ping-sweep the CIDR so the VM's MAC appears in the neighbor table,
        # then poll 'ip neighbor' for the IP matching that MAC.
        # NOTE(review): initialisation of 'mgmtip' and 'times' appears
        # elided above this loop.
        while not mgmtip and times:
            connection.execute("fping -c 1 -g %s > /dev/null 2>&1" % cidr)
            out = connection.execute("ip neighbor | grep '%s'" % mac)[1]
            LOG.info("fping -c 1 -g %s > /dev/null 2>&1", cidr)
            # NOTE(review): a guard on 'out' being non-empty appears elided.
            mgmtip = str(out.split(" ")[0]).strip()
            client = ssh.SSH.from_node(node, overrides={"ip": mgmtip})
            # NOTE(review): client wait/retry bookkeeping appears elided.
        time.sleep(WAIT_FOR_BOOT)  # FixMe: How to find if VM is booted?
        # NOTE(review): 'return mgmtip' appears elided; WAIT_FOR_BOOT is a
        # module-level constant not visible in this copy.

    def wait_for_vnfs_to_start(cls, connection, servers, nodes):
        # Block until each VNF VM's management IP is reachable.
        # NOTE(review): the 'for node in nodes:' loop opener appears elided.
            vnf = servers[node["name"]]
            mgmtip = vnf["network_ports"]["mgmt"]["cidr"]
            ip = cls.get_mgmt_ip(connection, node["mac"], mgmtip, node)
            # NOTE(review): the node update using 'ip' and the return
            # statement appear elided.

    def check_update_key(cls, connection, node, vm_name, id_name, cdrom_img, mac):
        # Generate public/private keys if private key file is not provided
        user_name = node.get('user')
        # NOTE(review): an 'if not user_name:' guard appears elided above
        # the root fallback.
            node['user'] = 'root'
            user_name = node.get('user')
        if not node.get('key_filename'):
            # Per-VM key name: yardstick_key-<id_name>-<vm_name>.
            key_filename = ''.join(
                [constants.YARDSTICK_ROOT_PATH,
                 'yardstick/resources/files/yardstick_key-',
                 id_name, '-', vm_name])
            ssh.SSH.gen_keys(key_filename)
            node['key_filename'] = key_filename
        # Update image with public key
        key_filename = node.get('key_filename')
        ip_netmask = "{0}/{1}".format(node.get('ip'), node.get('netmask'))
        # Normalise the dotted netmask to a prefix length.
        ip_netmask = "{0}/{1}".format(node.get('ip'),
                                      IPNetwork(ip_netmask).prefixlen)
        Libvirt.gen_cdrom_image(connection, cdrom_img, vm_name, user_name, key_filename, mac,
        # NOTE(review): the trailing call argument(s) and 'return node'
        # appear elided in this copy.
class Server(object):
    """ This class handles getting vnf nodes:
    it builds the per-interface and management-network node dicts that the
    framework uses to reach a VNF VM.
    """
    # NOTE(review): decorator lines and several interior lines (loop/dict
    # openers, guards, returns) appear elided in this copy; suspect spots
    # are flagged below.

    def build_vnf_interfaces(vnf, ports):
        # Build the interface dict for each VF of the VNF and capture the
        # management CIDR.
        # NOTE(review): initialisation of 'interfaces'/'index' appears
        # elided above this loop.
        for key, vfs in vnf["network_ports"].items():
            # NOTE(review): the branch separating the 'mgmt' port from data
            # ports appears elided.
            mgmt_cidr = IPNetwork(vfs['cidr'])
            # NOTE(review): lines resolving 'vf' from 'ports' appear elided.
            ip = IPNetwork(vf['cidr'])
            # NOTE(review): the 'interfaces[key] = {' opener and some keys
            # appear elided around this dict body.
                'driver': "%svf" % vf['driver'],
                'local_mac': vf['mac'],
                'dpdk_port_num': index,
                'local_ip': str(ip.ip),
                'netmask': str(ip.netmask)

        return mgmt_cidr, interfaces

    def generate_vnf_instance(cls, flavor, ports, ip, key, vnf, mac):
        # Assemble the node description dict for one VNF instance.
        mgmt_cidr, interfaces = cls.build_vnf_interfaces(vnf, ports)
        # NOTE(review): the 'result = {' opener appears elided above these
        # entries.
            "ip": str(mgmt_cidr.ip),
            "netmask": str(mgmt_cidr.netmask),
            "user": flavor.get('user', 'root'),
            "interfaces": interfaces,
            # empty IPv6 routing table
            "name": key, "role": key
        # NOTE(review): the guards around the key_filename/password
        # assignments below, and the final 'return result', appear elided.
            result['key_filename'] = flavor['key_filename']
            result['password'] = flavor['password']
class OvsDeploy(object):
    """ This class handles deploy of ovs dpdk
    Configuration: ovs_dpdk
    """
    # Helper script shipped with yardstick and executed on the remote host.
    OVS_DEPLOY_SCRIPT = "ovs_deploy.bash"

    def __init__(self, connection, bin_path, ovs_properties):
        # connection: SSH connection to the target host;
        # bin_path: remote directory for staged scripts;
        # ovs_properties: 'ovs_dpdk' config section (versions, etc.).
        self.connection = connection
        self.bin_path = bin_path
        self.ovs_properties = ovs_properties

    def prerequisite(self):
        # Build dependencies needed to compile OVS/DPDK from source.
        pkgs = ["git", "build-essential", "pkg-config", "automake",
                "autotools-dev", "libltdl-dev", "cmake", "libnuma-dev",
        # NOTE(review): the tail of this package list (and closing bracket)
        # appears elided in this copy.
        StandaloneContextHelper.install_req_libs(self.connection, pkgs)

    def ovs_deploy(self):
        # Copy the deploy script to the remote host and run it with the
        # requested OVS/DPDK versions.
        ovs_deploy = os.path.join(constants.YARDSTICK_ROOT_PATH,
                                  "yardstick/resources/scripts/install/",
                                  self.OVS_DEPLOY_SCRIPT)
        if os.path.isfile(ovs_deploy):
            remote_ovs_deploy = os.path.join(self.bin_path, self.OVS_DEPLOY_SCRIPT)
            LOG.info(remote_ovs_deploy)
            self.connection.put(ovs_deploy, remote_ovs_deploy)

            # Propagate the execution host's proxy so the remote build can
            # download sources; default versions used when not configured.
            http_proxy = os.environ.get('http_proxy', '')
            ovs_details = self.ovs_properties.get("version", {})
            ovs = ovs_details.get("ovs", "2.6.0")
            dpdk = ovs_details.get("dpdk", "16.11.1")

            # 'sudo -E' keeps http_proxy in the environment.
            cmd = "sudo -E %s --ovs='%s' --dpdk='%s' -p='%s'" % (remote_ovs_deploy,
                                                                 ovs, dpdk, http_proxy)
            exit_status, _, stderr = self.connection.execute(cmd)
            # NOTE(review): an 'if exit_status:' guard appears elided before
            # this raise.
            raise exceptions.OVSDeployError(stderr=stderr)