# Copyright (c) 2016-2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import

import os
import re
import time
import uuid
import random
import logging
import errno

from netaddr import IPNetwork
import xml.etree.ElementTree as ET

from yardstick import ssh
from yardstick.common.constants import YARDSTICK_ROOT_PATH
from yardstick.common.yaml_loader import yaml_load
from yardstick.network_services.utils import PciAddress
from yardstick.network_services.helpers.cpu import CpuSysCores
from yardstick.common.utils import write_file

LOG = logging.getLogger(__name__)
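
# libvirt domain XML template used to boot guest VMs; the {placeholders}
# (vm_name, memory, cpuset, vm_image, mac_addr, ...) are filled in by
# Libvirt.build_vm_xml() below.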
VM_TEMPLATE = """
<domain type="kvm">
  <name>{vm_name}</name>
  <uuid>{random_uuid}</uuid>
  <memory unit="MB">{memory}</memory>
  <currentMemory unit="MB">{memory}</currentMemory>
  <memoryBacking>
    <hugepages />
  </memoryBacking>
  <vcpu cpuset='{cpuset}'>{vcpu}</vcpu>
  <os>
    <type arch="x86_64" machine="pc-i440fx-utopic">hvm</type>
    <boot dev="hd" />
  </os>
  <features>
    <acpi />
    <apic />
    <pae />
  </features>
  <cpu mode='host-passthrough'>
    <topology cores="{cpu}" sockets="{socket}" threads="{threads}" />
    <numa>
      <cell id='0' cpus='{numa_cpus}' memory='{memory}' unit='MB' memAccess='shared'/>
    </numa>
  </cpu>
  <clock offset="utc">
    <timer name="rtc" tickpolicy="catchup" />
    <timer name="pit" tickpolicy="delay" />
    <timer name="hpet" present="no" />
  </clock>
  <on_poweroff>destroy</on_poweroff>
  <on_reboot>restart</on_reboot>
  <on_crash>restart</on_crash>
  <devices>
    <emulator>/usr/bin/kvm-spice</emulator>
    <disk device="disk" type="file">
      <driver name="qemu" type="qcow2" />
      <source file="{vm_image}"/>
      <target bus="virtio" dev="vda" />
    </disk>
    <graphics autoport="yes" listen="0.0.0.0" port="-1" type="vnc" />
    <interface type="bridge">
      <mac address='{mac_addr}'/>
      <source bridge="br-int" />
      <model type='virtio'/>
    </interface>
  </devices>
</domain>
"""

# seconds to wait between VM reachability probes in get_mgmt_ip()
WAIT_FOR_BOOT = 30


class Libvirt(object):
    """ This class handles all the libvirt updates to launch a VM
    """

    @staticmethod
    def check_if_vm_exists_and_delete(vm_name, connection):
        cmd_template = "virsh list --name | grep -i %s"
        status = connection.execute(cmd_template % vm_name)[0]
        if status == 0:
            LOG.info("VM '%s' is already present... destroying", vm_name)
            connection.execute("virsh destroy %s" % vm_name)

    @staticmethod
    def virsh_create_vm(connection, cfg):
        err = connection.execute("virsh create %s" % cfg)[0]
        LOG.info("VM create status: %s", err)

    @staticmethod
    def virsh_destroy_vm(vm_name, connection):
        connection.execute("virsh destroy %s" % vm_name)
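
    # Attach a fixed guest-side PCI address to an <interface> element so
    # device numbering inside the VM stays stable across boots.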
    @staticmethod
    def add_interface_address(interface, pci_address):
        vm_pci = ET.SubElement(interface, 'address')
        vm_pci.set('type', 'pci')
        vm_pci.set('domain', '0x%s' % pci_address.domain)
        vm_pci.set('bus', '0x%s' % pci_address.bus)
        vm_pci.set('slot', '0x%s' % pci_address.slot)
        vm_pci.set('function', '0x%s' % pci_address.function)
        return vm_pci
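
    # Plug a DPDK vhost-user port into the domain XML; the socket path must
    # match the dpdkvhostuser ports created by the OVS-DPDK deployment.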
    @classmethod
    def add_ovs_interface(cls, vpath, port_num, vpci, vports_mac, xml):
        vhost_path = '{0}/var/run/openvswitch/dpdkvhostuser{1}'
        root = ET.parse(xml)
        pci_address = PciAddress.parse_address(vpci.strip(), multi_line=True)
        device = root.find('devices')

        interface = ET.SubElement(device, 'interface')
        interface.set('type', 'vhostuser')
        mac = ET.SubElement(interface, 'mac')
        mac.set('address', vports_mac)

        source = ET.SubElement(interface, 'source')
        source.set('type', 'unix')
        source.set('path', vhost_path.format(vpath, port_num))
        source.set('mode', 'client')

        model = ET.SubElement(interface, 'model')
        model.set('type', 'virtio')

        driver = ET.SubElement(interface, 'driver')
        driver.set('queues', '4')

        host = ET.SubElement(driver, 'host')
        host.set('mrg_rxbuf', 'off')

        cls.add_interface_address(interface, pci_address)

        root.write(xml)
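
    # For SR-IOV the VF is passed through as a hostdev interface: <source>
    # carries the VF's host PCI address, while add_interface_address() sets
    # the slot the device will occupy inside the guest.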
    @classmethod
    def add_sriov_interfaces(cls, vm_pci, vf_pci, vfmac, xml):
        root = ET.parse(xml)
        pci_address = PciAddress.parse_address(vf_pci.strip(), multi_line=True)
        device = root.find('devices')

        interface = ET.SubElement(device, 'interface')
        interface.set('managed', 'yes')
        interface.set('type', 'hostdev')

        mac = ET.SubElement(interface, 'mac')
        mac.set('address', vfmac)
        source = ET.SubElement(interface, 'source')

        addr = ET.SubElement(source, "address")
        addr.set('domain', "0x0")
        addr.set('bus', "{0}".format(pci_address.bus))
        addr.set('function', "{0}".format(pci_address.function))
        addr.set('slot', "0x{0}".format(pci_address.slot))
        addr.set('type', "pci")

        pci_vm_address = PciAddress.parse_address(vm_pci.strip(), multi_line=True)
        cls.add_interface_address(interface, pci_vm_address)

        root.write(xml)
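
    # qemu-img creates a copy-on-write overlay, so each VM gets a disposable
    # disk while sharing the read-only base image.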
    @staticmethod
    def create_snapshot_qemu(connection, index, vm_image):
        # build snapshot image
        image = "/var/lib/libvirt/images/%s.qcow2" % index
        connection.execute("rm %s" % image)
        qemu_template = "qemu-img create -f qcow2 -o backing_file=%s %s"
        connection.execute(qemu_template % (vm_image, image))

        return image
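
    # Flavor keys follow the OpenStack extra_specs naming convention
    # (hw:cpu_cores, hw:cpu_sockets, hw:cpu_threads).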
    @classmethod
    def build_vm_xml(cls, connection, flavor, cfg, vm_name, index):
        memory = flavor.get('ram', '4096')
        extra_spec = flavor.get('extra_specs', {})
        cpu = extra_spec.get('hw:cpu_cores', '2')
        socket = extra_spec.get('hw:cpu_sockets', '1')
        threads = extra_spec.get('hw:cpu_threads', '2')
        vcpu = int(cpu) * int(threads)
        numa_cpus = '0-%s' % (vcpu - 1)
        hw_socket = flavor.get('hw_socket', '0')
        cpuset = Libvirt.pin_vcpu_for_perf(connection, vm_name, vcpu, hw_socket)

        mac = StandaloneContextHelper.get_mac_address(0x00)
        image = cls.create_snapshot_qemu(connection, index,
                                         flavor.get("images", None))
        vm_xml = VM_TEMPLATE.format(
            vm_name=vm_name,
            random_uuid=uuid.uuid4(),
            mac_addr=mac,
            memory=memory, vcpu=vcpu, cpu=cpu,
            numa_cpus=numa_cpus,
            socket=socket, threads=threads,
            vm_image=image, cpuset=cpuset)

        write_file(cfg, vm_xml)

        return [vcpu, mac]
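
    # Host tuning for NFV: permit assigned-device interrupts on hosts without
    # interrupt remapping and disable transparent hugepages (guest memory is
    # backed by static hugepages via the domain template instead).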
    @staticmethod
    def update_interrupts_hugepages_perf(connection):
        connection.execute("echo 1 > /sys/module/kvm/parameters/allow_unsafe_assigned_interrupts")
        connection.execute("echo never > /sys/kernel/mm/transparent_hugepage/enabled")
    @classmethod
    def pin_vcpu_for_perf(cls, connection, vm_name, cpu, socket="0"):
        threads = ""
        sys_obj = CpuSysCores(connection)
        soc_cpu = sys_obj.get_core_socket()
        sys_cpu = int(soc_cpu["cores_per_socket"])
        cores = "%s-%s" % (soc_cpu[socket][0], soc_cpu[socket][sys_cpu - 1])
        if int(soc_cpu["thread_per_core"]):
            threads = "%s-%s" % (soc_cpu[socket][sys_cpu], soc_cpu[socket][-1])
        cpuset = "%s,%s" % (cores, threads)
        return cpuset


class StandaloneContextHelper(object):
    """ This class handles all the common code for standalone contexts
    """
    def __init__(self):
        self.file_path = None
        super(StandaloneContextHelper, self).__init__()

    @staticmethod
    def install_req_libs(connection, extra_pkgs=None):
        extra_pkgs = extra_pkgs or []
        pkgs = ["qemu-kvm", "libvirt-bin", "bridge-utils", "numactl", "fping"]
        pkgs.extend(extra_pkgs)
        cmd_template = "dpkg-query -W --showformat='${Status}\\n' \"%s\"|grep 'ok installed'"
        for pkg in pkgs:
            # install only the packages that dpkg does not report as installed
            if connection.execute(cmd_template % pkg)[0]:
                connection.execute("apt-get update")
                connection.execute("apt-get -y install %s" % pkg)
    @staticmethod
    def get_kernel_module(connection, pci, driver):
        if not driver:
            out = connection.execute("lspci -k -s %s" % pci)[1]
            driver = out.split("Kernel modules:").pop().strip()
        return driver

    @classmethod
    def get_nic_details(cls, connection, networks, dpdk_nic_bind):
        for key, ports in networks.items():
            if key == "mgmt":
                continue

            phy_ports = ports['phy_port']
            phy_driver = ports.get('phy_driver', None)
            driver = cls.get_kernel_module(connection, phy_ports, phy_driver)

            # Make sure that ports are bound to kernel drivers e.g. i40e/ixgbe
            bind_cmd = "{dpdk_nic_bind} --force -b {driver} {port}"
            lshw_cmd = "lshw -c network -businfo | grep '{port}'"
            link_show_cmd = "ip -s link show {interface}"

            cmd = bind_cmd.format(dpdk_nic_bind=dpdk_nic_bind,
                                  driver=driver, port=ports['phy_port'])
            connection.execute(cmd)

            out = connection.execute(lshw_cmd.format(port=phy_ports))[1]
            interface = out.split()[1]

            connection.execute(link_show_cmd.format(interface=interface))

            ports.update({
                'interface': str(interface),
                'driver': driver
            })
        LOG.info("%s", networks)

        return networks
    @staticmethod
    def get_virtual_devices(connection, pci):
        cmd = "cat /sys/bus/pci/devices/{0}/virtfn0/uevent"
        output = connection.execute(cmd.format(pci))[1]

        pattern = "PCI_SLOT_NAME=({})".format(PciAddress.PCI_PATTERN_STR)
        m = re.search(pattern, output, re.MULTILINE)

        pf_vfs = {}
        if m:
            pf_vfs = {pci: m.group(1).rstrip()}

        LOG.info("pf_vfs:\n%s", pf_vfs)

        return pf_vfs

    def read_config_file(self):
        """Read from config file"""

        with open(self.file_path) as stream:
            LOG.info("Parsing pod file: %s", self.file_path)
            cfg = yaml_load(stream)
        return cfg

    def parse_pod_file(self, file_path, nfvi_role='Sriov'):
        self.file_path = file_path
        nodes = []
        nfvi_host = []
        try:
            cfg = self.read_config_file()
        except IOError as io_error:
            if io_error.errno != errno.ENOENT:
                raise
            self.file_path = os.path.join(YARDSTICK_ROOT_PATH, file_path)
            cfg = self.read_config_file()

        nodes.extend([node for node in cfg["nodes"] if str(node["role"]) != nfvi_role])
        nfvi_host.extend([node for node in cfg["nodes"] if str(node["role"]) == nfvi_role])
        if not nfvi_host:
            raise RuntimeError("Node role is other than SRIOV")

        host_mgmt = {'user': nfvi_host[0]['user'],
                     'ip': str(IPNetwork(nfvi_host[0]['ip']).ip),
                     'password': nfvi_host[0]['password'],
                     'ssh_port': nfvi_host[0].get('ssh_port', 22),
                     'key_filename': nfvi_host[0].get('key_filename')}

        return [nodes, nfvi_host, host_mgmt]

    @staticmethod
    def get_mac_address(end=0x7f):
        # locally administered MAC in the conventional QEMU/KVM 52:54:00 range
        mac = [0x52, 0x54, 0x00,
               random.randint(0x00, end),
               random.randint(0x00, 0xff),
               random.randint(0x00, 0xff)]
        mac_address = ':'.join('%02x' % x for x in mac)
        return mac_address
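
    # The VM's management IP is not known after boot: ping-sweep the
    # management CIDR with fping, then look the VM's MAC up in the host
    # neighbor table.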
    @staticmethod
    def get_mgmt_ip(connection, mac, cidr, node):
        mgmtip = None
        times = 10
        while not mgmtip and times:
            connection.execute("fping -c 1 -g %s > /dev/null 2>&1" % cidr)
            out = connection.execute("ip neighbor | grep '%s'" % mac)[1]
            LOG.info("fping -c 1 -g %s > /dev/null 2>&1", cidr)
            if out.strip():
                mgmtip = str(out.split(" ")[0]).strip()
                client = ssh.SSH.from_node(node, overrides={"ip": mgmtip})
                client.wait()
                break

            time.sleep(WAIT_FOR_BOOT)  # FixMe: How to find if VM is booted?
            times = times - 1
        return mgmtip

    @classmethod
    def wait_for_vnfs_to_start(cls, connection, servers, nodes):
        for node in nodes:
            vnf = servers[node["name"]]
            mgmtip = vnf["network_ports"]["mgmt"]["cidr"]
            ip = cls.get_mgmt_ip(connection, node["mac"], mgmtip, node)
            if ip:
                node["ip"] = ip
        return nodes


class Server(object):
    """ This class handles getting vnf nodes
    """

    @staticmethod
    def build_vnf_interfaces(vnf, ports):
        interfaces = {}
        index = 0
        mgmtip = None

        for key, vfs in vnf["network_ports"].items():
            if key == "mgmt":
                mgmtip = str(IPNetwork(vfs['cidr']).ip)
                continue

            vf = ports[vfs[0]]
            ip = IPNetwork(vf['cidr'])
            interfaces.update({
                key: {
                    'vpci': vf['vpci'],
                    'driver': "%svf" % vf['driver'],
                    'local_mac': vf['mac'],
                    'dpdk_port_num': index,
                    'local_ip': str(ip.ip),
                    'netmask': str(ip.netmask)
                },
            })
            index = index + 1

        return mgmtip, interfaces
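
    # Illustrative shape of one entry in the interfaces dict built above
    # (values are made up):
    #   'xe0': {'vpci': '0000:00:07.0', 'driver': 'i40evf',
    #           'local_mac': '52:54:00:3a:19:80', 'dpdk_port_num': 0,
    #           'local_ip': '152.16.100.10', 'netmask': '255.255.255.0'}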
    @classmethod
    def generate_vnf_instance(cls, flavor, ports, ip, key, vnf, mac):
        mgmtip, interfaces = cls.build_vnf_interfaces(vnf, ports)

        result = {
            "ip": mgmtip,
            "mac": mac,
            "host": ip,
            "user": flavor.get('user', 'root'),
            "interfaces": interfaces,
            "routing_table": [],
            # empty IPv6 routing table
            "nd_route_tbl": [],
            "name": key, "role": key
        }

        if 'key_filename' in flavor:
            result['key_filename'] = flavor['key_filename']
        if 'password' in flavor:
            result['password'] = flavor['password']

        return result


class OvsDeploy(object):
    """ This class handles deploy of ovs dpdk
    Configuration: ovs_dpdk
    """

    OVS_DEPLOY_SCRIPT = "ovs_deploy.bash"

    def __init__(self, connection, bin_path, ovs_properties):
        self.connection = connection
        self.bin_path = bin_path
        self.ovs_properties = ovs_properties

    def prerequisite(self):
        pkgs = ["git", "build-essential", "pkg-config", "automake",
                "autotools-dev", "libltdl-dev", "cmake", "libnuma-dev",
                "libpcap-dev"]
        StandaloneContextHelper.install_req_libs(self.connection, pkgs)

    def ovs_deploy(self):
        ovs_deploy = os.path.join(YARDSTICK_ROOT_PATH,
                                  "yardstick/resources/scripts/install/",
                                  self.OVS_DEPLOY_SCRIPT)
        if os.path.isfile(ovs_deploy):
            self.prerequisite()
            remote_ovs_deploy = os.path.join(self.bin_path, self.OVS_DEPLOY_SCRIPT)
            LOG.info(remote_ovs_deploy)
            self.connection.put(ovs_deploy, remote_ovs_deploy)

            http_proxy = os.environ.get('http_proxy', '')
            ovs_details = self.ovs_properties.get("version", {})
            ovs = ovs_details.get("ovs", "2.6.0")
            dpdk = ovs_details.get("dpdk", "16.11.1")

            cmd = "sudo -E %s --ovs='%s' --dpdk='%s' -p='%s'" % (
                remote_ovs_deploy, ovs, dpdk, http_proxy)
            self.connection.execute(cmd)
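
# Minimal usage sketch (illustrative only; `flavor` and "pod.yaml" are
# assumptions, not part of this module):
#
#   helper = StandaloneContextHelper()
#   nodes, nfvi_host, host_mgmt = helper.parse_pod_file("pod.yaml")
#   conn = ssh.SSH.from_node(host_mgmt)
#   StandaloneContextHelper.install_req_libs(conn)
#   Libvirt.check_if_vm_exists_and_delete("vm-0", conn)
#   Libvirt.build_vm_xml(conn, flavor, "/tmp/vm-0.xml", "vm-0", 0)
#   Libvirt.virsh_create_vm(conn, "/tmp/vm-0.xml")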