From: Jiang, Yunhong Date: Tue, 23 Aug 2016 22:17:42 +0000 (+0000) Subject: Merge "OPNFV KVM4NFV CICD: Scripts for creating Rpms & Debians for Kernel, Qemu" X-Git-Tag: danube.1.0~51 X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?a=commitdiff_plain;h=76c2ed6659d79ee83c8fc4f00e955268779db826;hp=7552c26370d38ef5dd182682a0d3bf096661fe0e;p=kvmfornfv.git Merge "OPNFV KVM4NFV CICD: Scripts for creating Rpms & Debians for Kernel, Qemu" --- diff --git a/ci/envs/cyclictest.sh b/ci/envs/cyclictest.sh new file mode 100755 index 000000000..c261fe05d --- /dev/null +++ b/ci/envs/cyclictest.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +########################################################### +## Invoking this script from ubuntu docker container runs +## cyclictest through yardstick +########################################################### + +pod_config='/opt/pod.yaml' +cyclictest_context_file='/opt/cyclictest-node-context.yaml' + +if [ ! -f ${pod_config} ] ; then + echo "file ${pod_config} not found" + exit 1 +fi + +if [ ! -f ${cyclictest_context_file} ] ; then + echo "file ${cyclictest_context_file} not found" + exit 1 +fi + +#setting up of image for launching guest vm. +sudo ssh root@10.2.117.23 "cp /root/images/guest1.qcow2 /root/" + +#Running cyclictest through yardstick +yardstick -d task start ${cyclictest_context_file} +chmod 777 /tmp/yardstick.out +cat /tmp/yardstick.out > /opt/yardstick.out diff --git a/ci/envs/guest-setup0.sh b/ci/envs/guest-setup0.sh index 490bd570d..4f9eaa430 100755 --- a/ci/envs/guest-setup0.sh +++ b/ci/envs/guest-setup0.sh @@ -9,27 +9,26 @@ ############################################################################### -rpmdir=${1:-"/root/workspace/"} -rpmpat="kernel-4.1*.rpm" -rpm -ihv ${rpmdir}/rt-tests-0.96-1.el7.centos.x86_64.rpm +rpmdir=${1:-"/root/workspace/rpm"} +rpmpat="kernel-4.4*.rpm" guest_isolcpus=1 -# The script's caller should passing the rpm directory that is built out from +# The script's caller should passing the rpm directory that is built out from # build.sh. The default rpmdir is the one used by yardstick scripts. 
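# A hypothetical invocation, for illustration only (the directory shown is the
# script's own default; pass whichever directory build.sh actually produced):
#   ./guest-setup0.sh /root/workspace/rpm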
install_kernel () { # Install the kernel rpm filenum=`ls -l ${rpmdir}/${rpmpat} |wc -l` if [ $filenum -eq 0 ] then - echo "No kernel rpm found in workspace/rpm" - exit 1 + echo "No kernel rpm found in workspace/rpm" + exit 1 elif [ $filenum -gt 1 ] then - echo "Multiple kernel rpm found in workspace/rpm" - exit 1 + echo "Multiple kernel rpm found in workspace/rpm" + exit 1 else - krpm=`find "${rpmdir}" -name "${rpmpat}"` - rpm -ihv $krpm + krpm=`find "${rpmdir}" -name "${rpmpat}"` + rpm -ihv $krpm fi } diff --git a/ci/envs/host-run-qemu.sh b/ci/envs/host-run-qemu.sh index c7a2fecc6..487aebe66 100755 --- a/ci/envs/host-run-qemu.sh +++ b/ci/envs/host-run-qemu.sh @@ -18,14 +18,20 @@ cpumask () { qmp_sock="/tmp/qmp-sock-$$" -${qemu} -smp ${guest_cpus} -monitor unix:${qmp_sock},server,nowait -daemonize \ - -cpu host,migratable=off,+invtsc,+tsc-deadline,pmu=off \ - -realtime mlock=on -mem-prealloc -enable-kvm -m 1G \ - -mem-path /mnt/hugetlbfs-1g \ - -drive file=/root/workspace/image/guest.img,cache=none,aio=threads \ - -netdev user,id=guest0,hostfwd=tcp::5555-:22 \ - -device virtio-net-pci,netdev=guest0 \ - -nographic -serial /dev/null -parallel /dev/null +#${qemu} -smp ${guest_cpus} -monitor unix:${qmp_sock},server,nowait -daemonize \ +# -cpu host,migratable=off,+invtsc,+tsc-deadline,pmu=off \ +# -realtime mlock=on -mem-prealloc -enable-kvm -m 1G \ +# -mem-path /mnt/hugetlbfs-1g \ +# -drive file=/root/minimal-centos1.qcow2,cache=none,aio=threads \ +# -netdev user,id=guest0,hostfwd=tcp:10.2.117.23:5555-:22 \ +# -device virtio-net-pci,netdev=guest0 \ +# -nographic -serial /dev/null -parallel /dev/null + +${qemu} -smp ${guest_cpus} -drive file=/root/guest1.qcow2 -daemonize \ + -netdev user,id=net0,hostfwd=tcp:10.2.117.23:5555-:22 \ + -realtime mlock=on -mem-prealloc -enable-kvm -m 1G \ + -mem-path /mnt/hugetlbfs-1g \ + -device virtio-net-pci,netdev=net0 \ i=0 for c in `echo ${host_isolcpus} | sed 's/,/ /g'` ; do diff --git a/ci/envs/host-setup0.sh b/ci/envs/host-setup0.sh index 79d1f585a..28c49b8b8 100755 --- a/ci/envs/host-setup0.sh +++ b/ci/envs/host-setup0.sh @@ -12,7 +12,7 @@ source host-config rpmdir=${1:-"/root/workspace/rpm/"} -rpmpat="kernel-4.1*.rpm" +rpmpat="kernel-4.4*.rpm" config_grub () { key=$1 diff --git a/ci/test_kvmfornfv.sh b/ci/test_kvmfornfv.sh new file mode 100755 index 000000000..45d1ea252 --- /dev/null +++ b/ci/test_kvmfornfv.sh @@ -0,0 +1,54 @@ +#!/bin/bash + +############################################################# +## !!! The original test_kvmfornfv.sh is removed because it +## break the verification process!!! +############################################################# +## This script will launch ubuntu docker container +## runs cyclictest through yardstick +## and verifies the test results. +############################################################ + + +function env_clean { + container_id=`sudo docker ps -a | grep kvmfornfv |awk '{print $1}'` + sudo docker rm $container_id + sudo ssh root@10.2.117.23 "rm -rf /root/workspace/*" + sudo ssh root@10.2.117.23 "pid=\$(ps aux | grep 'qemu' | awk '{print \$2}' | head -1); echo \$pid |xargs kill" + sudo rm -rf /tmp/kvmtest-* +} + +#Cleaning up the test environment before running cyclictest through yardstick. 
+env_clean + +time_stamp=$(date +%Y%m%d%H%M%S) +volume=/tmp/kvmtest-${time_stamp} +mkdir -p $volume/{image,rpm,scripts} + +#copying required files to run yardstick cyclic testcase +mv $WORKSPACE/build_output/kernel-4.4*.rpm $volume/rpm +cp -r $WORKSPACE/ci/envs/* $volume/scripts +cp -r $WORKSPACE/tests/cyclictest-node-context.yaml $volume +cp -r $WORKSPACE/tests/pod.yaml $volume + +#Launching ubuntu docker container to run yardstick +sudo docker run -i -v $volume:/opt --net=host --name kvmfornfv \ +kvmfornfv:latest /bin/bash -c "cd /opt/scripts && ls; ./cyclictest.sh" + +#Verifying the results of cyclictest +result=`grep -o '"errors":[^,]*' $volume/yardstick.out | awk -F '"' '{print $4}'` + +if [ -z "$result" ]; then + echo "####################################################" + echo "" + echo `grep -o '"data":[^}]*' $volume/yardstick.out | awk -F '{' '{print $2}'` + echo "" + echo "####################################################" + env_clean + exit 0 +else + echo "Testcase failed" + echo `grep -o '"errors":[^,]*' ${volume}/yardstick.out | awk -F '"' '{print $4}'` + env_clean + exit 1 +fi diff --git a/docs/all/environment-setup.rst b/docs/all/environment-setup.rst deleted file mode 100644 index e3814310a..000000000 --- a/docs/all/environment-setup.rst +++ /dev/null @@ -1,151 +0,0 @@ -.. This work is licensed under a Creative Commons Attribution 4.0 International License. -.. http://creativecommons.org/licenses/by/4.0 -.. (c) - -Low Latency Environment -======================= - -Achieving low latency with the KVM4NFV project requires setting up a special -test environment. This environment includes the BIOS settings, kernel -configuration, kernel parameters and the run-time environment. - -Hardware Environment Description --------------------------------- - -BIOS setup plays an important role in achieving real-time latency. A collection -of relevant settings, used on the platform where the baseline performance data -was collected, is detailed below: - -CPU Features -~~~~~~~~~~~~ - -Some special CPU features like TSC-deadline timer, invariant TSC and Process posted -interrupts, etc, are helpful for latency reduction. - -Below is the CPU information on the baseline test platform. -:: - processor : 35 - vendor_id : GenuineIntel - cpu family : 6 - model : 63 - model name : Intel(R) Xeon(R) CPU E5-2699 v3 @ 2.30GHz - stepping : 2 - microcode : 0x2d - cpu MHz : 2294.795 - cache size : 46080 KB - physical id : 1 - siblings : 18 - core id : 27 - cpu cores : 18 - apicid : 118 - initial apicid : 118 - fpu : yes - fpu_exception : yes - cpuid level : 15 - wp : yes - flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge - mca cmov pat pse36 clflush dts acpi mmx fxsr sse - sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm - constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc - aperfmperf eagerfpu pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 - ssse3 fma cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic movbe popcnt - tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm arat epb - pln pts dtherm tpr_shadow vnmi flexpriority ept vpid fsgsbase - tsc_adjust bmi1 avx2 smep bmi2 erms invpcid cqm xsaveopt cqm_llc - cqm_occup_llcbugs - bogomips : 4595.54 - clflush size : 64 - cache_alignment : 64 - address sizes : 46 bits physical, 48 bits virtual - power management: - -CPU Topology -~~~~~~~~~~~~ - -NUMA topology is also important for latency reduction. - -Below is the CPU topology on the baseline test platform. 
-:: - [nfv@otcnfv02 ~]$ lscpu - Architecture: x86_64 - CPU op-mode(s): 32-bit, 64-bit - Byte Order: Little Endian - CPU(s): 36 - On-line CPU(s) list: 0-35 - Thread(s) per core: 1 - Core(s) per socket: 18 - Socket(s): 2 - NUMA node(s): 2 - Vendor ID: GenuineIntel - CPU family: 6 - Model: 63 - Model name: Intel(R) Xeon(R) CPU E5-2699 v3 @ 2.30GHz - Stepping: 2 - CPU MHz: 2294.795 - BogoMIPS: 4595.54 - Virtualization: VT-x - L1d cache: 32K - L1i cache: 32K - L2 cache: 256K - L3 cache: 46080K - NUMA node0 CPU(s): 0-17 - NUMA node1 CPU(s): 18-35 - -BIOS Setup -~~~~~~~~~~ - -Careful BIOS setup is important in achieving real time latency. Different -platforms have different BIOS setups, below are the important BIOS settings on -the platform used to collect the baseline performance data. -:: - CPU Power and Performance - CPU C-State - C1E Autopromote - Processor C3 - Processor C6 - Select Memory RAS - NUMA Optimized - Cluster-on-Die - Patrol Scrub - Demand Scrub - Correctable Error <10> - Intel(R) Hyper-Threading - Active Processor Cores - Execute Disable Bit - Intel(R) Virtualization Technology - Intel(R) TXT - Enhanced Error Containment Mode - USB Controller - USB 3.0 Controller - Legacy USB Support - Port 60/64 Emulation - -Software Environment Setup --------------------------- -Both the host and the guest environment need to be configured properly to -reduce latency variations. Below are some suggested kernel configurations. -The ci/envs/ directory gives detailed implementation on how to setup the -environment. - -Kernel Parameter -~~~~~~~~~~~~~~~~ - -Please check the default kernel configuration in the source code at: -kernel/arch/x86/configs/opnfv.config. - -Below is host kernel boot line example: -:: - isolcpus=11-15,31-35 nohz_full=11-15,31-35 rcu_nocbs=11-15,31-35 iommu=pt intel_iommu=on default_hugepagesz=1G hugepagesz=1G mce=off idle=poll intel_pstate=disable processor.max_cstate=1 pcie_asmp=off tsc=reliable - -Below is guest kernel boot line example -:: - isolcpus=1 nohz_full=1 rcu_nocbs=1 mce=off idle=poll default_hugepagesz=1G hugepagesz=1G - -Please refer to :doc:`tunning` for more explanation. - -Run-time Environment Setup -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Not only are special kernel parameters needed but a special run-time -environment is also required. Please refer to :doc:`tunning` for more -explanation. diff --git a/docs/all/index.rst b/docs/all/index.rst deleted file mode 100644 index 7f5f7a694..000000000 --- a/docs/all/index.rst +++ /dev/null @@ -1,48 +0,0 @@ -.. This work is licensed under a Creative Commons Attribution 4.0 International License. -.. http://creativecommons.org/licenses/by/4.0 -.. (c) - -=============== -KVM4NFV project -=============== - -Welcome to KVM4NFV_ project! - - - -.. _KVM4NFV: https://wiki.opnfv.org/nfv-kvm - -Contents: - -KVM4NFV Project Description -=========================== - -The NFV hypervisors provide crucial functionality in the NFV Infrastructure -(NFVI). The existing hypervisors, however, are not necessarily designed or -targeted to meet the requirements for the NFVI, and we need to make -collaborative efforts toward enabling the NFV features. 
- -The KVM4NFV project focuses on the KVM hypervisor to enhance it for NFV, by -looking at the following areas - -+ Minimal Interrupt latency variation for data plane VNFs - * Minimal Timing Variation for Timing correctness of real-time VNFs - * Minimal packet latency variation for data-plane VNFs -+ Fast live migration - -While these items require software development and/or specific hardware features -there are also some adjustments that need to be made to system configuration -information, like hardware, BIOS, OS, etc. - -.. toctree:: - :numbered: - :maxdepth: 1 - -Setup Guides -============ -.. toctree:: - :maxdepth: 2 - - environment-setup - tuning - live_migration diff --git a/docs/configurationguide/abstract.rst b/docs/configurationguide/abstract.rst new file mode 100644 index 000000000..a5066c284 --- /dev/null +++ b/docs/configurationguide/abstract.rst @@ -0,0 +1,16 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International License. +.. http://creativecommons.org/licenses/by/4.0 + +======== +Abstract +======== + +This document provides guidance for the configurations available in the +Colorado release of OPNFV. + +The release includes four installer tools leveraging different technologies; +Apex, Compass4nfv, Fuel and JOID, which deploy components of the platform. + +This document also includes the selection of tools and components including +guidelines for how to deploy and configure the platform to an operational +state. diff --git a/docs/configurationguide/configuration.options.render.rst b/docs/configurationguide/configuration.options.render.rst new file mode 100644 index 000000000..93add7755 --- /dev/null +++ b/docs/configurationguide/configuration.options.render.rst @@ -0,0 +1,23 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International License. +.. http://creativecommons.org/licenses/by/4.0 + +====================== +Configuration Options +====================== + +OPNFV provides a variety of virtual infrastructure deployments called scenarios +designed to host virtualised network functions (VNF's). KVM4NFV scenarios +provide specific capabilities and/or components aimed to solve specific +problems for the deployment of VNF's. KVM4NFV scenario includes components +such as OpenStack,KVM etc. which includes different source components or +configurations. + +KVM4NFV Scenarios +=================== + +Each KVM4NFV scenario provides unique features and capabilities, it is +important to understand your target platform capabilities before installing +and configuring. This configuration guide outlines how to install and +configure components in order to enable the features required. + +.. 
include:: scenariomatrix.rst diff --git a/docs/configurationguide/images/brahmaputrafeaturematrix.jpg b/docs/configurationguide/images/brahmaputrafeaturematrix.jpg new file mode 100644 index 000000000..0d2a12279 Binary files /dev/null and b/docs/configurationguide/images/brahmaputrafeaturematrix.jpg differ diff --git a/docs/configurationguide/images/brahmaputrascenariomatrix.jpg b/docs/configurationguide/images/brahmaputrascenariomatrix.jpg new file mode 100644 index 000000000..84fc87a76 Binary files /dev/null and b/docs/configurationguide/images/brahmaputrascenariomatrix.jpg differ diff --git a/docs/configurationguide/images/weather-clear.jpg b/docs/configurationguide/images/weather-clear.jpg new file mode 100644 index 000000000..011ad52e9 Binary files /dev/null and b/docs/configurationguide/images/weather-clear.jpg differ diff --git a/docs/configurationguide/images/weather-dash.jpg b/docs/configurationguide/images/weather-dash.jpg new file mode 100644 index 000000000..3bf98dd27 Binary files /dev/null and b/docs/configurationguide/images/weather-dash.jpg differ diff --git a/docs/configurationguide/images/weather-few-clouds.jpg b/docs/configurationguide/images/weather-few-clouds.jpg new file mode 100644 index 000000000..51994ee84 Binary files /dev/null and b/docs/configurationguide/images/weather-few-clouds.jpg differ diff --git a/docs/configurationguide/images/weather-overcast.jpg b/docs/configurationguide/images/weather-overcast.jpg new file mode 100644 index 000000000..bdc1e0487 Binary files /dev/null and b/docs/configurationguide/images/weather-overcast.jpg differ diff --git a/docs/configurationguide/index.rst b/docs/configurationguide/index.rst new file mode 100644 index 000000000..6ad3b282c --- /dev/null +++ b/docs/configurationguide/index.rst @@ -0,0 +1,16 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International License. +.. http://creativecommons.org/licenses/by/4.0 + +************************* +OPNFV Configuration Guide +************************* +Colorado 1.0 +------------ + +.. toctree:: + :maxdepth: 2 + + ./abstract.rst + ./configuration.options.render.rst + ./low-latency.feature.configuration.description.rst + ./os-nosdn-kvm-ha.description.rst diff --git a/docs/configurationguide/low-latency.feature.configuration.description.rst b/docs/configurationguide/low-latency.feature.configuration.description.rst new file mode 100644 index 000000000..bb2bbd1ba --- /dev/null +++ b/docs/configurationguide/low-latency.feature.configuration.description.rst @@ -0,0 +1,93 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International License. +.. http://creativecommons.org/licenses/by/4.0 + +Introduction +============ + +In KVM4NFV project, we focus on the KVM hypervisor to enhance it for NFV, by +looking at the following areas initially + +* Minimal Interrupt latency variation for data plane VNFs: + * Minimal Timing Variation for Timing correctness of real-time VNFs + * Minimal packet latency variation for data-plane VNFs +* Inter-VM communication, +* Fast live migration + +Configuration of Cyclictest +=========================== + +Cyclictest measures Latency of response to a stimulus. Achieving low latency +with the KVM4NFV project requires setting up a special test environment. +This environment includes the BIOS settings, kernel configuration, kernel +parameters and the run-time environment. 
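As a minimal illustration (the options below are a generic rt-tests invocation and an assumption on our part, not necessarily the exact command issued by the yardstick test context), cyclictest reports wake-up latency for a pinned real-time thread:
::

    # one SCHED_FIFO priority-99 measurement thread pinned to CPU 1,
    # with memory locked and clock_nanosleep used for the interval timer
    cyclictest -a 1 -t 1 -m -n -p 99 -l 100000 -q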
+ +* For more information regarding the test environment, please visit + https://wiki.opnfv.org/display/kvm/KVM4NFV+Test++Environment + https://wiki.opnfv.org/display/kvm/Nfv-kvm-tuning + +Pre-configuration activities +---------------------------- + +Intel POD1 is currently used as the OPNFV-KVM4NFV test environment. The latest +build packages are downloaded onto the Intel pod1-jump server from the artifact +repository. Yardstick, running in an Ubuntu docker container on the Intel pod1-jump +server, triggers the cyclictest. + +Running cyclictest through Yardstick configures the host (pod1-node1) and the +guest, and then executes cyclictest on the guest. + +The following scripts are used for configuring host and guest to create a +special test environment and achieve low latency. + +**host-setup0.sh**: Running this script installs the latest kernel rpm +on the host and makes the following changes to create the special test +environment + + * Isolates CPUs from the general scheduler + * Stops timer ticks on isolated CPUs whenever possible + * Stops RCU callbacks on isolated CPUs + * Enables intel iommu driver and disables DMA translation for devices + * Sets HugeTLB pages to 1GB + * Disables machine check + * Disables clocksource verification at runtime + +**host-setup1.sh**: Running this script makes the following test +environment changes + + * Disables watchdogs to reduce overhead + * Disables RT throttling + * Reroutes interrupts bound to isolated CPUs to CPU 0 + * Changes the iptables rules so that we can ssh to the guest remotely + +**host-run-qemu.sh**: Running this script launches a guest VM on the host. + Note: the guest disk image is downloaded from the artifact repository + +**guest-setup0.sh**: Running this script on the guest VM installs the +latest build kernel rpm and cyclictest, and makes the following configuration on the +guest VM. + + * Isolates CPUs from the general scheduler + * Stops timer ticks on isolated CPUs whenever possible + * Uses polling idle loop to improve performance + * Disables clocksource verification at runtime + +**guest-setup1.sh**: Running this script on the guest VM makes the following +configurations + + * Disables watchdogs to reduce overhead + * Routes device interrupts to a non-RT CPU + * Disables RT throttling + +Hardware configuration +---------------------- + +Currently Intel POD1 is used as the test environment for kvmfornfv to execute +cyclictest. As part of this test environment Intel pod1-jump is configured as a +jenkins slave and all the latest build artifacts are downloaded onto it. +Intel pod1-node1 is the host on which a guest VM is launched as part of +running cyclictest through yardstick. + +* For more information regarding hardware configuration, please visit + https://wiki.opnfv.org/display/pharos/Intel+Pod1 + https://build.opnfv.org/ci/computer/intel-pod1/ + http://artifacts.opnfv.org/octopus/brahmaputra/docs/octopus_docs/opnfv-jenkins-slave-connection.html diff --git a/docs/configurationguide/os-nosdn-kvm-ha.description.rst b/docs/configurationguide/os-nosdn-kvm-ha.description.rst new file mode 100644 index 000000000..d60276e0f --- /dev/null +++ b/docs/configurationguide/os-nosdn-kvm-ha.description.rst @@ -0,0 +1,126 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International License. + +.. http://creativecommons.org/licenses/by/4.0 + + +Introduction +============ + +..
In this section explain the purpose of the scenario and the + types of capabilities provided + +The purpose of os-nosdn-kvm-ha scenario testing is to test the +High Availability deployment and configuration of OPNFV software suite +with OpenStack and without SDN software. This OPNFV software suite +includes OPNFV KVM4NFV latest software packages for Linux Kernel and +QEMU patches for achieving low latency. High Availability feature is achieved +by deploying OpenStack multi-node setup with 3 controllers and 2 computes nodes + +KVM4NFV packages will be installed on compute nodes as part of deployment. +This scenario testcase deployment is happening on multi-node by using +OPNFV Fuel deployer. + +Scenario Components and Composition +=================================== +.. In this section describe the unique components that make up the scenario, +.. what each component provides and why it has been included in order +.. to communicate to the user the capabilities available in this scenario. + +This scenario deploys the High Availability OPNFV Cloud based on the +configurations provided in ha_nfv-kvm_heat_ceilometer_scenario.yaml. +This yaml file contains following configurations and is passed as an +argument to deploy.py script + +* scenario.yaml:This configuration file defines translation between a + short deployment scenario name(os-nosdn-kvm-ha) and an actual deployment + scenario configuration file(ha_nfv-kvm_heat_ceilometer_scenario.yaml) + +* deployment-scenario-metadata:Contains the configuration metadata like + title,version,created,comment. + +* stack-extensions:Stack extentions are opnfv added value features in form + of a fuel-plugin.Plugins listed in stack extensions are enabled and + configured. + +* dea-override-config: Used to configure the HA mode,network segmentation + types and role to node assignments.These configurations overrides + corresponding keys in the dea_base.yaml and dea_pod_override.yaml. + These keys are used to deploy multiple nodes(3 controllers,2 computes) + as mention below. + + * **Node 1**: This node has MongoDB and Controller roles. The controller + node runs the Identity service, Image Service, management portions of + Compute and Networking, Networking plug-in and the dashboard. The + Telemetry service which was designed to support billing systems for + OpenStack cloud resources uses a NoSQL database to store information. + The database typically runs on the controller node. + + * **Node 2**: This node has Controller and Ceph-osd roles. Ceph is a + massively scalable, open source, distributed storage system. It is + comprised of an object store, block store and a POSIX-compliant distributed + file system. Enabling Ceph, configures Nova to store ephemeral volumes in + RBD, configures Glance to use the Ceph RBD backend to store images, + configures Cinder to store volumes in Ceph RBD images and configures the + default number of object replicas in Ceph. + + * **Node 3**: This node has Controller role in order to achieve high + availability. + + * **Node 4**: This node has Compute role. The compute node runs the + hypervisor portion of Compute that operates tenant virtual machines + or instances. By default, Compute uses KVM as the hypervisor. + + * **Node 5**: This node has compute role. + +* dha-override-config:Provides information about the VM definition and + Network config for virtual deployment.These configurations overrides + the pod dha definition and points to the controller,compute and + fuel definition files. 
+ +* os-nosdn-kvm-ha scenario is successful when all the 5 Nodes are accessible, + up and running + +Scenario Usage Overview +======================= +.. Provide a brief overview on how to use the scenario and the features available to the +.. user. This should be an "introduction" to the userguide document, and explicitly link to it, +.. where the specifics of the features are covered including examples and API's + +* The high availability feature can be acheived by executing deploy.py with + ha_nfv-kvm_heat_ceilometer_scenario.yaml as an argument. +* Install Fuel Master and deploy OPNFV Cloud from scratch on Hardware + Environment: + + -Example: + + sudo python deploy.py -iso ~/ISO/opnfv.iso -dea ~/CONF/hardware/dea.yaml -dha ~/CONF/hardware/dha.yaml -s /mnt/images -b pxebr -log ~/Deployment-888.log.tar.gz + +* Install Fuel Master and deploy OPNFV Cloud from scratch on Virtual + Environment: + + -Example: + + sudo python deploy.py -iso ~/ISO/opnfv.iso -dea ~/CONF/virtual/dea.yaml -dha ~/CONF/virtual/dha.yaml -s /mnt/images -log ~/Deployment-888.log.tar.gz + +* os-nosdn-kvm-ha scenario can be executed from the jenkins project + "fuel-os-nosdn-kvm-ha-baremetal-daily-master" +* This scenario provides the High Availability feature by deploying + 3 controller,2 compute nodes and checking if all the 5 nodes + are accessible(IP,up & running). +* Test Scenario is passed if deployment is successful and all 5 nodes have + accessibility (IP , up & running). +* Observed that scenario is not running any testcase on top of deployment. + +Known Limitations, Issues and Workarounds +========================================= +.. Explain any known limitations here. + +* Test scenario os-nosdn-kvm-ha result is not stable. After node reboot + triggered by kvm plugin, sometimes puppet agent (mcollective) is not + responding with in the given time. + +References +========== + +For more information on the OPNFV Colorado release, please visit +http://www.opnfv.org/colorado diff --git a/docs/configurationguide/scenariomatrix.rst b/docs/configurationguide/scenariomatrix.rst new file mode 100644 index 000000000..1e2cef90a --- /dev/null +++ b/docs/configurationguide/scenariomatrix.rst @@ -0,0 +1,100 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International License. + +.. http://creativecommons.org/licenses/by/4.0 + +Scenarios are implemented as deployable compositions through integration with an installation tool. +OPNFV supports multiple installation tools and for any given release not all tools will support all +scenarios. While our target is to establish parity across the installation tools to ensure they +can provide all scenarios, the practical challenge of achieving that goal for any given feature and +release results in some disparity. + +Colorado scenario overeview +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The following table provides an overview of the installation tools and available scenario's +in the Colorado release of OPNFV. + +Scenario status is indicated by a weather pattern icon. All scenarios listed with +a weather pattern are possible to deploy and run in your environment or a Pharos lab, +however they may have known limitations or issues as indicated by the icon. + +Weather pattern icon legend: + ++---------------------------------------------+----------------------------------------------------------+ +| Weather Icon | Scenario Status | ++=============================================+==========================================================+ +| .. 
image:: ../images/weather-clear.jpg | Stable, no known issues | ++---------------------------------------------+----------------------------------------------------------+ +| .. image:: ../images/weather-few-clouds.jpg | Stable, documented limitations | ++---------------------------------------------+----------------------------------------------------------+ +| .. image:: ../images/weather-overcast.jpg | Deployable, stability or feature limitations | ++---------------------------------------------+----------------------------------------------------------+ +| .. image:: ../images/weather-dash.jpg | Not deployed with this installer | ++---------------------------------------------+----------------------------------------------------------+ + +Scenarios that are not yet in a state of "Stable, no known issues" will continue to be stabilised +and updates will be made on the stable/colorado branch. While we intend that all Colorado +scenarios should be stable it is worth checking regularly to see the current status. Due to +our dependency on upstream communities and code some issues may not be resolved prior to the D release. + +Scenario Naming +^^^^^^^^^^^^^^^ + +In OPNFV scenarios are identified by short scenario names, these names follow a scheme that +identifies the key components and behaviours of the scenario. The rules for scenario naming are as follows: + + os-[controller]-[feature]-[mode]-[option] + +Details of the fields are + * os: mandatory + + * Refers to the platform type used + * possible value: os (OpenStack) + +* [controller]: mandatory + + * Refers to the SDN controller integrated in the platform + * example values: nosdn, ocl, odl, onos + + * [feature]: mandatory + + * Refers to the feature projects supported by the scenario + * example values: nofeature, kvm, ovs, sfc + + * [mode]: mandatory + + * Refers to the deployment type, which may include for instance high availability + * possible values: ha, noha + + * [option]: optional + + * Used for the scenarios those do not fit into naming scheme. + * The optional field in the short scenario name should not be included if there is no optional scenario. + +Some examples of supported scenario names are: + + * os-nosdn-kvm-noha + + * This is an OpenStack based deployment using neutron including the OPNFV enhanced KVM hypervisor + + * os-onos-nofeature-ha + + * This is an OpenStack deployment in high availability mode including ONOS as the SDN controller + + * os-odl_l2-sfc + + * This is an OpenStack deployment using OpenDaylight and OVS enabled with SFC features + +Installing your scenario +^^^^^^^^^^^^^^^^^^^^^^^^ + +There are two main methods of deploying your target scenario, one method is to follow this guide which will +walk you through the process of deploying to your hardware using scripts or ISO images, the other method is +to set up a Jenkins slave and connect your infrastructure to the OPNFV Jenkins master. + +For the purposes of evaluation and development a number of Colorado scenarios are able to be deployed +virtually to mitigate the requirements on physical infrastructure. Details and instructions on performing +virtual deployments can be found in the installer specific installation instructions. + +To set up a Jenkins slave for automated deployment to your lab, refer to the `Jenkins slave connect guide. 
+`_ diff --git a/docs/design/Bare-metalPacketForwarding.png b/docs/design/Bare-metalPacketForwarding.png new file mode 100644 index 000000000..4b884e257 Binary files /dev/null and b/docs/design/Bare-metalPacketForwarding.png differ diff --git a/docs/design/DeviceInterruptTest.png b/docs/design/DeviceInterruptTest.png new file mode 100644 index 000000000..497f63fa3 Binary files /dev/null and b/docs/design/DeviceInterruptTest.png differ diff --git a/docs/design/PacketforwardingDPDK_OVS.png b/docs/design/PacketforwardingDPDK_OVS.png new file mode 100644 index 000000000..c8b689b82 Binary files /dev/null and b/docs/design/PacketforwardingDPDK_OVS.png differ diff --git a/docs/design/TimerTest.png b/docs/design/TimerTest.png new file mode 100644 index 000000000..52eacc8cf Binary files /dev/null and b/docs/design/TimerTest.png differ diff --git a/docs/design/index.rst b/docs/design/index.rst new file mode 100755 index 000000000..871f17388 --- /dev/null +++ b/docs/design/index.rst @@ -0,0 +1,12 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International License. +.. http://creativecommons.org/licenses/by/4.0 + +================ +KVMFORNFV Design +================ + +.. toctree:: + :numbered: + :maxdepth: 3 + + kvmfornfv_design.rst diff --git a/docs/design/kvm1.png b/docs/design/kvm1.png new file mode 100644 index 000000000..3de1a6b80 Binary files /dev/null and b/docs/design/kvm1.png differ diff --git a/docs/design/kvmfornfv_design.rst b/docs/design/kvmfornfv_design.rst new file mode 100644 index 000000000..54dcd120a --- /dev/null +++ b/docs/design/kvmfornfv_design.rst @@ -0,0 +1,155 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International License. +.. http://creativecommons.org/licenses/by/4.0 + +============ +Introduction +============ + +**Purpose**: + + This document provides an overview of the areas that can be addressed to + enhance the KVM Hypervisor for NFV. It is intended to capture and convey the + significant changes which have been made on the KVM Hypervisor. + + +=================== +Project description +=================== + +The NFV hypervisors provide crucial functionality in the NFV +Infrastructure(NFVI).The existing hypervisors, however, are not necessarily +designed or targeted to meet the requirements for the NFVI. + +This design focuses on the enhancement of following area for KVM Hypervisor + +* Minimal Interrupt latency variation for data plane VNFs: + * Minimal Timing Variation for Timing correctness of real-time VNFs + * Minimal packet latency variation for data-plane VNFs +* Fast live migration + +While these items require software development and/or specific hardware features +there are also some adjustments that need to be made to system configuration +information, like hardware, BIOS, OS, etc. + +**Minimal Interrupt latency variation for data plane VNFs** + +Processing performance and latency depend on a number of factors, including +the CPUs (frequency, power management features, etc.), micro-architectural +resources, the cache hierarchy and sizes, memory (and hierarchy, such as NUMA) +and speed, inter-connects, I/O and I/O NUMA, devices, etc. + +There are two separate types of latencies to minimize: + + 1. Minimal Timing Variation for Timing correctness of real-time + VNFs – timing correctness for scheduling operations(such as Radio scheduling) + 2. Minimal packet latency variation for data-plane VNFs – packet delay + variation, which applies to packet processing. 
+ +For a VM, interrupt latency (time between arrival of H/W interrupt and +invocation of the interrupt handler in the VM), for example, can be either of +the above or both, depending on the type of the device. Interrupt latency with +a (virtual) timer can cause timing correctness issues with real-time VNFs even +if they only use polling for packet processing. + +We assume that the VNFs are implemented properly to minimize interrupt latency +variation within the VMs, but we have additional causes of latency variation +on KVM: + + - Asynchronous (e.g. external interrupts) and synchronous(e.g. instructions) + VM exits and handling in KVM (and kernel routines called), which may have + loops and spin locks + - Interrupt handling in the host Linux and KVM, scheduling and virtual + interrupt delivery to VNFs + - Potential VM exit (e.g. EOI) in the interrupt service routines in VNFs + - Exit to the user-level (e.g. QEMU) + +.. Figure:: kvm1.png + +===================== +Design Considerations +===================== + +The latency variation and jitters can be minimized with the below +steps (with some in parallel): + + 1. Statically and exclusively assign hardware resources + (CPUs, memory, caches,) to the VNFs. + + 2. Pre-allocate huge pages (e.g. 1 GB/2MB pages) and guest-to-host mapping, + e.g. EPT (Extended Page Table) page tables, to minimize or mitigate + latency from misses in caches, + + 3. Use the host Linux configured for hard real-time and packet latency, + Check the set of virtual devices used by the VMs to optimize or + eliminate virtualization overhead if applicable + + 4. Use advanced hardware virtualization features that can reduce or + eliminate VM exits, if present, and + + 5. Inspect the code paths in KVM and associated kernel services to + eliminate code that can cause latencies (e.g. loops and spin locks). + + 6. Measure latencies intensively. We leverage the existing testing methods. + OSADL, for example, defines industry tests for timing correctness. + +==================== +Goals and Guidelines +==================== + +The output of this project will provide : + + 1. A list of the performance goals, which will be obtained by the + OPNFV members (as described above) + + 2. A set of comprehensive instructions for the system configurations + (hardware features, BIOS setup, kernel parameters, VM configuration, + options to QEMU/KVM, etc.) + + 3. The above features to the upstream of Linux, the real-time patch + set, KVM, QEMU, libvirt, and + + 4. Performance and interrupt latency measurement tools + +========= +Test plan +========= + +The tests that need to be conducted to make sure that all components from OPNFV +meet the requirement are mentioned below: + +**Timer test**:This test utilize the cyclictest +(https://rt.wiki.kernel.org/index.php/Cyclictest) to test the guest timer +latency (the latency from the time that the guest timer should be triggered +to the time the guest timer is really triggered). + +.. Figure:: TimerTest.png + +**Device Interrupt Test**:A device on the hardware platform trigger interrupt +every one ms and the device interrupt will be delivered to the VNF. This test +cover the latency from the interrupt happened on the hardware to the time the +interrupt handled in the VNF. + +.. Figure:: DeviceInterruptTest.png + +**Packet forwarding (DPDK OVS)**:A packet is sent from TC (Traffic Generator) +to a VNF. The VNF, after processing the packet, forwards the packet to another +NIC and in the end the packet is received by the traffic generator. 
The test +check the latency from the packet is sent out by the TC to the time the packet +is received by the TC. + +.. Figure:: PacketforwardingDPDK_OVS.png + +**Packet Forwarding (SR-IOV)**:This test is similar to Packet Forwarding +(DPDK OVS). However, instead of using virtio NIC devices on the guest, +a PCI NIC or a PCI VF NIC is assigned to the guest for network acess. + +**Bare-metal Packet Forwarding**:This is used to compare with the above +packet forwarding scenario. + +.. Figure:: Bare-metalPacketForwarding.png + +========= +Reference +========= + +https://wiki.opnfv.org/display/kvm/ diff --git a/docs/glossary/kvmfornfv_glossary.rst b/docs/glossary/kvmfornfv_glossary.rst new file mode 100644 index 000000000..adebc815a --- /dev/null +++ b/docs/glossary/kvmfornfv_glossary.rst @@ -0,0 +1,396 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International License. + +.. http://creativecommons.org/licenses/by/4.0 + +************** +OPNFV Glossary +************** +Colorado 1.0 +------------ + +======== +Contents +======== + +This glossary provides a common definition of phrases and words commonly used +in OPNFV. + +-------- + +A +- + +Arno + + A river running through Tuscany and the name of the first OPNFV release. + +API + + Application Programming Interface + +AVX2 + + Advanced Vector Extensions 2 is an instruction set extension for x86. + + +-------- + +B +- + +Brahmaputra + + A river running through Asia and the name of the Second OPNFV release. + +Bios + + Basic Input/Output System + +Builds + + Build in Jenkins is a version of a program. + +Bogomips + + Bogomips is the number of million times per second a processor can do + absolutely nothing. + +-------- + +C +- + +CAT + + Cache Automation Technology + +CentOS + + Community Enterprise Operating System is a Linux distribution + +CICD + + Continuous Integration and Continuous Deployment + +CLI + + Command Line Interface + +Colorado + + A river in Argentina and the name of the Third OPNFV release. + +Compute + + Compute is an OpenStack service which offers many configuration options + which may be deployment specific. + +Console + + Console is display screen. + +CPU + Central Processing Unit + +-------- + +D +- + +Data plane + + The data plane is the part of a network that carries user traffic. + +Debian/deb + + Debian is a Unix-like computer operating system that is composed entirely of + free software. + +Docs + + Documentation/documents + +DPDK + + Data Plane Development Kit + +DPI + + Deep Packet Inspection + +DSCP + + Differentiated Services Code Point + +-------- + +F +- + +Flavors + + Flavors are templates used to define VM configurations. + +Fuel + + Provides an intuitive, GUI-driven experience for deployment and management of OpenStack + +-------- + +H +- + +Horizon + + Horizon is an OpenStack service which serves as an UI. + +Hypervisor + + A hypervisor, also called a virtual machine manager, is a program that allows + multiple operating systems to share a single hardware host. + +-------- + +I +- + +IGMP + + Internet Group Management Protocol + +IOMMU + + Input-Output Memory Management Unit + +IOPS + + Input/Output Operations Per Second + +IRQ + + Interrupt ReQuest is an interrupt request sent from the hardware level to + the CPU. + +IRQ affinity + + IRQ affinity is the set of CPU cores that can service that interrupt. + +-------- + +J +- + +Jenkins + + Jenkins is an open source continuous integration tool written in Java. + +JIRA + + JIRA is a bug tracking software. 
+ +Jitter + + Time difference in packet inter-arrival time to their destination can be called jitter. + +JumpHost + + A jump host or jump server or jumpbox is a computer on a network typically + used to manage devices in a separate security zone. + +-------- + +K +- + +Kernel + + The kernel is a computer program that constitutes the central core of a + computer's operating system. + +-------- + +L +- + +Latency + + The amount of time it takes a packet to travel from source to destination is + Latency. + +libvirt + + libvirt is an open source API, daemon and management tool for managing + platform virtualization. + +-------- + +M +- + +Migration + + Migration is the process of moving from the use of one operating environment + to another operating environment. + +-------- + +N +- + +NFV + + Network Functions Virtualisation, an industry initiative to leverage + virtualisation technologies in carrier networks. + +NFVI + + Network Function Virtualization Infrastructure + +NIC + + Network Interface Controller + +NUMA + + Non-Uniform Memory Access + +-------- + +O +- + +OPNFV + + Open Platform for NFV, an open source project developing an NFV reference + platform and features. + +-------- + +P +- + +Pharos + + Is a lighthouse and is a project deals with developing an OPNFV lab + infrastructure that is geographically and technically diverse. + +Pipeline + + A suite of plugins in Jenkins that lets you orchestrate automation. + +Platform + + OPNFV provides an open source platform for deploying NFV solutions that + leverages investments from a community of developers and solution providers. + +Pools + + A Pool is a set of resources that are kept ready to use, rather than acquired + on use and released afterwards. + +-------- + +Q +- + +Qemu + + QEMU is a free and open-source hosted hypervisor that performs hardware + virtualization. + +-------- + +R +- + +RDMA + + Remote Direct Memory Access (RDMA) + +Rest-Api + + REST (REpresentational State Transfer) is an architectural style, and an + approach to communications that is often used in the development of web + services + +-------- + +S +- + +Scaling + + Refers to altering the size. + +Slave + + Works with/for master.where master has unidirectional control over one or + more other devices. + +SR-IOV + + Single root IO- Virtualization. + +Spin locks + + A spinlock is a lock which causes a thread trying to acquire it to simply + wait in a loop while repeatedly checking if the lock is available. + +Storage + + Refers to computer components which store some data. + +-------- + +T +- + +Tenant + + A Tenant is a group of users who share a common access with specific + privileges to the software instance. + +Tickless + + A tickless kernel is an operating system kernel in which timer interrupts + do not occur at regular intervals, but are only delivered as required. + +TSC + + Technical Steering Committee + +-------- + +V +- + +VLAN + + A virtual local area network, typically an isolated ethernet network. + +VM + + Virtual machine, an emulation in software of a computer system. + +VNF + + Virtual network function, typically a networking application or function + running in a virtual environment. + +-------- + +X +- + +XBZRLE + + Helps to reduce the network traffic by just sending the updated data + +-------- + +Y +- + +Yardstick + + Yardstick is an infrastructure verification. It is an OPNFV testing project. 
diff --git a/docs/installationprocedure/abstract.rst b/docs/installationprocedure/abstract.rst new file mode 100644 index 000000000..728f0aa1c --- /dev/null +++ b/docs/installationprocedure/abstract.rst @@ -0,0 +1,7 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International License. + +.. http://creativecommons.org/licenses/by/4.0 + +This document gives the user instructions on how to deploy the available +KVM4NFV CICD build scenario verified for the Colorado release of the OPNFV +platform. diff --git a/docs/installationprocedure/index.rst b/docs/installationprocedure/index.rst new file mode 100644 index 000000000..00ccaf237 --- /dev/null +++ b/docs/installationprocedure/index.rst @@ -0,0 +1,17 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International License. + +.. http://creativecommons.org/licenses/by/4.0 + +********************** +Installation procedure +********************** +Colorado 1.0 +------------ + +.. toctree:: + :numbered: + :maxdepth: 2 + + abstract.rst + kvm4nfv-cicd.installation.instruction.rst + kvm4nfv-cicd.release.notes.rst diff --git a/docs/installationprocedure/kvm4nfv-cicd.installation.instruction.rst b/docs/installationprocedure/kvm4nfv-cicd.installation.instruction.rst new file mode 100644 index 000000000..23177344e --- /dev/null +++ b/docs/installationprocedure/kvm4nfv-cicd.installation.instruction.rst @@ -0,0 +1,68 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International License. + +.. http://creativecommons.org/licenses/by/4.0 + +===================================== +KVM4NFV CICD Installation Instruction +===================================== + +Preparing the installation +-------------------------- + +The OPNFV project- KVM4NFV (https://gerrit.opnfv.org/gerrit/kvmfornfv.git) is +cloned first, to make the build scripts for Qemu & Kernel, Rpms and Debians +available. + +HW requirements +--------------- + +These build scripts are triggered on the Jenkins-Slave build server. Currently +Intel POD1 is used as the test environment for kvmfornfv to execute cyclictest. As +part of this test environment Intel pod1-jump is configured as a jenkins slave +and all the latest build artifacts are downloaded onto it. Intel pod1-node1 +is the host on which a guest VM is launched as part of running cyclictest +through yardstick. + +Build instructions +------------------ + +Builds are possible for the following packages- + +**kvmfornfv source code**- The ./ci/build.sh is the main script used to trigger +the Rpms (on 'centos') and Debians (on 'ubuntu') builds in this case. + +* How to build Kernel/Qemu Rpms- To build rpm packages, the build.sh script is run + with the -p and -o options (i.e. if the -p package option is passed as "centos" or in + the default case). Example: sh ./ci/build.sh -p centos -o build_output + +* How to build Kernel/Qemu Debians- To build debian packages, the build.sh script + is run with the -p and -o options (i.e. if the -p package option is passed as + "ubuntu"). Example: sh ./ci/build.sh -p ubuntu -o build_output + +* How to build all Kernel & Qemu, Rpms & Debians- To build both debian and rpm + packages, the build.sh script is run with the -p and -o options (i.e. if the -p package + option is passed as "both"). Example: sh ./ci/build.sh -p both -o build_output + +Installation instructions +------------------------- + +Installation can be done in the following ways- + +**1. From kvmfornfv source code**- +The build packages that are prepared in the above section are installed +differently depending on the platform.
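For illustration, with hypothetical package file names (the actual names depend on the build output), the install is a plain package installation on each platform:
::

    # CentOS: install the kernel rpm produced by build.sh (file name illustrative)
    rpm -ihv kernel-4.4*.rpm
    # Ubuntu: install the corresponding debian package (file name illustrative)
    dpkg -i kernel-4.4*.deb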
+ +Please visit the links for each- + +* Centos : https://www.centos.org/docs/5/html/Deployment_Guide-en-US/s1-rpm-using.html +* Ubuntu : https://help.ubuntu.com/community/InstallingSoftware + +**2. Using Fuel installer**- + +* Please refer to the document present at /fuel-plugin/README.md + +Post-installation activities +---------------------------- + +After the packages are built, test these packages by executing the scripts +present in ci/envs for configuring the host and guest respectively. diff --git a/docs/installationprocedure/kvm4nfv-cicd.release.notes.rst b/docs/installationprocedure/kvm4nfv-cicd.release.notes.rst new file mode 100644 index 000000000..a54fe0b11 --- /dev/null +++ b/docs/installationprocedure/kvm4nfv-cicd.release.notes.rst @@ -0,0 +1,138 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International License. + +.. http://creativecommons.org/licenses/by/4.0 + +============================= +Release Note for KVM4NFV CICD +============================= + + +Abstract +======== + +This document contains the release notes for the Colorado release of +OPNFV when using KVM4NFV CICD process. + +Introduction +============ + +Provide a brief introduction of how this configuration is used in OPNFV release +using KVM4VFV CICD as scenario. + +Be sure to reference your scenario installation instruction. + +Release Data +============ + ++--------------------------------------+--------------------------------------+ +| **Project** | NFV Hypervisors-KVM | +| | | ++--------------------------------------+--------------------------------------+ +| **Repo/tag** | kvmfornfv | +| | | ++--------------------------------------+--------------------------------------+ +| **Release designation** | | +| | | ++--------------------------------------+--------------------------------------+ +| **Release date** | | +| | | ++--------------------------------------+--------------------------------------+ +| **Purpose of the delivery** | Automate the KVM4VFV CICD scenario | +| | | ++--------------------------------------+--------------------------------------+ + +Deliverables +------------ + +Software deliverables +~~~~~~~~~~~~~~~~~~~~~ +Kernel and Qemu- RPM and Debian build packages + +Documentation deliverables +~~~~~~~~~~~~~~~~~~~~~~~~~~ +- KVM4NFV CICD process documentation available under /docs/ under + various categories. + +Version change +-------------- +.. This section describes the changes made since the last version of this +.. document. + +Module version change +~~~~~~~~~~~~~~~~~~~~~ +- Build scripts made available for Kernel rpm, Kernel deb, Qemu rpm, Qemu + deb packages. +- Releng scripts made available to trigger these kvm4nfv build scripts for + automating complete CICD process. 
+ +Document version change +~~~~~~~~~~~~~~~~~~~~~~~ +The following documents are added- +- configurationguide +- instalationprocedure +- userguide +- overview +- glossary +- releasenotes + +Reason for new version +---------------------- + +Feature additions +~~~~~~~~~~~~~~~~~ + ++--------------------------------------+--------------------------------------+ +| **JIRA REFERENCE** | **SLOGAN** | +| | | ++--------------------------------------+--------------------------------------+ +| JIRA: | NFV Hypervisors-KVMKVMFORNFV-34 | +| | | ++--------------------------------------+--------------------------------------+ +| JIRA: | NFV Hypervisors-KVMKVMFORNFV-34 | +| | | ++--------------------------------------+--------------------------------------+ + +Bug corrections +~~~~~~~~~~~~~~~ + +**JIRA TICKETS:** + ++--------------------------------------+--------------------------------------+ +| **JIRA REFERENCE** | **SLOGAN** | +| | | ++--------------------------------------+--------------------------------------+ +| JIRA: | | +| | | ++--------------------------------------+--------------------------------------+ + + +Known Limitations, Issues and Workarounds +========================================= + +System Limitations +------------------ + +Known issues +------------ + +**JIRA TICKETS:** + ++--------------------------------------+--------------------------------------+ +| **JIRA REFERENCE** | **SLOGAN** | +| | | ++--------------------------------------+--------------------------------------+ +| JIRA: | | ++--------------------------------------+--------------------------------------+ +| JIRA: | | ++--------------------------------------+--------------------------------------+ + + +Workarounds +----------- +See JIRA: + + +References +========== +For more information on the OPNFV Brahmaputra release, please visit +http://www.opnfv.org/brahmaputra diff --git a/docs/overview/kvmfornfv_overview.rst b/docs/overview/kvmfornfv_overview.rst new file mode 100644 index 000000000..87d401bf0 --- /dev/null +++ b/docs/overview/kvmfornfv_overview.rst @@ -0,0 +1,25 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International License. + +.. http://creativecommons.org/licenses/by/4.0 + +============================= +KMV4MFV CICD Project Overview +============================= + +The detailed understanding of this project is organized into different sections- + +* userguide - This provides the required technical assistance to the user, in + using the KVM4NFV CICD process. +* installationprocedure- This will give the user instructions on how to deploy + available KVM4NFV CICD build scenario. +* configurationguide- This provides guidance for configuring KVM4NFV + environment, even with the use of specific installer tools for deploying some + components, available in the Colorado release of OPNFV. +* requirements- This includes the introduction of KVM4NFV CICD project, + specifications of how the project should work, and constraints placed upon + its execution. +* design- This includes the parameters or design considerations taken into + account for achieving minimal interrupt latency for the data VNFs. +* releasenotes- This describes a brief summary of recent changes, enhancements + and bug fixes in the KVM4NFV project. +* glossary- It includes the definition of terms, used in the KVM4NFV project diff --git a/docs/releasenotes/index.rst b/docs/releasenotes/index.rst new file mode 100644 index 000000000..9ae91bf0f --- /dev/null +++ b/docs/releasenotes/index.rst @@ -0,0 +1,11 @@ +.. 
This work is licensed under a Creative Commons Attribution 4.0 International License. +.. http://creativecommons.org/licenses/by/4.0 + +========================= +KVM4NFV CICD Release Note +========================= + +.. toctree:: + :maxdepth: 2 + + release-notes diff --git a/docs/releasenotes/release-notes.rst b/docs/releasenotes/release-notes.rst new file mode 100644 index 000000000..c6013d2ef --- /dev/null +++ b/docs/releasenotes/release-notes.rst @@ -0,0 +1,174 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International License. + +.. http://creativecommons.org/licenses/by/4.0 + +===================================================== +OPNFV Release Note for "Colorado release" - KVMFORNFV +===================================================== + +.. _Kvmfornfv: https://wiki.opnfv.org/display/kvm/ + + +Abstract +======== + +This document provides the release notes for Colorado release of KVMFORNFV. + +License +======= + +KVMFORNFV is licensed under a Creative Commons Attribution 4.0 International +License.You should have received a copy of the license along with this. If not, +see . + + +**Contents** + +1 Version History + +2 Important notes + +3 Summary + +4 Delivery Data + +5 References + +1 Version history +=================== + ++--------------------+--------------------+--------------------+--------------------+ +| **Date** | **Ver.** | **Author** | **Comment** | +| | | | | ++--------------------+--------------------+--------------------+--------------------+ +|2016-08-22 | 0.1.0 | | Colorado release | +| | | | | ++--------------------+--------------------+--------------------+--------------------+ + +2 Important notes +=================== + +The software delivered in the OPNFV KVMFORNFV_ Project, comprises the +*ci*, the *kvmfornfv test cases*. + +The *KVMFORNFV* framework depends on the *Fuel* installer. + + +3 Summary +=========== + +This Colorado release provides *KVMFORNFV* as a framework to enhance the +KVM Hypervisor for NFV and OPNFV scenario testing, automated in the OPNFV +CI pipeline, including: + +* Documentation created + + * User Guide + + * Configuration Guide + + * Installation Procedure + + * Release notes (this document) + +* KVMFORNFV source code + +* Cyclictests for KVMFORNFV + +For Colorado release, the KVMFORNFV uses for the following: + +* Automation of building the Kernel and qemu RPM's or debians + +* Executing the Cyclictests to check the latency + +* os-sdn-kvm-ha Scenario testing for High Availability Configuration using + Fuel Installer + +The *KVMFORNFV framework* is developed in the OPNFV community, by the +KVMFORNFV_ team. 
+ +4 Release Data +================ + ++--------------------------------------+--------------------------------------+ +| **Project** | NFV Hypervisors-KVM | +| | | ++--------------------------------------+--------------------------------------+ +| **Repo/commit-ID** | kvmfornfv | +| | | ++--------------------------------------+--------------------------------------+ +| **Release designation** | Colorado | +| | | ++--------------------------------------+--------------------------------------+ +| **Release date** | 2016-08-22 | +| | | ++--------------------------------------+--------------------------------------+ +| **Purpose of the delivery** | OPNFV Colorado Releases | +| | | ++--------------------------------------+--------------------------------------+ + +4.1 Version change +------------------ + +4.1.1 Module version changes +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +This is the first tracked release of KVMFORNFV + + +4.1.2 Document version changes +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +This is the initial version of the KVMFORNFV framework in OPNFV. + +4.2 Reason for version +---------------------- + +4.2.1 Feature additions +~~~~~~~~~~~~~~~~~~~~~~~ + ++--------------------------------------+--------------------------------------+ +| **JIRA REFERENCE** | **SLOGAN** | +| | | ++--------------------------------------+--------------------------------------+ +| JIRA: | NFV Hypervisors-KVMKVMFORNFV-34 | +| | | ++--------------------------------------+--------------------------------------+ +| JIRA: | NFV Hypervisors-KVMKVMFORNFV-34 | +| | | ++--------------------------------------+--------------------------------------+ + +4.2.2 Bug corrections +~~~~~~~~~~~~~~~~~~~~~ + +Initial Release + +4.3 Deliverables +---------------- + +4.3.1 Software deliverables +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +KVMFORNFV framework source code + +4.3.2 Documentation deliverables +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The below documents are delivered for Colorado KVMFORNFV Release: + + * User Guide + + * Configuration Guide + + * Installation Procedure + + * Overview + + * Release notes (this document) + + * Glossary + + +5 References +============= + +For more information on the KVMFORNFV Colorado release, please see: + +https://wiki.opnfv.org/display/kvm/ diff --git a/docs/requirements/index.rst b/docs/requirements/index.rst new file mode 100755 index 000000000..42dba0422 --- /dev/null +++ b/docs/requirements/index.rst @@ -0,0 +1,12 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International License. +.. http://creativecommons.org/licenses/by/4.0 + +================ +KVMFORNFV Design +================ + +.. toctree:: + :numbered: + :maxdepth: 3 + + kvmfornfv_requirements.rst diff --git a/docs/requirements/kvmfornfv_requirements.rst b/docs/requirements/kvmfornfv_requirements.rst new file mode 100644 index 000000000..048838907 --- /dev/null +++ b/docs/requirements/kvmfornfv_requirements.rst @@ -0,0 +1,89 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International License. +.. http://creativecommons.org/licenses/by/4.0 +.. (c) OPNFV, Intel Corporation, AT&T and others. + +============ +Introduction +============ +The NFV hypervisors provide crucial functionality in the NFV +Infrastructure(NFVI).The existing hypervisors, however, are not necessarily +designed or targeted to meet the requirements for the NFVI. + +This document specifies the list of requirements that need to be met as part +of this "NFV Hypervisors-KVM" project in Colorado release. 
+ +As part of this project we need to make collaborative efforts towards enabling +the NFV features. + +================= +Scope and Purpose +================= + +The main purpose of this project is to enhance the KVM hypervisor for NFV, by +looking at the following areas initially: + +* Minimal Interrupt latency variation for data plane VNFs: + * Minimal Timing Variation for Timing correctness of real-time VNFs + * Minimal packet latency variation for data-plane VNFs +* Inter-VM communication +* Fast live migration + +The output of this project would be list of the performance goals,comprehensive +instructions for the system configurations,tools to measure Performance and +interrupt latency. + +=========================== +Methods and Instrumentation +=========================== + +The above areas would require software development and/or specific hardware +features, and some need just configurations information for the system +(hardware, BIOS, OS, etc.). + +A right configuration is critical for improving the NFV performance/latency. +Even working on the same code base, different configurations can make +completely different performance/latency result. +Configurations that can be made as part of this project to tune a specific +scenario are: + + 1. **Platform Configuration** : Some hardware features like Power management, + Hyper-Threading,Legacy USB Support/Port 60/64 Emulation,SMI can be configured. + 2. **Operating System Configuration** : Some configuration features like CPU + isolation,Memory allocation,IRQ affinity,Device assignment for VM,Tickless, + TSC,Idle,_RCU_NOCB_,Disable the RT throttling,NUMA can be configured. + 3. **Performance/Latency Tuning** : Application level configurations like + timers,Making vfio MSI interrupt as non-threaded,Cache Allocation + Technology(CAT) enabling can be tuned to improve the NFV + performance/latency. + +===================== +Features to be tested +===================== + +The tests that need to be conducted to make sure that latency is addressed are: +1. Timer test +2. Device Interrupt Test +3. Packet forwarding (DPDK OVS) +4. Packet Forwarding (SR-IOV) +5. Bare-metal Packet Forwarding + +============ +Dependencies +============ + +1. OPNFV Project: “Characterize vSwitch Performance for Telco NFV Use Cases” + (VSPERF) for performance evaluation of ivshmem vs. vhost-user. +2. OPNFV Project: “Pharos” for Test Bed Infrastructure, and possibly + “Yardstick” for infrastructure verification. +3. There are currently no similar projects underway in OPNFV or in an upstream + project +4. The relevant upstream project to be influenced here is QEMU/KVM and + libvirt. +5. In terms of HW dependencies, the aim is to use standard IA Server hardware + for this project, as provided by OPNFV Pharos. + +========= +Reference +========= + +https://wiki.opnfv.org/display/kvm/ diff --git a/docs/userguide/abstract.rst b/docs/userguide/abstract.rst new file mode 100644 index 000000000..8c36c268f --- /dev/null +++ b/docs/userguide/abstract.rst @@ -0,0 +1,16 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International License. + +.. 
http://creativecommons.org/licenses/by/4.0 + +======== +Abstract +======== + +In KVM4NFV project, we focus on the KVM hypervisor to enhance it for NFV, +by looking at the following areas initially- + +* Minimal Interrupt latency variation for data plane VNFs: + * Minimal Timing Variation for Timing correctness of real-time VNFs + * Minimal packet latency variation for data-plane VNFs +* Inter-VM communication +* Fast live migration diff --git a/docs/userguide/common.platform.render.rst b/docs/userguide/common.platform.render.rst new file mode 100644 index 000000000..486ca469f --- /dev/null +++ b/docs/userguide/common.platform.render.rst @@ -0,0 +1,15 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International License. + +.. http://creativecommons.org/licenses/by/4.0 + +================================ +Using common platform components +================================ + +This section outlines basic usage principals and methods for some of the +commonly deployed components of supported OPNFV scenario's in Colorado. +The subsections provide an outline of how these components are commonly +used and how to address them in an OPNFV deployment.The components derive +from autonomous upstream communities and where possible this guide will +provide direction to the relevant documentation made available by those +communities to better help you navigate the OPNFV deployment. diff --git a/docs/userguide/feature.userguide.render.rst b/docs/userguide/feature.userguide.render.rst new file mode 100644 index 000000000..d903f0711 --- /dev/null +++ b/docs/userguide/feature.userguide.render.rst @@ -0,0 +1,14 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International License. + +.. http://creativecommons.org/licenses/by/4.0 + +========================== +Using Colorado Features +========================== + +The following sections of the user guide provide feature specific usage +guidelines and references for KVM4NFV CICD project. + +* /docs/userguide/low_latency.userguide.rst +* /docs/userguide/live_migration.userguide.rst +* /docs/userguide/tuning.userguide.rst diff --git a/docs/userguide/index.rst b/docs/userguide/index.rst new file mode 100644 index 000000000..55042ec04 --- /dev/null +++ b/docs/userguide/index.rst @@ -0,0 +1,20 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International License. + +.. http://creativecommons.org/licenses/by/4.0 + +**************** +OPNFV User Guide +**************** +Colorado 1.0 +------------ + +.. toctree:: + :maxdepth: 2 + + ./abstract.rst + ./introduction.rst + ./common.platform.render.rst + ./feature.userguide.render.rst + ./low_latency.userguide.rst + ./live_migration.userguide.rst + ./tuning.userguide.rst diff --git a/docs/userguide/introduction.rst b/docs/userguide/introduction.rst new file mode 100644 index 000000000..501d6391b --- /dev/null +++ b/docs/userguide/introduction.rst @@ -0,0 +1,53 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International License. + +.. http://creativecommons.org/licenses/by/4.0 + +======== +Overview +======== + +The project "NFV Hypervisors-KVM" makes collaborative efforts to enable NFV +features for existing hypervisors, which are not necessarily designed or +targeted to meet the requirements for the NFVI.The KVM4NFV CICD scenario +consists of Continuous Integration builds, deployments and testing +combinations of virtual infrastructure components. 
+ +KVM4NFV Features +================ + +Using this project, the following areas are targeted- + +* Minimal Interrupt latency variation for data plane VNFs: + * Minimal Timing Variation for Timing correctness of real-time VNFs + * Minimal packet latency variation for data-plane VNFs +* Inter-VM communication +* Fast live migration + +Some of the above items would require software development and/or specific +hardware features, and some need just configurations information for the +system (hardware, BIOS, OS, etc.). + +We include a requirements gathering stage as a formal part of the project. +For each subproject, we will start with an organized requirement stage so +that we can determine specific use cases (e.g. what kind of VMs should be +live migrated) and requirements (e.g. interrupt latency, jitters, Mpps, +migration-time, down-time, etc.) to set out the performance goals. + +Potential future projects would include: + +* Dynamic scaling (via scale-out) using VM instantiation +* Fast live migration for SR-IOV + +The user guide outlines how to work with key components and features in +the platform, each feature description section will indicate the scenarios +that provide the components and configurations required to use it. + +The configuration guide details which scenarios are best for you and how to +install and configure them. + +General usage guidelines +======================== + +The user guide for KVM4NFV CICD features and capabilities provide step by step +instructions for using features that have been configured according to the +installation and configuration instructions. diff --git a/docs/all/live_migration.rst b/docs/userguide/live_migration.userguide.rst similarity index 82% rename from docs/all/live_migration.rst rename to docs/userguide/live_migration.userguide.rst index 4af19b6f4..9fa9b82fd 100644 --- a/docs/all/live_migration.rst +++ b/docs/userguide/live_migration.userguide.rst @@ -1,13 +1,13 @@ .. This work is licensed under a Creative Commons Attribution 4.0 International License. + .. http://creativecommons.org/licenses/by/4.0 -.. (c) Fast Live Migration =================== -The NFV project requires fast live migration. The specific requirement is total -live migration time < 2Sec, while keeping the VM down time < 10ms when running -DPDK L2 forwarding workload. +The NFV project requires fast live migration. The specific requirement is +total live migration time < 2Sec, while keeping the VM down time < 10ms when +running DPDK L2 forwarding workload. We measured the baseline data of migrating an idle 8GiB guest running a DPDK L2 forwarding work load and observed that the total live migration time was 2271ms @@ -50,9 +50,10 @@ a. Delay non-emergency operations is completed the VM down time is reduced to about 5-7ms. b. Optimize zero page checking Currently QEMU uses the SSE2 instruction to optimize the zero pages - checking. The SSE2 instruction can process 16 bytes per instruction. By using - the AVX2 instruction, we can process 32 bytes per instruction. Testingt shows - that using AVX2 can speed up the zero pages checking process by about 25%. + checking. The SSE2 instruction can process 16 bytes per instruction. + By using the AVX2 instruction, we can process 32 bytes per instruction. + Testing shows that using AVX2 can speed up the zero pages checking process + by about 25%. c. Remove unnecessary context synchronization. The CPU context was being synchronized twice during live migration. 
Removing this unnecessary synchronization shortened the VM downtime by about 100us. @@ -69,10 +70,18 @@ OS: RHEL 7.1 Kernel: 4.2 QEMU v2.4.0 -Ethernet controller: Intel Corporation Ethernet Controller 10-Gigabit X540-AT2 (rev 01) +Ethernet controller: Intel Corporation Ethernet Controller 10-Gigabit +X540-AT2 (rev 01) QEMU parameters: :: - /root/qemu.git/x86_64-softmmu/qemu-system-x86_64-enable-kvm -cpu host -smp 4 –device virtio-net-pci,netdev=net1,mac=52:54:00:12:34:56 –netdev type=tap,id=net1,script=/etc/kvm/qemu-ifup,downscript=no,vhost=on–device virtio-net-pci,netdev=net2,mac=54:54:00:12:34:56 –netdevtype=tap,id=net2,script=/etc/kvm/qemu-ifup2,downscript=no,vhost=on -balloon virtio -m 8192-monitor stdio /mnt/liang/ia32e_rhel6u5.qcow +${qemu} -smp ${guest_cpus} -monitor unix:${qmp_sock},server,nowait -daemonize \ +-cpu host,migratable=off,+invtsc,+tsc-deadline,pmu=off \ +-realtime mlock=on -mem-prealloc -enable-kvm -m 1G \ +-mem-path /mnt/hugetlbfs-1g \ +-drive file=/root/minimal-centos1.qcow2,cache=none,aio=threads \ +-netdev user,id=guest0,hostfwd=tcp:5555-:22 \ +-device virtio-net-pci,netdev=guest0 \ +-nographic -serial /dev/null -parallel /dev/null Network connection diff --git a/docs/all/lmdowntime.jpg b/docs/userguide/lmdowntime.jpg similarity index 100% rename from docs/all/lmdowntime.jpg rename to docs/userguide/lmdowntime.jpg diff --git a/docs/all/lmnetwork.jpg b/docs/userguide/lmnetwork.jpg similarity index 100% rename from docs/all/lmnetwork.jpg rename to docs/userguide/lmnetwork.jpg diff --git a/docs/all/lmtotaltime.jpg b/docs/userguide/lmtotaltime.jpg similarity index 100% rename from docs/all/lmtotaltime.jpg rename to docs/userguide/lmtotaltime.jpg diff --git a/docs/userguide/low_latency.userguide.rst b/docs/userguide/low_latency.userguide.rst new file mode 100644 index 000000000..e0d2791df --- /dev/null +++ b/docs/userguide/low_latency.userguide.rst @@ -0,0 +1,68 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International License. + +.. http://creativecommons.org/licenses/by/4.0 + +Low Latency Environment +======================= + +Achieving low latency with the KVM4NFV project requires setting up a special +test environment. This environment includes the BIOS settings, kernel +configuration, kernel parameters and the run-time environment. + +Hardware Environment Description +-------------------------------- + +BIOS setup plays an important role in achieving real-time latency. A collection +of relevant settings, used on the platform where the baseline performance data +was collected, is detailed below: + +CPU Features +~~~~~~~~~~~~ + +Some special CPU features like TSC-deadline timer, invariant TSC and Process +posted interrupts, etc, are helpful for latency reduction. + +CPU Topology +~~~~~~~~~~~~ + +NUMA topology is also important for latency reduction. + +BIOS Setup +~~~~~~~~~~ + +Careful BIOS setup is important in achieving real time latency. Different +platforms have different BIOS setups, below are the important BIOS settings on +the platform used to collect the baseline performance data. + +Software Environment Setup +-------------------------- +Both the host and the guest environment need to be configured properly to +reduce latency variations. Below are some suggested kernel configurations. +The ci/envs/ directory gives detailed implementation on how to setup the +environment. + +Kernel Parameter +~~~~~~~~~~~~~~~~ + +Please check the default kernel configuration in the source code at: +kernel/arch/x86/configs/opnfv.config. 
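
Once a host has been booted with parameters like the examples shown below, a
quick sanity check can confirm that they actually took effect (a minimal
sketch, assuming the distribution installs the kernel configuration as
/boot/config-$(uname -r)):
::

    # confirm the isolation / tickless parameters are on the live command line
    tr ' ' '\n' < /proc/cmdline | grep -E 'isolcpus|nohz_full|rcu_nocbs|hugepagesz|idle'

    # confirm the running kernel was built with full tickless and no-callback RCU
    grep -E 'NO_HZ_FULL|RCU_NOCB' /boot/config-$(uname -r)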
+ +Below is host kernel boot line example: +:: +isolcpus=11-15,31-35 nohz_full=11-15,31-35 rcu_nocbs=11-15,31-35 +iommu=pt intel_iommu=on default_hugepagesz=1G hugepagesz=1G mce=off idle=poll +intel_pstate=disable processor.max_cstate=1 pcie_asmp=off tsc=reliable + +Below is guest kernel boot line example +:: +isolcpus=1 nohz_full=1 rcu_nocbs=1 mce=off idle=poll default_hugepagesz=1G +hugepagesz=1G + +Please refer to `tuning.userguide` for more explanation. + +Run-time Environment Setup +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Not only are special kernel parameters needed but a special run-time +environment is also required. Please refer to `tunning.userguide` for +more explanation. diff --git a/docs/userguide/openstack.rst b/docs/userguide/openstack.rst new file mode 100644 index 000000000..bd1919991 --- /dev/null +++ b/docs/userguide/openstack.rst @@ -0,0 +1,51 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International License. + +.. http://creativecommons.org/licenses/by/4.0 + +-------------------------------- +Colorado OpenStack User Guide +-------------------------------- + +OpenStack is a cloud operating system developed and released by the +`OpenStack project `_. OpenStack is used in OPNFV +for controlling pools of compute, storage, and networking resources in a Pharos +compliant infrastructure. + +OpenStack is used in Colorado to manage tenants (known in OpenStack as +projects),users, services, images, flavours, and quotas across the Pharos +infrastructure.The OpenStack interface provides the primary interface for an +operational Colorado deployment and it is from the "horizon console" that an +OPNFV user will perform the majority of administrative and operational +activities on the deployment. + +OpenStack references +-------------------- + +The `OpenStack user guide `_ provides +details and descriptions of how to configure and interact with the OpenStack +deployment.This guide can be used by lab engineers and operators to tune the +OpenStack deployment to your liking. + +Once you have configured OpenStack to your purposes, or the Colorado +deployment meets your needs as deployed, an operator, or administrator, will +find the best guidance for working with OpenStack in the +`OpenStack administration guide `_. + +Connecting to the OpenStack instance +------------------------------------ + +Once familiar with the basic of working with OpenStack you will want to connect +to the OpenStack instance via the Horizon Console. The Horizon console provide +a Web based GUI that will allow you operate the deployment. +To do this you should open a browser on the JumpHost to the following address +and enter the username and password: + + + http://{Controller-VIP}:80/index.html> + username: admin + password: admin + +Other methods of interacting with and configuring OpenStack,, like the REST API +and CLI are also available in the Colorado deployment, see the +`OpenStack administration guide `_ +for more information on using those interfaces. diff --git a/docs/all/tuning.rst b/docs/userguide/tuning.userguide.rst similarity index 70% rename from docs/all/tuning.rst rename to docs/userguide/tuning.userguide.rst index 760861b8b..3673ae2d4 100644 --- a/docs/all/tuning.rst +++ b/docs/userguide/tuning.userguide.rst @@ -1,13 +1,13 @@ .. This work is licensed under a Creative Commons Attribution 4.0 International License. + .. http://creativecommons.org/licenses/by/4.0 -.. 
(c) Low Latency Tunning Suggestion ============================== -The correct configuration is critical for improving the NFV performance/latency. -Even working on the same codebase, configurations can cause wildly different -performance/latency results. +The correct configuration is critical for improving the NFV +performance/latency.Even working on the same codebase, configurations can cause +wildly different performance/latency results. There are many combinations of configurations, from hardware configuration to Operating System configuration and application level configuration. And there @@ -24,10 +24,10 @@ but others may not be configurable (e.g. SMI on most platforms). * **Power management:** Most power management related features save power at the expensive of latency. These features include: Intel®Turbo Boost Technology, - Enhanced Intel®SpeedStep, Processor C state and P state. Normally they should - be disabled but, depending on the real-time application design and latency - requirements, there might be some features that can be enabled if the impact on - deterministic execution of the workload is small. + Enhanced Intel®SpeedStep, Processor C state and P state. Normally they + should be disabled but, depending on the real-time application design and + latency requirements, there might be some features that can be enabled if + the impact on deterministic execution of the workload is small. * **Hyper-Threading:** The logic cores that share resource with other logic cores can introduce @@ -41,7 +41,8 @@ but others may not be configurable (e.g. SMI on most platforms). * **SMI (System Management Interrupt):** SMI runs outside of the kernel code and can potentially cause latency. It is a pity there is no simple way to disable it. Some vendors may - provide related switches in BIOS but most machines do not have this capability. + provide related switches in BIOS but most machines do not have this + capability. Operating System Configuration ------------------------------ @@ -54,32 +55,32 @@ Operating System Configuration for more information. * **Memory allocation:** - Memory shoud be reserved for realtime applications and usually hugepage should - be used to reduce page fauts/TLB misses. + Memory shoud be reserved for realtime applications and usually hugepage + should be used to reduce page fauts/TLB misses. * **IRQ affinity:** All the non-realtime IRQs should be affinitized to non realtime CPUs to - reduce the impact on realtime CPUs. Some OS distributions contain an irqbalance - daemon which balances the IRQs among all the cores dynamically. It should be - disabled as well. + reduce the impact on realtime CPUs. Some OS distributions contain an + irqbalance daemon which balances the IRQs among all the cores dynamically. + It should be disabled as well. * **Device assignment for VM:** - If a device is used in a VM, then device passthrough is desirable. In this case, - the IOMMU should be enabled. + If a device is used in a VM, then device passthrough is desirable. In this + case,the IOMMU should be enabled. * **Tickless:** - Frequent clock ticks cause latency. CONFIG_NOHZ_FULL should be enabled in the - linux kernel. With CONFIG_NOHZ_FULL, the physical CPU will trigger many fewer - clock tick interrupts(currently, 1 tick per second). This can reduce latency - because each host timer interrupt triggers a VM exit from guest to host which - causes performance/latency impacts. + Frequent clock ticks cause latency. CONFIG_NOHZ_FULL should be enabled in + the linux kernel. 
With CONFIG_NOHZ_FULL, the physical CPU will trigger many + fewer clock tick interrupts(currently, 1 tick per second). This can reduce + latency because each host timer interrupt triggers a VM exit from guest to + host which causes performance/latency impacts. * **TSC:** Mark TSC clock source as reliable. A TSC clock source that seems to be - unreliable causes the kernel to continuously enable the clock source watchdog - to check if TSC frequency is still correct. On recent Intel platforms with - Constant TSC/Invariant TSC/Synchronized TSC, the TSC is reliable so the - watchdog is useless but cause latency. + unreliable causes the kernel to continuously enable the clock source + watchdog to check if TSC frequency is still correct. On recent Intel + platforms with Constant TSC/Invariant TSC/Synchronized TSC, the TSC is + reliable so the watchdog is useless but cause latency. * **Idle:** The poll option forces a polling idle loop that can slightly improve the @@ -92,9 +93,9 @@ Operating System Configuration * **Disable the RT throttling:** RT Throttling is a Linux kernel mechanism that - occurs when a process or thread uses 100% of the core, leaving no resources for - the Linux scheduler to execute the kernel/housekeeping tasks. RT Throttling - increases the latency so should be disabled. + occurs when a process or thread uses 100% of the core, leaving no resources + for the Linux scheduler to execute the kernel/housekeeping tasks. RT + Throttling increases the latency so should be disabled. * **NUMA configuration:** To achieve the best latency. CPU/Memory and device allocated for realtime diff --git a/fuel-plugin/README.md b/fuel-plugin/README.md index 77dee17c2..63ddc4eb9 100644 --- a/fuel-plugin/README.md +++ b/fuel-plugin/README.md @@ -38,12 +38,12 @@ Buiding system pre-requistes Buid instruction ---------------- 1. Clone the kvmfornfv repo from https://gerrit.opnfv.org/gerrit/kvmfornfv by - "git clone https://gerrit.opnfv.org/gerrit/kvmfornfv" -2. You can modify the kernel code in kvmfornfv/kernel as you want + "git clone https://gerrit.opnfv.org/gerrit/kvmfornfv". +2. You can modify the kernel code in kvmfornfv/kernel as you want. 3. Go to kvmfornfv/fuel-plugin/vagrant, type "vagrant destroy -f; vagrant up; - estroy -f", the building will start + vagarant destroy -f", the building will start. 4. When the building completes, you should find the built fuel-plugin-kvm in - kvmfornfv/fuel-plugin with the name as "fuel-plugin-kvm-0.9-0.9.0-1.noarch.rpm", + kvmfornfv/fuel-plugin/vagrant with the name as "fuel-plugin-kvm-0.9-0.9.0-1.noarch.rpm", where "0.9-0.9.0-1" is the version information for this plugin, this version info may be changed in future. The built plugin incules the changes you made. diff --git a/fuel-plugin/build_kvm.sh b/fuel-plugin/build_kvm.sh index 9984f3bab..befadd95f 100755 --- a/fuel-plugin/build_kvm.sh +++ b/fuel-plugin/build_kvm.sh @@ -1,6 +1,6 @@ #!/bin/bash -SRC=/ +SRC=/root CONFIG="arch/x86/configs/opnfv.config" VERSION="1.0.OPNFV" OVS_COMMIT="4ff6642f3c1dd8949c2f42b3310ee2523ee970a6" @@ -22,6 +22,11 @@ done apt-get update apt-get install -y git fakeroot build-essential ncurses-dev xz-utils kernel-package bc autoconf automake libtool python python-pip +# +# Build kernel in another directory, so some files (which are root writeable only) generated during kernel +# building wouldn't remain in the source directory mapped into Docker container +# +cp -r /kvmfornfv $SRC/. 
cd $SRC # Get the Open VSwitch sources @@ -68,3 +73,6 @@ depmod -b ovs.$$ -a `ls ovs.$$/lib/modules` dpkg-deb -b ovs.$$ $SRC/kvmfornfv/linux-image*.deb rm -rf ovs.$$ +cp $SRC/kvmfornfv/linux-headers*.deb /kvmfornfv/. +cp $SRC/kvmfornfv/linux-image*.deb /kvmfornfv/. + diff --git a/fuel-plugin/deployment_tasks.yaml b/fuel-plugin/deployment_tasks.yaml index ee9e998fa..f6e31e3ee 100644 --- a/fuel-plugin/deployment_tasks.yaml +++ b/fuel-plugin/deployment_tasks.yaml @@ -2,8 +2,8 @@ type: puppet version: 2.0.0 groups: [compute] - required_for: [pre_deployment_end] - requires: [pre_deployment_start] + required_for: [firewall] + requires: [tools] parameters: puppet_manifest: puppet/manifests/kvm-install.pp puppet_modules: puppet/modules:/etc/puppet/modules diff --git a/kernel/arch/x86/configs/opnfv.config b/kernel/arch/x86/configs/opnfv.config index b623b0cd4..95bb41d70 100644 --- a/kernel/arch/x86/configs/opnfv.config +++ b/kernel/arch/x86/configs/opnfv.config @@ -1814,7 +1814,9 @@ CONFIG_NET_VENDOR_BROADCOM=y CONFIG_BNX2=m CONFIG_CNIC=m CONFIG_TIGON3=y -# CONFIG_BNX2X is not set +CONFIG_BNX2X=m +CONFIG_BNX2X_SRIOV=y +# CONFIG_BNX2X_VXLAN is not set # CONFIG_BNXT is not set CONFIG_NET_VENDOR_BROCADE=y # CONFIG_BNA is not set diff --git a/kernel/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/kernel/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 45bd628ea..e680442cc 100644 --- a/kernel/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/kernel/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -802,6 +802,47 @@ static int bnxt_flash_nvram(struct net_device *dev, return rc; } +static int bnxt_firmware_reset(struct net_device *dev, + u16 dir_type) +{ + struct bnxt *bp = netdev_priv(dev); + struct hwrm_fw_reset_input req = {0}; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1); + + /* TODO: Support ASAP ChiMP self-reset (e.g. upon PF driver unload) */ + /* TODO: Address self-reset of APE/KONG/BONO/TANG or ungraceful reset */ + /* (e.g. 
when firmware isn't already running) */ + switch (dir_type) { + case BNX_DIR_TYPE_CHIMP_PATCH: + case BNX_DIR_TYPE_BOOTCODE: + case BNX_DIR_TYPE_BOOTCODE_2: + req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT; + /* Self-reset ChiMP upon next PCIe reset: */ + req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST; + break; + case BNX_DIR_TYPE_APE_FW: + case BNX_DIR_TYPE_APE_PATCH: + req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT; + /* Self-reset APE upon next PCIe reset: */ + req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST; + break; + case BNX_DIR_TYPE_KONG_FW: + case BNX_DIR_TYPE_KONG_PATCH: + req.embedded_proc_type = + FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL; + break; + case BNX_DIR_TYPE_BONO_FW: + case BNX_DIR_TYPE_BONO_PATCH: + req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE; + break; + default: + return -EINVAL; + } + + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); +} + static int bnxt_flash_firmware(struct net_device *dev, u16 dir_type, const u8 *fw_data, @@ -856,10 +897,9 @@ static int bnxt_flash_firmware(struct net_device *dev, /* TODO: Validate digital signature (RSA-encrypted SHA-256 hash) here */ rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST, 0, 0, fw_data, fw_size); - if (rc == 0) { /* Firmware update successful */ - /* TODO: Notify processor it needs to reset itself - */ - } + if (rc == 0) /* Firmware update successful */ + rc = bnxt_firmware_reset(dev, dir_type); + return rc; } diff --git a/patches/ovs/0001-Add-Linux-4.4-support.patch b/patches/ovs/0001-Add-Linux-4.4-support.patch deleted file mode 100644 index a580bc267..000000000 --- a/patches/ovs/0001-Add-Linux-4.4-support.patch +++ /dev/null @@ -1,246 +0,0 @@ -From a4c2305b6190ce24ceaafb9f85fc5a67787fb71d Mon Sep 17 00:00:00 2001 -From: Donald Dugger -Date: Mon, 9 May 2016 05:14:12 +0000 -Subject: [PATCH] Add Linux 4.4 support - -A bit cleaner than my previous patch. - http://patchwork.ozlabs.org/patch/595969/ - -Though I couldn't figure out a clean solution for ip6_local_out(), -genl_notify(), and vport-vxlan - -Signed-off-by: Alexandru Ardelean - -Note that this patch has been rejected for the upstream OVS tree as -the maintainers feel a different apporach (backporting all Linux -patches that affect the OVS code). We'll just use this patch until -the official OVS tree is updated to support Linux 4.4. 
- -Upstream status: NA - -Signed-off-by: Don Dugger ---- - acinclude.m4 | 4 ++-- - datapath/actions.c | 6 ++++-- - datapath/datapath.c | 6 +++++- - datapath/linux/compat/include/linux/netfilter_ipv6.h | 2 +- - datapath/linux/compat/include/net/ip.h | 19 ++++++++++++++++--- - datapath/linux/compat/include/net/ip6_tunnel.h | 4 ++++ - datapath/linux/compat/include/net/vxlan.h | 10 ++++++++++ - datapath/linux/compat/stt.c | 6 ++++++ - datapath/vport-vxlan.c | 5 +++++ - 9 files changed, 53 insertions(+), 9 deletions(-) - -diff --git a/acinclude.m4 b/acinclude.m4 -index 23015fe..22e75ec 100644 ---- a/acinclude.m4 -+++ b/acinclude.m4 -@@ -134,10 +134,10 @@ AC_DEFUN([OVS_CHECK_LINUX], [ - AC_MSG_RESULT([$kversion]) - - if test "$version" -ge 4; then -- if test "$version" = 4 && test "$patchlevel" -le 3; then -+ if test "$version" = 4 && test "$patchlevel" -le 4; then - : # Linux 4.x - else -- AC_ERROR([Linux kernel in $KBUILD is version $kversion, but version newer than 4.3.x is not supported (please refer to the FAQ for advice)]) -+ AC_ERROR([Linux kernel in $KBUILD is version $kversion, but version newer than 4.4.x is not supported (please refer to the FAQ for advice)]) - fi - elif test "$version" = 3 && test "$patchlevel" -ge 10; then - : # Linux 3.x -diff --git a/datapath/actions.c b/datapath/actions.c -index dcf8591..242e710 100644 ---- a/datapath/actions.c -+++ b/datapath/actions.c -@@ -702,7 +702,8 @@ static void ovs_fragment(struct vport *vport, struct sk_buff *skb, u16 mru, - skb_dst_set_noref(skb, &ovs_dst); - IPCB(skb)->frag_max_size = mru; - -- ip_do_fragment(skb->sk, skb, ovs_vport_output); -+ ip_do_fragment(NET_ARG(dev_net(ovs_dst.dev)) -+ skb->sk, skb, ovs_vport_output); - refdst_drop(orig_dst); - } else if (ethertype == htons(ETH_P_IPV6)) { - const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops(); -@@ -723,7 +724,8 @@ static void ovs_fragment(struct vport *vport, struct sk_buff *skb, u16 mru, - skb_dst_set_noref(skb, &ovs_rt.dst); - IP6CB(skb)->frag_max_size = mru; - -- v6ops->fragment(skb->sk, skb, ovs_vport_output); -+ v6ops->fragment(NET_ARG(dev_net(ovs_rt.dst.dev)) -+ skb->sk, skb, ovs_vport_output); - refdst_drop(orig_dst); - } else { - WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.", -diff --git a/datapath/datapath.c b/datapath/datapath.c -index 5bec072..ba19c01 100644 ---- a/datapath/datapath.c -+++ b/datapath/datapath.c -@@ -95,8 +95,12 @@ static bool ovs_must_notify(struct genl_family *family, struct genl_info *info, - static void ovs_notify(struct genl_family *family, struct genl_multicast_group *grp, - struct sk_buff *skb, struct genl_info *info) - { -- genl_notify(family, skb, genl_info_net(info), -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0) -+ genl_notify(family, skb, info, GROUP_ID(grp), GFP_KERNEL); -+#else -+ genl_notify(family, skb, genl_info_net(info), - info->snd_portid, GROUP_ID(grp), info->nlhdr, GFP_KERNEL); -+#endif - } - - /** -diff --git a/datapath/linux/compat/include/linux/netfilter_ipv6.h b/datapath/linux/compat/include/linux/netfilter_ipv6.h -index 8d896fb..9f64002 100644 ---- a/datapath/linux/compat/include/linux/netfilter_ipv6.h -+++ b/datapath/linux/compat/include/linux/netfilter_ipv6.h -@@ -13,7 +13,7 @@ - * the callback parameter needs to be in the form that older kernels accept. - * We don't backport the other ipv6_ops as they're currently unused by OVS. 
*/ - struct ovs_nf_ipv6_ops { -- int (*fragment)(struct sock *sk, struct sk_buff *skb, -+ int (*fragment)(NET_ARG(net) struct sock *sk, struct sk_buff *skb, - int (*output)(OVS_VPORT_OUTPUT_PARAMS)); - }; - #define nf_ipv6_ops ovs_nf_ipv6_ops -diff --git a/datapath/linux/compat/include/net/ip.h b/datapath/linux/compat/include/net/ip.h -index c283ad0..483662c 100644 ---- a/datapath/linux/compat/include/net/ip.h -+++ b/datapath/linux/compat/include/net/ip.h -@@ -59,8 +59,20 @@ static inline unsigned int rpl_ip_skb_dst_mtu(const struct sk_buff *skb) - #define ip_skb_dst_mtu rpl_ip_skb_dst_mtu - #endif /* HAVE_IP_SKB_DST_MTU */ - -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0) -+#define NET_PARAM(x) struct net *x, -+#define NET_ARG(x) x, -+#define NET_DEV_NET(x) dev_net(x) -+#define NET_DECLARE_INIT(x,y) -+#else -+#define NET_PARAM(x) -+#define NET_ARG(x) -+#define NET_DEV_NET(x) -+#define NET_DECLARE_INIT(x,y) struct net *x = y; -+#endif -+ - #ifdef HAVE_IP_FRAGMENT_TAKES_SOCK --#define OVS_VPORT_OUTPUT_PARAMS struct sock *sock, struct sk_buff *skb -+#define OVS_VPORT_OUTPUT_PARAMS NET_PARAM(net) struct sock *sock, struct sk_buff *skb - #else - #define OVS_VPORT_OUTPUT_PARAMS struct sk_buff *skb - #endif -@@ -82,12 +94,13 @@ static inline bool ip_defrag_user_in_between(u32 user, - #endif /* < v4.2 */ - - #ifndef HAVE_IP_DO_FRAGMENT --static inline int rpl_ip_do_fragment(struct sock *sk, struct sk_buff *skb, -+static inline int rpl_ip_do_fragment(NET_PARAM(net) struct sock *sk, struct sk_buff *skb, - int (*output)(OVS_VPORT_OUTPUT_PARAMS)) - { - unsigned int mtu = ip_skb_dst_mtu(skb); - struct iphdr *iph = ip_hdr(skb); - struct rtable *rt = skb_rtable(skb); -+ NET_DECLARE_INIT(net, dev_net(dev)); - struct net_device *dev = rt->dst.dev; - - if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) || -@@ -95,7 +108,7 @@ static inline int rpl_ip_do_fragment(struct sock *sk, struct sk_buff *skb, - IPCB(skb)->frag_max_size > mtu))) { - - pr_warn("Dropping packet in ip_do_fragment()\n"); -- IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS); -+ IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS); - kfree_skb(skb); - return -EMSGSIZE; - } -diff --git a/datapath/linux/compat/include/net/ip6_tunnel.h b/datapath/linux/compat/include/net/ip6_tunnel.h -index ce65087..eacf9ca 100644 ---- a/datapath/linux/compat/include/net/ip6_tunnel.h -+++ b/datapath/linux/compat/include/net/ip6_tunnel.h -@@ -17,11 +17,15 @@ static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb, - - pkt_len = skb->len - skb_inner_network_offset(skb); - /* TODO: Fix GSO for ipv6 */ -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0) -+ err = ip6_local_out(dev_net(dev), sk, skb); -+#else - #ifdef HAVE_IP6_LOCAL_OUT_SK - err = ip6_local_out_sk(sk, skb); - #else - err = ip6_local_out(skb); - #endif -+#endif /* >= kernel 4.4 */ - if (net_xmit_eval(err) != 0) - pkt_len = net_xmit_eval(err); - else -diff --git a/datapath/linux/compat/include/net/vxlan.h b/datapath/linux/compat/include/net/vxlan.h -index 75a5a7a..589cc0d 100644 ---- a/datapath/linux/compat/include/net/vxlan.h -+++ b/datapath/linux/compat/include/net/vxlan.h -@@ -218,10 +218,20 @@ struct vxlan_dev { - struct net_device *rpl_vxlan_dev_create(struct net *net, const char *name, - u8 name_assign_type, struct vxlan_config *conf); - -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0) -+static inline __be16 vxlan_dev_dst_port(struct vxlan_dev *vxlan, -+ unsigned short family) -+{ -+ if (family == AF_INET6) -+ return inet_sk(vxlan->vn6_sock->sock->sk)->inet_sport; -+ return 
inet_sk(vxlan->vn4_sock->sock->sk)->inet_sport; -+} -+#else - static inline __be16 vxlan_dev_dst_port(struct vxlan_dev *vxlan) - { - return inet_sk(vxlan->vn_sock->sock->sk)->inet_sport; - } -+#endif - - static inline netdev_features_t vxlan_features_check(struct sk_buff *skb, - netdev_features_t features) -diff --git a/datapath/linux/compat/stt.c b/datapath/linux/compat/stt.c -index 86d225e..6b1e3a3 100644 ---- a/datapath/linux/compat/stt.c -+++ b/datapath/linux/compat/stt.c -@@ -1544,7 +1544,11 @@ static void clean_percpu(struct work_struct *work) - } - - #ifdef HAVE_NF_HOOKFN_ARG_OPS -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0) -+#define FIRST_PARAM void *priv -+#else - #define FIRST_PARAM const struct nf_hook_ops *ops -+#endif /* >= kernel 4.4 */ - #else - #define FIRST_PARAM unsigned int hooknum - #endif -@@ -1592,7 +1596,9 @@ static unsigned int nf_ip_hook(FIRST_PARAM, struct sk_buff *skb, LAST_PARAM) - - static struct nf_hook_ops nf_hook_ops __read_mostly = { - .hook = nf_ip_hook, -+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,4,0) - .owner = THIS_MODULE, -+#endif - .pf = NFPROTO_IPV4, - .hooknum = NF_INET_LOCAL_IN, - .priority = INT_MAX, -diff --git a/datapath/vport-vxlan.c b/datapath/vport-vxlan.c -index c05f5d4..3cbb568 100644 ---- a/datapath/vport-vxlan.c -+++ b/datapath/vport-vxlan.c -@@ -153,7 +153,12 @@ static int vxlan_get_egress_tun_info(struct vport *vport, struct sk_buff *skb, - { - struct vxlan_dev *vxlan = netdev_priv(vport->dev); - struct net *net = ovs_dp_get_net(vport->dp); -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0) -+ unsigned short family = ip_tunnel_info_af(upcall->egress_tun_info); -+ __be16 dst_port = vxlan_dev_dst_port(vxlan, family); -+#else - __be16 dst_port = vxlan_dev_dst_port(vxlan); -+#endif - __be16 src_port; - int port_min; - int port_max; --- -1.9.1 - diff --git a/tests/pod.yaml b/tests/pod.yaml new file mode 100644 index 000000000..fa66ebf7f --- /dev/null +++ b/tests/pod.yaml @@ -0,0 +1,18 @@ +--- +# sample config file about the POD information, including the +# name/IP/user/ssh key +# +# The options of this config file include: +# name: the name of this node +# role: node's role, support role: Master/Controller/Comupte/BareMetal +# ip: the node's IP address +# user: the username for login +# key_filename:the path of the private key file for login + +nodes: +- + name: kvm + role: Controller + ip: 10.2.117.23 + user: root + key_filename: /root/.ssh/id_rsa
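
The sample tests/pod.yaml above describes a single controller node reached
over SSH with a private key. Before launching a test run it can be useful to
confirm that this node is actually reachable; the following is a minimal
sketch of such a check (the helper name and the naive field extraction are
illustrative assumptions, not part of the repository):
::

    #!/bin/bash
    # check_pod.sh (hypothetical helper): verify that the node described in a
    # flat pod.yaml such as the sample above is reachable over SSH before a
    # test run. The field extraction is a naive text parse that only works for
    # the flat sample layout; a real harness would use a YAML parser.
    pod_file=${1:-tests/pod.yaml}

    ip=$(awk '/^[[:space:]]*ip:/ {print $2; exit}' "${pod_file}")
    user=$(awk '/^[[:space:]]*user:/ {print $2; exit}' "${pod_file}")
    key=$(awk '/^[[:space:]]*key_filename:/ {print $2; exit}' "${pod_file}")

    # non-interactive connection attempt with a short timeout
    if ssh -i "${key}" -o BatchMode=yes -o ConnectTimeout=5 "${user}@${ip}" true; then
        echo "node ${ip} is reachable"
    else
        echo "cannot reach node ${ip} as ${user}" >&2
        exit 1
    fi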