From: Jiang, Yunhong Date: Mon, 28 Nov 2016 23:43:45 +0000 (+0000) Subject: Merge "Revert "Revert "[Fuel-plugin] Install kernel in post-deployment.""" X-Git-Tag: danube.1.0~25 X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?a=commitdiff_plain;h=2c8605dc1647ee6a7b7a76c952d2adc6f68be989;hp=860e71785b244ba8e7c5a39c7f738302c5a957c7;p=kvmfornfv.git Merge "Revert "Revert "[Fuel-plugin] Install kernel in post-deployment.""" --- diff --git a/ci/build_rpm/build_rpms_docker.sh b/ci/build_rpm/build_rpms_docker.sh index 59d5bb911..708c8acbf 100755 --- a/ci/build_rpm/build_rpms_docker.sh +++ b/ci/build_rpm/build_rpms_docker.sh @@ -10,7 +10,6 @@ cp -r $rpm_build_dir $tmp_rpm_build_dir cd $tmp_rpm_build_dir/qemu make clean ./configure -make -j$(nproc) cd $tmp_rpm_build_dir ./ci/build_rpm/qemu_rpm_build.sh build_output diff --git a/ci/build_rpm/qemu_rpm_build.sh b/ci/build_rpm/qemu_rpm_build.sh index 1e87fbdd6..302d00354 100755 --- a/ci/build_rpm/qemu_rpm_build.sh +++ b/ci/build_rpm/qemu_rpm_build.sh @@ -54,9 +54,7 @@ qemu_rpm_build() { } if [ ! -d ${rpmbuild_dir} ] ; then - yum install rpm-build -y - mkdir -p ~/rpmbuild/{BUILD,RPMS,SOURCES,SPECS,SRPMS} - mv rpmbuild $workspace + mkdir -p ${rpmbuild_dir}/{BUILD,RPMS,SOURCES,SPECS,SRPMS} fi qemu_rpm_build diff --git a/ci/cyclicTestTrigger.sh b/ci/cyclicTestTrigger.sh index bd6e29038..6241452a2 100755 --- a/ci/cyclicTestTrigger.sh +++ b/ci/cyclicTestTrigger.sh @@ -19,17 +19,28 @@ if [ -z ${KERNELRPM_VERSION} ];then exit 1 fi -#Updating the pod.yaml file with HOST_IP,cyclictest-node-context.yaml with loops and interval +#calculating and verifying sha512sum of the guestimage. +function verifyGuestImage { + scp $WORKSPACE/build_output/guest1.sha512 root@${HOST_IP}:/root/images + checksum=$(sudo ssh root@${HOST_IP} "cd /root/images/ && sha512sum -c guest1.sha512 | awk '{print \$2}'") + if [ "$checksum" != "OK" ]; then + echo "Something wrong with the image, please verify" + return 1 + fi +} + +#Updating the pod.yaml file with HOST_IP,kvmfornfv_cyclictest_idle_idle.yaml with loops and interval function updateYaml { cd $WORKSPACE/tests/ sed -ri "s/[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}/${HOST_IP}/" pod.yaml - sed -ri "s/loops: [0-9]*/loops: ${testTime}/" cyclictest-node-context.yaml - sed -ri "s/interval: [0-9]*/interval: 1000/" cyclictest-node-context.yaml + sed -ri "s/loops: [0-9]*/loops: ${testTime}/" kvmfornfv_cyclictest_idle_idle.yaml + sed -ri "0,/interval: [0-9]*/s//interval: 1000/" kvmfornfv_cyclictest_idle_idle.yaml } #cleaning the environment after executing the test through yardstick. 
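#env_clean stops and removes the kvmfornfv_${testType} container, empties
#/root/workspace on the host and kills any leftover qemu process so the next
#run starts from a clean slate; host_clean then reboots the host, and the
#cleanup wrapper calls both and turns a non-zero yardstick exit status into a
#test failure.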
function env_clean { container_id=`sudo docker ps -a | grep kvmfornfv_${testType} |awk '{print \$1}'|sed -e 's/\r//g'` + sudo docker stop ${container_id} sudo docker rm ${container_id} sudo ssh root@${HOST_IP} "rm -rf /root/workspace/*" sudo ssh root@${HOST_IP} "pid=\$(ps aux | grep 'qemu' | awk '{print \$2}' | head -1); echo \$pid |xargs kill" @@ -44,6 +55,17 @@ function host_clean { sudo ssh root@${HOST_IP} "reboot" } +function cleanup { + output=$1 + env_clean + host_clean + if [ $output != 0 ];then + echo "Yardstick Failed.Please check cyclictest.sh" + return 1 + else + return 0 + fi +} #Creating a docker image with yardstick installed and Verify the results of cyclictest function runCyclicTest { @@ -61,30 +83,33 @@ function runCyclicTest { #copying required files to run yardstick cyclic testcase mv $WORKSPACE/build_output/kernel-${KERNELRPM_VERSION}*.rpm ${volume}/rpm cp -r $WORKSPACE/ci/envs/* ${volume}/scripts - cp -r $WORKSPACE/tests/cyclictest-node-context.yaml ${volume} - cp -r $WORKSPACE/tests/pod.yaml ${volume} + cp -r $WORKSPACE/tests/kvmfornfv_cyclictest_idle_idle.yaml ${volume} + cp -r $WORKSPACE/tests/pod.yaml ${volume}/scripts #Launching ubuntu docker container to run yardstick sudo docker run -i -v ${volume}:/opt --net=host --name kvmfornfv_${testType} \ - kvmfornfv:latest /bin/bash -c "cd /opt/scripts && ls; ./cyclictest.sh" - + kvmfornfv:latest /bin/bash -c "cd /opt/scripts && ls; ./cyclictest.sh $testType" + cyclictest_output=$? #Verifying the results of cyclictest - result=`grep -o '"errors":[^,]*' ${volume}/yardstick.out | awk -F '"' '{print $4}'` - if [ -z "${result}" ]; then - echo "####################################################" - echo "" - echo `grep -o '"data":[^}]*' ${volume}/yardstick.out | awk -F '{' '{print $2}'` - echo "" - echo "####################################################" - env_clean - host_clean - exit 0 + if [ "$testType" == "verify" ];then + result=`grep -o '"errors":[^,]*' ${volume}/yardstick.out | awk -F '"' '{print $4}'` + + if [ -z "${result}" ]; then + echo "####################################################" + echo "" + echo `grep -o '"data":[^}]*' ${volume}/yardstick.out | awk -F '{' '{print $2}'` + echo "" + echo "####################################################" + cleanup $cyclictest_output + else + echo "Testcase failed" + echo `grep -o '"errors":[^,]*' ${volume}/yardstick.out | awk -F '"' '{print $4}'` + env_clean + host_clean + return 1 + fi else - echo "Testcase failed" - echo `grep -o '"errors":[^,]*' ${volume}/yardstick.out | awk -F '"' '{print $4}'` - env_clean - host_clean - exit 1 + cleanup $cyclictest_output fi } diff --git a/ci/envs/cyclictest.sh b/ci/envs/cyclictest.sh index 747f34758..805f9088f 100755 --- a/ci/envs/cyclictest.sh +++ b/ci/envs/cyclictest.sh @@ -6,9 +6,10 @@ ########################################################### source utils.sh +testType=$1 #daily/verify/merge HOST_IP=$( getHostIP ) -pod_config='/opt/pod.yaml' -cyclictest_context_file='/opt/cyclictest-node-context.yaml' +pod_config='/opt/scripts/pod.yaml' +cyclictest_context_file='/opt/kvmfornfv_cyclictest_idle_idle.yaml' if [ ! -f ${pod_config} ] ; then echo "file ${pod_config} not found" @@ -21,9 +22,48 @@ if [ ! -f ${cyclictest_context_file} ] ; then fi #setting up of image for launching guest vm. 
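#A pristine copy of guest1.qcow2 is taken from /root/images before every run,
#after its sha512 has already been checked by verifyGuestImage, so the guest
#always boots from a known-good image; StrictHostKeyChecking and the
#known_hosts file are disabled so the CI job can connect to the host without
#an interactive host-key prompt.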
-sudo ssh root@$HOST_IP "cp /root/images/guest1.qcow2 /root/" +ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null \ +root@$HOST_IP "cp /root/images/guest1.qcow2 /root/" + +#Updating the yardstick.conf file for daily +function updateConfDaily() { + DISPATCHER_TYPE=influxdb + DISPATCHER_FILE_NAME="/tmp/yardstick.out" + # Use the influxDB on the jumping server + DISPATCHER_INFLUXDB_TARGET="http://104.197.68.199:8086" + mkdir -p /etc/yardstick + cat << EOF > /etc/yardstick/yardstick.conf +[DEFAULT] +debug = True +dispatcher = ${DISPATCHER_TYPE} + +[dispatcher_file] +file_name = ${DISPATCHER_FILE_NAME} + +[dispatcher_influxdb] +timeout = 5 +db_name = yardstick +username = opnfv +password = 0pnfv2015 +target = ${DISPATCHER_INFLUXDB_TARGET} +EOF +} + +#Function call to update yardstick conf file based on Job type +if [ "$testType" == "daily" ];then + updateConfDaily +fi #Running cyclictest through yardstick yardstick -d task start ${cyclictest_context_file} -chmod 777 /tmp/yardstick.out -cat /tmp/yardstick.out > /opt/yardstick.out +output=$? + +if [ "$testType" == "verify" ];then + chmod 777 /tmp/yardstick.out + cat /tmp/yardstick.out > /opt/yardstick.out +fi + +if [ $output != 0 ];then + echo "Yardstick Failed !!!" + exit 1 +fi diff --git a/ci/envs/host-config b/ci/envs/host-config old mode 100644 new mode 100755 index ce6243ce0..a6beb06b6 --- a/ci/envs/host-config +++ b/ci/envs/host-config @@ -1,3 +1,5 @@ +#!/bin/bash + ############################################################################## ## Copyright (c) 2015 Intel Corp. ## @@ -7,13 +9,27 @@ ## http://www.apache.org/licenses/LICENSE-2.0 ############################################################################### -# Isolated cpus for nfv, must be delimited with ',' -host_isolcpus=3,4 - # Number of huge pages to create and on which NUMA node -numa_node=0 +numa_node=1 huge_pages=2 # QEMU executable path and number of cpus for guest qemu=/usr/libexec/qemu-kvm guest_cpus=2 + +# Isolated cpus for nfv, must be given as a range '-' and Numa node1 CPU's should be considered +host_isolcpus=`lscpu | grep "NUMA node1 CPU(s)"| awk -F ':' '{print \$2}' | sed 's/[[:space:]]//g'` +first=$(echo ${host_isolcpus} | cut -f1 -d-) +last=$(echo ${host_isolcpus} | cut -f2 -d-) + +# Bind cpus from host_isolcpus range for QEMU processor threads +i=0 +while [ ${i} -lt ${guest_cpus} ]; do + qemu_cpu[$i]=${first} + i=`expr $i + 1` + first=`expr $first + 1` +done + +#Isolated cpus from host_isolcpus range to run Stress tool +stress_isolcpus=${first}-${last} +echo "Stress tool runs on $stress_isolcpus" diff --git a/ci/envs/host-run-qemu.sh b/ci/envs/host-run-qemu.sh index 400e9e306..9cd4b45c2 100755 --- a/ci/envs/host-run-qemu.sh +++ b/ci/envs/host-run-qemu.sh @@ -19,7 +19,7 @@ cpumask () { printf 0x%x ${m} } -qmp_sock="/tmp/qmp-sock-$$" +qmp_sock="/tmp/qmp-sock" #${qemu} -smp ${guest_cpus} -monitor unix:${qmp_sock},server,nowait -daemonize \ # -cpu host,migratable=off,+invtsc,+tsc-deadline,pmu=off \ @@ -30,25 +30,20 @@ qmp_sock="/tmp/qmp-sock-$$" # -device virtio-net-pci,netdev=guest0 \ # -nographic -serial /dev/null -parallel /dev/null -${qemu} -smp ${guest_cpus} -drive file=/root/guest1.qcow2 -daemonize \ +${qemu} -smp ${guest_cpus} -monitor unix:${qmp_sock},server,nowait \ + -drive file=/root/guest1.qcow2 -daemonize \ -netdev user,id=net0,hostfwd=tcp:$HOST_IP:5555-:22 \ -realtime mlock=on -mem-prealloc -enable-kvm -m 1G \ -mem-path /mnt/hugetlbfs-1g \ -device virtio-net-pci,netdev=net0 \ -i=0 -for c in `echo ${host_isolcpus} | sed 's/,/ /g'` ; 
do - cpu[$i]=${c} - i=`expr $i + 1` -done - threads=`echo "info cpus" | nc -U ${qmp_sock} | grep thread_id | cut -d= -f3` # Bind QEMU processor threads to RT CPUs i=0 for tid in ${threads} ; do - tid=`printf %d ${tid}` # this is required to get rid of cr at end - mask=`cpumask ${cpu[$i]}` - taskset -p ${mask} ${tid} + new_tid=`echo $tid | sed -e 's/[\r\n]//g'` # this is required to get rid of cr at end + mask=`cpumask ${qemu_cpu[$i]}` + taskset -p ${mask} ${new_tid} i=`expr $i + 1` done diff --git a/ci/envs/utils.sh b/ci/envs/utils.sh index f582b5aac..5db55bef8 100755 --- a/ci/envs/utils.sh +++ b/ci/envs/utils.sh @@ -18,6 +18,12 @@ function getKernelVersion { #Get the IP address from pod.yaml file (example ip : 10.2.117.23) function getHostIP { - HOST_IP=`grep 'ip' $WORKSPACE/tests/pod.yaml | awk -F ': ' '{print $NF}' | tail -1` + host_dir="/root/workspace/scripts/" + container_dir="/opt/scripts/" + if [ -d "$container_dir" ];then + HOST_IP=`grep 'ip' $container_dir/pod.yaml | awk -F ': ' '{print $NF}' | tail -1` + elif [ -d "$host_dir" ];then + HOST_IP=`grep 'ip' $host_dir/pod.yaml | awk -F ': ' '{print $NF}' | tail -1` + fi echo $HOST_IP } diff --git a/ci/test_kvmfornfv.sh b/ci/test_kvmfornfv.sh index 858aaf394..e9f520059 100755 --- a/ci/test_kvmfornfv.sh +++ b/ci/test_kvmfornfv.sh @@ -19,15 +19,27 @@ elif [ ${test_type} == "daily" ];then elif [ ${test_type} == "merge" ];then echo "Test is not enabled for ${test_type}" exit 0 +else + echo "Incorrect test type ${test_type}" + exit 1 fi source $WORKSPACE/ci/cyclicTestTrigger.sh $HOST_IP $test_time $test_type -#Update cyclictest-node-context.yaml with test_time and pod.yaml with IP +#calculating and verifying sha512sum of the guestimage. +if ! verifyGuestImage;then + exit 1 +fi + +#Update kvmfornfv_cyclictest_idle_idle.yaml with test_time and pod.yaml with IP updateYaml #Cleaning up the test environment before running cyclictest through yardstick. 
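#At this point the guest image checksum has been verified and the task/pod
#yaml files carry the current HOST_IP and loop count, so any container or
#qemu instance left over from a previous run is removed before runCyclicTest
#launches the yardstick container and propagates its result as the job status.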
env_clean #Creating a docker image with yardstick installed and launching ubuntu docker to run yardstick cyclic testcase -runCyclicTest +if runCyclicTest;then + exit 0 +else + exit 1 +fi diff --git a/docker_image_build/Dockerfile b/docker_image_build/Dockerfile index fd1ea5883..57e22bbef 100644 --- a/docker_image_build/Dockerfile +++ b/docker_image_build/Dockerfile @@ -5,5 +5,6 @@ # FROM kvmfornfv1:latest RUN cd /root && git clone https://gerrit.opnfv.org/gerrit/p/yardstick.git -b stable/colorado +RUN sed -i -e "s/3600/9000/g" /root/yardstick/yardstick/ssh.py RUN cd /root/yardstick && python setup.py install WORKDIR /root diff --git a/kernel/arch/x86/include/asm/apic.h b/kernel/arch/x86/include/asm/apic.h index a30316bf8..9686289d2 100644 --- a/kernel/arch/x86/include/asm/apic.h +++ b/kernel/arch/x86/include/asm/apic.h @@ -131,6 +131,7 @@ extern void init_apic_mappings(void); void register_lapic_address(unsigned long address); extern void setup_boot_APIC_clock(void); extern void setup_secondary_APIC_clock(void); +extern void lapic_update_tsc_freq(void); extern int APIC_init_uniprocessor(void); #ifdef CONFIG_X86_64 @@ -166,6 +167,7 @@ static inline void init_apic_mappings(void) { } static inline void disable_local_APIC(void) { } # define setup_boot_APIC_clock x86_init_noop # define setup_secondary_APIC_clock x86_init_noop +static inline void lapic_update_tsc_freq(void) { } #endif /* !CONFIG_X86_LOCAL_APIC */ #ifdef CONFIG_X86_X2APIC diff --git a/kernel/arch/x86/kernel/apic/apic.c b/kernel/arch/x86/kernel/apic/apic.c index 2f69e3b18..58e7cdb25 100644 --- a/kernel/arch/x86/kernel/apic/apic.c +++ b/kernel/arch/x86/kernel/apic/apic.c @@ -305,7 +305,7 @@ int lapic_get_maxlvt(void) /* Clock divisor */ #define APIC_DIVISOR 16 -#define TSC_DIVISOR 32 +#define TSC_DIVISOR 8 /* * This function sets up the local APIC timer, with a timeout of @@ -557,12 +557,36 @@ static void setup_APIC_timer(void) CLOCK_EVT_FEAT_DUMMY); levt->set_next_event = lapic_next_deadline; clockevents_config_and_register(levt, - (tsc_khz / TSC_DIVISOR) * 1000, + tsc_khz * (1000 / TSC_DIVISOR), 0xF, ~0UL); } else clockevents_register_device(levt); } +/* + * Install the updated TSC frequency from recalibration at the TSC + * deadline clockevent devices. + */ +static void __lapic_update_tsc_freq(void *info) +{ + struct clock_event_device *levt = this_cpu_ptr(&lapic_events); + + if (!this_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER)) + return; + + clockevents_update_freq(levt, tsc_khz * (1000 / TSC_DIVISOR)); +} + +void lapic_update_tsc_freq(void) +{ + /* + * The clockevent device's ->mult and ->shift can both be + * changed. In order to avoid races, schedule the frequency + * update code on each CPU. + */ + on_each_cpu(__lapic_update_tsc_freq, NULL, 0); +} + /* * In this functions we calibrate APIC bus clocks to the external timer. 
* diff --git a/kernel/arch/x86/kernel/tsc.c b/kernel/arch/x86/kernel/tsc.c index c7c4d9c51..eafe93888 100644 --- a/kernel/arch/x86/kernel/tsc.c +++ b/kernel/arch/x86/kernel/tsc.c @@ -22,6 +22,7 @@ #include #include #include +#include unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */ EXPORT_SYMBOL(cpu_khz); @@ -1141,6 +1142,9 @@ static void tsc_refine_calibration_work(struct work_struct *work) (unsigned long)tsc_khz / 1000, (unsigned long)tsc_khz % 1000); + /* Inform the TSC deadline clockevent devices about the recalibration */ + lapic_update_tsc_freq(); + out: clocksource_register_khz(&clocksource_tsc, tsc_khz); } diff --git a/tests/cyclictest-node-context.yaml b/tests/cyclictest-node-context.yaml index 2dd74d937..f3c84c856 100644 --- a/tests/cyclictest-node-context.yaml +++ b/tests/cyclictest-node-context.yaml @@ -25,6 +25,7 @@ scenarios: type: Duration duration: 1 interval: 1 + tc: "kvmfornfv_cyclictest-node-context" sla: max_min_latency: 50 max_avg_latency: 100 @@ -47,4 +48,4 @@ scenarios: context: type: Node name: LF - file: /opt/pod.yaml + file: /opt/scripts/pod.yaml diff --git a/tests/kvmfornfv_cyclictest_idle_idle.yaml b/tests/kvmfornfv_cyclictest_idle_idle.yaml new file mode 100644 index 000000000..f3c84c856 --- /dev/null +++ b/tests/kvmfornfv_cyclictest_idle_idle.yaml @@ -0,0 +1,51 @@ +--- +# Sample benchmark task config file +# Measure system high resolution by using Cyclictest +# +# For this sample just like running the command below on the test vm and +# getting latencies info back to the yardstick. +# +# sudo bash cyclictest -a 1 -i 1000 -p 99 -l 1000 -t 1 -h 90 -m -n -q +# + +schema: "yardstick:task:0.1" + +scenarios: +- + type: Cyclictest + options: + affinity: 1 + interval: 1000 + priority: 99 + loops: 600000 + threads: 1 + histogram: 90 + host: kvm.LF + runner: + type: Duration + duration: 1 + interval: 1 + tc: "kvmfornfv_cyclictest-node-context" + sla: + max_min_latency: 50 + max_avg_latency: 100 + max_max_latency: 1000 + action: monitor + setup_options: + rpm_dir: "/opt/rpm" + script_dir: "/opt/scripts" + image_dir: "/opt/image" + host_setup_seqs: + - "host-setup0.sh" + - "reboot" + - "host-setup1.sh" + - "host-run-qemu.sh" + guest_setup_seqs: + - "guest-setup0.sh" + - "reboot" + - "guest-setup1.sh" + +context: + type: Node + name: LF + file: /opt/scripts/pod.yaml
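The TSC_DIVISOR change in apic.c goes together with rewriting the clockevent
frequency as tsc_khz * (1000 / TSC_DIVISOR): both factors are integers, so the
order of evaluation decides how much precision is lost. A minimal shell sketch
of the arithmetic (the tsc_khz value below is only an illustrative frequency,
not taken from the patch):

    tsc_khz=2893330                      # illustrative value, not divisible by 32
    echo $(( (tsc_khz / 32) * 1000 ))    # old form, divisor 32: 90416000, remainder of tsc_khz/32 is dropped
    echo $(( tsc_khz * 1000 / 32 ))      # exact Hz/32 for comparison: 90416562
    echo $(( tsc_khz * (1000 / 8) ))     # new form, divisor 8: 1000/8 = 125 is exact, so no roundoff

Similarly, the reworked host-config derives the isolated CPU range from lscpu
and splits it between QEMU vCPU pinning and the stress tool. A standalone
sketch of that split, using the same variable names as host-config (the 14-27
range is a made-up example of a "NUMA node1 CPU(s)" list and guest_cpus=2 as
in the config):

    host_isolcpus=14-27                     # what lscpu might report for NUMA node1
    guest_cpus=2
    first=${host_isolcpus%-*}; last=${host_isolcpus#*-}
    for ((i = 0; i < guest_cpus; i++)); do
        qemu_cpu[$i]=$first                 # one host CPU per guest vCPU: 14, 15
        first=$((first + 1))
    done
    stress_isolcpus=${first}-${last}        # remaining CPUs (16-27) are left for the stress tool
    echo "${qemu_cpu[@]} / $stress_isolcpus"

host-run-qemu.sh then reads info cpus over the QMP socket and binds each QEMU
vCPU thread to the corresponding qemu_cpu entry with taskset.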