Merge "Fix issues with 'Success_' stats reported by ProxBinSearchProfile"
authorAbhijit Sinha <abhijit.sinha@intel.com>
Thu, 23 Aug 2018 13:39:34 +0000 (13:39 +0000)
committerGerrit Code Review <gerrit@opnfv.org>
Thu, 23 Aug 2018 13:39:34 +0000 (13:39 +0000)
53 files changed:
ansible/group_vars/all.yml
ansible/install-inventory.ini
ansible/install.yaml
ansible/roles/add_custom_repos/tasks/main.yml
ansible/roles/build_yardstick_image/tasks/cloudimg_modify_normal.yml [new file with mode: 0644]
ansible/roles/build_yardstick_image/tasks/cloudimg_modify_nsb.yml [new file with mode: 0644]
ansible/roles/build_yardstick_image/tasks/main.yml [new file with mode: 0644]
ansible/roles/build_yardstick_image/tasks/post_build.yml [new file with mode: 0644]
ansible/roles/build_yardstick_image/tasks/pre_build.yml [new file with mode: 0644]
ansible/roles/build_yardstick_image/vars/main.yml [new file with mode: 0644]
ansible/roles/download_dpdk/defaults/main.yml
ansible/roles/install_civetweb/defaults/main.yml
ansible/roles/install_image_dependencies/tasks/main.yml
samples/vnf_samples/nsut/prox/configs/gen_bng-4.cfg
samples/vnf_samples/nsut/prox/configs/gen_bng_qos-4.cfg
samples/vnf_samples/nsut/prox/configs/handle_bng-4.cfg
samples/vnf_samples/nsut/prox/configs/handle_bng_qos-4.cfg
samples/vnf_samples/nsut/prox/configs/ipv4_bng.lua [new file with mode: 0644]
samples/vnf_samples/nsut/prox/tc_prox_baremetal_bng-4.yaml
samples/vnf_samples/nsut/prox/tc_prox_baremetal_bng_qos-4.yaml
samples/vnf_samples/nsut/prox/tc_prox_heat_context_bng-4.yaml
samples/vnf_samples/nsut/prox/tc_prox_heat_context_bng_qos-4.yaml
samples/vnf_samples/traffic_profiles/ipv4_throughput-10.yaml
samples/vnf_samples/traffic_profiles/ipv4_throughput-2.yaml
samples/vnf_samples/traffic_profiles/ipv4_throughput-3.yaml
samples/vnf_samples/traffic_profiles/ipv4_throughput-4.yaml
samples/vnf_samples/traffic_profiles/ipv4_throughput.yaml
samples/vnf_samples/traffic_profiles/ixia_ipv4_latency.yaml
samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_cgnapt.yaml
samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_vpe.yaml
yardstick/benchmark/contexts/standalone/model.py
yardstick/benchmark/contexts/standalone/ovs_dpdk.py
yardstick/benchmark/contexts/standalone/sriov.py
yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py
yardstick/benchmark/scenarios/availability/attacker/baseattacker.py
yardstick/benchmark/scenarios/availability/serviceha.py
yardstick/common/exceptions.py
yardstick/common/utils.py
yardstick/network_services/traffic_profile/base.py
yardstick/network_services/traffic_profile/ixia_rfc2544.py
yardstick/network_services/traffic_profile/rfc2544.py
yardstick/network_services/vnf_generic/vnf/tg_prox.py
yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py
yardstick/network_services/vnf_generic/vnf/tg_trex.py
yardstick/tests/unit/benchmark/contexts/standalone/test_model.py
yardstick/tests/unit/benchmark/contexts/standalone/test_ovs_dpdk.py
yardstick/tests/unit/benchmark/contexts/standalone/test_sriov.py
yardstick/tests/unit/benchmark/scenarios/availability/test_baseattacker.py [new file with mode: 0644]
yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py
yardstick/tests/unit/common/test_utils.py
yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py
yardstick/tests/unit/network_services/traffic_profile/test_rfc2544.py
yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_prox.py

index 3599682..9f52932 100644 (file)
@@ -1,9 +1,15 @@
 ---\r
 target_os: "Ubuntu"\r
 YARD_IMG_ARCH: "amd64"\r
+IMG_PROPERTY: "normal"\r
 clone_dest: /opt/tempT\r
 release: xenial\r
 normal_image_file: "{{ workspace }}/yardstick-image.img"\r
 nsb_image_file: "{{ workspace }}/yardstick-nsb-image.img"\r
 ubuntu_image_file: /tmp/workspace/yardstick/yardstick-trusty-server.raw\r
-proxy_env: {}\r
+proxy_env:\r
+  PATH: /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/root/bin\r
+  http_proxy: "{{ lookup('env', 'http_proxy') }}"\r
+  https_proxy: "{{ lookup('env', 'https_proxy') }}"\r
+  ftp_proxy: "{{ lookup('env', 'ftp_proxy') }}"\r
+  no_proxy: "{{ lookup('env', 'no_proxy') }}"\r
index 6aa9905..e15a2e9 100644 (file)
@@ -1,7 +1,6 @@
 # the group of systems on which to install yardstick
 # by default just localhost
 [jumphost]
-#yardstickvm1 ansible_user=ubuntu ansible_ssh_pass=password ansible_connection=local
 localhost ansible_connection=local
 
 # section below is only due backward compatibility.
@@ -10,14 +9,10 @@ localhost ansible_connection=local
 jumphost
 
 [yardstick-standalone]
-#yardstickvm2 ansible_host=192.168.2.51 ansible_user=ubuntu ansible_ssh_pass=password ansible_connection=ssh
-# uncomment hosts below if you would to test yardstick-standalone/sriov scenarios
-#yardstick-standalone-node ansible_host=192.168.1.2
-#yardstick-standalone-node-2 ansible_host=192.168.1.3
+# standalone-node ansible_host=192.168.2.51 ansible_user=ubuntu ansible_ssh_pass=password ansible_connection=ssh
 
 [yardstick-baremetal]
-#yardstickvm3 ansible_host=192.168.2.52 ansible_user=ubuntu ansible_ssh_pass=password ansible_connection=ssh
-# hostname ansible_host=192.168.1.2
+# baremetal-node ansible_host=192.168.2.52 ansible_user=ubuntu ansible_ssh_pass=password ansible_connection=ssh
 
 [all:vars]
 arch_amd64=amd64
@@ -25,6 +20,6 @@ arch_arm64=arm64
 inst_mode_container=container
 inst_mode_baremetal=baremetal
 ubuntu_archive={"amd64": "http://archive.ubuntu.com/ubuntu/", "arm64": "http://ports.ubuntu.com/ubuntu-ports/"}
-# uncomment credentials below for yardstick-standalone
-#ansible_user=root
-#ansible_pass=root
+# Uncomment credentials below if needed
+# ansible_user=root
+# ansible_pass=root
index ae9f858..fa8419b 100644 (file)
     - shell: uwsgi -i /etc/yardstick/yardstick.ini
       when: installation_mode != inst_mode_container
 
-- name: Prepare baremetal and standalone server(s)
+
+- name: Prepare baremetal and standalone servers
   hosts: yardstick-baremetal,yardstick-standalone
   become: yes
-  vars:
-    YARD_IMG_ARCH: "{{ arch_amd64 }}"
-  environment:
-    proxy_env:
-      http_proxy: "{{ lookup('env', 'http_proxy') }}"
-      https_proxy: "{{ lookup('env', 'https_proxy') }}"
-      ftp_proxy: "{{ lookup('env', 'ftp_proxy') }}"
-      no_proxy: "{{ lookup('env', 'no_proxy') }}"
+  environment: "{{ proxy_env }}"
 
   roles:
     - add_custom_repos
     - role: set_package_installer_proxy
       when: proxy_env is defined and proxy_env
     # can't update grub in chroot/docker
+    # ?? - enable_iommu_on_boot
     - enable_hugepages_on_boot
     # needed for collectd plugins
     - increase_open_file_limits
     - install_pmu_tools
     - download_collectd
     - install_collectd
+
+
+- hosts: jumphost
+  become: yes
+  vars:
+    img_prop_item: "{{ IMG_PROPERTY }}"
+    img_arch: "{{ YARD_IMG_ARCH }}"
+
+  tasks:
+    - name: Include pre-build
+      include_role:
+        name: build_yardstick_image
+        tasks_from: pre_build.yml
+
+
+- hosts: chroot_image
+  connection: chroot
+  become: yes
+  vars:
+    img_property: "{{ IMG_PROPERTY }}"
+  environment: "{{ proxy_env }}"
+
+  tasks:
+    - name: Include image build
+      include_role:
+        name: build_yardstick_image
+        tasks_from: "cloudimg_modify_{{ img_property }}.yml"
+
+
+- hosts: jumphost
+  become: yes
+
+  tasks:
+    - name: Include post-build
+      include_role:
+        name: build_yardstick_image
+        tasks_from: post_build.yml
index 7341ad0..b1dfd54 100644 (file)
@@ -12,5 +12,5 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 ---
-- include: "{{ target_os|lower }}.yml"
+- include_tasks: "{{ target_os|lower }}.yml"
 
diff --git a/ansible/roles/build_yardstick_image/tasks/cloudimg_modify_normal.yml b/ansible/roles/build_yardstick_image/tasks/cloudimg_modify_normal.yml
new file mode 100644 (file)
index 0000000..435b438
--- /dev/null
@@ -0,0 +1,56 @@
+# Copyright (c) 2018 Intel Corporation.\r
+#\r
+# Licensed under the Apache License, Version 2.0 (the "License");\r
+# you may not use this file except in compliance with the License.\r
+# You may obtain a copy of the License at\r
+#\r
+#      http://www.apache.org/licenses/LICENSE-2.0\r
+#\r
+# Unless required by applicable law or agreed to in writing, software\r
+# distributed under the License is distributed on an "AS IS" BASIS,\r
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+# See the License for the specific language governing permissions and\r
+# limitations under the License.\r
+---\r
+- debug:\r
+    msg: "chrooted in {{ inventory_hostname }}"\r
+\r
+- debug:\r
+    var: proxy_env\r
+    verbosity: 2\r
+\r
+- include_role:\r
+    name: "{{ role_item }}"\r
+  with_items:\r
+    - reset_resolv_conf\r
+    - add_custom_repos\r
+    - modify_cloud_config\r
+  loop_control:\r
+    loop_var: role_item\r
+\r
+- include_role:\r
+    name: set_package_installer_proxy\r
+  when: proxy_env is defined and proxy_env\r
+\r
+- include_role:\r
+    name: install_image_dependencies\r
+\r
+- include_vars: roles/download_unixbench/defaults/main.yml\r
+  when: unixbench_dest is undefined\r
+\r
+- include_vars: roles/download_ramspeed/defaults/main.yml\r
+  when: ramspeed_dest is undefined\r
+\r
+- include_role:\r
+    name: "{{ role_item }}"\r
+  with_items:\r
+    - download_l2fwd\r
+    - download_unixbench\r
+    - install_unixbench\r
+    - download_ramspeed\r
+    - install_ramspeed\r
+    - download_cachestat\r
+  loop_control:\r
+    loop_var: role_item\r
+\r
+  environment: "{{ proxy_env }}"\r
diff --git a/ansible/roles/build_yardstick_image/tasks/cloudimg_modify_nsb.yml b/ansible/roles/build_yardstick_image/tasks/cloudimg_modify_nsb.yml
new file mode 100644 (file)
index 0000000..9a70ff3
--- /dev/null
@@ -0,0 +1,104 @@
+# Copyright (c) 2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- debug:
+    msg: "chrooted in {{ inventory_hostname }}"
+
+- debug:
+    var: proxy_env
+    verbosity: 2
+
+- debug: msg="play_hosts={{play_hosts}}"
+
+- include_role:
+    name: "{{ role_item }}"
+  with_items:
+    - reset_resolv_conf
+    - add_custom_repos
+    - modify_cloud_config
+  loop_control:
+    loop_var: role_item
+
+- include_role:
+    name: set_package_installer_proxy
+  when: proxy_env is defined and proxy_env
+
+- include_vars: roles/install_dpdk/vars/main.yml
+  when: dpdk_make_arch is undefined
+
+- include_vars: roles/download_dpdk/defaults/main.yml
+  when: dpdk_version is undefined
+
+- include_vars: roles/download_trex/defaults/main.yml
+  when: trex_unarchive is undefined
+
+- include_vars: roles/download_civetweb/defaults/main.yml
+  when: civetweb_dest is undefined
+
+- include_role:
+    name: "{{ role_item }}"
+  with_items:
+    - install_image_dependencies
+    - enable_hugepages_on_boot    # can't update grub in chroot/docker
+    - increase_open_file_limits   # needed for collectd plugins
+    - download_dpdk
+    - install_dpdk
+    - download_trex
+    - install_trex
+    - download_pktgen
+    - install_pktgen
+    - download_civetweb
+    - install_civetweb
+    - download_samplevnfs
+  loop_control:
+    loop_var: role_item
+  environment: "{{ proxy_env }}"
+
+- include_vars: roles/install_dpdk/defaults/main.yml
+  when: INSTALL_BIN_PATH is undefined
+
+- include_vars: roles/download_samplevnfs/defaults/main.yml
+  when: samplevnf_dest is undefined
+- set_fact:
+    samplevnf_path: "{{ samplevnf_dest }}"
+- include_role:
+    name: install_samplevnf
+  with_items:
+    - PROX
+    - UDP_Replay
+    - ACL
+    - FW
+    - CGNAPT
+  loop_control:
+    loop_var: vnf_name
+
+- include_vars: roles/download_drivers/defaults/main.yml
+  when: i40evf_path is undefined
+
+- include_role:
+    name: "{{ role_item }}"
+  with_items:
+    - install_dpdk_shared  # build shared DPDK for collectd only, required DPDK downloaded already
+    - install_rabbitmq
+    - download_intel_cmt_cat
+    - install_intel_cmt_cat
+    - download_pmu_tools
+    - install_pmu_tools
+    - download_collectd
+    - install_collectd
+    - download_drivers
+    - install_drivers
+  loop_control:
+    loop_var: role_item
+  environment: "{{ proxy_env }}"
diff --git a/ansible/roles/build_yardstick_image/tasks/main.yml b/ansible/roles/build_yardstick_image/tasks/main.yml
new file mode 100644 (file)
index 0000000..e21cbb7
--- /dev/null
@@ -0,0 +1,14 @@
+# Copyright (c) 2018 Intel Corporation.\r
+#\r
+# Licensed under the Apache License, Version 2.0 (the "License");\r
+# you may not use this file except in compliance with the License.\r
+# You may obtain a copy of the License at\r
+#\r
+#      http://www.apache.org/licenses/LICENSE-2.0\r
+#\r
+# Unless required by applicable law or agreed to in writing, software\r
+# distributed under the License is distributed on an "AS IS" BASIS,\r
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+# See the License for the specific language governing permissions and\r
+# limitations under the License.\r
+---\r
diff --git a/ansible/roles/build_yardstick_image/tasks/post_build.yml b/ansible/roles/build_yardstick_image/tasks/post_build.yml
new file mode 100644 (file)
index 0000000..c6888f8
--- /dev/null
@@ -0,0 +1,46 @@
+# Copyright (c) 2018 Intel Corporation.\r
+#\r
+# Licensed under the Apache License, Version 2.0 (the "License");\r
+# you may not use this file except in compliance with the License.\r
+# You may obtain a copy of the License at\r
+#\r
+#      http://www.apache.org/licenses/LICENSE-2.0\r
+#\r
+# Unless required by applicable law or agreed to in writing, software\r
+# distributed under the License is distributed on an "AS IS" BASIS,\r
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+# See the License for the specific language governing permissions and\r
+# limitations under the License.\r
+---\r
+- name: convert image to image file\r
+  command: qemu-img convert -c -o compat=0.10 -O qcow2 {{ raw_imgfile }} {{ imgfile }}\r
+\r
+# Post build yardstick image\r
+- group_by:\r
+    key: image_builder\r
+\r
+- name: remove ubuntu policy-rc.d workaround\r
+  file:\r
+    path: "{{ mountdir }}/usr/sbin/policy-rc.d"\r
+    state: absent\r
+  when: "target_os == 'Ubuntu'"\r
+\r
+- name: cleanup fake tmp fstab\r
+  file:\r
+    path: "{{ fake_fstab }}"\r
+    state: absent\r
+\r
+- mount:\r
+    name: "{{ mountdir }}/proc"\r
+    state: unmounted\r
+\r
+- mount:\r
+    name: "{{ mountdir }}"\r
+    state: unmounted\r
+\r
+- name: kpartx -dv to delete all image partition device nodes\r
+  command: kpartx -dv "{{ raw_imgfile }}"\r
+  ignore_errors: true\r
+\r
+- debug:\r
+    msg: "yardstick image = {{ imgfile }}"\r
diff --git a/ansible/roles/build_yardstick_image/tasks/pre_build.yml b/ansible/roles/build_yardstick_image/tasks/pre_build.yml
new file mode 100644 (file)
index 0000000..2dae380
--- /dev/null
@@ -0,0 +1,202 @@
+# Copyright (c) 2018 Intel Corporation.\r
+#\r
+# Licensed under the Apache License, Version 2.0 (the "License");\r
+# you may not use this file except in compliance with the License.\r
+# You may obtain a copy of the License at\r
+#\r
+#      http://www.apache.org/licenses/LICENSE-2.0\r
+#\r
+# Unless required by applicable law or agreed to in writing, software\r
+# distributed under the License is distributed on an "AS IS" BASIS,\r
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+# See the License for the specific language governing permissions and\r
+# limitations under the License.\r
+---\r
+- name: Group\r
+  group_by:\r
+    key: image_builder\r
+\r
+- package: name=parted state=present\r
+  environment: "{{ proxy_env }}"\r
+\r
+- package: name=kpartx state=present\r
+  environment: "{{ proxy_env }}"\r
+\r
+- package: name="{{ growpart_package[ansible_os_family] }}" state=present\r
+  environment: "{{ proxy_env }}"\r
+\r
+- set_fact:\r
+    imgfile: "{{ normal_image_file }}"\r
+  when: img_prop_item == "normal"\r
+\r
+- set_fact:\r
+    imgfile: "{{ nsb_image_file }}"\r
+  when: img_prop_item == "nsb"\r
+\r
+- set_fact:\r
+    mountdir: "{{ lookup('env', 'mountdir')|default('/mnt/yardstick', true) }}"\r
+    raw_imgfile: "{{ workspace }}/{{ raw_imgfile_basename }}"\r
+\r
+# cleanup non-lxd\r
+- name: unmount all old mount points\r
+  mount:\r
+    name: "{{ item }}"\r
+    state: unmounted\r
+  with_items:\r
+    # order matters\r
+    - "{{ mountdir }}/proc"\r
+    - "{{ mountdir }}"\r
+    - "/mnt/{{ release }}"\r
+\r
+- name: kpartx -dv to delete all image partition device nodes\r
+  command: kpartx -dv "{{ raw_imgfile }}"\r
+  ignore_errors: true\r
+\r
+- name: Debug dump loop devices\r
+  command: losetup -a\r
+  ignore_errors: true\r
+\r
+- name: delete loop devices for image file\r
+  # use this because kpartx -dv will fail if raw_imgfile was deleted\r
+  # but in theory we could have deleted file still attached to loopback device?\r
+  # use grep because of // and awk\r
+  shell: losetup -O NAME,BACK-FILE | grep "{{ raw_imgfile_basename }}" | awk '{ print $1 }' | xargs -l1 losetup -v -d\r
+  ignore_errors: true\r
+\r
+- name: Debug dump loop devices again\r
+  command: losetup -a\r
+  ignore_errors: true\r
+\r
+- name: delete {{ raw_imgfile }}\r
+  file:\r
+    path: "{{ raw_imgfile }}"\r
+    state: absent\r
+\r
+# common\r
+- name: remove {{ mountdir }}\r
+  file:\r
+    path: "{{ mountdir }}"\r
+    state: absent\r
+\r
+# download-common\r
+- name: remove {{ workspace }}\r
+  file:\r
+    path: "{{ workspace }}"\r
+    state: directory\r
+\r
+- name: "fetch {{ image_url }} and verify "\r
+  fetch_url_and_verify:\r
+    url: "{{ image_url }}"\r
+    sha256url: "{{ sha256sums_url }}"\r
+    dest: "{{ image_dest }}"\r
+\r
+- name: convert image to raw\r
+  command: "qemu-img convert {{ image_dest }} {{ raw_imgfile }}"\r
+\r
+- name: resize image to allow for more VNFs\r
+  command: "qemu-img resize -f raw {{ raw_imgfile }} +2G"\r
+\r
+- name: resize partition to allow for more VNFs\r
+  # use growpart because maybe it handles GPT better than parted\r
+  command: growpart {{ raw_imgfile }}  1\r
+\r
+- name: create mknod devices in chroot\r
+  command: "mknod -m 0660 /dev/loop{{ item }} b 7 {{ item }}"\r
+  args:\r
+    creates: "/dev/loop{{ item }}"\r
+  with_sequence: start=0 end=9\r
+  tags: mknod_devices\r
+\r
+- name: find first partition device\r
+  command: kpartx -l "{{ raw_imgfile }}"\r
+  register: kpartx_res\r
+\r
+- set_fact:\r
+    image_first_partition: "{{ kpartx_res.stdout_lines[0].split()[0] }}"\r
+\r
+- set_fact:\r
+    # assume / is the first partition\r
+    image_first_partition_device: "/dev/mapper/{{ image_first_partition }}"\r
+\r
+- name: use kpartx to create device nodes for the raw image loop device\r
+  # operate on the loop device to avoid /dev namespace missing devices\r
+  command: kpartx -avs "{{ raw_imgfile }}"\r
+\r
+- name: parted dump raw image\r
+  command: parted "{{ raw_imgfile }}" print\r
+  register: parted_res\r
+\r
+- debug:\r
+    var: parted_res\r
+    verbosity: 2\r
+\r
+- name: use blkid to find filesystem type of first partition device\r
+  command: blkid -o value -s TYPE {{ image_first_partition_device }}\r
+  register: blkid_res\r
+\r
+- set_fact:\r
+    image_fs_type: "{{ blkid_res.stdout.strip() }}"\r
+\r
+- fail:\r
+    msg: "We only support ext4 image filesystems because we have to resize"\r
+  when: image_fs_type != "ext4"\r
+\r
+- name: fsck the image filesystem\r
+  command: "e2fsck -y -f {{ image_first_partition_device  }}"\r
+\r
+- name: resize filesystem to full partition size\r
+  command: resize2fs {{ image_first_partition_device }}\r
+\r
+- name: fsck the image filesystem\r
+  command: "e2fsck -y -f {{ image_first_partition_device  }}"\r
+\r
+- name: make tmp disposable fstab\r
+  command: mktemp --tmpdir fake_fstab.XXXXXXXXXX\r
+  register: mktemp_res\r
+\r
+- set_fact:\r
+    fake_fstab: "{{ mktemp_res.stdout.strip() }}"\r
+\r
+- name: mount first partition on image device\r
+  mount:\r
+    src: "{{ image_first_partition_device }}"\r
+    name: "{{ mountdir }}"\r
+    # fstype is required\r
+    fstype: "{{ image_fs_type }}"\r
+    # !!!!!!! this is required otherwise we add entries to /etc/fstab\r
+    # and prevent the system from booting\r
+    fstab: "{{ fake_fstab }}"\r
+    state: mounted\r
+\r
+- name: mount chroot /proc\r
+  mount:\r
+    src: none\r
+    name: "{{ mountdir }}/proc"\r
+    fstype: proc\r
+    # !!!!!!! this is required otherwise we add entries to /etc/fstab\r
+    # and prevent the system from booting\r
+    fstab: "{{ fake_fstab }}"\r
+    state: mounted\r
+\r
+- name: if arm copy qemu-aarch64-static into chroot\r
+  copy:\r
+    src: /usr/bin/qemu-aarch64-static\r
+    dest: "{{ mountdir }}/usr/bin"\r
+  when: img_arch == arch_arm64\r
+\r
+- name: create ubuntu policy-rc.d workaround\r
+  copy:\r
+    content: "{{ '#!/bin/sh\nexit 101\n' }}"\r
+    dest: "{{ mountdir }}/usr/sbin/policy-rc.d"\r
+    mode: 0755\r
+  when: "target_os == 'Ubuntu'"\r
+\r
+- name: add chroot as host\r
+  add_host:\r
+    name: "{{ mountdir }}"\r
+    groups: chroot_image,image_builder\r
+    connection: chroot\r
+    ansible_python_interpreter: /usr/bin/python3\r
+    # set this host variable here\r
+    nameserver_ip: "{{ ansible_dns.nameservers[0] }}"\r
+    image_type: vm\r
diff --git a/ansible/roles/build_yardstick_image/vars/main.yml b/ansible/roles/build_yardstick_image/vars/main.yml
new file mode 100644 (file)
index 0000000..6728e5a
--- /dev/null
@@ -0,0 +1,31 @@
+# Copyright (c) 2018 Intel Corporation.\r
+#\r
+# Licensed under the Apache License, Version 2.0 (the "License");\r
+# you may not use this file except in compliance with the License.\r
+# You may obtain a copy of the License at\r
+#\r
+#      http://www.apache.org/licenses/LICENSE-2.0\r
+#\r
+# Unless required by applicable law or agreed to in writing, software\r
+# distributed under the License is distributed on an "AS IS" BASIS,\r
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+# See the License for the specific language governing permissions and\r
+# limitations under the License.\r
+---\r
+boot_modes:\r
+  'amd64': disk1\r
+  'arm64': uefi1\r
+boot_mode: "{{ boot_modes[YARD_IMG_ARCH] }}"\r
+image_filename: "{{ release }}-server-cloudimg-{{ YARD_IMG_ARCH }}-{{ boot_mode }}.img"\r
+image_path: "{{ release }}/current/{{ image_filename }}"\r
+host: "{{ lookup('env', 'HOST')|default('cloud-images.ubuntu.com', true)}}"\r
+image_url: "{{ lookup('env', 'IMAGE_URL')|default('https://' ~ host ~ '/' ~ image_path, true) }}"\r
+image_dest: "{{ workspace }}/{{ image_filename }}"\r
+sha256sums_path: "{{ release }}/current/SHA256SUMS"\r
+sha256sums_filename: "{{ sha256sums_path|basename }}"\r
+sha256sums_url: "{{ lookup('env', 'SHA256SUMS_URL')|default('https://' ~ host ~ '/' ~ sha256sums_path, true) }}"\r
+workspace: "{{ lookup('env', 'workspace')|default('/tmp/workspace/yardstick', true) }}"\r
+raw_imgfile_basename: "yardstick-{{ release }}-server.raw"\r
+growpart_package:\r
+  RedHat: cloud-utils-growpart\r
+  Debian: cloud-guest-utils\r
index 885eebf..8371188 100644 (file)
@@ -1,5 +1,5 @@
 ---
-dpdk_version: "17.02.1"
+dpdk_version: "17.05"
 dpdk_url: "http://fast.dpdk.org/rel/dpdk-{{ dpdk_version }}.tar.xz"
 dpdk_file: "{{ dpdk_url|basename }}"
 dpdk_unarchive: "{{ dpdk_file|regex_replace('[.]tar[.]xz$', '') }}"
index ed5ab27..c974036 100644 (file)
@@ -15,8 +15,9 @@
 civetweb_dest: "{{ clone_dest }}/civetweb"
 civetweb_build_dependencies:
   Debian:
-    - libjson-c-dev=0.11-4ubuntu2
-    - libjson0
-    - libjson0-dev
+#    - libjson-c-dev=0.11-4ubuntu2
+#    - libjson0
+#    - libjson0-dev
     - libssl-dev
+    - libjson-c-dev
   RedHat:
index ffd30f3..4e55339 100644 (file)
@@ -19,5 +19,5 @@
     action: "{{ ansible_pkg_mgr }} name={{ item }} state=latest update_cache=yes"
     register: pkg_mgr_results
     retries: "{{ pkg_mgr_retries }}"
-    until: pkg_mgr_results|success
+    until: pkg_mgr_results is success
     with_items: "{{ install_dependencies[ansible_os_family] }}"
index a70ea65..60f21bd 100644 (file)
@@ -94,7 +94,7 @@ rand_offset=14
 random=0000XXXX00XX00XX
 rand_offset=18
 ; dst_ip: [10,11].[odd 1..255].[16,48,80,112,144,176,208,240].[odd 1..255]
-random=0000101XXXXXXXX1XXX10000XXXXXXX1
+random=0000101XXXXXXXX11XXX0000XXXXXXX1
 rand_offset=38
 lat pos=42
 
@@ -113,7 +113,7 @@ rand_offset=14
 random=0000XXXX00XX00XX
 rand_offset=18
 ; dst_ip: [10,11].[odd 1..255].[16,48,80,112,144,176,208,240].[odd 1..255]
-random=0000101XXXXXXXX1XXX10000XXXXXXX1
+random=0000101XXXXXXXX11XXX0000XXXXXXX1
 rand_offset=38
 lat pos=42
 
index a70ea65..60f21bd 100644 (file)
@@ -94,7 +94,7 @@ rand_offset=14
 random=0000XXXX00XX00XX
 rand_offset=18
 ; dst_ip: [10,11].[odd 1..255].[16,48,80,112,144,176,208,240].[odd 1..255]
-random=0000101XXXXXXXX1XXX10000XXXXXXX1
+random=0000101XXXXXXXX11XXX0000XXXXXXX1
 rand_offset=38
 lat pos=42
 
@@ -113,7 +113,7 @@ rand_offset=14
 random=0000XXXX00XX00XX
 rand_offset=18
 ; dst_ip: [10,11].[odd 1..255].[16,48,80,112,144,176,208,240].[odd 1..255]
-random=0000101XXXXXXXX1XXX10000XXXXXXX1
+random=0000101XXXXXXXX11XXX0000XXXXXXX1
 rand_offset=38
 lat pos=42
 
index 7d350bd..c191d29 100644 (file)
@@ -14,7 +14,7 @@
 #
 
 [lua]
-lpm4 = dofile("ipv4.lua")
+lpm4 = dofile("ipv4_bng.lua")
 user_table = dofile("gre_table.lua")
 
 [eal options]
index f65b7cb..b873fb9 100644 (file)
@@ -14,7 +14,7 @@
 #
 
 [lua]
-lpm4 = dofile("ipv4.lua")
+lpm4 = dofile("ipv4_bng.lua")
 user_table = dofile("gre_table.lua")
 dscp_table = dofile("dscp.lua")
 
diff --git a/samples/vnf_samples/nsut/prox/configs/ipv4_bng.lua b/samples/vnf_samples/nsut/prox/configs/ipv4_bng.lua
new file mode 100644 (file)
index 0000000..22697b0
--- /dev/null
@@ -0,0 +1,99 @@
+-- Copyright (c) 2016-2017 Intel Corporation
+--
+-- Licensed under the Apache License, Version 2.0 (the "License");
+-- you may not use this file except in compliance with the License.
+-- You may obtain a copy of the License at
+--
+--      http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+require("parameters")
+
+local lpm4 = {}
+lpm4.next_hops = {
+   {id = 0,  port_id = 0, ip = ip("1.1.1.1"),  mac = mac(tester_mac1), mpls = 0x112},
+   {id = 1,  port_id = 1, ip = ip("2.1.1.1"),  mac = mac(tester_mac1), mpls = 0x212},
+   {id = 2,  port_id = 0, ip = ip("3.1.1.1"),  mac = mac(tester_mac3), mpls = 0x312},
+   {id = 3,  port_id = 1, ip = ip("4.1.1.1"),  mac = mac(tester_mac3), mpls = 0x412},
+   {id = 4,  port_id = 0, ip = ip("5.1.1.1"),  mac = mac(tester_mac1), mpls = 0x512},
+   {id = 5,  port_id = 1, ip = ip("6.1.1.1"),  mac = mac(tester_mac1), mpls = 0x612},
+   {id = 6,  port_id = 0, ip = ip("7.1.1.1"),  mac = mac(tester_mac3), mpls = 0x712},
+   {id = 7,  port_id = 1, ip = ip("8.1.1.1"),  mac = mac(tester_mac3), mpls = 0x812},
+   {id = 8,  port_id = 0, ip = ip("9.1.1.1"),  mac = mac(tester_mac1), mpls = 0x912},
+   {id = 9,  port_id = 1, ip = ip("10.1.1.1"), mac = mac(tester_mac1), mpls = 0x1012},
+   {id = 10, port_id = 0, ip = ip("11.1.1.1"), mac = mac(tester_mac3), mpls = 0x1112},
+   {id = 11, port_id = 1, ip = ip("12.1.1.1"), mac = mac(tester_mac3), mpls = 0x1212},
+   {id = 12, port_id = 0, ip = ip("13.1.1.1"), mac = mac(tester_mac1), mpls = 0x1312},
+   {id = 13, port_id = 1, ip = ip("14.1.1.1"), mac = mac(tester_mac1), mpls = 0x1412},
+   {id = 14, port_id = 0, ip = ip("15.1.1.1"), mac = mac(tester_mac3), mpls = 0x1512},
+   {id = 15, port_id = 1, ip = ip("16.1.1.1"), mac = mac(tester_mac3), mpls = 0x1612},
+   {id = 16, port_id = 0, ip = ip("17.1.1.1"), mac = mac(tester_mac1), mpls = 0x1712},
+   {id = 17, port_id = 1, ip = ip("18.1.1.1"), mac = mac(tester_mac1), mpls = 0x1812},
+   {id = 18, port_id = 0, ip = ip("19.1.1.1"), mac = mac(tester_mac3), mpls = 0x1912},
+   {id = 19, port_id = 1, ip = ip("20.1.1.1"), mac = mac(tester_mac3), mpls = 0x2012},
+   {id = 20, port_id = 0, ip = ip("21.1.1.1"), mac = mac(tester_mac1), mpls = 0x2112},
+   {id = 21, port_id = 1, ip = ip("22.1.1.1"), mac = mac(tester_mac1), mpls = 0x2212},
+   {id = 22, port_id = 0, ip = ip("23.1.1.1"), mac = mac(tester_mac3), mpls = 0x2312},
+   {id = 23, port_id = 1, ip = ip("24.1.1.1"), mac = mac(tester_mac3), mpls = 0x2412},
+   {id = 24, port_id = 0, ip = ip("25.1.1.1"), mac = mac(tester_mac1), mpls = 0x2512},
+   {id = 25, port_id = 1, ip = ip("26.1.1.1"), mac = mac(tester_mac1), mpls = 0x2612},
+   {id = 26, port_id = 0, ip = ip("27.1.1.1"), mac = mac(tester_mac3), mpls = 0x2712},
+   {id = 27, port_id = 1, ip = ip("28.1.1.1"), mac = mac(tester_mac3), mpls = 0x2812},
+   {id = 28, port_id = 0, ip = ip("29.1.1.1"), mac = mac(tester_mac1), mpls = 0x2912},
+   {id = 29, port_id = 1, ip = ip("30.1.1.1"), mac = mac(tester_mac1), mpls = 0x3012},
+   {id = 30, port_id = 0, ip = ip("31.1.1.1"), mac = mac(tester_mac3), mpls = 0x3112},
+   {id = 31, port_id = 1, ip = ip("32.1.1.1"), mac = mac(tester_mac3), mpls = 0x3212},
+   {id = 32, port_id = 0, ip = ip("33.1.1.1"), mac = mac(tester_mac1), mpls = 0x3312},
+   {id = 33, port_id = 1, ip = ip("34.1.1.1"), mac = mac(tester_mac1), mpls = 0x3412},
+   {id = 34, port_id = 0, ip = ip("35.1.1.1"), mac = mac(tester_mac3), mpls = 0x3512},
+   {id = 35, port_id = 1, ip = ip("36.1.1.1"), mac = mac(tester_mac3), mpls = 0x3612},
+   {id = 36, port_id = 0, ip = ip("37.1.1.1"), mac = mac(tester_mac1), mpls = 0x3712},
+   {id = 37, port_id = 1, ip = ip("38.1.1.1"), mac = mac(tester_mac1), mpls = 0x3812},
+   {id = 38, port_id = 0, ip = ip("39.1.1.1"), mac = mac(tester_mac3), mpls = 0x3912},
+   {id = 39, port_id = 1, ip = ip("40.1.1.1"), mac = mac(tester_mac3), mpls = 0x4012},
+   {id = 40, port_id = 0, ip = ip("41.1.1.1"), mac = mac(tester_mac1), mpls = 0x4112},
+   {id = 41, port_id = 1, ip = ip("42.1.1.1"), mac = mac(tester_mac1), mpls = 0x4212},
+   {id = 42, port_id = 0, ip = ip("43.1.1.1"), mac = mac(tester_mac3), mpls = 0x4312},
+   {id = 43, port_id = 1, ip = ip("44.1.1.1"), mac = mac(tester_mac3), mpls = 0x4412},
+   {id = 44, port_id = 0, ip = ip("45.1.1.1"), mac = mac(tester_mac1), mpls = 0x4512},
+   {id = 45, port_id = 1, ip = ip("46.1.1.1"), mac = mac(tester_mac1), mpls = 0x4612},
+   {id = 46, port_id = 0, ip = ip("47.1.1.1"), mac = mac(tester_mac3), mpls = 0x4712},
+   {id = 47, port_id = 1, ip = ip("48.1.1.1"), mac = mac(tester_mac3), mpls = 0x4812},
+   {id = 48, port_id = 0, ip = ip("49.1.1.1"), mac = mac(tester_mac1), mpls = 0x4912},
+   {id = 49, port_id = 1, ip = ip("50.1.1.1"), mac = mac(tester_mac1), mpls = 0x5012},
+   {id = 50, port_id = 0, ip = ip("51.1.1.1"), mac = mac(tester_mac3), mpls = 0x5112},
+   {id = 51, port_id = 1, ip = ip("52.1.1.1"), mac = mac(tester_mac3), mpls = 0x5212},
+   {id = 52, port_id = 0, ip = ip("53.1.1.1"), mac = mac(tester_mac1), mpls = 0x5312},
+   {id = 53, port_id = 1, ip = ip("54.1.1.1"), mac = mac(tester_mac1), mpls = 0x5412},
+   {id = 54, port_id = 0, ip = ip("55.1.1.1"), mac = mac(tester_mac3), mpls = 0x5512},
+   {id = 55, port_id = 1, ip = ip("56.1.1.1"), mac = mac(tester_mac3), mpls = 0x5612},
+   {id = 56, port_id = 0, ip = ip("57.1.1.1"), mac = mac(tester_mac1), mpls = 0x5712},
+   {id = 57, port_id = 1, ip = ip("58.1.1.1"), mac = mac(tester_mac1), mpls = 0x5812},
+   {id = 58, port_id = 0, ip = ip("59.1.1.1"), mac = mac(tester_mac3), mpls = 0x5912},
+   {id = 59, port_id = 1, ip = ip("60.1.1.1"), mac = mac(tester_mac3), mpls = 0x6012},
+   {id = 60, port_id = 0, ip = ip("61.1.1.1"), mac = mac(tester_mac1), mpls = 0x6112},
+   {id = 61, port_id = 1, ip = ip("62.1.1.1"), mac = mac(tester_mac1), mpls = 0x6212},
+   {id = 62, port_id = 0, ip = ip("63.1.1.1"), mac = mac(tester_mac3), mpls = 0x6312},
+   {id = 63, port_id = 1, ip = ip("64.1.1.1"), mac = mac(tester_mac3), mpls = 0x6412},
+}
+
+lpm4.routes = {};
+
+base_ip = 10 * 2^24;
+
+for i = 1,2^13 do
+   res = ip(base_ip + (1 * 2^12) * (i - 1));
+
+   lpm4.routes[i] = {
+      cidr        = {ip = res, depth = 24},
+      next_hop_id = (i - 1) % 64,
+   }
+end
+
+return lpm4
index 1711c56..f869139 100644 (file)
@@ -36,7 +36,7 @@ scenarios:
         "-t": ""
       prox_files:
         "configs/gre_table.lua" : ""
-        "configs/ipv4.lua" : ""
+        "configs/ipv4_bng.lua" : ""
       prox_generate_parameter: True
 
     tg__0:
index a7d2d38..707fc1d 100644 (file)
@@ -36,7 +36,7 @@ scenarios:
         "-t": ""
       prox_files:
         "configs/gre_table.lua" : ""
-        "configs/ipv4.lua" : ""
+        "configs/ipv4_bng.lua" : ""
         "configs/dscp.lua" : ""
       prox_generate_parameter: True
 
index e4cd546..d580bd8 100644 (file)
@@ -36,7 +36,7 @@ scenarios:
         "-t": ""
       prox_files:
         "configs/gre_table.lua" : ""
-        "configs/ipv4.lua" : ""
+        "configs/ipv4_bng.lua" : ""
       prox_generate_parameter: True
 
     tg__0:
index 60002f0..7f447b1 100644 (file)
@@ -36,7 +36,7 @@ scenarios:
         "-t": ""
       prox_files:
         "configs/gre_table.lua" : ""
-        "configs/ipv4.lua" : ""
+        "configs/ipv4_bng.lua" : ""
         "configs/dscp.lua" : ""
       prox_generate_parameter: True
 
index 98b1bf9..c1acb69 100644 (file)
@@ -44,6 +44,7 @@ traffic_profile:
   traffic_type: RFC2544Profile # defines traffic behavior - constant or look for highest possible throughput
   frame_rate: 100  # pc of linerate
   duration: {{ duration }}
+  enable_latency: False
 
 uplink_0:
   ipv4:
index ee04153..54f42b2 100644 (file)
@@ -44,6 +44,7 @@ traffic_profile:
   traffic_type: RFC2544Profile # defines traffic behavior - constant or look for highest possible throughput
   frame_rate: 100  # pc of linerate
   duration: {{ duration }}
+  enable_latency: False
 
 uplink_0:
   ipv4:
index 19f0836..06fb220 100644 (file)
@@ -44,6 +44,7 @@ traffic_profile:
   traffic_type: RFC2544Profile # defines traffic behavior - constant or look for highest possible throughput
   frame_rate: 100  # pc of linerate
   duration: {{ duration }}
+  enable_latency: False
 
 uplink_0:
   ipv4:
index 95fa0b6..f6a12eb 100644 (file)
@@ -44,6 +44,7 @@ traffic_profile:
   traffic_type: RFC2544Profile # defines traffic behavior - constant or look for highest possible throughput
   frame_rate: 100  # pc of linerate
   duration: {{ duration }}
+  enable_latency: False
 
 uplink_0:
   ipv4:
index c267e76..194bcd9 100644 (file)
@@ -43,6 +43,7 @@ traffic_profile:
   traffic_type : RFC2544Profile # defines traffic behavior - constant or look for highest possible throughput
   frame_rate : 100  # pc of linerate
   duration: {{ duration }}
+  enable_latency: False
 
 uplink_0:
       ipv4:
index 5074914..9067937 100644 (file)
@@ -29,6 +29,7 @@ traffic_profile:
   traffic_type : IXIARFC2544Profile # defines traffic behavior - constant or look for highest possible throughput
   frame_rate : 100  # pc of linerate
   duration: {{ duration }}
+  enable_latency: True
 
 uplink_0:
       ipv4:
index 3cbd7cd..6e2f8ec 100644 (file)
@@ -29,6 +29,7 @@ traffic_profile:
   traffic_type : IXIARFC2544Profile # defines traffic behavior - constant or look for highest possible throughput
   frame_rate : 100  # pc of linerate
   duration: {{ duration }}
+  enable_latency: True
 
 uplink_0:
       ipv4:
index edff361..cfc5f1e 100644 (file)
@@ -43,6 +43,7 @@ traffic_profile:
   traffic_type : IXIARFC2544Profile # defines traffic behavior - constant or look for highest possible throughput
   frame_rate : 100  # pc of linerate
   injection_time: {{ injection_time }}
+  enable_latency: True
 
 uplink_0:
       ipv4:
index ecddcbb..962cb48 100644 (file)
@@ -89,6 +89,17 @@ VM_TEMPLATE = """
   </devices>
 </domain>
 """
+
+USER_DATA_TEMPLATE = """
+cat > {user_file} <<EOF
+#cloud-config
+preserve_hostname: false
+hostname: {host}
+users:
+{user_config}
+EOF
+"""
+
 WAIT_FOR_BOOT = 30
 
 
@@ -268,7 +279,7 @@ class Libvirt(object):
         return vm_image
 
     @classmethod
-    def build_vm_xml(cls, connection, flavor, vm_name, index):
+    def build_vm_xml(cls, connection, flavor, vm_name, index, cdrom_img):
         """Build the XML from the configuration parameters"""
         memory = flavor.get('ram', '4096')
         extra_spec = flavor.get('extra_specs', {})
@@ -293,6 +304,9 @@ class Libvirt(object):
             socket=socket, threads=threads,
             vm_image=image, cpuset=cpuset, cputune=cputune)
 
+        # Add CD-ROM device
+        vm_xml = Libvirt.add_cdrom(cdrom_img, vm_xml)
+
         return vm_xml, mac
 
     @staticmethod
@@ -320,6 +334,71 @@ class Libvirt(object):
         et = ET.ElementTree(element=root)
         et.write(file_name, encoding='utf-8', method='xml')
 
+    @classmethod
+    def add_cdrom(cls, file_path, xml_str):
+        """Add a CD-ROM disk XML node to the 'devices' node
+
+        <devices>
+            <disk type='file' device='cdrom'>
+              <driver name='qemu' type='raw'/>
+              <source file='/var/lib/libvirt/images/data.img'/>
+              <target dev='hdb'/>
+              <readonly/>
+            </disk>
+            ...
+        </devices>
+        """
+
+        root = ET.fromstring(xml_str)
+        device = root.find('devices')
+
+        disk = ET.SubElement(device, 'disk')
+        disk.set('type', 'file')
+        disk.set('device', 'cdrom')
+
+        driver = ET.SubElement(disk, 'driver')
+        driver.set('name', 'qemu')
+        driver.set('type', 'raw')
+
+        source = ET.SubElement(disk, 'source')
+        source.set('file', file_path)
+
+        target = ET.SubElement(disk, 'target')
+        target.set('dev', 'hdb')
+
+        ET.SubElement(disk, 'readonly')
+        return ET.tostring(root)
+
+    @staticmethod
+    def gen_cdrom_image(connection, file_path, vm_name, vm_user, key_filename):
+        """Generate a cloud-init ISO image to be attached as a CD-ROM"""
+
+        user_config = ["    - name: {user_name}",
+                       "      ssh_authorized_keys:",
+                       "        - {pub_key_str}"]
+        if vm_user != "root":
+            user_config.append("      sudo: ALL=(ALL) NOPASSWD:ALL")
+
+        meta_data = "/tmp/meta-data"
+        user_data = "/tmp/user-data"
+        with open(".".join([key_filename, "pub"]), "r") as pub_key_file:
+            pub_key_str = pub_key_file.read().rstrip()
+        user_conf = os.linesep.join(user_config).format(pub_key_str=pub_key_str, user_name=vm_user)
+
+        cmd_lst = [
+            "touch %s" % meta_data,
+            USER_DATA_TEMPLATE.format(user_file=user_data, host=vm_name, user_config=user_conf),
+            "genisoimage -output {0} -volid cidata -joliet -r {1} {2}".format(file_path,
+                                                                              meta_data,
+                                                                              user_data),
+            "rm {0} {1}".format(meta_data, user_data),
+        ]
+        for cmd in cmd_lst:
+            LOG.info(cmd)
+            status, _, error = connection.execute(cmd)
+            if status:
+                raise exceptions.LibvirtQemuImageCreateError(error=error)
+
 
 class StandaloneContextHelper(object):
     """ This class handles all the common code for standalone
@@ -331,7 +410,7 @@ class StandaloneContextHelper(object):
     @staticmethod
     def install_req_libs(connection, extra_pkgs=None):
         extra_pkgs = extra_pkgs or []
-        pkgs = ["qemu-kvm", "libvirt-bin", "bridge-utils", "numactl", "fping"]
+        pkgs = ["qemu-kvm", "libvirt-bin", "bridge-utils", "numactl", "fping", "genisoimage"]
         pkgs.extend(extra_pkgs)
         cmd_template = "dpkg-query -W --showformat='${Status}\\n' \"%s\"|grep 'ok installed'"
         for pkg in pkgs:
@@ -457,6 +536,25 @@ class StandaloneContextHelper(object):
                 node["ip"] = ip
         return nodes
 
+    @classmethod
+    def check_update_key(cls, connection, node, vm_name, id_name, cdrom_img):
+        # Generate public/private keys if private key file is not provided
+        user_name = node.get('user')
+        if not user_name:
+            node['user'] = 'root'
+            user_name = node.get('user')
+        if not node.get('key_filename'):
+            key_filename = ''.join(
+                [constants.YARDSTICK_ROOT_PATH,
+                 'yardstick/resources/files/yardstick_key-',
+                 id_name])
+            ssh.SSH.gen_keys(key_filename)
+            node['key_filename'] = key_filename
+        # Update image with public key
+        key_filename = node.get('key_filename')
+        Libvirt.gen_cdrom_image(connection, cdrom_img, vm_name, user_name, key_filename)
+        return node
+
 
 class Server(object):
     """ This class handles geting vnf nodes
index 88ad598..5891f79 100644 (file)
@@ -394,13 +394,14 @@ class OvsDpdkContext(base.Context):
         for index, (key, vnf) in enumerate(collections.OrderedDict(
                 self.servers).items()):
             cfg = '/tmp/vm_ovs_%d.xml' % index
-            vm_name = "vm_%d" % index
+            vm_name = "vm-%d" % index
+            cdrom_img = "/var/lib/libvirt/images/cdrom-%d.img" % index
 
             # 1. Check and delete VM if already exists
             model.Libvirt.check_if_vm_exists_and_delete(vm_name,
                                                         self.connection)
             xml_str, mac = model.Libvirt.build_vm_xml(
-                self.connection, self.vm_flavor, vm_name, index)
+                self.connection, self.vm_flavor, vm_name, index, cdrom_img)
 
             # 2: Cleanup already available VMs
             for vfs in [vfs for vfs_name, vfs in vnf["network_ports"].items()
@@ -411,16 +412,24 @@ class OvsDpdkContext(base.Context):
             model.Libvirt.write_file(cfg, xml_str)
             self.connection.put(cfg, cfg)
 
+            node = self.vnf_node.generate_vnf_instance(self.vm_flavor,
+                                                       self.networks,
+                                                       self.host_mgmt.get('ip'),
+                                                       key, vnf, mac)
+            # Generate a public/private key pair if no private key file is provided
+            node = model.StandaloneContextHelper.check_update_key(self.connection,
+                                                                  node,
+                                                                  vm_name,
+                                                                  self.name,
+                                                                  cdrom_img)
+
+            # store vnf node details
+            nodes.append(node)
+
             # NOTE: launch through libvirt
             LOG.info("virsh create ...")
             model.Libvirt.virsh_create_vm(self.connection, cfg)
 
             self.vm_names.append(vm_name)
 
-            # build vnf node details
-            nodes.append(self.vnf_node.generate_vnf_instance(self.vm_flavor,
-                                                             self.networks,
-                                                             self.host_mgmt.get('ip'),
-                                                             key, vnf, mac))
-
         return nodes
index 3da12a9..8d410b2 100644 (file)
@@ -225,13 +225,14 @@ class SriovContext(base.Context):
         for index, (key, vnf) in enumerate(collections.OrderedDict(
                 self.servers).items()):
             cfg = '/tmp/vm_sriov_%s.xml' % str(index)
-            vm_name = "vm_%s" % str(index)
+            vm_name = "vm-%s" % str(index)
+            cdrom_img = "/var/lib/libvirt/images/cdrom-%d.img" % index
 
             # 1. Check and delete VM if already exists
             model.Libvirt.check_if_vm_exists_and_delete(vm_name,
                                                         self.connection)
             xml_str, mac = model.Libvirt.build_vm_xml(
-                self.connection, self.vm_flavor, vm_name, index)
+                self.connection, self.vm_flavor, vm_name, index, cdrom_img)
 
             # 2: Cleanup already available VMs
             network_ports = collections.OrderedDict(
@@ -243,17 +244,26 @@ class SriovContext(base.Context):
             model.Libvirt.write_file(cfg, xml_str)
             self.connection.put(cfg, cfg)
 
+            node = self.vnf_node.generate_vnf_instance(self.vm_flavor,
+                                                       self.networks,
+                                                       self.host_mgmt.get('ip'),
+                                                       key, vnf, mac)
+            # Generate a public/private key pair if no private key file is provided
+            node = model.StandaloneContextHelper.check_update_key(self.connection,
+                                                                  node,
+                                                                  vm_name,
+                                                                  self.name,
+                                                                  cdrom_img)
+
+            # store vnf node details
+            nodes.append(node)
+
             # NOTE: launch through libvirt
             LOG.info("virsh create ...")
             model.Libvirt.virsh_create_vm(self.connection, cfg)
 
             self.vm_names.append(vm_name)
 
-            # build vnf node details
-            nodes.append(self.vnf_node.generate_vnf_instance(
-                self.vm_flavor, self.networks, self.host_mgmt.get('ip'),
-                key, vnf, mac))
-
         return nodes
 
     def _get_vf_data(self, value, vfmac, pfif):
index 53abd58..4c79a49 100644 (file)
@@ -34,6 +34,8 @@ class BaremetalAttacker(BaseAttacker):
     __attacker_type__ = 'bare-metal-down'
 
     def setup(self):
+        # a bare-metal "down" attack must always be recovered, even when the SLA passes
+        self.mandatory = True
         LOG.debug("config:%s context:%s", self._config, self._context)
         host = self._context.get(self._config['host'], None)
 
index d67a16b..7871cc9 100644 (file)
@@ -63,6 +63,7 @@ class BaseAttacker(object):
         self.data = {}
         self.setup_done = False
         self.intermediate_variables = {}
+        self.mandatory = False
 
     @staticmethod
     def get_attacker_cls(attacker_cfg):
index 7f976fd..fdfe7cb 100755 (executable)
@@ -88,9 +88,9 @@ class ServiceHA(base.Scenario):
 
     def teardown(self):
         """scenario teardown"""
-        # only recover when sla not pass
-        if not self.sla_pass:
-            for attacker in self.attackers:
+        # recover when the attacker is mandatory or the SLA did not pass
+        for attacker in self.attackers:
+            if attacker.mandatory or not self.sla_pass:
                 attacker.recover()
 
 
index b39a0af..10c1f3f 100644 (file)
@@ -79,6 +79,10 @@ class FunctionNotImplemented(YardstickException):
                '"%(class_name)" class.')
 
 
+class InvalidType(YardstickException):
+    message = 'Type "%(type_to_convert)s" is not valid'
+
+
 class InfluxDBConfigurationMissing(YardstickException):
     message = ('InfluxDB configuration is not available. Add "influxdb" as '
                'a dispatcher and the configuration section')
index c019cd2..31885c0 100644 (file)
@@ -21,6 +21,7 @@ import importlib
 import ipaddress
 import logging
 import os
+import pydoc
 import random
 import re
 import signal
@@ -578,3 +579,24 @@ def send_socket_command(host, port, command):
     finally:
         sock.close()
     return ret
+
+
+def safe_cast(value, type_to_convert, default_value):
+    """Convert value to the given type; on conversion error return default_value
+
+    :param value: value to convert
+    :param type_to_convert: target type; either a type object or a string resolvable by pydoc.locate
+    :param default_value: default value to return
+    :return: converted value or default_value
+    """
+    if isinstance(type_to_convert, type):
+        _type = type_to_convert
+    else:
+        _type = pydoc.locate(type_to_convert)
+        if not _type:
+            raise exceptions.InvalidType(type_to_convert=type_to_convert)
+
+    try:
+        return _type(value)
+    except ValueError:
+        return default_value
index a8f950b..4fbceea 100644 (file)
@@ -44,6 +44,7 @@ class TrafficProfileConfig(object):
         self.lower_bound = tprofile.get('lower_bound')
         self.upper_bound = tprofile.get('upper_bound')
         self.step_interval = tprofile.get('step_interval')
+        self.enable_latency = tprofile.get('enable_latency', False)
 
     def _parse_rate(self, rate):
         """Parse traffic profile rate
index 760b1e8..f079733 100644 (file)
@@ -179,10 +179,6 @@ class IXIARFC2544Profile(trex_traffic_profile.TrexProfile):
         except ZeroDivisionError:
             LOG.info('No traffic is flowing')
 
-        samples['TxThroughput'] = tx_throughput
-        samples['RxThroughput'] = rx_throughput
-        samples['DropPercentage'] = drop_percent
-
         if first_run:
             completed = True if drop_percent <= tolerance else False
         if (first_run and
@@ -196,4 +192,21 @@ class IXIARFC2544Profile(trex_traffic_profile.TrexProfile):
         else:
             completed = True
 
+        latency_ns_avg = float(
+            sum([samples[iface]['Store-Forward_Avg_latency_ns']
+            for iface in samples])) / num_ifaces
+        latency_ns_min = float(
+            sum([samples[iface]['Store-Forward_Min_latency_ns']
+            for iface in samples])) / num_ifaces
+        latency_ns_max = float(
+            sum([samples[iface]['Store-Forward_Max_latency_ns']
+            for iface in samples])) / num_ifaces
+
+        samples['TxThroughput'] = tx_throughput
+        samples['RxThroughput'] = rx_throughput
+        samples['DropPercentage'] = drop_percent
+        samples['latency_ns_avg'] = latency_ns_avg
+        samples['latency_ns_min'] = latency_ns_min
+        samples['latency_ns_max'] = latency_ns_max
+
         return completed, samples
index b54fc57..9870293 100644 (file)
@@ -118,7 +118,8 @@ class RFC2544Profile(trex_traffic_profile.TrexProfile):
                 ports.append(port_num)
                 port_pg_id.add_port(port_num)
                 profile = self._create_profile(profile_data,
-                                               self.rate, port_pg_id)
+                                               self.rate, port_pg_id,
+                                               self.config.enable_latency)
                 self.generator.client.add_streams(profile, ports=[port_num])
 
         self.generator.client.start(ports=ports,
@@ -126,7 +127,7 @@ class RFC2544Profile(trex_traffic_profile.TrexProfile):
                                     force=True)
         return ports, port_pg_id
 
-    def _create_profile(self, profile_data, rate, port_pg_id):
+    def _create_profile(self, profile_data, rate, port_pg_id, enable_latency):
         """Create a STL profile (list of streams) for a port"""
         streams = []
         for packet_name in profile_data:
@@ -134,7 +135,8 @@ class RFC2544Profile(trex_traffic_profile.TrexProfile):
                     get('outer_l2', {}).get('framesize'))
             imix_data = self._create_imix_data(imix)
             self._create_vm(profile_data[packet_name])
-            _streams = self._create_streams(imix_data, rate, port_pg_id)
+            _streams = self._create_streams(imix_data, rate, port_pg_id,
+                                            enable_latency)
             streams.extend(_streams)
         return trex_stl_streams.STLProfile(streams)
 
@@ -213,7 +215,7 @@ class RFC2544Profile(trex_traffic_profile.TrexProfile):
         return trex_stl_packet_builder_scapy.STLPktBuilder(
             pkt=base_pkt / pad, vm=self.trex_vm)
 
-    def _create_streams(self, imix_data, rate, port_pg_id):
+    def _create_streams(self, imix_data, rate, port_pg_id, enable_latency):
         """Create a list of streams per packet size
 
         The STL TX mode speed of the generated streams will depend on the frame
@@ -237,7 +239,8 @@ class RFC2544Profile(trex_traffic_profile.TrexProfile):
                              in imix_data.items() if float(weight) > 0):
             packet = self._create_single_packet(size)
             pg_id = port_pg_id.increase_pg_id()
-            stl_flow = trex_stl_streams.STLFlowLatencyStats(pg_id=pg_id)
+            stl_flow = (trex_stl_streams.STLFlowLatencyStats(pg_id=pg_id) if
+                        enable_latency else None)
             mode = trex_stl_streams.STLTXCont(percentage=weight * rate / 100)
             streams.append(trex_stl_client.STLStream(
                 packet=packet, flow_stats=stl_flow, mode=mode))
@@ -247,19 +250,16 @@ class RFC2544Profile(trex_traffic_profile.TrexProfile):
                             correlated_traffic):
         """Calculate the drop percentage and run the traffic"""
         completed = False
-        tx_rate_fps = 0
-        rx_rate_fps = 0
-        for sample in samples:
-            tx_rate_fps += sum(
-                port['tx_throughput_fps'] for port in sample.values())
-            rx_rate_fps += sum(
-                port['rx_throughput_fps'] for port in sample.values())
-        tx_rate_fps = round(float(tx_rate_fps) / len(samples), 2)
-        rx_rate_fps = round(float(rx_rate_fps) / len(samples), 2)
-
-        # TODO(esm): RFC2544 doesn't tolerate packet loss, why do we?
-        out_packets = sum(port['out_packets'] for port in samples[-1].values())
-        in_packets = sum(port['in_packets'] for port in samples[-1].values())
+        out_pkt_end = sum(port['out_packets'] for port in samples[-1].values())
+        in_pkt_end = sum(port['in_packets'] for port in samples[-1].values())
+        out_pkt_ini = sum(port['out_packets'] for port in samples[0].values())
+        in_pkt_ini = sum(port['in_packets'] for port in samples[0].values())
+        time_diff = (list(samples[-1].values())[0]['timestamp'] -
+                     list(samples[0].values())[0]['timestamp']).total_seconds()
+        out_packets = out_pkt_end - out_pkt_ini
+        in_packets = in_pkt_end - in_pkt_ini
+        tx_rate_fps = float(out_packets) / time_diff
+        rx_rate_fps = float(in_packets) / time_diff
         drop_percent = 100.0
 
         # https://tools.ietf.org/html/rfc2544#section-26.3
index 854319a..d12c42e 100644 (file)
@@ -12,9 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from __future__ import absolute_import
-
 import logging
+import copy
 
 from yardstick.network_services.utils import get_nsb_option
 from yardstick.network_services.vnf_generic.vnf.prox_vnf import ProxApproxVnf
@@ -32,7 +31,9 @@ class ProxTrafficGen(SampleVNFTrafficGen):
 
     def __init__(self, name, vnfd, task_id, setup_env_helper_type=None,
                  resource_helper_type=None):
-        # don't call superclass, use custom wrapper of ProxApproxVnf
+        vnfd_cpy = copy.deepcopy(vnfd)
+        super(ProxTrafficGen, self).__init__(name, vnfd_cpy, task_id)
+
         self._vnf_wrapper = ProxApproxVnf(
             name, vnfd, task_id, setup_env_helper_type, resource_helper_type)
         self.bin_path = get_nsb_option('bin_path', '')
index 8b16874..94ab069 100644 (file)
@@ -70,24 +70,23 @@ class IxiaResourceHelper(ClientResourceHelper):
             try:
                 # reverse lookup port name from port_num so the stats dict is descriptive
                 intf = self.vnfd_helper.find_interface_by_port(port_num)
-                port_name = intf["name"]
+                port_name = intf['name']
+                avg_latency = stats['Store-Forward_Avg_latency_ns'][port_num]
+                min_latency = stats['Store-Forward_Min_latency_ns'][port_num]
+                max_latency = stats['Store-Forward_Max_latency_ns'][port_num]
                 samples[port_name] = {
-                    "rx_throughput_kps": float(stats["Rx_Rate_Kbps"][port_num]),
-                    "tx_throughput_kps": float(stats["Tx_Rate_Kbps"][port_num]),
-                    "rx_throughput_mbps": float(stats["Rx_Rate_Mbps"][port_num]),
-                    "tx_throughput_mbps": float(stats["Tx_Rate_Mbps"][port_num]),
-                    "in_packets": int(stats["Valid_Frames_Rx"][port_num]),
-                    "out_packets": int(stats["Frames_Tx"][port_num]),
-                    "RxThroughput": float(stats["Valid_Frames_Rx"][port_num]) / duration,
-                    "TxThroughput": float(stats["Frames_Tx"][port_num]) / duration,
+                    'rx_throughput_kps': float(stats['Rx_Rate_Kbps'][port_num]),
+                    'tx_throughput_kps': float(stats['Tx_Rate_Kbps'][port_num]),
+                    'rx_throughput_mbps': float(stats['Rx_Rate_Mbps'][port_num]),
+                    'tx_throughput_mbps': float(stats['Tx_Rate_Mbps'][port_num]),
+                    'in_packets': int(stats['Valid_Frames_Rx'][port_num]),
+                    'out_packets': int(stats['Frames_Tx'][port_num]),
+                    'RxThroughput': float(stats['Valid_Frames_Rx'][port_num]) / duration,
+                    'TxThroughput': float(stats['Frames_Tx'][port_num]) / duration,
+                    'Store-Forward_Avg_latency_ns': utils.safe_cast(avg_latency, int, 0),
+                    'Store-Forward_Min_latency_ns': utils.safe_cast(min_latency, int, 0),
+                    'Store-Forward_Max_latency_ns': utils.safe_cast(max_latency, int, 0)
                 }
-                avg_latency = stats["Store-Forward_Avg_latency_ns"][port_num]
-                min_latency = stats["Store-Forward_Min_latency_ns"][port_num]
-                max_latency = stats["Store-Forward_Max_latency_ns"][port_num]
-                samples[port_name] = {
-                    "Store-Forward_Avg_latency_ns": avg_latency,
-                    "Store-Forward_Min_latency_ns": min_latency,
-                    "Store-Forward_Max_latency_ns": max_latency}
             except IndexError:
                 pass
 
index 58b7348..4296da8 100644 (file)
@@ -11,8 +11,8 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-""" Trex acts as traffic generation and vnf definitions based on IETS Spec """
 
+import datetime
 import logging
 import os
 
@@ -167,6 +167,7 @@ class TrexResourceHelper(ClientResourceHelper):
 
     def _get_samples(self, ports, port_pg_id=None):
         stats = self.get_stats(ports)
+        timestamp = datetime.datetime.now()
         samples = {}
         for pname in (intf['name'] for intf in self.vnfd_helper.interfaces):
             port_num = self.vnfd_helper.port_num(pname)
@@ -178,6 +179,7 @@ class TrexResourceHelper(ClientResourceHelper):
                 'tx_throughput_bps': float(port_stats.get('tx_bps', 0.0)),
                 'in_packets': int(port_stats.get('ipackets', 0)),
                 'out_packets': int(port_stats.get('opackets', 0)),
+                'timestamp': timestamp
             }
 
             pg_id_list = port_pg_id.get_pg_ids(port_num)
index 8ad5819..371e4ef 100644 (file)
@@ -46,6 +46,16 @@ XML_SAMPLE_INTERFACE = """<?xml version="1.0"?>
 
 class ModelLibvirtTestCase(unittest.TestCase):
 
+    XML_STR = model.VM_TEMPLATE.format(
+        vm_name="vm_name",
+        random_uuid=uuid.uuid4(),
+        mac_addr="00:01:02:03:04:05",
+        memory=2048, vcpu=2, cpu=2,
+        numa_cpus=0 - 10,
+        socket=1, threads=1,
+        vm_image="/var/lib/libvirt/images/yardstick-nsb-image.img",
+        cpuset=2 - 10, cputune='')
+
     def setUp(self):
         self.pci_address_str = '0001:04:03.2'
         self.pci_address = utils.PciAddress(self.pci_address_str)
@@ -66,34 +76,34 @@ class ModelLibvirtTestCase(unittest.TestCase):
             ssh_mock.execute = mock.Mock(return_value=(0, "a", ""))
             ssh.return_value = ssh_mock
         # NOTE(ralonsoh): this test doesn't cover function execution.
-        model.Libvirt.check_if_vm_exists_and_delete("vm_0", ssh_mock)
+        model.Libvirt.check_if_vm_exists_and_delete('vm-0', ssh_mock)
 
     def test_virsh_create_vm(self):
         self.mock_ssh.execute = mock.Mock(return_value=(0, 0, 0))
-        model.Libvirt.virsh_create_vm(self.mock_ssh, 'vm_0')
-        self.mock_ssh.execute.assert_called_once_with('virsh create vm_0')
+        model.Libvirt.virsh_create_vm(self.mock_ssh, 'vm-0')
+        self.mock_ssh.execute.assert_called_once_with('virsh create vm-0')
 
     def test_virsh_create_vm_error(self):
         self.mock_ssh.execute = mock.Mock(return_value=(1, 0, 'error_create'))
         with self.assertRaises(exceptions.LibvirtCreateError) as exc:
-            model.Libvirt.virsh_create_vm(self.mock_ssh, 'vm_0')
+            model.Libvirt.virsh_create_vm(self.mock_ssh, 'vm-0')
         self.assertEqual('Error creating the virtual machine. Error: '
                          'error_create.', str(exc.exception))
-        self.mock_ssh.execute.assert_called_once_with('virsh create vm_0')
+        self.mock_ssh.execute.assert_called_once_with('virsh create vm-0')
 
     def test_virsh_destroy_vm(self):
         self.mock_ssh.execute = mock.Mock(return_value=(0, 0, 0))
-        model.Libvirt.virsh_destroy_vm('vm_0', self.mock_ssh)
-        self.mock_ssh.execute.assert_called_once_with('virsh destroy vm_0')
+        model.Libvirt.virsh_destroy_vm('vm-0', self.mock_ssh)
+        self.mock_ssh.execute.assert_called_once_with('virsh destroy vm-0')
 
     @mock.patch.object(model, 'LOG')
     def test_virsh_destroy_vm_error(self, mock_logger):
         self.mock_ssh.execute = mock.Mock(return_value=(1, 0, 'error_destroy'))
         mock_logger.warning = mock.Mock()
-        model.Libvirt.virsh_destroy_vm('vm_0', self.mock_ssh)
+        model.Libvirt.virsh_destroy_vm('vm-0', self.mock_ssh)
         mock_logger.warning.assert_called_once_with(
-            'Error destroying VM %s. Error: %s', 'vm_0', 'error_destroy')
-        self.mock_ssh.execute.assert_called_once_with('virsh destroy vm_0')
+            'Error destroying VM %s. Error: %s', 'vm-0', 'error_destroy')
+        self.mock_ssh.execute.assert_called_once_with('virsh destroy vm-0')
 
     def test_add_interface_address(self):
         xml = ElementTree.ElementTree(
@@ -171,6 +181,56 @@ class ModelLibvirtTestCase(unittest.TestCase):
         self.assertEqual('0x' + vm_pci.split(':')[2].split('.')[1],
                          interface_address.get('function'))
 
+    def test_add_cdrom(self):
+        xml_input = copy.deepcopy(XML_SAMPLE)
+        xml_output = model.Libvirt.add_cdrom('/var/lib/libvirt/images/data.img', xml_input)
+
+        root = ElementTree.fromstring(xml_output)
+        et_out = ElementTree.ElementTree(element=root)
+        disk = et_out.find('devices').find('disk')
+        self.assertEqual('file', disk.get('type'))
+        self.assertEqual('cdrom', disk.get('device'))
+        driver = disk.find('driver')
+        self.assertEqual('qemu', driver.get('name'))
+        self.assertEqual('raw', driver.get('type'))
+        source = disk.find('source')
+        self.assertEqual('/var/lib/libvirt/images/data.img', source.get('file'))
+        target = disk.find('target')
+        self.assertEqual('hdb', target.get('dev'))
+        self.assertIsNotNone(disk.find('readonly'))
+
+    def test_gen_cdrom_image(self):
+        self.mock_ssh.execute = mock.Mock(return_value=(0, 0, 0))
+        root = ElementTree.fromstring(self.XML_STR)
+        hostname = root.find('name').text
+        meta_data = "/tmp/meta-data"
+        user_data = "/tmp/user-data"
+        file_path = "/tmp/cdrom-0.img"
+        key_filename = "id_rsa"
+        pub_key_str = "KEY"
+        user = 'root'
+        user_config = ["    - name: {user_name}",
+                       "      ssh_authorized_keys:",
+                       "        - {pub_key_str}"]
+
+        user_conf = os.linesep.join(user_config).format(pub_key_str=pub_key_str, user_name=user)
+        with mock.patch('six.moves.builtins.open', mock.mock_open(read_data=pub_key_str),
+                        create=True) as mock_file:
+            with open(key_filename, "r") as h:
+                result = h.read()
+            model.Libvirt.gen_cdrom_image(self.mock_ssh, file_path, hostname, user, key_filename)
+            mock_file.assert_called_with(".".join([key_filename, "pub"]), "r")
+        self.assertEqual(result, pub_key_str)
+
+        self.mock_ssh.execute.assert_has_calls([
+            mock.call("touch %s" % meta_data),
+            mock.call(model.USER_DATA_TEMPLATE.format(user_file=user_data, host=hostname,
+                                                      user_config=user_conf)),
+            mock.call("genisoimage -output {0} -volid cidata"
+                      " -joliet -r {1} {2}".format(file_path, meta_data, user_data)),
+            mock.call("rm {0} {1}".format(meta_data, user_data))
+        ])
+
     def test_create_snapshot_qemu(self):
         self.mock_ssh.execute = mock.Mock(return_value=(0, 0, 0))
         index = 1
@@ -211,6 +271,19 @@ class ModelLibvirtTestCase(unittest.TestCase):
         self.mock_ssh.put_file.assert_called_once_with(base_image,
                                                        '/tmp/base_image')
 
+    @mock.patch.object(model.Libvirt, 'gen_cdrom_image')
+    def test_check_update_key(self, mock_gen_cdrom_image):
+        node = {'user': 'defuser', 'key_filename': '/home/ubuntu/id_rsa'}
+        cdrom_img = "/var/lib/libvirt/images/data.img"
+        id_name = 'fake_name'
+        key_filename = node.get('key_filename')
+        root = ElementTree.fromstring(self.XML_STR)
+        hostname = root.find('name').text
+        model.StandaloneContextHelper.check_update_key(self.mock_ssh, node, hostname, id_name,
+                                                       cdrom_img)
+        mock_gen_cdrom_image.assert_called_once_with(self.mock_ssh, cdrom_img, hostname,
+                                                     node.get('user'), key_filename)
+
     @mock.patch.object(os, 'access', return_value=False)
     def test_create_snapshot_qemu_no_image_local(self, mock_os_access):
         self.mock_ssh.execute = mock.Mock(side_effect=[(0, 0, 0), (1, 0, 0)])
@@ -253,18 +326,20 @@ class ModelLibvirtTestCase(unittest.TestCase):
         mac = model.StandaloneContextHelper.get_mac_address(0x00)
         _uuid = uuid.uuid4()
         connection = mock.Mock()
+        cdrom_img = '/tmp/cdrom-0.img'
         with mock.patch.object(model.StandaloneContextHelper,
                                'get_mac_address', return_value=mac) as \
                 mock_get_mac_address, \
                 mock.patch.object(uuid, 'uuid4', return_value=_uuid):
             xml_out, mac = model.Libvirt.build_vm_xml(
-                connection, flavor, 'vm_name', 100)
+                connection, flavor, 'vm_name', 100, cdrom_img)
 
         xml_ref = model.VM_TEMPLATE.format(vm_name='vm_name',
             random_uuid=_uuid, mac_addr=mac, memory='1024', vcpu='8', cpu='4',
             numa_cpus='0-7', socket='3', threads='2',
             vm_image='qemu_image', cpuset='4,5', cputune='cool')
-        self.assertEqual(xml_ref, xml_out)
+        xml_ref = model.Libvirt.add_cdrom(cdrom_img, xml_ref)
+        self.assertEqual(xml_out, xml_ref)
         mock_get_mac_address.assert_called_once_with(0x00)
         mock_create_snapshot_qemu.assert_called_once_with(
             connection, 100, 'images')
@@ -296,6 +371,7 @@ class ModelLibvirtTestCase(unittest.TestCase):
         status = model.Libvirt.pin_vcpu_for_perf(ssh_mock, 4)
         self.assertIsNotNone(status)
 
+
 class StandaloneContextHelperTestCase(unittest.TestCase):
 
     NODE_SAMPLE = "nodes_sample.yaml"
@@ -463,7 +539,7 @@ class ServerTestCase(unittest.TestCase):
             }
         }
         status = self.server.generate_vnf_instance(
-            {}, self.NETWORKS, '1.1.1.1/24', 'vm_0', vnf, '00:00:00:00:00:01')
+            {}, self.NETWORKS, '1.1.1.1/24', 'vm-0', vnf, '00:00:00:00:00:01')
         self.assertIsNotNone(status)
 
 
index 69779d3..1a24075 100644 (file)
@@ -231,8 +231,8 @@ class OvsDpdkContextTestCase(unittest.TestCase):
     def test_undeploy(self, mock_libvirt):
         self.ovs_dpdk.vm_deploy = True
         self.ovs_dpdk.connection = mock.Mock()
-        self.ovs_dpdk.vm_names = ['vm_0', 'vm_1']
-        self.ovs_dpdk.drivers = ['vm_0', 'vm_1']
+        self.ovs_dpdk.vm_names = ['vm-0', 'vm-1']
+        self.ovs_dpdk.drivers = ['vm-0', 'vm-1']
         self.ovs_dpdk.cleanup_ovs_dpdk_env = mock.Mock()
         self.ovs_dpdk.networks = self.NETWORKS
         self.ovs_dpdk.undeploy()
@@ -370,7 +370,7 @@ class OvsDpdkContextTestCase(unittest.TestCase):
             ssh.return_value = ssh_mock
         self.ovs_dpdk.vm_deploy = True
         self.ovs_dpdk.connection = ssh_mock
-        self.ovs_dpdk.vm_names = ['vm_0', 'vm_1']
+        self.ovs_dpdk.vm_names = ['vm-0', 'vm-1']
         self.ovs_dpdk.drivers = []
         self.ovs_dpdk.networks = self.NETWORKS
         self.ovs_dpdk.helper.get_mac_address = mock.Mock(return_value="")
@@ -381,7 +381,7 @@ class OvsDpdkContextTestCase(unittest.TestCase):
     def test__enable_interfaces(self, mock_add_ovs_interface):
         self.ovs_dpdk.vm_deploy = True
         self.ovs_dpdk.connection = mock.Mock()
-        self.ovs_dpdk.vm_names = ['vm_0', 'vm_1']
+        self.ovs_dpdk.vm_names = ['vm-0', 'vm-1']
         self.ovs_dpdk.drivers = []
         self.ovs_dpdk.networks = self.NETWORKS
         self.ovs_dpdk.ovs_properties = {'vpath': 'fake_path'}
@@ -391,15 +391,16 @@ class OvsDpdkContextTestCase(unittest.TestCase):
             'fake_path', 0, self.NETWORKS['private_0']['vpci'],
             self.NETWORKS['private_0']['mac'], 'test')
 
+    @mock.patch.object(model.StandaloneContextHelper, 'check_update_key')
     @mock.patch.object(model.Libvirt, 'write_file')
     @mock.patch.object(model.Libvirt, 'build_vm_xml')
     @mock.patch.object(model.Libvirt, 'check_if_vm_exists_and_delete')
     @mock.patch.object(model.Libvirt, 'virsh_create_vm')
-    def test_setup_ovs_dpdk_context(self, mock_create_vm, mock_check_if_exists,
-                                    mock_build_xml, mock_write_file):
+    def test_setup_ovs_dpdk_context(self, mock_create_vm, mock_check_if_exists, mock_build_xml,
+                                    mock_write_file, mock_check_update_key):
         self.ovs_dpdk.vm_deploy = True
         self.ovs_dpdk.connection = mock.Mock()
-        self.ovs_dpdk.vm_names = ['vm_0', 'vm_1']
+        self.ovs_dpdk.vm_names = ['vm-0', 'vm-1']
         self.ovs_dpdk.drivers = []
         self.ovs_dpdk.servers = {
             'vnf_0': {
@@ -413,23 +414,32 @@ class OvsDpdkContextTestCase(unittest.TestCase):
         self.ovs_dpdk.networks = self.NETWORKS
         self.ovs_dpdk.host_mgmt = {}
         self.ovs_dpdk.flavor = {}
+        self.ovs_dpdk.file_path = '/var/lib/libvirt/images/cdrom-0.img'
         self.ovs_dpdk.configure_nics_for_ovs_dpdk = mock.Mock(return_value="")
-        xml_str = mock.Mock()
+        self.ovs_dpdk._name_task_id = 'fake_name'
+        xml_str = 'vm-0'
         mock_build_xml.return_value = (xml_str, '00:00:00:00:00:01')
         self.ovs_dpdk._enable_interfaces = mock.Mock(return_value=xml_str)
         vnf_instance = mock.Mock()
+        vnf_instance_2 = mock.Mock()
+        mock_check_update_key.return_value = vnf_instance_2
         self.ovs_dpdk.vnf_node.generate_vnf_instance = mock.Mock(
             return_value=vnf_instance)
 
-        self.assertEqual([vnf_instance],
+        self.assertEqual([vnf_instance_2],
                          self.ovs_dpdk.setup_ovs_dpdk_context())
         mock_create_vm.assert_called_once_with(
             self.ovs_dpdk.connection, '/tmp/vm_ovs_0.xml')
         mock_check_if_exists.assert_called_once_with(
-            'vm_0', self.ovs_dpdk.connection)
+            'vm-0', self.ovs_dpdk.connection)
         mock_build_xml.assert_called_once_with(
-            self.ovs_dpdk.connection, self.ovs_dpdk.vm_flavor, 'vm_0', 0)
+            self.ovs_dpdk.connection, self.ovs_dpdk.vm_flavor, 'vm-0', 0, self.ovs_dpdk.file_path)
         mock_write_file.assert_called_once_with('/tmp/vm_ovs_0.xml', xml_str)
+        mock_check_update_key.assert_called_once_with(self.ovs_dpdk.connection,
+                                                      vnf_instance,
+                                                      xml_str,
+                                                      self.ovs_dpdk._name_task_id,
+                                                      self.ovs_dpdk.file_path)
 
     @mock.patch.object(io, 'BytesIO')
     def test__check_hugepages(self, mock_bytesio):
index 74c3156..ae8e95f 100644 (file)
@@ -113,8 +113,8 @@ class SriovContextTestCase(unittest.TestCase):
 
         self.sriov.vm_deploy = True
         self.sriov.connection = mock_ssh
-        self.sriov.vm_names = ['vm_0', 'vm_1']
-        self.sriov.drivers = ['vm_0', 'vm_1']
+        self.sriov.vm_names = ['vm-0', 'vm-1']
+        self.sriov.drivers = ['vm-0', 'vm-1']
         self.assertIsNone(self.sriov.undeploy())
 
     def _get_file_abspath(self, filename):
@@ -254,7 +254,7 @@ class SriovContextTestCase(unittest.TestCase):
             ssh.return_value = ssh_mock
         self.sriov.vm_deploy = True
         self.sriov.connection = ssh_mock
-        self.sriov.vm_names = ['vm_0', 'vm_1']
+        self.sriov.vm_names = ['vm-0', 'vm-1']
         self.sriov.drivers = []
         self.sriov.networks = self.NETWORKS
         self.sriov.helper.get_mac_address = mock.Mock(return_value="")
@@ -267,7 +267,7 @@ class SriovContextTestCase(unittest.TestCase):
     def test__enable_interfaces(self, mock_add_sriov, mock_ssh):
         self.sriov.vm_deploy = True
         self.sriov.connection = mock_ssh
-        self.sriov.vm_names = ['vm_0', 'vm_1']
+        self.sriov.vm_names = ['vm-0', 'vm-1']
         self.sriov.drivers = []
         self.sriov.networks = self.NETWORKS
         self.assertEqual(
@@ -276,12 +276,13 @@ class SriovContextTestCase(unittest.TestCase):
         mock_add_sriov.assert_called_once_with(
             '0000:00:0a.0', 0, self.NETWORKS['private_0']['mac'], 'test')
 
+    @mock.patch.object(model.StandaloneContextHelper, 'check_update_key')
     @mock.patch.object(model.Libvirt, 'build_vm_xml')
     @mock.patch.object(model.Libvirt, 'check_if_vm_exists_and_delete')
     @mock.patch.object(model.Libvirt, 'write_file')
     @mock.patch.object(model.Libvirt, 'virsh_create_vm')
-    def test_setup_sriov_context(self, mock_create_vm, mock_write_file,
-                                 mock_check, mock_build_vm_xml):
+    def test_setup_sriov_context(self, mock_create_vm, mock_write_file, mock_check,
+                                 mock_build_vm_xml, mock_check_update_key):
         self.sriov.servers = {
             'vnf_0': {
                 'network_ports': {
@@ -297,24 +298,29 @@ class SriovContextTestCase(unittest.TestCase):
         self.sriov.vm_flavor = 'flavor'
         self.sriov.networks = 'networks'
         self.sriov.configure_nics_for_sriov = mock.Mock()
+        self.sriov._name_task_id = 'fake_name'
         cfg = '/tmp/vm_sriov_0.xml'
-        vm_name = 'vm_0'
+        vm_name = 'vm-0'
         xml_out = mock.Mock()
         mock_build_vm_xml.return_value = (xml_out, '00:00:00:00:00:01')
+        mock_check_update_key.return_value = 'node_2'
+        cdrom_img = '/var/lib/libvirt/images/cdrom-0.img'
 
         with mock.patch.object(self.sriov, 'vnf_node') as mock_vnf_node, \
                 mock.patch.object(self.sriov, '_enable_interfaces') as \
                 mock_enable_interfaces:
             mock_enable_interfaces.return_value = 'out_xml'
             mock_vnf_node.generate_vnf_instance = mock.Mock(
-                return_value='node')
+                return_value='node_1')
             nodes_out = self.sriov.setup_sriov_context()
-        self.assertEqual(['node'], nodes_out)
+        mock_check_update_key.assert_called_once_with(connection, 'node_1', vm_name,
+                                                      self.sriov._name_task_id, cdrom_img)
+        self.assertEqual(['node_2'], nodes_out)
         mock_vnf_node.generate_vnf_instance.assert_called_once_with(
             'flavor', 'networks', '1.2.3.4', 'vnf_0',
             self.sriov.servers['vnf_0'], '00:00:00:00:00:01')
         mock_build_vm_xml.assert_called_once_with(
-            connection, 'flavor', vm_name, 0)
+            connection, 'flavor', vm_name, 0, cdrom_img)
         mock_create_vm.assert_called_once_with(connection, cfg)
         mock_check.assert_called_once_with(vm_name, connection)
         mock_write_file.assert_called_once_with(cfg, 'out_xml')
@@ -332,7 +338,7 @@ class SriovContextTestCase(unittest.TestCase):
             ssh.return_value = ssh_mock
         self.sriov.vm_deploy = True
         self.sriov.connection = ssh_mock
-        self.sriov.vm_names = ['vm_0', 'vm_1']
+        self.sriov.vm_names = ['vm-0', 'vm-1']
         self.sriov.drivers = []
         self.sriov.servers = {
             'vnf_0': {
diff --git a/yardstick/tests/unit/benchmark/scenarios/availability/test_baseattacker.py b/yardstick/tests/unit/benchmark/scenarios/availability/test_baseattacker.py
new file mode 100644 (file)
index 0000000..74f8698
--- /dev/null
@@ -0,0 +1,36 @@
+##############################################################################
+# Copyright (c) 2018 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import unittest
+
+from yardstick.benchmark.scenarios.availability.attacker import baseattacker
+
+
+class BaseAttackerTestCase(unittest.TestCase):
+
+    def setUp(self):
+        self.attacker_cfg = {
+            'fault_type': 'test-attacker',
+            'action_parameter': {'process_name': 'nova_api'},
+            'rollback_parameter': {'process_name': 'nova_api'},
+            'key': 'stop-service',
+            'attack_key': 'stop-service',
+            'host': 'node1',
+        }
+        self.base_attacker = baseattacker.BaseAttacker({}, {})
+
+    def test__init__(self):
+        self.assertEqual(self.base_attacker.data, {})
+        self.assertFalse(self.base_attacker.mandatory)
+        self.assertEqual(self.base_attacker.intermediate_variables, {})
+        self.assertFalse(self.base_attacker.mandatory)
+
+    def test_get_attacker_cls(self):
+        with self.assertRaises(RuntimeError):
+            baseattacker.BaseAttacker.get_attacker_cls(self.attacker_cfg)
index ec0e597..d61fa67 100644 (file)
@@ -109,6 +109,23 @@ class ServicehaTestCase(unittest.TestCase):
         ret = {}
         p.run(ret)
         attacker = mock.Mock()
+        attacker.mandatory = False
         p.attackers = [attacker]
         p.teardown()
         attacker.recover.assert_not_called()
+
+    @mock.patch.object(serviceha, 'baseattacker')
+    @mock.patch.object(serviceha, 'basemonitor')
+    def test__serviceha_teardown_when_mandatory(self, mock_monitor,
+                                                *args):
+        p = serviceha.ServiceHA(self.args, self.ctx)
+        p.setup()
+        self.assertTrue(p.setup_done)
+        mock_monitor.MonitorMgr().verify_SLA.return_value = True
+        ret = {}
+        p.run(ret)
+        attacker = mock.Mock()
+        attacker.mandatory = True
+        p.attackers = [attacker]
+        p.teardown()
+        attacker.recover.assert_called_once()
index ef41421..3cf6c4d 100644 (file)
@@ -1391,3 +1391,19 @@ class GetPortIPTestCase(unittest.TestCase):
 
     def test_return_value(self):
         self.assertEqual('foo', utils.get_port_ip(self.ssh_client, 99))
+
+
+class SafeCaseTestCase(unittest.TestCase):
+
+    def test_correct_type_int(self):
+        self.assertEqual(35, utils.safe_cast('35', int, 0))
+
+    def test_correct_int_as_string(self):
+        self.assertEqual(25, utils.safe_cast('25', 'int', 0))
+
+    def test_incorrect_type_as_string(self):
+        with self.assertRaises(exceptions.InvalidType):
+            utils.safe_cast('100', 'intt', 0)
+
+    def test_default_value(self):
+        self.assertEqual(0, utils.safe_cast('', 'int', 0))
index 27ab460..0759ece 100644 (file)
@@ -575,9 +575,15 @@ class TestIXIARFC2544Profile(unittest.TestCase):
 
     def test_get_drop_percentage_completed(self):
         samples = {'iface_name_1':
-                       {'in_packets': 1000, 'out_packets': 1000},
+                       {'in_packets': 1000, 'out_packets': 1000,
+                        'Store-Forward_Avg_latency_ns': 20,
+                        'Store-Forward_Min_latency_ns': 15,
+                        'Store-Forward_Max_latency_ns': 25},
                    'iface_name_2':
-                       {'in_packets': 1005, 'out_packets': 1007}
+                       {'in_packets': 1005, 'out_packets': 1007,
+                        'Store-Forward_Avg_latency_ns': 23,
+                        'Store-Forward_Min_latency_ns': 13,
+                        'Store-Forward_Max_latency_ns': 28}
                    }
         rfc2544_profile = ixia_rfc2544.IXIARFC2544Profile(self.TRAFFIC_PROFILE)
         completed, samples = rfc2544_profile.get_drop_percentage(samples, 0, 1)
@@ -585,12 +591,21 @@ class TestIXIARFC2544Profile(unittest.TestCase):
         self.assertEqual(66.9, samples['TxThroughput'])
         self.assertEqual(66.833, samples['RxThroughput'])
         self.assertEqual(0.099651, samples['DropPercentage'])
+        self.assertEqual(21.5, samples['latency_ns_avg'])
+        self.assertEqual(14.0, samples['latency_ns_min'])
+        self.assertEqual(26.5, samples['latency_ns_max'])
 
     def test_get_drop_percentage_over_drop_percentage(self):
         samples = {'iface_name_1':
-                       {'in_packets': 1000, 'out_packets': 1000},
+                       {'in_packets': 1000, 'out_packets': 1000,
+                        'Store-Forward_Avg_latency_ns': 20,
+                        'Store-Forward_Min_latency_ns': 15,
+                        'Store-Forward_Max_latency_ns': 25},
                    'iface_name_2':
-                       {'in_packets': 1005, 'out_packets': 1007}
+                       {'in_packets': 1005, 'out_packets': 1007,
+                        'Store-Forward_Avg_latency_ns': 20,
+                        'Store-Forward_Min_latency_ns': 15,
+                        'Store-Forward_Max_latency_ns': 25}
                    }
         rfc2544_profile = ixia_rfc2544.IXIARFC2544Profile(self.TRAFFIC_PROFILE)
         rfc2544_profile.rate = 1000
@@ -604,9 +619,15 @@ class TestIXIARFC2544Profile(unittest.TestCase):
 
     def test_get_drop_percentage_under_drop_percentage(self):
         samples = {'iface_name_1':
-                       {'in_packets': 1000, 'out_packets': 1000},
+                       {'in_packets': 1000, 'out_packets': 1000,
+                        'Store-Forward_Avg_latency_ns': 20,
+                        'Store-Forward_Min_latency_ns': 15,
+                        'Store-Forward_Max_latency_ns': 25},
                    'iface_name_2':
-                       {'in_packets': 1005, 'out_packets': 1007}
+                       {'in_packets': 1005, 'out_packets': 1007,
+                        'Store-Forward_Avg_latency_ns': 20,
+                        'Store-Forward_Min_latency_ns': 15,
+                        'Store-Forward_Max_latency_ns': 25}
                    }
         rfc2544_profile = ixia_rfc2544.IXIARFC2544Profile(self.TRAFFIC_PROFILE)
         rfc2544_profile.rate = 1000
@@ -621,9 +642,15 @@ class TestIXIARFC2544Profile(unittest.TestCase):
     @mock.patch.object(ixia_rfc2544.LOG, 'info')
     def test_get_drop_percentage_not_flow(self, *args):
         samples = {'iface_name_1':
-                       {'in_packets': 1000, 'out_packets': 0},
+                       {'in_packets': 1000, 'out_packets': 0,
+                        'Store-Forward_Avg_latency_ns': 20,
+                        'Store-Forward_Min_latency_ns': 15,
+                        'Store-Forward_Max_latency_ns': 25},
                    'iface_name_2':
-                       {'in_packets': 1005, 'out_packets': 0}
+                       {'in_packets': 1005, 'out_packets': 0,
+                        'Store-Forward_Avg_latency_ns': 20,
+                        'Store-Forward_Min_latency_ns': 15,
+                        'Store-Forward_Max_latency_ns': 25}
                    }
         rfc2544_profile = ixia_rfc2544.IXIARFC2544Profile(self.TRAFFIC_PROFILE)
         rfc2544_profile.rate = 1000
@@ -637,9 +664,15 @@ class TestIXIARFC2544Profile(unittest.TestCase):
 
     def test_get_drop_percentage_first_run(self):
         samples = {'iface_name_1':
-                       {'in_packets': 1000, 'out_packets': 1000},
+                       {'in_packets': 1000, 'out_packets': 1000,
+                        'Store-Forward_Avg_latency_ns': 20,
+                        'Store-Forward_Min_latency_ns': 15,
+                        'Store-Forward_Max_latency_ns': 25},
                    'iface_name_2':
-                       {'in_packets': 1005, 'out_packets': 1007}
+                       {'in_packets': 1005, 'out_packets': 1007,
+                        'Store-Forward_Avg_latency_ns': 20,
+                        'Store-Forward_Min_latency_ns': 15,
+                        'Store-Forward_Max_latency_ns': 25}
                    }
         rfc2544_profile = ixia_rfc2544.IXIARFC2544Profile(self.TRAFFIC_PROFILE)
         completed, samples = rfc2544_profile.get_drop_percentage(
index 2e0331e..cfeebaa 100644 (file)
@@ -12,8 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import mock
+import datetime
 
+import mock
 from trex_stl_lib import api as Pkt
 from trex_stl_lib import trex_stl_client
 from trex_stl_lib import trex_stl_packet_builder_scapy
@@ -102,10 +103,10 @@ class TestRFC2544Profile(base.BaseUnitTestCase):
                 mock_create_profile:
             rfc2544_profile.execute_traffic(traffic_generator=mock_generator)
         mock_create_profile.assert_has_calls([
-            mock.call('profile1', rfc2544_profile.rate, mock.ANY),
-            mock.call('profile1', rfc2544_profile.rate, mock.ANY),
-            mock.call('profile2', rfc2544_profile.rate, mock.ANY),
-            mock.call('profile2', rfc2544_profile.rate, mock.ANY)])
+            mock.call('profile1', rfc2544_profile.rate, mock.ANY, False),
+            mock.call('profile1', rfc2544_profile.rate, mock.ANY, False),
+            mock.call('profile2', rfc2544_profile.rate, mock.ANY, False),
+            mock.call('profile2', rfc2544_profile.rate, mock.ANY, False)])
         mock_generator.client.add_streams.assert_has_calls([
             mock.call(mock.ANY, ports=[10]),
             mock.call(mock.ANY, ports=[20]),
@@ -129,13 +130,14 @@ class TestRFC2544Profile(base.BaseUnitTestCase):
                 mock_create_streams:
             mock_create_imix.return_value = 'imix_data'
             mock_create_streams.return_value = ['stream1']
-            rfc2544_profile._create_profile(profile_data, rate, port_pg_id)
+            rfc2544_profile._create_profile(profile_data, rate, port_pg_id,
+                                            True)
 
         mock_create_imix.assert_called_once_with('imix_info')
         mock_create_vm.assert_called_once_with(
             {'outer_l2': {'framesize': 'imix_info'}})
         mock_create_streams.assert_called_once_with('imix_data', 100,
-                                                    port_pg_id)
+                                                    port_pg_id, True)
         mock_stl_profile.assert_called_once_with(['stream1'])
 
     def test__create_imix_data(self):
@@ -208,7 +210,7 @@ class TestRFC2544Profile(base.BaseUnitTestCase):
         rfc2544_profile = rfc2544.RFC2544Profile(self.TRAFFIC_PROFILE)
         with mock.patch.object(rfc2544_profile, '_create_single_packet'):
             output = rfc2544_profile._create_streams(imix_data, rate,
-                                                     port_pg_id)
+                                                     port_pg_id, True)
         self.assertEqual(['stream1', 'stream2'], output)
         mock_latency.assert_has_calls([
             mock.call(pg_id=1), mock.call(pg_id=2)])
@@ -219,34 +221,38 @@ class TestRFC2544Profile(base.BaseUnitTestCase):
     def test_get_drop_percentage(self):
         rfc2544_profile = rfc2544.RFC2544Profile(self.TRAFFIC_PROFILE)
         samples = [
-            {'xe1': {'tx_throughput_fps': 100,
+            {'xe1': {'tx_throughput_fps': 110,
                      'rx_throughput_fps': 101,
-                     'out_packets': 2000,
-                     'in_packets': 2010},
-             'xe2': {'tx_throughput_fps': 200,
+                     'out_packets': 2100,
+                     'in_packets': 2010,
+                     'timestamp': datetime.datetime(2000, 1, 1, 1, 1, 1, 1)},
+             'xe2': {'tx_throughput_fps': 210,
                      'rx_throughput_fps': 201,
-                     'out_packets': 4000,
-                     'in_packets': 4010}},
-            {'xe1': {'tx_throughput_fps': 106,
+                     'out_packets': 4100,
+                     'in_packets': 4010,
+                     'timestamp': datetime.datetime(2000, 1, 1, 1, 1, 1, 1)}},
+            {'xe1': {'tx_throughput_fps': 156,
                      'rx_throughput_fps': 108,
-                     'out_packets': 2031,
+                     'out_packets': 2110,
                      'in_packets': 2040,
-                     'latency': 'Latency1'},
-             'xe2': {'tx_throughput_fps': 203,
+                     'latency': 'Latency1',
+                     'timestamp': datetime.datetime(2000, 1, 1, 1, 1, 1, 31)},
+             'xe2': {'tx_throughput_fps': 253,
                      'rx_throughput_fps': 215,
-                     'out_packets': 4025,
-                     'in_packets': 4040,
-                     'latency': 'Latency2'}}
+                     'out_packets': 4150,
+                     'in_packets': 4010,
+                     'latency': 'Latency2',
+                     'timestamp': datetime.datetime(2000, 1, 1, 1, 1, 1, 31)}}
         ]
         completed, output = rfc2544_profile.get_drop_percentage(
             samples, 0, 0, False)
-        expected = {'DropPercentage': 0.3963,
+        expected = {'DropPercentage': 50.0,
                     'Latency': {'xe1': 'Latency1', 'xe2': 'Latency2'},
-                    'RxThroughput': 312.5,
-                    'TxThroughput': 304.5,
-                    'CurrentDropPercentage': 0.3963,
+                    'RxThroughput': 1000000.0,
+                    'TxThroughput': 2000000.0,
+                    'CurrentDropPercentage': 50.0,
                     'Rate': 100.0,
-                    'Throughput': 312.5}
+                    'Throughput': 1000000.0}
         self.assertEqual(expected, output)
         self.assertFalse(completed)
 
index 5ad182f..a7e61da 100644 (file)
@@ -317,6 +317,7 @@ class TestProxTrafficGen(unittest.TestCase):
         prox_traffic_gen = ProxTrafficGen(NAME, self.VNFD0, 'task_id')
         self.assertIsNone(prox_traffic_gen._tg_process)
         self.assertIsNone(prox_traffic_gen._traffic_process)
+        self.assertIsNone(prox_traffic_gen._mq_producer)
 
     @mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', return_value='mock_node')
     @mock.patch(SSH_HELPER)