Merge "DRAFT: update yardstick generic test cases dashboard"
authorRoss Brattain <ross.b.brattain@intel.com>
Mon, 28 Aug 2017 09:19:28 +0000 (09:19 +0000)
committerGerrit Code Review <gerrit@opnfv.org>
Mon, 28 Aug 2017 09:19:28 +0000 (09:19 +0000)
226 files changed:
ansible/build_yardstick_image.yml
ansible/install_dependencies.yml [moved from samples/vnf_samples/traffic_profiles/ipv4_1flow_Packets.yaml with 80% similarity]
ansible/library/find_kernel.py [new file with mode: 0644]
ansible/post_build_yardstick_image.yml
ansible/roles/download_dpdk/tasks/main.yml
ansible/roles/download_prox/defaults/main.yml [deleted file]
ansible/roles/download_prox/tasks/main.yml [deleted file]
ansible/roles/download_samplevnfs/defaults/main.yml
ansible/roles/download_samplevnfs/tasks/main.yml
ansible/roles/download_trex/defaults/main.yml
ansible/roles/download_trex/tasks/main.yml
ansible/roles/enable_hugepages_on_boot/defaults/main.yml [new file with mode: 0644]
ansible/roles/enable_hugepages_on_boot/tasks/main.yml
ansible/roles/install_dependencies/tasks/Debian.yml
ansible/roles/install_dependencies/tasks/RedHat.yml
ansible/roles/install_dpdk/defaults/main.yml [new file with mode: 0644]
ansible/roles/install_dpdk/tasks/Debian.yml
ansible/roles/install_dpdk/tasks/RedHat.yml
ansible/roles/install_dpdk/tasks/main.yml
ansible/roles/install_dpdk/vars/main.yml
ansible/roles/install_prox/tasks/Debian.yml [deleted file]
ansible/roles/install_prox/tasks/RedHat.yml [deleted file]
ansible/roles/install_samplevnf/tasks/main.yml [new file with mode: 0644]
ansible/roles/install_samplevnf/vars/main.yml [moved from ansible/roles/install_prox/tasks/main.yml with 55% similarity]
ansible/roles/install_trex/defaults/main.yml
ansible/roles/install_trex/tasks/main.yml
ansible/roles/install_vnf_vACL/tasks/main.yml [deleted file]
ansible/roles/install_vnf_vACL/vars/main.yml [deleted file]
ansible/roles/install_vnf_vCGNAPT/tasks/main.yml [deleted file]
ansible/roles/install_vnf_vCGNAPT/vars/main.yml [deleted file]
ansible/roles/install_vnf_vFW/tasks/main.yml [deleted file]
ansible/roles/install_vnf_vFW/vars/main.yml [deleted file]
ansible/roles/install_vnf_vPE/tasks/main.yml [deleted file]
ansible/roles/install_vnf_vPE/vars/main.yml [deleted file]
ansible/roles/reset_resolv_conf/tasks/main.yml
ansible/ubuntu_server_baremetal_deploy_samplevnfs.yml [moved from ansible/ubuntu_server_cloudimg_modify_vpe.yml with 65% similarity]
ansible/ubuntu_server_cloudimg_modify.yml
ansible/ubuntu_server_cloudimg_modify_cgnapt.yml [deleted file]
ansible/ubuntu_server_cloudimg_modify_dpdk.yml
ansible/ubuntu_server_cloudimg_modify_samplevnfs.yml [moved from ansible/ubuntu_server_cloudimg_modify_acl.yml with 61% similarity]
ansible/ubuntu_server_cloudimg_modify_vfw.yml [deleted file]
api/database/v2/handlers.py
api/database/v2/models.py
api/resources/v2/environments.py
api/resources/v2/images.py
api/server.py
api/urls.py
docker/nginx.sh
docs/testing/user/userguide/14-nsb_installation.rst
docs/testing/user/userguide/opnfv_yardstick_tc006.rst [new file with mode: 0644]
docs/testing/user/userguide/opnfv_yardstick_tc056.rst [new file with mode: 0644]
docs/testing/user/userguide/opnfv_yardstick_tc057.rst [new file with mode: 0644]
docs/testing/user/userguide/opnfv_yardstick_tc058.rst [new file with mode: 0644]
etc/yardstick/yardstick.conf.sample
gui/app/scripts/controllers/image.controller.js
gui/app/scripts/controllers/main.js
gui/app/scripts/controllers/projectDetail.controller.js
gui/app/scripts/factory/main.factory.js
gui/app/views/modal/environmentDialog.html
gui/app/views/modal/imageDialog.html [new file with mode: 0644]
gui/app/views/podupload.html
gui/app/views/uploadImage.html
nsb_setup.sh
samples/vnf_samples/nsut/acl/tc_baremetal_http_ixload_1b_Requests-65000_Concurrency.yaml
samples/vnf_samples/nsut/acl/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_ixia.yaml
samples/vnf_samples/nsut/acl/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_packetsize.yaml
samples/vnf_samples/nsut/acl/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_trex.yaml
samples/vnf_samples/nsut/acl/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_trex_corelated_traffic.yaml
samples/vnf_samples/nsut/acl/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_trex_scale_up.yaml
samples/vnf_samples/nsut/acl/tc_baremetal_rfc2544_ipv4_worstcaserules_1flow_64B_packetsize.yaml [deleted file]
samples/vnf_samples/nsut/acl/tc_baremetal_rfc2544_ipv4_worstcaserules_1flow_64B_trex.yaml
samples/vnf_samples/nsut/acl/tc_baremetal_rfc2544_with_latency_ipv4_1rule_1flow_64B_trex.yaml
samples/vnf_samples/nsut/acl/tc_heat_rfc2544_ipv4_1rule_1flow_64B_trex.yaml [moved from samples/vnf_samples/nsut/acl/tc_heat_rfc2544_ipv4_1rule_1flow_64B_packetsize.yaml with 76% similarity]
samples/vnf_samples/nsut/acl/tc_heat_trex_external_rfc2544_ipv4_1rule_1flow_64B_packetsize.yaml
samples/vnf_samples/nsut/cgnapt/tc_baremetal_http_ixload_1b_Requests-65000_Concurrency.yaml
samples/vnf_samples/nsut/cgnapt/tc_baremetal_rfc2544_ipv4_1flow_64B_ixia.yaml
samples/vnf_samples/nsut/cgnapt/tc_baremetal_rfc2544_ipv4_1flow_64B_trex.yaml
samples/vnf_samples/nsut/cgnapt/tc_baremetal_rfc2544_ipv4_1flow_64B_trex_corelated_traffic.yaml
samples/vnf_samples/nsut/cgnapt/tc_baremetal_rfc2544_ipv4_1flow_64B_trex_scale_up.yaml
samples/vnf_samples/nsut/cgnapt/tc_baremetal_rfc2544_with_latency_ipv4_1flow_64B_trex.yaml
samples/vnf_samples/nsut/cgnapt/tc_heat_external_rfc2544_ipv4_1flow_64B_trex.yaml [new file with mode: 0644]
samples/vnf_samples/nsut/cgnapt/tc_heat_rfc2544_ipv4_1flow_64B_trex.yaml [new file with mode: 0644]
samples/vnf_samples/nsut/udp_replay/tc_baremetal_rfc2544_ipv4_1flow_64B_trex.yaml
samples/vnf_samples/nsut/vfw/acl_1rule.yaml
samples/vnf_samples/nsut/vfw/tc_baremetal_http_ixload_1b_Requests-65000_Concurrency.yaml
samples/vnf_samples/nsut/vfw/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_ixia.yaml
samples/vnf_samples/nsut/vfw/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_trex.yaml
samples/vnf_samples/nsut/vfw/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_trex_corelated_traffic.yaml
samples/vnf_samples/nsut/vfw/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_trex_scale_up.yaml
samples/vnf_samples/nsut/vfw/tc_baremetal_rfc2544_with_latency_ipv4_1rule_1flow_64B_trex.yaml
samples/vnf_samples/nsut/vfw/tc_heat_external_rfc2544_ipv4_1rule_1flow_64B_trex.yaml [new file with mode: 0644]
samples/vnf_samples/nsut/vfw/tc_heat_rfc2544_ipv4_1rule_1flow_64B_trex.yaml [new file with mode: 0644]
samples/vnf_samples/nsut/vpe/tc_baremetal_http_ipv4_ixload.yaml
samples/vnf_samples/nsut/vpe/tc_baremetal_rfc2544_ipv4_1flow_1518B.yaml
samples/vnf_samples/nsut/vpe/tc_baremetal_rfc2544_ipv4_1flow_64B.yaml
samples/vnf_samples/nsut/vpe/tc_baremetal_rfc2544_ipv4_1flow_64B_ixia.yaml
samples/vnf_samples/nsut/vpe/tc_baremetal_rfc2544_ipv4_1flow_64B_trex_corelated_traffic.yaml
samples/vnf_samples/nsut/vpe/tc_baremetal_rfc2544_ipv4_1flow_IMIX.yaml
samples/vnf_samples/traffic_profiles/imix_storage.yaml [deleted file]
samples/vnf_samples/traffic_profiles/imix_video.yaml [deleted file]
samples/vnf_samples/traffic_profiles/imix_voice.yaml [deleted file]
samples/vnf_samples/traffic_profiles/ipv4_1flow_Packets_vpe.yaml [deleted file]
samples/vnf_samples/traffic_profiles/ipv4_throughput.yaml
samples/vnf_samples/traffic_profiles/ipv4_throughput_cgnapt.yaml
samples/vnf_samples/traffic_profiles/ipv4_throughput_vpe.yaml
samples/vnf_samples/traffic_profiles/ixia_ipv4_latency.yaml
samples/vnf_samples/traffic_profiles/ixia_ipv4_latency_vpe.yaml
tests/ci/apexlake-verify [deleted file]
tests/opnfv/test_cases/opnfv_yardstick_tc002.yaml
tests/opnfv/test_cases/opnfv_yardstick_tc006.yaml [new file with mode: 0644]
tests/opnfv/test_cases/opnfv_yardstick_tc056.yaml [new file with mode: 0644]
tests/opnfv/test_cases/opnfv_yardstick_tc057.yaml [new file with mode: 0644]
tests/opnfv/test_cases/opnfv_yardstick_tc058.yaml [new file with mode: 0644]
tests/opnfv/test_cases/opnfv_yardstick_tc078.yaml [new file with mode: 0644]
tests/opnfv/test_cases/opnfv_yardstick_tc079.yaml [new file with mode: 0644]
tests/opnfv/test_cases/opnfv_yardstick_tc080.yaml [moved from samples/ping_k8s.yaml with 100% similarity]
tests/opnfv/test_cases/opnfv_yardstick_tc081.yaml [moved from samples/container_ping_vm.yaml with 94% similarity]
tests/opnfv/test_suites/opnfv_k8-nosdn-lb-noha_daily.yaml [new file with mode: 0644]
tests/unit/__init__.py
tests/unit/benchmark/contexts/standalone/__init__.py [new file with mode: 0644]
tests/unit/benchmark/contexts/standalone/ovs_sample_password.yaml [moved from tests/unit/benchmark/contexts/ovs_sample_password.yaml with 100% similarity]
tests/unit/benchmark/contexts/standalone/ovs_sample_ssh_key.yaml [moved from tests/unit/benchmark/contexts/ovs_sample_ssh_key.yaml with 100% similarity]
tests/unit/benchmark/contexts/standalone/ovs_sample_write_to_file.txt [moved from tests/unit/benchmark/contexts/ovs_sample_write_to_file.txt with 100% similarity]
tests/unit/benchmark/contexts/standalone/sriov_sample_password.yaml [moved from tests/unit/benchmark/contexts/sriov_sample_password.yaml with 100% similarity]
tests/unit/benchmark/contexts/standalone/sriov_sample_ssh_key.yaml [moved from tests/unit/benchmark/contexts/sriov_sample_ssh_key.yaml with 100% similarity]
tests/unit/benchmark/contexts/standalone/sriov_sample_write_to_file.txt [moved from tests/unit/benchmark/contexts/sriov_sample_write_to_file.txt with 100% similarity]
tests/unit/benchmark/contexts/standalone/test_ovsdpdk.py [moved from tests/unit/benchmark/contexts/test_ovsdpdk.py with 98% similarity]
tests/unit/benchmark/contexts/standalone/test_sriov.py [moved from tests/unit/benchmark/contexts/test_sriov.py with 97% similarity]
tests/unit/benchmark/contexts/test_standalone.py
tests/unit/benchmark/runner/test_search.py
tests/unit/benchmark/scenarios/availability/test_scenario_general.py
tests/unit/benchmark/scenarios/lib/test_attach_volume.py [new file with mode: 0644]
tests/unit/benchmark/scenarios/lib/test_create_floating_ip.py [new file with mode: 0644]
tests/unit/benchmark/scenarios/lib/test_create_keypair.py [new file with mode: 0644]
tests/unit/benchmark/scenarios/lib/test_create_network.py [new file with mode: 0644]
tests/unit/benchmark/scenarios/lib/test_create_port.py [new file with mode: 0644]
tests/unit/benchmark/scenarios/lib/test_create_router.py [new file with mode: 0644]
tests/unit/benchmark/scenarios/lib/test_create_sec_group.py [new file with mode: 0644]
tests/unit/benchmark/scenarios/lib/test_create_subnet.py [new file with mode: 0644]
tests/unit/benchmark/scenarios/lib/test_delete_floating_ip.py [new file with mode: 0644]
tests/unit/benchmark/scenarios/lib/test_delete_keypair.py [new file with mode: 0644]
tests/unit/benchmark/scenarios/lib/test_delete_volume.py [new file with mode: 0644]
tests/unit/benchmark/scenarios/lib/test_detach_volume.py [new file with mode: 0644]
tests/unit/benchmark/scenarios/networking/test_pktgen.py
tests/unit/benchmark/scenarios/networking/test_vnf_generic.py
tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py
tests/unit/benchmark/scenarios/storage/test_fio.py
tests/unit/common/test_utils.py
tests/unit/network_services/helpers/test_samplevnf_helper.py
tests/unit/network_services/nfvi/test_resource.py
tests/unit/network_services/traffic_profile/test_fixed.py
tests/unit/network_services/traffic_profile/test_http_ixload.py
tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py
tests/unit/network_services/traffic_profile/test_prox_acl.py
tests/unit/network_services/traffic_profile/test_prox_binsearch.py
tests/unit/network_services/traffic_profile/test_prox_profile.py
tests/unit/network_services/traffic_profile/test_prox_ramp.py
tests/unit/network_services/traffic_profile/test_rfc2544.py
tests/unit/network_services/traffic_profile/test_traffic_profile.py
tests/unit/network_services/vnf_generic/vnf/test_acl_vnf.py
tests/unit/network_services/vnf_generic/vnf/test_cgnapt_vnf.py
tests/unit/network_services/vnf_generic/vnf/test_iniparser.py
tests/unit/network_services/vnf_generic/vnf/test_prox_helpers.py
tests/unit/network_services/vnf_generic/vnf/test_prox_vnf.py
tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py
tests/unit/network_services/vnf_generic/vnf/test_tg_ixload.py
tests/unit/network_services/vnf_generic/vnf/test_tg_ping.py
tests/unit/network_services/vnf_generic/vnf/test_tg_prox.py
tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py
tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_trex.py
tests/unit/network_services/vnf_generic/vnf/test_tg_trex.py
tests/unit/network_services/vnf_generic/vnf/test_udp_replay.py
tests/unit/network_services/vnf_generic/vnf/test_vfw_vnf.py
tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py
yardstick/benchmark/contexts/heat.py
yardstick/benchmark/contexts/standalone/__init__.py [moved from yardstick/benchmark/contexts/standalone.py with 100% similarity]
yardstick/benchmark/contexts/standalone/ovsdpdk.py [moved from yardstick/benchmark/contexts/ovsdpdk.py with 100% similarity]
yardstick/benchmark/contexts/standalone/sriov.py [moved from yardstick/benchmark/contexts/sriov.py with 100% similarity]
yardstick/benchmark/scenarios/availability/attacker_conf.yaml
yardstick/benchmark/scenarios/availability/ha_tools/node/reboot_node.bash [new file with mode: 0644]
yardstick/benchmark/scenarios/availability/ha_tools/nova/get_server_floatingip.bash [new file with mode: 0644]
yardstick/benchmark/scenarios/availability/ha_tools/nova/list_servers.bash [new file with mode: 0644]
yardstick/benchmark/scenarios/availability/ha_tools/pacemaker/get_resource_status.bash [new file with mode: 0644]
yardstick/benchmark/scenarios/availability/ha_tools/pacemaker/get_resource_status_host.bash [new file with mode: 0644]
yardstick/benchmark/scenarios/availability/ha_tools/pacemaker/get_vip_host.bash [new file with mode: 0644]
yardstick/benchmark/scenarios/availability/operation_conf.yaml
yardstick/benchmark/scenarios/availability/result_checker_conf.yaml
yardstick/benchmark/scenarios/availability/scenario_general.py
yardstick/benchmark/scenarios/availability/serviceha.py
yardstick/benchmark/scenarios/availability/util.py
yardstick/benchmark/scenarios/compute/computecapacity.bash
yardstick/benchmark/scenarios/compute/qemu_migrate.py
yardstick/benchmark/scenarios/lib/attach_volume.py [new file with mode: 0644]
yardstick/benchmark/scenarios/lib/create_floating_ip.py [new file with mode: 0644]
yardstick/benchmark/scenarios/lib/create_keypair.py [new file with mode: 0644]
yardstick/benchmark/scenarios/lib/create_network.py [new file with mode: 0644]
yardstick/benchmark/scenarios/lib/create_port.py [new file with mode: 0644]
yardstick/benchmark/scenarios/lib/create_router.py [new file with mode: 0644]
yardstick/benchmark/scenarios/lib/create_sec_group.py [new file with mode: 0644]
yardstick/benchmark/scenarios/lib/create_server.py
yardstick/benchmark/scenarios/lib/create_subnet.py [new file with mode: 0644]
yardstick/benchmark/scenarios/lib/delete_floating_ip.py [new file with mode: 0644]
yardstick/benchmark/scenarios/lib/delete_keypair.py [new file with mode: 0644]
yardstick/benchmark/scenarios/lib/delete_volume.py [new file with mode: 0644]
yardstick/benchmark/scenarios/lib/detach_volume.py [new file with mode: 0644]
yardstick/benchmark/scenarios/networking/pktgen.py
yardstick/benchmark/scenarios/networking/vnf_generic.py
yardstick/benchmark/scenarios/storage/fio.py
yardstick/benchmark/scenarios/storage/storagecapacity.bash
yardstick/common/constants.py
yardstick/common/openstack_utils.py
yardstick/common/utils.py
yardstick/network_services/helpers/cpu.py
yardstick/network_services/helpers/samplevnf_helper.py
yardstick/network_services/nfvi/resource.py
yardstick/network_services/traffic_profile/fixed.py
yardstick/network_services/traffic_profile/http_ixload.py
yardstick/network_services/traffic_profile/rfc2544.py
yardstick/network_services/traffic_profile/traffic_profile.py
yardstick/network_services/utils.py
yardstick/network_services/vnf_generic/vnf/sample_vnf.py
yardstick/network_services/vnf_generic/vnf/tg_ixload.py
yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py
yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py
yardstick/orchestrator/heat.py
yardstick/ssh.py

index 9a65d3a..025573b 100644 (file)
     sha256sums_filename: "{{ sha256sums_path|basename }}"
     sha256sums_url: "{{ lookup('env', 'SHA256SUMS_URL')|default('https://' ~ host ~ '/' ~ sha256sums_path, true) }}"
 
-    mountdir: "{{ lookup('env', 'mountdir')|default('/mnt/yardstick', true) }}"
     workspace: "{{ lookup('env', 'workspace')|default('/tmp/workspace/yardstick', true) }}"
     imgfile: "{{ workspace }}/yardstick-image.img"
     raw_imgfile_basename: "yardstick-{{ release }}-server.raw"
-    raw_imgfile: "{{ workspace }}/{{ raw_imgfile_basename }}"
   environment:
     PATH: /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/root/bin
 
 
     - package: name=parted state=present
 
+    - set_fact:
+        mountdir: "{{ lookup('env', 'mountdir')|default('/mnt/yardstick', true) }}"
+
+    - set_fact:
+        raw_imgfile: "{{ workspace }}/{{ raw_imgfile_basename }}"
+
   # cleanup non-lxd
     - name: unmount all old mount points
       mount:
         ansible_python_interpreter: /usr/bin/python3
         # set this host variable here
         nameserver_ip: "{{ ansible_dns.nameservers[0] }}"
+        image_type: vm
 
 - name: include {{ img_modify_playbook }}
   include: "{{ img_modify_playbook }}"
 
 - name: run post build tasks
   include: post_build_yardstick_image.yml
+
+- hosts: localhost
+
+  tasks:
+    - debug:
+        msg: "yardstick image = {{ raw_imgfile }}"
@@ -1,4 +1,4 @@
-# Copyright (c) 2016-2017 Intel Corporation
+# Copyright (c) 2017 Intel Corporation.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+---
+- name: install yardstick dependencies
+  hosts: all
+
+  roles:
+    - install_dependencies
 
-flow:
-    srcip4_range: '152.16.100.20'
-    dstip4_range: '152.40.40.20'
-    count: 1
diff --git a/ansible/library/find_kernel.py b/ansible/library/find_kernel.py
new file mode 100644 (file)
index 0000000..4623bce
--- /dev/null
@@ -0,0 +1,93 @@
+#!/usr/bin/env python
+# Copyright (c) 2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+DOCUMENTATION = '''
+---
+module: find_kernel
+short_description: Look for the system kernel on the filesystem
+description:
+    - We need to find the kernel on non-booted systems, disk images, chroots, etc.
+    To do this we check /lib/modules and look for the kernel that matches the running
+    kernel, or failing that we look for the highest-numbered kernel
+options:
+  kernel: starting kernel to check
+  module_dir: Override kernel module dir, default /lib/modules
+'''
+
+LIB_MODULES = "/lib/modules"
+
+
+def try_int(s, *args):
+    """Convert to integer if possible."""
+    try:
+        return int(s)
+    except (TypeError, ValueError):
+        return args[0] if args else s
+
+
+def convert_ints(fields, orig):
+    return tuple((try_int(f) for f in fields)), orig
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec={
+            'kernel': {'required': True, 'type': 'str'},
+            'module_dir': {'required': False, 'type': 'str', 'default': LIB_MODULES},
+        }
+    )
+    params = module.params
+    kernel = params['kernel']
+    module_dir = params['module_dir']
+
+    if os.path.isdir(os.path.join(module_dir, kernel)):
+        module.exit_json(changed=False, kernel=kernel)
+
+    kernel_dirs = os.listdir(module_dir)
+    kernels = sorted((convert_ints(re.split('[-.]', k), k) for k in kernel_dirs), reverse=True)
+    try:
+        newest_kernel = kernels[0][-1]
+    except IndexError:
+        module.fail_json(msg="Unable to find kernels in {}".format(module_dir))
+
+    if os.path.isdir(os.path.join(module_dir, newest_kernel)):
+        module.exit_json(changed=False, kernel=newest_kernel)
+    else:
+        return kernel
+
+    module.fail_json(msg="Unable to kernel other than {}".format(kernel))
+
+
+# <<INCLUDE_ANSIBLE_MODULE_COMMON>>
+from ansible.module_utils.basic import *  # noqa
+
+if __name__ == '__main__':
+    main()
+
+"""
+
+get kernel from uname,  ansible_kernel
+look for that kernel in /lib/modules
+if that kernel doesn't exist
+sort lib/modules
+use latest
+
+parse grub
+
+
+
+"""
index b0c4187..d1f2a73 100644 (file)
@@ -40,5 +40,3 @@
     - name: kpartx -dv to delete all image partition device nodes
       command: kpartx -dv "{{ raw_imgfile }}"
       ignore_errors: true
-
-    - command: losetup -d "{{ loop_device }}"
\ No newline at end of file
index 322f3cd..bcb5dde 100644 (file)
     var: dpdk_version
     verbosity: 2
 
+- file:
+    path: "{{ dpdk_dest }}"
+    state: directory
+
 - name: fetch dpdk
   get_url:
     url: "{{ dpdk_url }}"
     checksum: "{{ dpdk_sha256s[dpdk_version] }}"
 
 - unarchive:
-    src: "{{ clone_dest }}/{{ dpdk_file }}"
-    dest: "{{ clone_dest }}/"
+    src: "{{ dpdk_dest }}/{{ dpdk_file }}"
+    dest: "{{ dpdk_dest }}/"
     copy: no
 
+- name: cleanup tar file to save space
+  file:
+      path: "{{ dpdk_dest }}/{{ dpdk_file }}"
+      state: absent
+
 - set_fact:
-    dpdk_path: "{{ clone_dest }}/{{ dpdk_unarchive }}"
+    dpdk_path: "{{ dpdk_dest }}/{{ dpdk_unarchive }}"
 
 - set_fact:
     RTE_SDK: "{{ dpdk_path }}"
diff --git a/ansible/roles/download_prox/defaults/main.yml b/ansible/roles/download_prox/defaults/main.yml
deleted file mode 100644 (file)
index 797db31..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
----
-prox_version: v037
-prox_suffix:
-  v035: "zip"
-  v037: "tar.gz"
-prox_url: "https://01.org/sites/default/files/downloads/intelr-data-plane-performance-demonstrators/dppd-prox-{{ prox_version }}.{{ prox_suffix[prox_version] }}"
-prox_file: "{{ prox_url|basename }}"
-prox_unarchive: "{{ prox_file|regex_replace('[.]zip$', '')|regex_replace('-prox-', '-PROX-') }}"
-prox_dest: "{{ clone_dest }}/"
-prox_sha256s:
- v035: "sha256:f5d3f7c3855ca198d2babbc7045ed4373f0ddc13dc243fedbe23ed395ce65cc9"
- v037: "sha256:a12d021fbc0f5ae55ab55a2bbf8f3b260705ce3e61866288f023ccabca010bca"
diff --git a/ansible/roles/download_prox/tasks/main.yml b/ansible/roles/download_prox/tasks/main.yml
deleted file mode 100644 (file)
index 0614c74..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright (c) 2017 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
----
-- debug:
-    var: prox_version
-    verbosity: 2
-
-- name: fetch prox
-  get_url:
-    url: "{{ prox_url }}"
-    dest: "{{ prox_dest }}"
-    validate_certs: False
-    checksum: "{{ prox_sha256s[prox_version] }}"
-
-- unarchive:
-    src: "{{ clone_dest }}/{{ prox_file }}"
-    dest: "{{ clone_dest }}/"
-    copy: no
-    
-- debug:
-    var: prox_unarchive
-    verbosity: 2
-
-- set_fact:
-    prox_path: "{{ clone_dest }}/{{ prox_unarchive }}"
\ No newline at end of file
index 44449af..5f565a4 100644 (file)
@@ -1,6 +1,4 @@
 ---
-samplevnf_version: ""
-samplevnf_file: "{{ samplevnf_url|basename }}"
-samplevnf_unarchive: "{{ samplevnf_file|regex_replace('[.]tar[.]gz$', '') }}"
-samplevnf_dest: "{{ clone_dest }}/"
-samplevnf_sha256: "sha256:36457cadfd23053c9ce1cf2e6f048cad6a5d04a7371d7a122e133dcbf007989e"
+samplevnf_url: "https://git.opnfv.org/samplevnf"
+samplevnf_dest: "{{ clone_dest }}/samplevnf"
+samplevnf_version: "master"
index 005d57d..e9d4142 100644 (file)
 #    verbosity: 2
 
 - name: fetch samplevnf
-  get_url:
-    url: "{{ samplevnf_url }}"
+  git:
+    repo: "{{ samplevnf_url }}"
     dest: "{{ samplevnf_dest }}"
-    validate_certs: False
-    checksum: "{{ samplevnf_sha256 }}"
-
-- unarchive:
-    src: "{{ clone_dest }}/{{ samplevnf_file }}"
-    dest: "{{ clone_dest }}/"
-    copy: no
+    version: "{{ samplevnf_version }}"
+    accept_hostkey: yes
+    recursive: no
+    force: yes
 
 - set_fact:
-    samplevnf_path: "{{ clone_dest }}/{{ samplevnf_unarchive }}"
+    samplevnf_path: "{{ samplevnf_dest }}"
index dd2dd27..6e8fa70 100644 (file)
 # See the License for the specific language governing permissions and
 # limitations under the License.
 ---
-trex_version: v2.20
+trex_version: v2.28
 trex_url: "https://trex-tgn.cisco.com/trex/release/{{ trex_version }}.tar.gz"
 trex_file: "{{ trex_url|basename }}"
 trex_unarchive: "{{ trex_file|regex_replace('[.]tar.gz$', '') }}"
 trex_dest: "{{ clone_dest }}/"
-trex_sha256: "sha256:eb5a069f758a36133a185c7e27af10834ca03d11441165403529fbd7844658fb"
+trex_sha256s:
+  "v2.20": "sha256:eb5a069f758a36133a185c7e27af10834ca03d11441165403529fbd7844658fb"
+  "v2.28": "sha256:c3f08aabbd69dddb09843984d41acbe9ba1af6a6ef3380a7830f7c9e33134207"
index 75a3169..baa964f 100644 (file)
   get_url:
     url: "{{ trex_url }}"
     dest: "{{ trex_dest }}"
-    checksum: "{{ trex_sha256 }}"
+    validate_certs: False
+    checksum: "{{ trex_sha256s[trex_version] }}"
 
 - name: unarchive Trex
   unarchive:
-    src: "{{ clone_dest }}/{{ trex_file }}"
-    dest: "{{ clone_dest }}/"
+    src: "{{ trex_dest }}/{{ trex_file }}"
+    dest: "{{ trex_dest }}/"
     copy: no
+
+- name: cleanup tar file to save space
+  file:
+      path: "{{ trex_dest }}/{{ trex_file }}"
+      state: absent
diff --git a/ansible/roles/enable_hugepages_on_boot/defaults/main.yml b/ansible/roles/enable_hugepages_on_boot/defaults/main.yml
new file mode 100644 (file)
index 0000000..015e01b
--- /dev/null
@@ -0,0 +1,3 @@
+---
+num_hugepages: auto
+huge_pagesize_mb: 1024
\ No newline at end of file
index f258bb6..be4a328 100755 (executable)
     line: '{{ hugepage_param }}'
     state: present
 
-- name: Update grub
-  command: "{{ update_grub[ansible_os_family] }}"
-
 - name: create hugetables mount
   file:
     path: "{{ hugetable_mount }}"
     state: directory
 
-
 - name: mount hugetlbfs
   mount:
     name: "{{ hugetable_mount }}"
index ac83322..0047a5e 100755 (executable)
@@ -29,6 +29,7 @@
     - qemu-kvm
     - qemu-user-static
     - qemu-utils
+    - kpartx
     - libvirt0
     - python-libvirt
     - bridge-utils
index 4bb7c31..b725933 100644 (file)
@@ -46,6 +46,7 @@
     - python-setuptools
     - libffi-devel
     - python-devel
+    - kpartx
     # don't install kernel-devel here it will trigger unwanted kernel upgrade
     # Mandatory Packages:
     # Don't use yum groups, they don't work, expand them manually
diff --git a/ansible/roles/install_dpdk/defaults/main.yml b/ansible/roles/install_dpdk/defaults/main.yml
new file mode 100644 (file)
index 0000000..fe21724
--- /dev/null
@@ -0,0 +1,2 @@
+---
+INSTALL_BIN_PATH: "/opt/nsb_bin"
\ No newline at end of file
index 486d40e..c77e4f9 100755 (executable)
@@ -17,3 +17,6 @@
   with_items:
     - libpcap-dev
 
+- name: Install kernel headers
+  action: "{{ ansible_pkg_mgr }} name=linux-headers-{{ dpdk_kernel }} state=present"
+
index af35c9b..2fb249e 100644 (file)
@@ -17,3 +17,5 @@
   with_items:
     - libpcap-devel
 
+- name: Install kernel headers
+  action: "{{ ansible_pkg_mgr }} name=kernel-headers-{{ dpdk_kernel }} state=present"
index fca0e33..cab093a 100644 (file)
 #  with_fileglob:
 #    - "{{ local_nsb_path }}/patches/dpdk_custom_patch/0*.patch"
 
+- name: find kernel for image, (including chroot)
+  find_kernel:
+    kernel: "{{ ansible_kernel }}"
+  register: found_kernel
+
+# Do this before installing kernel headers
+- name: Set dpdk_kernel to be the kernel we found
+  set_fact:
+    dpdk_kernel: "{{ found_kernel.kernel }}"
+
 - include: "{{ ansible_os_family }}.yml"
 
+- name: set RTE_KERNELDIR to point to found kernel
+  set_fact:
+    RTE_KERNELDIR: "/lib/modules/{{ dpdk_kernel }}/build"
+
 - my_make:
     chdir: "{{ dpdk_path }}"
     target: config
@@ -29,6 +43,8 @@
       T: "{{ dpdk_make_arch }}"
       O: "{{ dpdk_make_arch }}"
     extra_args: "-j {{ ansible_processor_vcpus }}"
+  environment:
+    RTE_KERNELDIR: "{{ RTE_KERNELDIR }}"
 
 - name: enable RTE_PORT_STATS_COLLECT
   lineinfile:
@@ -57,6 +73,8 @@
 - my_make:
     chdir: "{{ dpdk_path }}/{{ dpdk_make_arch}}"
     extra_args: "-j {{ ansible_processor_vcpus }}"
+  environment:
+    RTE_KERNELDIR: "{{ RTE_KERNELDIR }}"
 
 - file:
     path: "{{ dpdk_module_dir}}"
@@ -67,7 +85,8 @@
     dest: "{{ dpdk_module_dir }}/igb_uio.ko"
     remote_src: yes
 
-- command: depmod -a
+- name: run depmod for dpdk_kernel
+  command: depmod "{{ dpdk_kernel }}"
 
 - file:
     path: "{{ INSTALL_BIN_PATH }}"
index 730215c..1cc4f15 100644 (file)
@@ -1,6 +1,6 @@
 ---
 dpdk_make_arch: x86_64-native-linuxapp-gcc
-dpdk_module_dir: "/lib/modules/{{ ansible_kernel }}/extra"
+dpdk_module_dir: "/lib/modules/{{ dpdk_kernel }}/extra"
 hugetable_mount: /mnt/huge
 dpdk_devbind:
   "16.07": "{{ dpdk_path }}/tools/dpdk-devbind.py"
diff --git a/ansible/roles/install_prox/tasks/Debian.yml b/ansible/roles/install_prox/tasks/Debian.yml
deleted file mode 100755 (executable)
index 00a31fc..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-# Copyright (c) 2017 Intel Corporation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
----
-- name: Install PROX build dependencies
-  action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
-  with_items:
-    - pkg-config
-    - liblua5.2-dev
-    - libncurses5
-    - libncurses5-dev
-    - libncursesw5
-    - libncursesw5-dev
-    - libedit-dev
diff --git a/ansible/roles/install_prox/tasks/RedHat.yml b/ansible/roles/install_prox/tasks/RedHat.yml
deleted file mode 100644 (file)
index 69fa83b..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-# Copyright (c) 2017 Intel Corporation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
----
-- name: Install PROX build dependencies
-  action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
-  with_items:
-    - pkgconfig
-    - lua-devel
-    - ncurses-devel
-    - libedit-devel
-
diff --git a/ansible/roles/install_samplevnf/tasks/main.yml b/ansible/roles/install_samplevnf/tasks/main.yml
new file mode 100644 (file)
index 0000000..d332c88
--- /dev/null
@@ -0,0 +1,55 @@
+# Copyright (c) 2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- set_fact:
+    vnf_build_dir: "{{ samplevnf_path }}/VNFs/{{ vnf_build_dirs[vnf_name] }}"
+
+- set_fact:
+    vnf_app_name: "{{ vnf_app_names[vnf_name] }}"
+
+- name: Install extra build dependencies
+  action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+  with_items: "{{ vnf_build_dependencies.get(vnf_name, {}).get(ansible_os_family, []) }}"
+
+
+- name: set build env vars
+  set_fact:
+      build_env_vars:
+        RTE_SDK: "{{ RTE_SDK }}"
+        RTE_TARGET: "{{ RTE_TARGET }}"
+        VNF_CORE: "{{ samplevnf_path }}"
+
+- name: set soft CRC for PROX when building in VM
+  set_fact:
+      build_env_vars: "{{ build_env_vars|combine({'crc': 'soft'}) }}"
+  when: vnf_name == "PROX" and image_type is defined and image_type == "vm"
+
+- name: "make {{ vnf_name }} clean"
+  my_make: chdir="{{ vnf_build_dir }}" target=clean extra_args="-j {{ ansible_processor_vcpus }}"
+  environment: "{{ build_env_vars }}"
+
+- name: "make {{ vnf_name }}"
+  my_make: chdir="{{ vnf_build_dir }}" extra_args="-j {{ ansible_processor_vcpus }}"
+  environment: "{{ build_env_vars }}"
+
+#- command: cp "{{ vnf_build_dir }}/{{ vnf_name }}/build/ip_pipeline" "{{ INSTALL_BIN_PATH }}/vACL_vnf"
+
+- name: "Install {{ vnf_name }} VNF"
+  copy:
+    src: "{{ vnf_build_dir }}/build/{{ vnf_app_name }}"
+    dest: "{{ INSTALL_BIN_PATH }}/{{ vnf_app_name }}"
+    remote_src: True
+    # make executable
+    mode: 0755
+
similarity index 55%
rename from ansible/roles/install_prox/tasks/main.yml
rename to ansible/roles/install_samplevnf/vars/main.yml
index 93025fc..6f2c44a 100644 (file)
 # See the License for the specific language governing permissions and
 # limitations under the License.
 ---
-- include: "{{ ansible_os_family }}.yml"
-
-- name: workaround, make trailing.sh executable
-  file:
-    path: "{{ prox_path }}/helper-scripts/trailing.sh"
-    state: touch
-    mode: 0755
-  when: prox_version == "v035"
-
-- make:
-    chdir: "{{ prox_path }}"
-  environment:
-    RTE_SDK: "{{ RTE_SDK }}"
-    RTE_TARGET: "{{ RTE_TARGET }}"
-
-
+vnf_build_dependencies:
+  PROX:
+    Debian:
+      - pkg-config
+      - liblua5.2-dev
+      - libncurses5
+      - libncurses5-dev
+      - libncursesw5
+      - libncursesw5-dev
+      - libedit-dev
+    RedHat:
+      - pkgconfig
+      - lua-devel
+      - ncurses-devel
+      - libedit-devel
+vnf_build_dirs:
+  ACL: vACL
+  FW: vFW
+  CGNATP: vCGNAPT
+  UDP_Replay: UDP_Replay
+  PROX: DPPD-PROX
+vnf_app_names:
+  ACL: vACL
+  FW: vFW
+  CGNATP: vCGNAPT
+  UDP_Replay: UDP_Replay
+  PROX: prox
index 1b28763..a5555e3 100644 (file)
@@ -13,5 +13,6 @@
 # limitations under the License.
 ---
 #TREX_DOWNLOAD: "https://trex-tgn.cisco.com/trex/release/v2.05.tar.gz"
-TREX_VERSION: v2.20
+TREX_VERSION: v2.28
 TREX_DOWNLOAD: "{{ nsb_mirror_url|ternary(nsb_mirror_url, 'https://trex-tgn.cisco.com/trex/release') }}/{{ TREX_VERSION }}.tar.gz"
+INSTALL_BIN_PATH: "/opt/nsb_bin"
index 4818a80..7ba1fc8 100644 (file)
 # See the License for the specific language governing permissions and
 # limitations under the License.
 ---
-- set_fact:
-    trex_file: "{{ trex_url|basename|regex_replace('[.]tar.gz', '') }}"
-
 
 - file: path="{{ INSTALL_BIN_PATH }}/trex" state=absent
 - file: path="{{ INSTALL_BIN_PATH }}/trex" state=directory
 
+- command: mv "{{ trex_dest }}/{{ trex_unarchive }}" "{{ INSTALL_BIN_PATH }}/trex/scripts"
+
+# Don't overwrite igb_uio.ko compiled from DPDK
 
-- command: mv "{{ clone_dest }}/{{ trex_unarchive }}" "{{ INSTALL_BIN_PATH }}/trex/scripts"
+- name: fix stl __init__.py for python module
+  file:
+    path: "{{ INSTALL_BIN_PATH }}/trex/scripts/automation/trex_control_plane/stl/__init__.py"
+    state: touch
 
-- file: path="{{ INSTALL_BIN_PATH }}/trex/scripts/automation/trex_control_plane/stl/__init__.py" state=touch
+- name: "symlink client to {{ INSTALL_BIN_PATH }}/trex_client"
+  file:
+    src: "{{ INSTALL_BIN_PATH }}/trex/scripts/automation/trex_control_plane"
+    dest: "{{ INSTALL_BIN_PATH }}/trex_client"
+    state: link
 
 # Don't use trex/scripts/dpdk_nic_bind.py use DPDK usertools/dpdk-devbind.py
 #- command: cp "{{ INSTALL_BIN_PATH }}/trex/scripts/dpdk_nic_bind.py" "{{ INSTALL_BIN_PATH }}"
diff --git a/ansible/roles/install_vnf_vACL/tasks/main.yml b/ansible/roles/install_vnf_vACL/tasks/main.yml
deleted file mode 100644 (file)
index ff2e769..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright (c) 2017 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
----
-- name: vACL_vnf make clean
-  my_make: chdir="{{ acl_build_dir }}" target=clean extra_args="-j {{ ansible_processor_vcpus }}"
-  environment:
-    RTE_SDK: "{{ RTE_SDK }}"
-    RTE_TARGET: "{{ RTE_TARGET }}"
-    VNF_CORE: "{{ samplevnf_path }}"
-
-- name: make vACL VNF
-  my_make: chdir="{{ acl_build_dir }}" extra_args="-j {{ ansible_processor_vcpus }}"
-  environment:
-    RTE_SDK: "{{ RTE_SDK }}"
-    RTE_TARGET: "{{ RTE_TARGET }}"
-    VNF_CORE: "{{ samplevnf_path }}"
-
-#- command: cp "{{ acl_build_dir }}/vACL/build/ip_pipeline" "{{ INSTALL_BIN_PATH }}/vACL_vnf"
-- name: Install vACL VNF
-  copy:
-    src: "{{ acl_build_dir }}/build/vACL"
-    dest: "{{ INSTALL_BIN_PATH }}/vACL"
-    remote_src: True
-    # make executable
-    mode: 0755
-
-#- command: cp "{{ acl_build_dir }}/vACL/config/full_tm_profile_10G.cfg" "{{ INSTALL_BIN_PATH }}/"
-#- copy:
-#    src: "{{ acl_build_dir }}/vACL/config/full_tm_profile_10G.cfg"
-#    dest: "{{ INSTALL_BIN_PATH }}/"
diff --git a/ansible/roles/install_vnf_vACL/vars/main.yml b/ansible/roles/install_vnf_vACL/vars/main.yml
deleted file mode 100644 (file)
index ee61bf1..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
----
-acl_build_dir: "{{ samplevnf_path }}/VNFs/vACL"
\ No newline at end of file
diff --git a/ansible/roles/install_vnf_vCGNAPT/tasks/main.yml b/ansible/roles/install_vnf_vCGNAPT/tasks/main.yml
deleted file mode 100644 (file)
index 9f8458f..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright (c) 2017 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
----
-- name: vCGNAPT_vnf make clean
-  my_make: chdir="{{ acl_build_dir }}" target=clean extra_args="-j {{ ansible_processor_vcpus }}"
-  environment:
-    RTE_SDK: "{{ RTE_SDK }}"
-    RTE_TARGET: "{{ RTE_TARGET }}"
-    VNF_CORE: "{{ samplevnf_path }}"
-
-- name: make vCGNAPT VNF
-  my_make: chdir="{{ acl_build_dir }}" extra_args="-j {{ ansible_processor_vcpus }}"
-  environment:
-    RTE_SDK: "{{ RTE_SDK }}"
-    RTE_TARGET: "{{ RTE_TARGET }}"
-    VNF_CORE: "{{ samplevnf_path }}"
-
-#- command: cp "{{ acl_build_dir }}/vCGNAPT/build/ip_pipeline" "{{ INSTALL_BIN_PATH }}/vCGNAPT_vnf"
-- name: Install vCGNAPT VNF
-  copy:
-    src: "{{ acl_build_dir }}/build/vCGNAPT"
-    dest: "{{ INSTALL_BIN_PATH }}/vCGNAPT"
-    remote_src: True
-    # make executable
-    mode: 0755
-
-#- command: cp "{{ acl_build_dir }}/vCGNAPT/config/full_tm_profile_10G.cfg" "{{ INSTALL_BIN_PATH }}/"
-#- copy:
-#    src: "{{ acl_build_dir }}/vCGNAPT/config/full_tm_profile_10G.cfg"
-#    dest: "{{ INSTALL_BIN_PATH }}/"
diff --git a/ansible/roles/install_vnf_vCGNAPT/vars/main.yml b/ansible/roles/install_vnf_vCGNAPT/vars/main.yml
deleted file mode 100644 (file)
index cca1a89..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
----
-acl_build_dir: "{{ samplevnf_path }}/VNFs/vCGNAPT"
\ No newline at end of file
diff --git a/ansible/roles/install_vnf_vFW/tasks/main.yml b/ansible/roles/install_vnf_vFW/tasks/main.yml
deleted file mode 100644 (file)
index cb3df3e..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-# Copyright (c) 2017 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
----
-- name: vFW_vnf make clean
-  my_make: chdir="{{ vfw_build_dir }}" target=clean extra_args="-j {{ ansible_processor_vcpus }}"
-  environment:
-    RTE_SDK: "{{ RTE_SDK }}"
-    RTE_TARGET: "{{ RTE_TARGET }}"
-    VNF_CORE: "{{ samplevnf_path }}"
-
-#- name: make vFW VNF
-#  my_make: chdir="{{ vfw_build_dir }}" extra_args="-j {{ ansible_processor_vcpus }}"
-#  environment:
-#    RTE_SDK: "{{ RTE_SDK }}"
-#    RTE_TARGET: "{{ RTE_TARGET }}"
-#    VNF_CORE: "{{ samplevnf_path }}"
-
-- name: make vFW VNF
-  command: make chdir="{{ vfw_build_dir }}" extra_args="-j {{ ansible_processor_vcpus }}" all
-  args:
-    chdir: "{{ vfw_build_dir }}"
-  environment:
-    RTE_SDK: "{{ RTE_SDK }}"
-    RTE_TARGET: "{{ RTE_TARGET }}"
-    VNF_CORE: "{{ samplevnf_path }}"
-
-#- command: cp "{{ vfw_build_dir }}/vFW/build/ip_pipeline" "{{ INSTALL_BIN_PATH }}/vFW_vnf"
-- name: Install vFW VNF
-  copy:
-    src: "{{ vfw_build_dir }}/build/vFW"
-    dest: "{{ INSTALL_BIN_PATH }}/vFW"
-    remote_src: True
-    # make executable
-    mode: 0755
-
-#- command: cp "{{ vfw_build_dir }}/vFW/config/full_tm_profile_10G.cfg" "{{ INSTALL_BIN_PATH }}/"
-#- copy:
-#    src: "{{ vfw_build_dir }}/vFW/config/full_tm_profile_10G.cfg"
-#    dest: "{{ INSTALL_BIN_PATH }}/"
diff --git a/ansible/roles/install_vnf_vFW/vars/main.yml b/ansible/roles/install_vnf_vFW/vars/main.yml
deleted file mode 100644 (file)
index 8a8a398..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
----
-vfw_build_dir: "{{ samplevnf_path }}/VNFs/vFW"
\ No newline at end of file
diff --git a/ansible/roles/install_vnf_vPE/tasks/main.yml b/ansible/roles/install_vnf_vPE/tasks/main.yml
deleted file mode 100644 (file)
index 91d449a..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-# Copyright (c) 2017 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
----
-- name: vPE_vnf make clean
-  my_make: chdir="{{ vpe_build_dir }}" target=clean extra_args="-j {{ ansible_processor_vcpus }}"
-  environment:
-    RTE_SDK: "{{ RTE_SDK }}"
-    RTE_TARGET: "{{ RTE_TARGET }}"
-
-- name: make vPE VNF
-  my_make: chdir="{{ vpe_build_dir }}" extra_args="-j {{ ansible_processor_vcpus }}"
-  environment:
-    RTE_SDK: "{{ RTE_SDK }}"
-    RTE_TARGET: "{{ RTE_TARGET }}"
-
-#- command: cp "{{ vpe_build_dir }}/vPE/build/ip_pipeline" "{{ INSTALL_BIN_PATH }}/vPE_vnf"
-- name: Install vPE_vnf
-  copy:
-    src: "{{ vpe_build_dir }}/build/ip_pipeline"
-    dest: "{{ INSTALL_BIN_PATH }}/vPE_vnf"
-    remote_src: True
-
-#- command: cp "{{ vpe_build_dir }}/vPE/config/full_tm_profile_10G.cfg" "{{ INSTALL_BIN_PATH }}/"
-#- copy:
-#    src: "{{ vpe_build_dir }}/vPE/config/full_tm_profile_10G.cfg"
-#    dest: "{{ INSTALL_BIN_PATH }}/"
diff --git a/ansible/roles/install_vnf_vPE/vars/main.yml b/ansible/roles/install_vnf_vPE/vars/main.yml
deleted file mode 100644 (file)
index fe0a972..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
----
-vpe_build_dir: "{{ dpdk_path }}/examples/ip_pipeline"
\ No newline at end of file
index 50094f2..4e6de69 100644 (file)
@@ -21,7 +21,7 @@
   file:
     path: "{{ resolv_conf_stat.stat.lnk_source|dirname }}"
     state: directory
-    mode: 755
+    mode: 0755
 
 - name: Override resolv.conf link source with specific nameserver
   template:
 # See the License for the specific language governing permissions and
 # limitations under the License.
 ---
-- hosts: chroot_image
-  connection: chroot
+- hosts: all
   vars:
     clone_dir: /tmp/yardstick-clone
 
-  pre_tasks:
-    - debug: msg="chrooted in {{ inventory_hostname }}"
 
   roles:
-    - reset_resolv_conf
     - add_custom_repos
     - role: set_package_installer_proxy
       when: proxy_env is defined and proxy_env
-    # can update grub in chroot/docker
-#    - enable_hugepages_on_boot
-    - modify_cloud_config
+#     can't update grub in chroot/docker
+    - enable_hugepages_on_boot
     - install_image_dependencies
     - role: download_dpdk
-      dpdk_version: "16.07"
+#      dpdk_version: "17.02"
     - install_dpdk
-    # vPE is part of DPDK so we don't need to copy it
-    - install_vnf_vPE
-#    - copy_L4Replay
-#    - install_L4Replay
-#    - copy_trex
-#    - install_trex
+    - download_trex
+    - install_trex
+    - download_samplevnfs
+    - role: install_samplevnf
+      vnf_name: PROX
+    - role: install_samplevnf
+      vnf_name: UDP_Replay
+    - role: install_samplevnf
+      vnf_name: ACL
+    - role: install_samplevnf
+      vnf_name: FW
+    - role: install_samplevnf
+      vnf_name: CGNATP
 
index 950655e..099d580 100644 (file)
@@ -25,6 +25,8 @@
     - reset_resolv_conf
     - add_custom_repos
     - modify_cloud_config
+    - role: set_package_installer_proxy
+      when: proxy_env is defined and proxy_env
     - install_image_dependencies
     - download_unixbench
     - install_unixbench
diff --git a/ansible/ubuntu_server_cloudimg_modify_cgnapt.yml b/ansible/ubuntu_server_cloudimg_modify_cgnapt.yml
deleted file mode 100644 (file)
index 3f2a179..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright (c) 2017 Intel Corporation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
----
-- hosts: chroot_image
-  connection: chroot
-  vars:
-    clone_dir: /tmp/yardstick-clone
-
-  pre_tasks:
-    - debug: msg="chrooted in {{ inventory_hostname }}"
-
-  roles:
-#    - reset_resolv_conf
-#    - add_custom_repos
-#    - role: set_package_installer_proxy
-#      when: proxy_env is defined and proxy_env
-    # can update grub in chroot/docker
-#    - enable_hugepages_on_boot
-#    - modify_cloud_config
-#    - install_image_dependencies
-#    - role: download_dpdk
-#      dpdk_version: "16.07"
-#    - install_dpdk
-#    - download_samplevnfs
-    - install_vnf_vCGNAPT
-#    - copy_L4Replay
-#    - install_L4Replay
-#    - copy_trex
-#    - install_trex
-
index 2a087ce..6bbb383 100644 (file)
@@ -25,6 +25,8 @@
     - add_custom_repos
     - enable_hugepages_on_boot
     - modify_cloud_config
+    - role: set_package_installer_proxy
+      when: proxy_env is defined and proxy_env
     - install_image_dependencies
     - download_unixbench
     - install_unixbench
     clone_dir: /tmp/yardstick-clone
 
   pre_tasks:
-    - debug: msg="chrooted in {{ inventory_hostname }}"
+    - debug:
+        msg: "chrooted in {{ inventory_hostname }}"
+    - debug:
+        var: proxy_env
+        verbosity: 2
 
   roles:
     - reset_resolv_conf
     - add_custom_repos
     - role: set_package_installer_proxy
       when: proxy_env is defined and proxy_env
-    # can update grub in chroot/docker
-#    - enable_hugepages_on_boot
+#     can't update grub in chroot/docker
+    - enable_hugepages_on_boot
     - modify_cloud_config
     - install_image_dependencies
-#    - role: download_dpdk
-#      dpdk_version: "16.07"
-#    - install_dpdk
-#    - download_samplevnfs
-#    - install_vnf_vACL
-#    - copy_L4Replay
-#    - install_L4Replay
-    - copy_trex
+    - role: download_dpdk
+#      dpdk_version: "17.02"
+    - install_dpdk
+    - download_trex
     - install_trex
+    - download_samplevnfs
+    - role: install_samplevnf
+      vnf_name: PROX
+    - role: install_samplevnf
+      vnf_name: UDP_Replay
+    - role: install_samplevnf
+      vnf_name: ACL
+    - role: install_samplevnf
+      vnf_name: FW
+    - role: install_samplevnf
+      vnf_name: CGNATP
 
diff --git a/ansible/ubuntu_server_cloudimg_modify_vfw.yml b/ansible/ubuntu_server_cloudimg_modify_vfw.yml
deleted file mode 100644 (file)
index f8cd3ec..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright (c) 2017 Intel Corporation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
----
-- hosts: chroot_image
-  connection: chroot
-  vars:
-    clone_dir: /tmp/yardstick-clone
-
-  pre_tasks:
-    - debug: msg="chrooted in {{ inventory_hostname }}"
-
-  roles:
-#    - reset_resolv_conf
-#    - add_custom_repos
-#    - role: set_package_installer_proxy
-#      when: proxy_env is defined and proxy_env
-    # can update grub in chroot/docker
-#    - enable_hugepages_on_boot
-#    - modify_cloud_config
-#    - install_image_dependencies
-#    - role: download_dpdk
-#      dpdk_version: "16.07"
-#    - install_dpdk
-#    - download_samplevnfs
-    - install_vnf_vFW
-#    - copy_L4Replay
-#    - install_L4Replay
-#    - copy_trex
-#    - install_trex
-
index 1bc32bf..e4f1dd6 100644 (file)
@@ -87,6 +87,11 @@ class V2ImageHandler(object):
             raise ValueError
         return image
 
+    def delete_by_uuid(self, uuid):
+        image = self.get_by_uuid(uuid)
+        db_session.delete(image)
+        db_session.commit()
+
 
 class V2PodHandler(object):
 
index 1e85559..59dab3e 100644 (file)
@@ -48,9 +48,6 @@ class V2Image(Base):
     name = Column(String(30))
     description = Column(Text)
     environment_id = Column(String(30))
-    size = Column(String(30))
-    status = Column(String(30))
-    time = Column(DateTime)
 
 
 class V2Container(Base):
index f021a3c..158e98b 100644 (file)
@@ -35,6 +35,9 @@ class V2Environments(ApiResource):
             container_info = e['container_id']
             e['container_id'] = jsonutils.loads(container_info) if container_info else {}
 
+            image_id = e['image_id']
+            e['image_id'] = image_id.split(',') if image_id else []
+
         data = {
             'environments': environments
         }
@@ -78,8 +81,13 @@ class V2Environment(ApiResource):
             return result_handler(consts.API_ERROR, 'no such environment id')
 
         environment = change_obj_to_dict(environment)
+
         container_id = environment['container_id']
         environment['container_id'] = jsonutils.loads(container_id) if container_id else {}
+
+        image_id = environment['image_id']
+        environment['image_id'] = image_id.split(',') if image_id else []
+
         return result_handler(consts.API_SUCCESS, {'environment': environment})
 
     def delete(self, environment_id):
index 8359e10..0c36a0a 100644 (file)
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 import logging
-import subprocess
+import os
+import uuid
 import threading
+import requests
+import datetime
 
 from api import ApiResource
+from api.database.v2.handlers import V2ImageHandler
+from api.database.v2.handlers import V2EnvironmentHandler
 from yardstick.common.utils import result_handler
 from yardstick.common.utils import source_env
 from yardstick.common.utils import change_obj_to_dict
 from yardstick.common.openstack_utils import get_nova_client
+from yardstick.common.openstack_utils import get_glance_client
 from yardstick.common import constants as consts
 
 LOG = logging.getLogger(__name__)
 LOG.setLevel(logging.DEBUG)
 
+IMAGE_MAP = {
+    'yardstick-image': {
+        'path': os.path.join(consts.IMAGE_DIR, 'yardstick-image.img'),
+        'url': 'http://artifacts.opnfv.org/yardstick/images/yardstick-image.img'
+    },
+    'Ubuntu-16.04': {
+        'path': os.path.join(consts.IMAGE_DIR, 'xenial-server-cloudimg-amd64-disk1.img'),
+        'url': 'cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img'
+    },
+    'cirros-0.3.5': {
+        'path': os.path.join(consts.IMAGE_DIR, 'cirros-0.3.5-x86_64-disk.img'),
+        'url': 'http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img'
+    }
+}
+
 
 class V2Images(ApiResource):
 
     def get(self):
         try:
             source_env(consts.OPENRC)
-        except:
+        except Exception:
             return result_handler(consts.API_ERROR, 'source openrc error')
 
         nova_client = get_nova_client()
         try:
             images_list = nova_client.images.list()
-        except:
+        except Exception:
             return result_handler(consts.API_ERROR, 'get images error')
         else:
-            images = [self.get_info(change_obj_to_dict(i)) for i in images_list]
-            status = 1 if all(i['status'] == 'ACTIVE' for i in images) else 0
-            if not images:
-                status = 0
+            images = {i.name: self.get_info(change_obj_to_dict(i)) for i in images_list}
 
-        return result_handler(consts.API_SUCCESS, {'status': status, 'images': images})
+        return result_handler(consts.API_SUCCESS, {'status': 1, 'images': images})
 
     def post(self):
         return self._dispatch_post()
 
     def get_info(self, data):
+        try:
+            size = data['OS-EXT-IMG-SIZE:size']
+        except KeyError:
+            size = None
+        else:
+            size = float(size) / 1024 / 1024
+
         result = {
             'name': data.get('name', ''),
-            'size': data.get('OS-EXT-IMG-SIZE:size', ''),
-            'status': data.get('status', ''),
-            'time': data.get('updated', '')
+            'description': data.get('description', ''),
+            'size': size,
+            'status': data.get('status'),
+            'time': data.get('updated')
         }
         return result
 
     def load_image(self, args):
-        thread = threading.Thread(target=self._load_images)
+        try:
+            image_name = args['name']
+        except KeyError:
+            return result_handler(consts.API_ERROR, 'image name must provided')
+
+        if image_name not in IMAGE_MAP:
+            return result_handler(consts.API_ERROR, 'wrong image name')
+
+        thread = threading.Thread(target=self._do_load_image, args=(image_name,))
         thread.start()
+        return result_handler(consts.API_SUCCESS, {'image': image_name})
+
+    def upload_image(self, args):
+        try:
+            image_file = args['file']
+        except KeyError:
+            return result_handler(consts.API_ERROR, 'file must be provided')
+
+        try:
+            environment_id = args['environment_id']
+        except KeyError:
+            return result_handler(consts.API_ERROR, 'environment_id must be provided')
+
+        try:
+            uuid.UUID(environment_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'invalid environment id')
+
+        environment_handler = V2EnvironmentHandler()
+        try:
+            environment = environment_handler.get_by_uuid(environment_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'no such environment')
+
+        file_path = os.path.join(consts.IMAGE_DIR, image_file.filename)
+        LOG.info('saving file')
+        image_file.save(file_path)
+
+        LOG.info('loading image')
+        self._load_image(image_file.filename, file_path)
+
+        LOG.info('creating image in DB')
+        image_handler = V2ImageHandler()
+        image_id = str(uuid.uuid4())
+        image_init_data = {
+            'uuid': image_id,
+            'name': image_file.filename,
+            'environment_id': environment_id
+        }
+        image_handler.insert(image_init_data)
+
+        LOG.info('update image in environment')
+        if environment.image_id:
+            image_list = environment.image_id.split(',')
+            image_list.append(image_id)
+            new_image_id = ','.join(image_list)
+        else:
+            new_image_id = image_id
+
+        environment_handler.update_attr(environment_id, {'image_id': new_image_id})
+
+        return result_handler(consts.API_SUCCESS, {'uuid': image_id})
+
+    def upload_image_by_url(self, args):
+        try:
+            url = args['url']
+        except KeyError:
+            return result_handler(consts.API_ERROR, 'url must be provided')
+
+        try:
+            environment_id = args['environment_id']
+        except KeyError:
+            return result_handler(consts.API_ERROR, 'environment_id must be provided')
+
+        try:
+            uuid.UUID(environment_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'invalid environment id')
+
+        environment_handler = V2EnvironmentHandler()
+        try:
+            environment = environment_handler.get_by_uuid(environment_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'no such environment')
+
+        thread = threading.Thread(target=self._do_upload_image_by_url, args=(url,))
+        thread.start()
+
+        file_name = url.split('/')[-1]
+
+        LOG.info('creating image in DB')
+        image_handler = V2ImageHandler()
+        image_id = str(uuid.uuid4())
+        image_init_data = {
+            'uuid': image_id,
+            'name': file_name,
+            'environment_id': environment_id
+        }
+        image_handler.insert(image_init_data)
+
+        LOG.info('update image in environment')
+        if environment.image_id:
+            image_list = environment.image_id.split(',')
+            image_list.append(image_id)
+            new_image_id = ','.join(image_list)
+        else:
+            new_image_id = image_id
+
+        environment_handler.update_attr(environment_id, {'image_id': new_image_id})
+
+        return result_handler(consts.API_SUCCESS, {'uuid': image_id})
+
+    def delete_image(self, args):
+        try:
+            image_name = args['name']
+        except KeyError:
+            return result_handler(consts.API_ERROR, 'image name must provided')
+
+        if image_name not in IMAGE_MAP:
+            return result_handler(consts.API_ERROR, 'wrong image name')
+
+        glance_client = get_glance_client()
+        try:
+            image = next((i for i in glance_client.images.list() if i.name == image_name))
+        except StopIteration:
+            return result_handler(consts.API_ERROR, 'can not find image')
+
+        glance_client.images.delete(image.id)
+
         return result_handler(consts.API_SUCCESS, {})
 
-    def _load_images(self):
+    def _do_upload_image_by_url(self, url):
+        file_name = url.split('/')[-1]
+        path = os.path.join(consts.IMAGE_DIR, file_name)
+
+        LOG.info('download image')
+        self._download_image(url, path)
+
+        LOG.info('loading image')
+        self._load_image(file_name, path)
+
+    def _do_load_image(self, image_name):
+        if not os.path.exists(IMAGE_MAP[image_name]['path']):
+            self._download_image(IMAGE_MAP[image_name]['url'],
+                                 IMAGE_MAP[image_name]['path'])
+
+        self._load_image(image_name, IMAGE_MAP[image_name]['path'])
+
+    def _load_image(self, image_name, image_path):
         LOG.info('source openrc')
         source_env(consts.OPENRC)
 
-        LOG.info('clean images')
-        cmd = [consts.CLEAN_IMAGES_SCRIPT]
-        p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
-                             cwd=consts.REPOS_DIR)
-        _, err = p.communicate()
-        if p.returncode != 0:
-            LOG.error('clean image failed: %s', err)
-
-        LOG.info('load images')
-        cmd = [consts.LOAD_IMAGES_SCRIPT]
-        p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
-                             cwd=consts.REPOS_DIR)
-        _, err = p.communicate()
-        if p.returncode != 0:
-            LOG.error('load image failed: %s', err)
+        LOG.info('load image')
+        glance_client = get_glance_client()
+        image = glance_client.images.create(name=image_name,
+                                            visibility='public',
+                                            disk_format='qcow2',
+                                            container_format='bare')
+        with open(image_path, 'rb') as f:
+            glance_client.images.upload(image.id, f)
 
         LOG.info('Done')
+
+    def _download_image(self, url, path):
+        start = datetime.datetime.now().replace(microsecond=0)
+
+        LOG.info('download image from: %s', url)
+        self._download_file(url, path)
+
+        end = datetime.datetime.now().replace(microsecond=0)
+        LOG.info('download image success, total: %s s', end - start)
+
+    def _download_handler(self, start, end, url, filename):
+
+        headers = {'Range': 'bytes=%d-%d' % (start, end)}
+        r = requests.get(url, headers=headers, stream=True)
+
+        with open(filename, "r+b") as fp:
+            fp.seek(start)
+            fp.tell()
+            fp.write(r.content)
+
+    def _download_file(self, url, path, num_thread=5):
+
+        r = requests.head(url)
+        try:
+            file_size = int(r.headers['content-length'])
+        except Exception:
+            return
+
+        with open(path, 'wb') as f:
+            f.truncate(file_size)
+
+        thread_list = []
+        part = file_size // num_thread
+        for i in range(num_thread):
+            start = part * i
+            end = start + part if i != num_thread - 1 else file_size
+
+            kwargs = {'start': start, 'end': end, 'url': url, 'filename': path}
+            t = threading.Thread(target=self._download_handler, kwargs=kwargs)
+            t.setDaemon(True)
+            t.start()
+            thread_list.append(t)
+
+        for t in thread_list:
+            t.join()
+
+
+class V2Image(ApiResource):
+    def get(self, image_id):
+        try:
+            uuid.UUID(image_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'invalid image id')
+
+        image_handler = V2ImageHandler()
+        try:
+            image = image_handler.get_by_uuid(image_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'no such image id')
+
+        nova_client = get_nova_client()
+        images = nova_client.images.list()
+        try:
+            image = next((i for i in images if i.name == image.name))
+        except StopIteration:
+            pass
+
+        return_image = self.get_info(change_obj_to_dict(image))
+        return_image['id'] = image_id
+
+        return result_handler(consts.API_SUCCESS, {'image': return_image})
+
+    def delete(self, image_id):
+        try:
+            uuid.UUID(image_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'invalid image id')
+
+        image_handler = V2ImageHandler()
+        try:
+            image = image_handler.get_by_uuid(image_id)
+        except ValueError:
+            return result_handler(consts.API_ERROR, 'no such image id')
+
+        LOG.info('delete image in openstack')
+        glance_client = get_glance_client()
+        try:
+            image_o = next((i for i in glance_client.images.list() if i.name == image.name))
+        except StopIteration:
+            return result_handler(consts.API_ERROR, 'can not find image')
+
+        glance_client.images.delete(image_o.id)
+
+        LOG.info('delete image in environment')
+        environment_id = image.environment_id
+        environment_handler = V2EnvironmentHandler()
+        environment = environment_handler.get_by_uuid(environment_id)
+        image_list = environment.image_id.split(',')
+        image_list.remove(image_id)
+        environment_handler.update_attr(environment_id, {'image_id': ','.join(image_list)})
+
+        LOG.info('delete image in DB')
+        image_handler.delete_by_uuid(image_id)
+
+        return result_handler(consts.API_SUCCESS, {'image': image_id})
+
+    def get_info(self, data):
+        try:
+            size = data['OS-EXT-IMG-SIZE:size']
+        except KeyError:
+            size = None
+        else:
+            size = float(size) / 1024 / 1024
+
+        result = {
+            'name': data.get('name', ''),
+            'description': data.get('description', ''),
+            'size': size,
+            'status': data.get('status'),
+            'time': data.get('updated')
+        }
+        return result
index 158b8a5..37a1ab6 100644 (file)
@@ -35,6 +35,7 @@ except ImportError:
 LOG = logging.getLogger(__name__)
 
 app = Flask(__name__)
+app.config['MAX_CONTENT_LENGTH'] = 2 * 1024 * 1024 * 1024
 
 Swagger(app)
 
index 83cf4da..9b0040b 100644 (file)
@@ -36,6 +36,7 @@ urlpatterns = [
 
     Url('/api/v2/yardstick/images', 'v2_images'),
     Url('/api/v2/yardstick/images/action', 'v2_images'),
+    Url('/api/v2/yardstick/images/<image_id>', 'v2_image'),
 
     Url('/api/v2/yardstick/containers', 'v2_containers'),
     Url('/api/v2/yardstick/containers/action', 'v2_containers'),
index 74009f5..1ac1d3f 100755 (executable)
@@ -20,6 +20,7 @@ server {
     index  index.htm index.html;
     location / {
         include uwsgi_params;
+        client_max_body_size    2000m;
         uwsgi_pass unix:///var/run/yardstick.sock;
     }
 
index 3eb17bb..7c53279 100644 (file)
@@ -103,7 +103,7 @@ Config yardstick conf
     cp ./etc/yardstick/yardstick.conf.sample /etc/yardstick/yardstick.conf
     vi /etc/yardstick/yardstick.conf
 
-Add trex_path and bin_path in 'nsb' section.
+Add trex_path, trex_client_lib and bin_path in 'nsb' section.
 
 ::
 
@@ -121,6 +121,7 @@ Add trex_path and bin_path in 'nsb' section.
   [nsb]
   trex_path=/opt/nsb_bin/trex/scripts
   bin_path=/opt/nsb_bin
+  trex_client_lib=/opt/nsb_bin/trex_client/stl
 
 
 Config pod.yaml describing Topology
diff --git a/docs/testing/user/userguide/opnfv_yardstick_tc006.rst b/docs/testing/user/userguide/opnfv_yardstick_tc006.rst
new file mode 100644 (file)
index 0000000..d2d6467
--- /dev/null
@@ -0,0 +1,119 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International
+.. License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) OPNFV, Huawei Technologies Co.,Ltd and others.
+
+*************************************
+Yardstick Test Case Description TC006
+*************************************
+
+.. _fio: http://bluestop.org/files/fio/HOWTO.txt
+
++-----------------------------------------------------------------------------+
+|Volume storage Performance                                                   |
+|                                                                             |
++--------------+--------------------------------------------------------------+
+|test case id  | OPNFV_YARDSTICK_TC006_VOLUME STORAGE PERFORMANCE             |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|metric        | IOPS (Average IOs performed per second),                     |
+|              | Throughput (Average disk read/write bandwidth rate),         |
+|              | Latency (Average disk read/write latency)                    |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|test purpose  | The purpose of TC006 is to evaluate the IaaS volume storage  |
+|              | performance with regards to IOPS, throughput and latency.    |
+|              |                                                              |
+|              | The purpose is also to be able to spot the trends.           |
+|              | Test results, graphs and similar shall be stored for         |
+|              | comparison reasons and product evolution understanding       |
+|              | between different OPNFV versions and/or configurations.      |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|test tool     | fio                                                          |
+|              |                                                              |
+|              | fio is an I/O tool meant to be used both for benchmark and   |
+|              | stress/hardware verification. It has support for 19          |
+|              | different types of I/O engines (sync, mmap, libaio,          |
+|              | posixaio, SG v3, splice, null, network, syslet, guasi,       |
+|              | solarisaio, and more), I/O priorities (for newer Linux       |
+|              | kernels), rate I/O, forked or threaded jobs, and much more.  |
+|              |                                                              |
+|              | (fio is not always part of a Linux distribution, hence it    |
+|              | needs to be installed. As an example see the                 |
+|              | /yardstick/tools/ directory for how to generate a Linux      |
+|              | image with fio included.)                                    |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|test          | fio test is invoked in a host VM with a volume attached on a |
+|description   | compute blade, a job file as well as parameters are passed   |
+|              | to fio and fio will start doing what the job file tells it   |
+|              | to do.                                                       |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|configuration | file: opnfv_yardstick_tc006.yaml                             |
+|              |                                                              |
+|              | Fio job file is provided to define the benchmark process     |
+|              | Target volume is mounted at /FIO_Test directory              |
+|              |                                                              |
+|              | For SLA, minimum read/write iops is set to 100,              |
+|              | minimum read/write throughput is set to 400 KB/s,            |
+|              | and maximum read/write latency is set to 20000 usec.         |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|applicability | This test case can be configured with different:             |
+|              |                                                              |
+|              |   * Job file;                                                |
+|              |   * Volume mount directory.                                  |
+|              |                                                              |
+|              | SLA is optional. The SLA in this test case serves as an      |
+|              | example. Considerably higher throughput and lower latency    |
+|              | are expected. However, to cover most configurations, both    |
+|              | baremetal and fully virtualized ones, this value should be   |
+|              | possible to achieve and acceptable for black box testing.    |
+|              | Many heavy IO applications start to suffer badly if the      |
+|              | read/write bandwidths are lower than this.                   |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|usability     | This test case is one of Yardstick's generic tests. Thus it  |
+|              | is runnable on most of the scenarios.                        |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|references    | fio_                                                         |
+|              |                                                              |
+|              | ETSI-NFV-TST001                                              |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|pre-test      | The test case image needs to be installed into Glance        |
+|conditions    | with fio included in it.                                     |
+|              |                                                              |
+|              | No POD specific requirements have been identified.           |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|test sequence | description and expected result                              |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 1        | A host VM with fio installed is booted.                      |
+|              | A 200G volume is attached to the host VM                     |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 2        | Yardstick is connected with the host VM by using ssh.        |
+|              | 'job_file.ini' is copied from Jump Host to the host VM via   |
+|              | the ssh tunnel. The attached volume is formatted and mounted.|
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 3        | Fio benchmark is invoked. Simulated IO operations are        |
+|              | started. IOPS, disk read/write bandwidth and latency are     |
+|              | recorded and checked against the SLA. Logs are produced and  |
+|              | stored.                                                      |
+|              |                                                              |
+|              | Result: Logs are stored.                                     |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 4        | The host VM is deleted.                                      |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|test verdict  | Fails only if SLA is not passed, or if there is a test case  |
+|              | execution problem.                                           |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
diff --git a/docs/testing/user/userguide/opnfv_yardstick_tc056.rst b/docs/testing/user/userguide/opnfv_yardstick_tc056.rst
new file mode 100644 (file)
index 0000000..01aa99a
--- /dev/null
@@ -0,0 +1,149 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International
+.. License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) OPNFV, Yin Kanglin and others.
+.. 14_ykl@tongji.edu.cn
+
+*************************************
+Yardstick Test Case Description TC056
+*************************************
+
++-----------------------------------------------------------------------------+
+|OpenStack Controller Messaging Queue Service High Availability               |
++==============+==============================================================+
+|test case id  | OPNFV_YARDSTICK_TC056:OpenStack Controller Messaging Queue   |
+|              | Service High Availability                                    |
++--------------+--------------------------------------------------------------+
+|test purpose  | This test case will verify the high availability of the      |
+|              | messaging queue service(RabbitMQ) that supports OpenStack on |
+|              | controller node. When messaging queue service(which is       |
+|              | active) of a specified controller node is killed, the test   |
+|              | case will check whether messaging queue services(which are   |
+|              | standby) on other controller nodes will be switched active,  |
+|              | and whether the cluster manager on the attacked controller   |
+|              | node will restart the stopped messaging queue.               |
++--------------+--------------------------------------------------------------+
+|test method   | This test case kills the processes of messaging queue        |
+|              | service on a selected controller node, then checks whether   |
+|              | the request of the related Openstack command is OK and the   |
+|              | killed processes are recovered.                              |
++--------------+--------------------------------------------------------------+
+|attackers     | In this test case, an attacker called "kill-process" is      |
+|              | needed. This attacker includes three parameters:             |
+|              | 1) fault_type: which is used for finding the attacker's      |
+|              | scripts. It should be always set to "kill-process" in this   |
+|              | test case.                                                   |
+|              | 2) process_name: which is the process name of the specified  |
+|              | OpenStack service. If there are multiple processes use the   |
+|              | same name on the host, all of them are killed by this        |
+|              | attacker.                                                    |
+|              | In this case, this parameter should be set to "rabbitmq".    |
+|              | 3) host: which is the name of a control node being attacked. |
+|              |                                                              |
+|              | e.g.                                                         |
+|              | -fault_type: "kill-process"                                  |
+|              | -process_name: "rabbitmq-server"                             |
+|              | -host: node1                                                 |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|monitors      | In this test case, two kinds of monitor are needed:          |
+|              | 1. the "openstack-cmd" monitor constantly request a specific |
+|              | Openstack command, which needs two parameters:               |
+|              | 1) monitor_type: which is used for finding the monitor class |
+|              | and related scripts. It should be always set to              |
+|              | "openstack-cmd" for this monitor.                            |
+|              | 2) command_name: which is the command name used for request. |
+|              |                                                              |
+|              | 2. the "process" monitor check whether a process is running  |
+|              | on a specific node, which needs three parameters:            |
+|              | 1) monitor_type: which is used for finding the monitor class |
+|              | and related scripts. It should be always set to "process"    |
+|              | for this monitor.                                            |
+|              | 2) process_name: which is the process name for monitor       |
+|              | 3) host: which is the name of the node running the process   |
+|              | In this case, the command_name of monitor1 should be         |
+|              | services that will use the messaging queue(current nova,     |
+|              | neutron, cinder ,heat and ceilometer are using RabbitMQ)     |
+|              | , and the process-name of monitor2 should be "rabbitmq",     |
+|              | for example:                                                 |
+|              |                                                              |
+|              | e.g.                                                         |
+|              | monitor1-1:                                                  |
+|              | -monitor_type: "openstack-cmd"                               |
+|              | -command_name: "openstack image list"                        |
+|              | monitor1-2:                                                  |
+|              | -monitor_type: "openstack-cmd"                               |
+|              | -command_name: "openstack network list"                      |
+|              | monitor1-3:                                                  |
+|              | -monitor_type: "openstack-cmd"                               |
+|              | -command_name: "openstack volume list"                       |
+|              | monitor2:                                                    |
+|              | -monitor_type: "process"                                     |
+|              | -process_name: "rabbitmq"                                    |
+|              | -host: node1                                                 |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|metrics       | In this test case, there are two metrics:                    |
+|              | 1)service_outage_time: which indicates the maximum outage    |
+|              | time (seconds) of the specified Openstack command request.   |
+|              | 2)process_recover_time: which indicates the maximum time     |
+|              | (seconds) from the process being killed to recovered         |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|test tool     | Developed by the project. Please see folder:                 |
+|              | "yardstick/benchmark/scenarios/availability/ha_tools"        |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|references    | ETSI NFV REL001                                              |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|configuration | This test case needs two configuration files:                |
+|              | 1) test case file:opnfv_yardstick_tc056.yaml                 |
+|              | -Attackers: see above "attackers" description                |
+|              | -waiting_time: which is the time (seconds) from the process  |
+|              | being killed to stopping the monitors                        |
+|              | -Monitors: see above "monitors" description                  |
+|              | -SLA: see above "metrics" description                        |
+|              |                                                              |
+|              | 2)POD file: pod.yaml                                         |
+|              | The POD configuration should record on pod.yaml first.       |
+|              | the "host" item in this test case will use the node name in  |
+|              | the pod.yaml.                                                |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|test sequence | description and expected result                              |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 1        | start monitors:                                              |
+|              | each monitor will run as an independent process              |
+|              |                                                              |
+|              | Result: The monitor info will be collected.                  |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 2        | do attacker: connect the host through SSH, and then execute  |
+|              | the kill process script with param value specified by        |
+|              | "process_name"                                               |
+|              |                                                              |
+|              | Result: Process will be killed.                              |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 3        | stop monitors after a period of time specified by            |
+|              | "waiting_time"                                               |
+|              |                                                              |
+|              | Result: The monitor info will be aggregated.                 |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 4        | verify the SLA                                               |
+|              |                                                              |
+|              | Result: The test case is passed or not.                      |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|post-action   | It is the action when the test cases exit. It will check     |
+|              | the status of the specified process on the host, and restart |
+|              | the process if it is not running for next test cases.        |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|test verdict  | Fails only if SLA is not passed, or if there is a test case  |
+|              | execution problem.                                           |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
diff --git a/docs/testing/user/userguide/opnfv_yardstick_tc057.rst b/docs/testing/user/userguide/opnfv_yardstick_tc057.rst
new file mode 100644 (file)
index 0000000..2a4ce40
--- /dev/null
@@ -0,0 +1,165 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International
+.. License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) OPNFV, Yin Kanglin and others.
+.. 14_ykl@tongji.edu.cn
+
+*************************************
+Yardstick Test Case Description TC057
+*************************************
+
++-----------------------------------------------------------------------------+
+|OpenStack Controller Cluster Management Service High Availability            |
++==============+==============================================================+
+|test case id  | OPNFV_YARDSTICK_TC057: Controller Cluster Management Service |
++--------------+--------------------------------------------------------------+
+|test purpose  | This test case will verify the quorum configuration of the   |
+|              | cluster manager(pacemaker) on controller nodes. When a       |
+|              | controller node , which holds all active application         |
+|              | resources, failed to communicate with other cluster nodes    |
+|              | (via corosync), the test case will check whether the standby |
+|              | application resources will take place of those active        |
+|              | application resources which should be regarded to be down in |
+|              | the cluster manager.                                         |
++--------------+--------------------------------------------------------------+
+|test method   | This test case kills the processes of cluster messaging      |
+|              | service(corosync) on a selected controller node(the node     |
+|              | holds the active application resources), then checks whether |
+|              | active application resources are switched to other           |
+|              | controller nodes and whether the Openstack commands are OK.  |
++--------------+--------------------------------------------------------------+
+|attackers     | In this test case, an attacker called "kill-process" is      |
+|              | needed. This attacker includes three parameters:             |
+|              | 1) fault_type: which is used for finding the attacker's      |
+|              | scripts. It should be always set to "kill-process" in this   |
+|              | test case.                                                   |
+|              | 2) process_name: which is the process name of the cluster    |
+|              | messaging service. If there are multiple processes use the   |
+|              | same name on the host, all of them are killed by this        |
+|              | attacker.                                                    |
+|              | 3) host: which is the name of a control node being attacked. |
+|              |                                                              |
+|              | In this case, this process name should be set to "corosync", |
+|              | for example                                                  |
+|              | -fault_type: "kill-process"                                  |
+|              | -process_name: "corosync"                                    |
+|              | -host: node1                                                 |
++--------------+--------------------------------------------------------------+
+|monitors      | In this test case, a kind of monitor is needed:              |
+|              | 1. the "openstack-cmd" monitor constantly request a specific |
+|              |    Openstack command, which needs two parameters:            |
+|              | 1) monitor_type: which is used for finding the monitor class |
+|              | and related scripts. It should be always set to              |
+|              | "openstack-cmd" for this monitor.                            |
+|              | 2) command_name: which is the command name used for request  |
+|              |                                                              |
+|              | In this case, the command_name of monitor1 should be services|
+|              | that are managed by the cluster manager. (Since rabbitmq and |
+|              | haproxy are managed by pacemaker, most Openstack Services    |
+|              | can be used to check high availability in this case)         |
+|              |                                                              |
+|              | (e.g.)                                                       |
+|              | monitor1:                                                    |
+|              | -monitor_type: "openstack-cmd"                               |
+|              | -command_name: "nova image-list"                             |
+|              | monitor2:                                                    |
+|              | -monitor_type: "openstack-cmd"                               |
+|              | -command_name: "neutron router-list"                         |
+|              | monitor3:                                                    |
+|              | -monitor_type: "openstack-cmd"                               |
+|              | -command_name: "heat stack-list"                             |
+|              | monitor4:                                                    |
+|              | -monitor_type: "openstack-cmd"                               |
+|              | -command_name: "cinder list"                                 |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|checkers      | In this test case, a checker is needed, the checker will     |
+|              | check the status of application resources in pacemaker and   |
+|              | the checker has three parameters:                            |
+|              | 1) checker_type: which is used for finding the result        |
+|              | checker class and related scripts. In this case the checker  |
+|              | type will be "pacemaker-check-resource"                      |
+|              | 2) resource_name: the application resource name              |
+|              | 3) resource_status: the expected status of the resource      |
+|              | 4) expectedValue: the expected value for the output of the   |
+|              | checker script, in the case the expected value will be the   |
+|              | identifier in the cluster manager                            |
+|              | 5) condition: whether the expected value is in the output of |
+|              | checker script or is totally same with the output.           |
+|              | (note: pcs is required to be installed on controller node in |
+|              | order to run this checker)                                   |
+|              |                                                              |
+|              | (e.g.)                                                       |
+|              | checker1:                                                    |
+|              | -checker_type: "pacemaker-check-resource"                    |
+|              | -resource_name: "p_rabbitmq-server"                          |
+|              | -resource_status: "Stopped"                                  |
+|              | -expectedValue: "node-1"                                     |
+|              | -condition: "in"                                             |
+|              | checker2:                                                    |
+|              | -checker_type: "pacemaker-check-resource"                    |
+|              | -resource_name: "p_rabbitmq-server"                          |
+|              | -resource_status: "Master"                                   |
+|              | -expectedValue: "node-2"                                     |
+|              | -condition: "in"                                             |
++--------------+--------------------------------------------------------------+
+|metrics       | In this test case, there are two metrics:                    |
+|              | 1)service_outage_time: which indicates the maximum outage    |
+|              | time (seconds) of the specified Openstack command request.   |
++--------------+--------------------------------------------------------------+
+|test tool     | None. Self-developed.                                        |
++--------------+--------------------------------------------------------------+
+|references    | ETSI NFV REL001                                              |
++--------------+--------------------------------------------------------------+
+|configuration | This test case needs two configuration files:                |
+|              | 1) test case file: opnfv_yardstick_tc057.yaml                |
+|              | -Attackers: see above "attackers" description                |
+|              | -Monitors: see above "monitors" description                  |
+|              | -Checkers: see above "checkers" description                  |
+|              | -Steps: the test case execution step, see "test sequence"    |
+|              | description below                                            |
+|              |                                                              |
+|              | 2)POD file: pod.yaml                                         |
+|              | The POD configuration should record on pod.yaml first.       |
+|              | the "host" item in this test case will use the node name in  |
+|              | the pod.yaml.                                                |
++--------------+------+----------------------------------+--------------------+
+|test sequence | description and expected result                              |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 1        | start monitors:                                              |
+|              | each monitor will run as an independent process              |
+|              |                                                              |
+|              | Result: The monitor info will be collected.                  |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 2        | do attacker: connect the host through SSH, and then execute  |
+|              | the kill process script with param value specified by        |
+|              | "process_name"                                               |
+|              |                                                              |
+|              | Result: Process will be killed.                              |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 3        | do checker: check whether the status of application          |
+|              | resources on different nodes are updated                     |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 4        | stop monitors after a period of time specified by            |
+|              | "waiting_time"                                               |
+|              |                                                              |
+|              | Result: The monitor info will be aggregated.                 |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 5        | verify the SLA                                               |
+|              |                                                              |
+|              | Result: The test case is passed or not.                      |
+|              |                                                              |
++--------------+------+----------------------------------+--------------------+
+|post-action   | It is the action when the test cases exit. It will check the |
+|              | status of the cluster messaging process(corosync) on the     |
+|              | host, and restart the process if it is not running for next  |
+|              | test cases                                                   |
++--------------+------+----------------------------------+--------------------+
+|test verdict  | Fails only if SLA is not passed, or if there is a test case  |
+|              | execution problem.                                           |
++--------------+--------------------------------------------------------------+
diff --git a/docs/testing/user/userguide/opnfv_yardstick_tc058.rst b/docs/testing/user/userguide/opnfv_yardstick_tc058.rst
new file mode 100644 (file)
index 0000000..fb9a4c2
--- /dev/null
@@ -0,0 +1,148 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International
+.. License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) OPNFV, Yin Kanglin and others.
+.. 14_ykl@tongji.edu.cn
+
+*************************************
+Yardstick Test Case Description TC058
+*************************************
+
++-----------------------------------------------------------------------------+
+|OpenStack Controller Virtual Router Service High Availability                |
++==============+==============================================================+
+|test case id  | OPNFV_YARDSTICK_TC058:OpenStack Controller Virtual Router    |
+|              | Service High Availability                                    |
++--------------+--------------------------------------------------------------+
+|test purpose  | This test case will verify the high availability of virtual  |
+|              | routers(L3 agent) on controller node. When a virtual router  |
+|              | service on a specified controller node is shut down, this    |
+|              | test case will check whether the network of virtual machines |
+|              | will be affected, and whether the attacked virtual router    |
+|              | service will be recovered.                                   |
++--------------+--------------------------------------------------------------+
+|test method   | This test case kills the processes of virtual router service |
+|              | (l3-agent) on a selected controller node(the node holds the  |
+|              | active l3-agent), then checks whether the network routing    |
+|              | of virtual machines is OK and whether the killed service     |
+|              | will be recovered.                                           |
++--------------+--------------------------------------------------------------+
+|attackers     | In this test case, an attacker called "kill-process" is      |
+|              | needed. This attacker includes three parameters:             |
+|              | 1) fault_type: which is used for finding the attacker's      |
+|              | scripts. It should be always set to "kill-process" in this   |
+|              | test case.                                                   |
+|              | 2) process_name: which is the process name of the load       |
+|              | balance service. If there are multiple processes using the   |
+|              | same name on the host, all of them are killed by this        |
+|              | attacker.                                                    |
+|              | 3) host: which is the name of a control node being attacked. |
+|              |                                                              |
+|              | In this case, this process name should be set to "l3agent",  |
+|              | for example                                                  |
+|              | -fault_type: "kill-process"                                  |
+|              | -process_name: "l3agent"                                     |
+|              | -host: node1                                                 |
++--------------+--------------------------------------------------------------+
+|monitors      | In this test case, two kinds of monitor are needed:          |
+|              | 1. the "ip_status" monitor that pings a specific ip to check |
+|              | the connectivity of this ip, which needs two parameters:     |
+|              | 1) monitor_type: which is used for finding the monitor class |
+|              | and related scripts. It should be always set to "ip_status"  |
+|              | for this monitor.                                            |
+|              | 2) ip_address: The ip to be pinged. In this case, ip_address |
+|              | will be either an ip address of external network or an ip    |
+|              | address of a virtual machine.                                |
+|              | 3) host: The node on which ping will be executed, in this    |
+|              | case the host will be a virtual machine.                     |
+|              |                                                              |
+|              | 2. the "process" monitor check whether a process is running  |
+|              | on a specific node, which needs three parameters:            |
+|              | 1) monitor_type: which is used for finding the monitor class |
+|              | and related scripts. It should be always set to "process"    |
+|              | for this monitor.                                            |
+|              | 2) process_name: which is the process name for monitor. In   |
+|              | this case, the process-name of monitor2 should be "l3agent"  |
+|              | 3) host: which is the name of the node running the process   |
+|              |                                                              |
+|              | e.g.                                                         |
+|              | monitor1-1:                                                  |
+|              | -monitor_type: "ip_status"                                   |
+|              | -host: 172.16.0.11                                           |
+|              | -ip_address: 172.16.1.11                                     |
+|              | monitor1-2:                                                  |
+|              | -monitor_type: "ip_status"                                   |
+|              | -host: 172.16.0.11                                           |
+|              | -ip_address: 8.8.8.8                                         |
+|              | monitor2:                                                    |
+|              | -monitor_type: "process"                                     |
+|              | -process_name: "l3agent"                                     |
+|              | -host: node1                                                 |
++--------------+--------------------------------------------------------------+
+|metrics       | In this test case, there are two metrics:                    |
+|              | 1)service_outage_time: which indicates the maximum outage    |
+|              | time (seconds) of the specified Openstack command request.   |
+|              | 2)process_recover_time: which indicates the maximum time     |
+|              | (seconds) from the process being killed to recovered         |
++--------------+--------------------------------------------------------------+
+|test tool     | None. Self-developed.                                        |
++--------------+--------------------------------------------------------------+
+|references    | ETSI NFV REL001                                              |
++--------------+--------------------------------------------------------------+
+|configuration | This test case needs two configuration files:                |
+|              | 1) test case file: opnfv_yardstick_tc058.yaml                |
+|              | -Attackers: see above "attackers" description                |
+|              | -Monitors: see above "monitors" description                  |
+|              | -Steps: the test case execution step, see "test sequence"    |
+|              | description below                                            |
+|              |                                                              |
+|              | 2)POD file: pod.yaml                                         |
+|              | The POD configuration should record on pod.yaml first.       |
+|              | the "host" item in this test case will use the node name in  |
+|              | the pod.yaml.                                                |
++--------------+--------------------------------------------------------------+
+|test sequence | description and expected result                              |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|pre-test      | The test case image needs to be installed into Glance        |
+|conditions    | with cachestat included in the image.                        |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 1        | Two host VMs are booted, these two hosts are in two different|
+|              | networks, the networks are connected by a virtual router     |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 2        | start monitors:                                              |
+|              | each monitor will run in an independent process              |
+|              |                                                              |
+|              | Result: The monitor info will be collected.                  |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 3        | do attacker: connect the host through SSH, and then execute  |
+|              | the kill process script with param value specified by        |
+|              | "process_name"                                               |
+|              |                                                              |
+|              | Result: Process will be killed.                              |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 4        | stop monitors after a period of time specified by            |
+|              | "waiting_time"                                               |
+|              |                                                              |
+|              | Result: The monitor info will be aggregated.                 |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 5        | verify the SLA                                               |
+|              |                                                              |
+|              | Result: The test case is passed or not.                      |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|post-action   | It is the action when the test cases exit. It will check     |
+|              | the status of the specified process on the host, and restart |
+|              | the process if it is not running for next test cases.        |
+|              | Virtual machines and network created in the test case will   |
+|              | be destroyed.                                                |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|test verdict  | Fails only if SLA is not passed, or if there is a test case  |
+|              | execution problem.                                           |
++--------------+--------------------------------------------------------------+
index 227aded..5675cc3 100644 (file)
@@ -31,3 +31,4 @@ password = root
 [nsb]
 trex_path=/opt/nsb_bin/trex/scripts
 bin_path=/opt/nsb_bin
+trex_client_lib=/opt/nsb_bin/trex_client/stl
index f6c9159..d7a7edf 100644 (file)
 'use strict';
 
 angular.module('yardStickGui2App')
-    .controller('ImageController', ['$scope', '$state', '$stateParams', 'mainFactory', 'Upload', 'toaster', '$location', '$interval',
-        function($scope, $state, $stateParams, mainFactory, Upload, toaster, $location, $interval) {
+    .controller('ImageController', ['$scope', '$state', '$stateParams', 'mainFactory', 'Upload', 'toaster', '$location', '$interval', 'ngDialog',
+        function($scope, $state, $stateParams, mainFactory, Upload, toaster, $location, $interval, ngDialog) {
 
 
             init();
-            $scope.showloading = false;
-            $scope.ifshowStatus = 0;
 
             function init() {
+                $scope.showloading = false;
+                $scope.ifshowStatus = 0;
+
+                $scope.yardstickImage = [
+                    {
+                        'name': 'yardstick-image',
+                        'description': '',
+                        'size': 'N/A',
+                        'status': 'N/A',
+                        'time': 'N/A'
+                    },
+                    {
+                        'name': 'Ubuntu-16.04',
+                        'description': '',
+                        'size': 'N/A',
+                        'status': 'N/A',
+                        'time': 'N/A'
+                    },
+                    {
+                        'name': 'cirros-0.3.5',
+                        'description': '',
+                        'size': 'N/A',
+                        'status': 'N/A',
+                        'time': 'N/A'
+                    }
+                ];
+                $scope.customImage = [];
 
 
                 $scope.uuid = $stateParams.uuid;
-                $scope.uploadImage = uploadImage;
-                getItemIdDetail();
-                getImageListSimple();
+                $scope.showloading = false;
+                $scope.url = null;
+                $scope.environmentInfo = null;
+
+                getYardstickImageList();
+                getCustomImageList(function(image, image_id){});
             }
 
-            function getItemIdDetail() {
+            function getYardstickImageList(){
+                mainFactory.ImageList().get({}).$promise.then(function(response){
+                    if(response.status == 1){
+                        angular.forEach($scope.yardstickImage, function(ele, index){
+                            if(typeof(response.result.images[ele.name]) != 'undefined'){
+                                $scope.yardstickImage[index] = response.result.images[ele.name];
+                            }
+                        });
+                    }else{
+                        mainFactory.errorHandler1(response);
+                    }
+                }, function(response){
+                    mainFactory.errorHandler2(response);
+                });
+            }
+
+            function getCustomImageList(func){
                 mainFactory.ItemDetail().get({
                     'envId': $stateParams.uuid
                 }).$promise.then(function(response) {
-                    if (response.status == 1) {
-                        $scope.baseElementInfo = response.result.environment;
-
-
-                    } else {
-                        toaster.pop({
-                            type: 'error',
-                            title: 'fail',
-                            body: response.error_msg,
-                            timeout: 3000
+                    if(response.status == 1){
+                        $scope.environmentInfo = response.result.environment;
+                        $scope.customImage = [];
+                        angular.forEach(response.result.environment.image_id, function(ele){
+                            mainFactory.getImage().get({'imageId': ele}).$promise.then(function(responseData){
+                                if(responseData.status == 1){
+                                    $scope.customImage.push(responseData.result.image);
+                                    func(responseData.result.image, ele);
+                                }else{
+                                    mainFactory.errorHandler1(responseData);
+                                }
+                            }, function(errorData){
+                                mainFactory.errorHandler2(errorData);
+                            });
                         });
+                    }else{
+                        mainFactory.errorHandler1(response);
                     }
-                }, function(error) {
-                    toaster.pop({
-                        type: 'error',
-                        title: 'fail',
-                        body: 'unknow error',
-                        timeout: 3000
-                    });
-                })
+                }, function(response){
+                    mainFactory.errorHandler2(response);
+                });
             }
 
-            function getImageListSimple() {
-
-                mainFactory.ImageList().get({}).$promise.then(function(response) {
-                    if (response.status == 1) {
-                        $scope.imageListData = response.result.images;
-                        // $scope.imageStatus = response.result.status;
-
-                    } else {
-                        toaster.pop({
-                            type: 'error',
-                            title: 'get data failed',
-                            body: 'please retry',
-                            timeout: 3000
-                        });
-                    }
-                }, function(error) {
-                    toaster.pop({
-                        type: 'error',
-                        title: 'get data failed',
-                        body: 'please retry',
-                        timeout: 3000
+            $scope.loadYardstickImage = function(image_name){
+
+                var updateImageTask = $interval(updateYardstickImage, 10000);
+
+                function updateYardstickImage(){
+                    mainFactory.ImageList().get({}).$promise.then(function(responseData){
+                        if(responseData.status == 1){
+                            if(typeof(responseData.result.images[image_name]) != 'undefined' && responseData.result.images[image_name].status == 'ACTIVE'){
+                                angular.forEach($scope.yardstickImage, function(ele, index){
+                                    if(ele.name == image_name){
+                                        $scope.yardstickImage[index] = responseData.result.images[ele.name];
+                                    }
+                                });
+                                $interval.cancel(updateImageTask);
+                            }
+                        }else{
+                            mainFactory.errorHandler1(responseData);
+                        }
+                    },function(errorData){
+                        mainFactory.errorHandler2(errorData);
                     });
-                })
-            }
+                }
 
+                mainFactory.uploadImage().post({'action': 'load_image', 'args': {'name': image_name}}).$promise.then(function(response){
+                },function(response){
+                    mainFactory.errorHandler2(response);
+                });
+            }
 
-            function getImageList() {
-                if ($scope.intervalImgae != undefined) {
-                    $interval.cancel($scope.intervalImgae);
-                }
-                mainFactory.ImageList().get({}).$promise.then(function(response) {
-                    if (response.status == 1) {
-                        $scope.imageListData = response.result.images;
-                        $scope.imageStatus = response.result.status;
-
-                        if ($scope.imageStatus == 0) {
-                            $scope.intervalImgae = $interval(function() {
-                                getImageList();
-                            }, 5000);
-                        } else if ($scope.intervalImgae != undefined) {
-                            $interval.cancel($scope.intervalImgae);
+            $scope.deleteYardstickImage = function(image_name){
+
+                var updateImageTask = $interval(updateYardstickImage, 10000);
+
+                function updateYardstickImage(){
+                    mainFactory.ImageList().get({}).$promise.then(function(response){
+                        if(response.status == 1){
+                            if(typeof(response.result.images[image_name]) == 'undefined'){
+                                angular.forEach($scope.yardstickImage, function(ele, index){
+                                    if(ele.name == image_name){
+                                        $scope.yardstickImage[index].size = 'N/A';
+                                        $scope.yardstickImage[index].status = 'N/A';
+                                        $scope.yardstickImage[index].time = 'N/A';
+                                    }
+                                });
+                                $interval.cancel(updateImageTask);
+                            }
+                        }else{
+                            mainFactory.errorHandler1(response);
                         }
+                    },function(response){
+                        mainFactory.errorHandler2(response);
+                    });
+                }
 
-                    } else {
-                        toaster.pop({
-                            type: 'error',
-                            title: 'get data failed',
-                            body: 'please retry',
-                            timeout: 3000
+                mainFactory.uploadImage().post({'action': 'delete_image', 'args': {'name': image_name}}).$promise.then(function(response){
+                },function(response){
+                    mainFactory.errorHandler2(response);
+                });
+            }
+
+            $scope.uploadCustomImageByUrl = function(url){
+                mainFactory.uploadImageByUrl().post({
+                    'action': 'upload_image_by_url',
+                    'args': {
+                        'environment_id': $stateParams.uuid,
+                        'url': url
+                    }
+                }).$promise.then(function(response){
+                    if(response.status == 1){
+                        var updateImageTask = $interval(getCustomImageList, 30000, 10, true, function(image, image_id){
+                            if(image_id == response.result.uuid && image.status == 'ACTIVE'){
+                                $interval.cancel(updateImageTask);
+                            }
                         });
+                        ngDialog.close();
+                    }else{
+                        mainFactory.errorHandler1(response);
                     }
-                }, function(error) {
-                    toaster.pop({
-                        type: 'error',
-                        title: 'get data failed',
-                        body: 'please retry',
-                        timeout: 3000
-                    });
-                })
+                }, function(response){
+                    mainFactory.errorHandler2(response);
+                });
             }
 
-            function uploadImage() {
-                $scope.imageStatus = 0;
-                $interval.cancel($scope.intervalImgae);
-                $scope.ifshowStatus = 1;
+            $scope.uploadCustomImage = function($file, $invalidFiles) {
                 $scope.showloading = true;
-                mainFactory.uploadImage().post({
-                    'action': 'load_image',
-                    'args': {
-                        'environment_id': $scope.uuid
 
-                    }
-                }).$promise.then(function(response) {
+                $scope.displayImageFile = $file;
+                Upload.upload({
+                    url: Base_URL + '/api/v2/yardstick/images',
+                    data: { file: $file, 'environment_id': $scope.uuid, 'action': 'upload_image' }
+                }).then(function(response) {
+
                     $scope.showloading = false;
-                    if (response.status == 1) {
+                    if (response.data.status == 1) {
+
                         toaster.pop({
                             type: 'success',
-                            title: 'create success',
+                            title: 'upload success',
                             body: 'you can go next step',
                             timeout: 3000
                         });
-                        setTimeout(function() {
-                            getImageList();
-                        }, 10000);
 
-                    } else {
-                        toaster.pop({
-                            type: 'error',
-                            title: 'failed',
-                            body: 'something wrong',
-                            timeout: 3000
+                        var updateImageTask = $interval(getCustomImageList, 10000, 10, true, function(image, image_id){
+                            if(image_id == response.data.result.uuid && image.status == 'ACTIVE'){
+                                $interval.cancel(updateImageTask);
+                            }
                         });
+                    }else{
+                        mainFactory.errorHandler1(response);
+                    }
 
+                }, function(response) {
+                    $scope.uploadfile = null;
+                    mainFactory.errorHandler2(response);
+                })
+            }
+
+            $scope.deleteCustomImage = function(image_id){
+                mainFactory.deleteImage().delete({'imageId': image_id}).$promise.then(function(response){
+                    if(response.status == 1){
+                        $interval(getCustomImageList, 10000, 5, true, function(image, image_id){
+                        });
+                    }else{
+                        mainFactory.errorHandler2(response);
                     }
-                }, function(error) {
-                    toaster.pop({
-                        type: 'error',
-                        title: 'failed',
-                        body: 'something wrong',
-                        timeout: 3000
-                    });
+                }, function(response){
+                    mainFactory.errorHandler2(response);
+                });
+            }
+
+            $scope.openImageDialog = function(){
+                $scope.url = null;
+                ngDialog.open({
+                    preCloseCallback: function(value) {
+                    },
+                    template: 'views/modal/imageDialog.html',
+                    scope: $scope,
+                    className: 'ngdialog-theme-default',
+                    width: 950,
+                    showClose: true,
+                    closeByDocument: false
                 })
             }
 
@@ -158,9 +243,5 @@ angular.module('yardStickGui2App')
                 $state.go('app.podUpload', { uuid: $scope.uuid });
             }
 
-
-
-
-
         }
     ]);
index ab76bf0..ceec83f 100644 (file)
@@ -15,7 +15,7 @@ angular.module('yardStickGui2App')
             $scope.showImage = null;
             $scope.showContainer = null;
             $scope.showNextOpenRc = null;
-            $scope.showNextPod = null;
+            $scope.showNextPod = 1;
             $scope.displayContainerInfo = [];
             $scope.containerList = [{ value: 'create_influxdb', name: "InfluxDB" }, { value: 'create_grafana', name: "Grafana" }]
 
@@ -51,7 +51,6 @@ angular.module('yardStickGui2App')
                 $scope.chooseResult = chooseResult;
 
                 getEnvironmentList();
-                // getImageList();
 
             }
 
@@ -85,7 +84,7 @@ angular.module('yardStickGui2App')
             }
 
             $scope.goToImage = function goToImage() {
-                getImageListSimple();
+                getImageList();
                 $scope.showImage = 1;
             }
             $scope.goToPod = function goToPod() {
@@ -290,7 +289,7 @@ angular.module('yardStickGui2App')
                 $scope.showImage = null;
                 $scope.showContainer = null;
                 $scope.showNextOpenRc = null;
-                $scope.showNextPod = null;
+                $scope.showNextPod = 1;
                 $scope.displayContainerInfo = [];
 
                 $scope.displayPodFile = null;
@@ -308,7 +307,6 @@ angular.module('yardStickGui2App')
                 ngDialog.open({
                     preCloseCallback: function(value) {
                         getEnvironmentList();
-                        // getImageList();
                     },
                     template: 'views/modal/environmentDialog.html',
                     scope: $scope,
@@ -479,106 +477,97 @@ angular.module('yardStickGui2App')
                 })
             }
 
-            $scope.uploadImage = function uploadImage() {
-                $scope.imageStatus = 0;
-                $scope.showImageStatus = 1;
-                $scope.showloading = true;
-                mainFactory.uploadImage().post({
-                    'action': 'load_image',
-                    'args': {
-                        'environment_id': $scope.uuid
+            $scope.yardstickImage = {
+                'yardstick-image': {
+                    'name': 'yardstick-image',
+                    'description': '',
+                    'status': 'N/A'
+                },
+                'Ubuntu-16.04': {
+                    'name': 'Ubuntu-16.04',
+                    'description': '',
+                    'status': 'N/A'
+                },
+                'cirros-0.3.5': {
+                    'name': 'cirros-0.3.5',
+                    'description': '',
+                    'status': 'N/A'
+                }
+            };
 
-                    }
-                }).$promise.then(function(response) {
-                    $scope.showloading = false;
-                    if (response.status == 1) {
-                        toaster.pop({
-                            type: 'success',
-                            title: 'create success',
-                            body: 'you can go next step',
-                            timeout: 3000
-                        });
-                        setTimeout(function() {
-                            getImageList();
-                        }, 10000);
-                        $scope.showNextPod = 1;
+            $scope.selectImageList = [];
 
-                    } else {
-                        toaster.pop({
-                            type: 'error',
-                            title: 'failed',
-                            body: 'something wrong',
-                            timeout: 3000
-                        });
+            $scope.selectImage = function(name){
+                $scope.selectImageList.push(name);
+            }
 
-                    }
-                }, function(error) {
-                    toaster.pop({
-                        type: 'error',
-                        title: 'failed',
-                        body: 'something wrong',
-                        timeout: 3000
-                    });
-                })
+            $scope.unselectImage = function(name){
+                var index = $scope.selectImageList.indexOf(name);
+                $scope.selectImageList.splice(index, 1);
             }
 
-            function getImageList() {
-                if ($scope.intervalImgae != undefined) {
-                    $interval.cancel($scope.intervalImgae);
-                }
-                mainFactory.ImageList().get({}).$promise.then(function(response) {
-                    if (response.status == 1) {
-                        $scope.imageListData = response.result.images;
-                        $scope.imageStatus = response.result.status;
+            $scope.uploadImage = function() {
+                $scope.imageStatus = 0;
+                $scope.showImageStatus = 1;
+                $scope.showloading = true;
 
-                        if ($scope.imageStatus == 0) {
-                            $scope.intervalImgae = $interval(function() {
-                                getImageList();
-                            }, 5000);
-                        } else if ($scope.intervalImgae != undefined) {
-                            $interval.cancel($scope.intervalImgae);
+                var updateImageTask = $interval(function(){
+                    mainFactory.ImageList().get({}).$promise.then(function(response){
+                        if(response.status == 1){
+                            var isOk = true;
+                            angular.forEach($scope.selectImageList, function(ele){
+                                if(typeof(response.result.images[ele]) != 'undefined' && response.result.images[ele].status == 'ACTIVE'){
+                                    $scope.yardstickImage[ele] = response.result.images[ele];
+                                }else{
+                                    isOk = false;
+                                }
+                            });
+                            if(isOk){
+                                $interval.cancel(updateImageTask);
+                                $scope.imageStatus = 1;
+                            }
+                        }else{
+                            mainFactory.errorHandler1(response);
                         }
-
-                    } else {
-                        toaster.pop({
-                            type: 'error',
-                            title: 'get data failed',
-                            body: 'please retry',
-                            timeout: 3000
-                        });
-                    }
-                }, function(error) {
-                    toaster.pop({
-                        type: 'error',
-                        title: 'get data failed',
-                        body: 'please retry',
-                        timeout: 3000
+                    }, function(response){
+                        mainFactory.errorHandler2(response);
                     });
-                })
+                }, 10000);
+
+                angular.forEach($scope.selectImageList, function(ele){
+                    mainFactory.uploadImage().post({
+                        'action': 'load_image',
+                        'args': {
+                            'name': ele
+                        }
+                    }).$promise.then(function(response) {
+                        if(response.status == 1){
+                            $scope.showloading = false;
+                            $scope.showNextPod = 1;
+                        }else{
+                            mainFactory.errorHandler1(response);
+                        }
+                    }, function(response) {
+                        mainFactory.errorHandler2(response);
+                    })
+                });
             }
 
-            function getImageListSimple() {
+            function getImageList() {
 
                 mainFactory.ImageList().get({}).$promise.then(function(response) {
                     if (response.status == 1) {
-                        $scope.imageListData = response.result.images;
-                        $scope.imageStatus = response.result.status;
-
-                    } else {
-                        toaster.pop({
-                            type: 'error',
-                            title: 'get data failed',
-                            body: 'please retry',
-                            timeout: 3000
+                        angular.forEach($scope.yardstickImage, function(value, key){
+                            if(typeof(response.result.images[key]) != 'undefined'){
+                                $scope.yardstickImage[key] = response.result.images[key];
+                            }
                         });
+                        $scope.imageStatus = response.result.status;
+                    }else{
+                        mainFactory.errorHandler1(response);
                     }
-                }, function(error) {
-                    toaster.pop({
-                        type: 'error',
-                        title: 'get data failed',
-                        body: 'please retry',
-                        timeout: 3000
-                    });
+                }, function(response) {
+                    mainFactory.errorHandler2(response);
                 })
             }
 
index 843f66c..e846804 100644 (file)
@@ -672,7 +672,7 @@ angular.module('yardStickGui2App')
             }
 
             $scope.gotoLog = function gotoLog(task_id) {
-                $state.go('app2.taskLog', { taskId: task_id });
+                $state.go('app.taskLog', { taskId: task_id });
             }
         }
     ]);
index 44fbeb3..7637a9f 100644 (file)
@@ -9,7 +9,7 @@ var Base_URL;
 var Grafana_URL;
 
 angular.module('yardStickGui2App')
-    .factory('mainFactory', ['$resource','$rootScope','$http', '$location',function($resource, $rootScope,$http,$location) {
+    .factory('mainFactory', ['$resource','$rootScope','$http', '$location', 'toaster',function($resource, $rootScope ,$http ,$location, toaster) {
 
         Base_URL = 'http://' + $location.host() + ':' + $location.port();
         Grafana_URL = 'http://' + $location.host();
@@ -86,6 +86,20 @@ angular.module('yardStickGui2App')
                     }
                 })
             },
+            getImage: function(){
+                return $resource(Base_URL + '/api/v2/yardstick/images/:imageId', {imageId: "@imageId"}, {
+                    'get': {
+                        method: 'GET'
+                    }
+                })
+            },
+            deleteImage: function() {
+                return $resource(Base_URL + '/api/v2/yardstick/images/:imageId', { imageId: '@imageId' }, {
+                    'delete': {
+                        method: 'DELETE'
+                    }
+                })
+            },
             uploadImage: function() {
                 return $resource(Base_URL + '/api/v2/yardstick/images', {}, {
                     'post': {
@@ -93,6 +107,13 @@ angular.module('yardStickGui2App')
                     }
                 })
             },
+            uploadImageByUrl: function() {
+                return $resource(Base_URL + '/api/v2/yardstick/images', {}, {
+                    'post': {
+                        method: 'POST'
+                    }
+                })
+            },
             getPodDetail: function() {
                 return $resource(Base_URL + '/api/v2/yardstick/pods/:podId', { podId: "@podId" }, {
                     'get': {
@@ -249,6 +270,22 @@ angular.module('yardStickGui2App')
                         method: 'DELETE'
                     }
                 })
+            },
+            errorHandler1: function(response){
+                toaster.pop({
+                    'type': 'error',
+                    'title': 'error',
+                    'body': response.result,
+                    'showCloseButton': true
+                });
+            },
+            errorHandler2: function(response){
+                toaster.pop({
+                    'type': 'error',
+                    'title': response.status,
+                    'body': response.statusText,
+                    'showCloseButton': true
+                });
             }
 
         };
index 389de83..4c539fc 100644 (file)
                     <table class="table table-striped">
 
                         <tr>
+                            <th>choose</th>
                             <th>name</th>
-                            <th>size</th>
+                            <th>description</th>
                             <th>status</th>
-                            <th>time</th>
                         </tr>
-                        <tr ng-repeat="image in imageListData">
-                            <td>{{image.name}}</td>
-                            <td>{{image.size/1024}} mb</td>
-                            <td>{{image.status}}</td>
-                            <td>{{image.time}}</td>
+                        <tr ng-repeat="(name, value) in yardstickImage">
+                            <td ng-if="selectImageList.indexOf(name) > -1"><img src="images/checkyes.png" style="height:12px;cursor:pointer" ng-click="unselectImage(name)" /></td>
+                            <td ng-if="selectImageList.indexOf(name) == -1"><img src="images/checkno.png" style="height:12px;cursor:pointer" ng-click="selectImage(name)" /></td>
+                            <td>{{name}}</td>
+                            <td>{{value.description}}</td>
+                            <td>{{value.status}}</td>
 
                         </tr>
 
diff --git a/gui/app/views/modal/imageDialog.html b/gui/app/views/modal/imageDialog.html
new file mode 100644 (file)
index 0000000..c568f2a
--- /dev/null
@@ -0,0 +1,19 @@
+<div>
+
+    <h4>Enter Remote Image Url</h4>
+    <input type="text" ng-model="url" />
+
+    <div style="text-align:center;margin-top:20px;">
+        <button class="btn btn-default" ng-disabled=" url==null || url==''" ng-click="uploadCustomImageByUrl(url)">Upload</button>
+    </div>
+
+</div>
+
+
+<style>
+    input {
+        border-radius: 10px;
+        border: 1px solid #eeeeee;
+        width: 100%;
+    }
+</style>
index 99e83ac..d6d7c0c 100644 (file)
@@ -13,7 +13,7 @@
 
             <hr/>
 
-            <button class="btn btn-default" ngf-select="uploadFiles($file, $invalidFiles)" ngf-max-size="5MB">
+            <button class="btn btn-default" ngf-select="uploadFiles($file, $invalidFiles)" ngf-max-size="1024MB">
                                     <div ng-show="!loadingOPENrc">Upload</div>
                                      <img src="images/loading2.gif" width="25" height="25" ng-if="loadingOPENrc" />
             </button>
index 17ccfdb..0c337fe 100644 (file)
@@ -4,56 +4,86 @@
     <div style="display:flex;flex-direction:row;">
         <div style="width:750px;">
 
-            <h3>{{baseElementInfo.name}} -- Image
+            <h3>{{environmentInfo.name}} -- Image
                 <button class="btn btn-default" style="float:right" ng-click="goNext()">Next</button>
             </h3>
             <!--<p>In this process, you can input your define openrc config or upload a openrc file</p>-->
 
-            <hr/>
-            <button class="btn btn-default" ng-click="uploadImage()">
-                 <div ng-if="!showloading">Load Image</div>
-                 <img src="images/loading2.gif" width="25" height="25" ng-if="showloading" />
-            </button>
-            <i class="fa fa-check" aria-hidden="true" style="margin-top:34px;margin-left:5px;color: #2ecc71;" ng-show="imageStatus==1&&ifshowStatus==1">done</i>
-            <i class="fa fa-spinner" aria-hidden="true" style="margin-top:34px;margin-left:5px;color: #2ecc71;" ng-show="imageStatus==0&&ifshowStatus==1">loading</i>
-            <i class="fa fa-exclamation-triangle" aria-hidden="true" style="margin-top:34px;margin-left:5px;color: red;" ng-show="imageStatus==2&&ifshowStatus==1">error</i>
-
             <hr>
-            <h4>Current Images</h4>
-
+            <h4>Alternative Images</h4>
             <div>
                 <table class="table table-striped">
 
                     <tr>
                         <th>name</th>
+                        <th>description</th>
                         <th>size</th>
                         <th>status</th>
                         <th>time</th>
+                        <th>action</th>
                     </tr>
-                    <tr ng-repeat="image in imageListData">
+                    <tr ng-repeat="image in yardstickImage">
                         <td>{{image.name}}</td>
-                        <td>{{image.size/1024}} MB</td>
+                        <td>{{image.description}}</td>
+                        <td>{{image.size | number:2}} MB</td>
                         <td>{{image.status}}</td>
                         <td>{{image.time}}</td>
-
+                        <td>
+                            <div class="btn-group" uib-dropdown>
+                                <button id="single-button" type="button" class="btn btn-default btn-sm" uib-dropdown-toggle>
+                                    action<span class="caret"></span>
+                                </button>
+                                <ul class="dropdown-menu" uib-dropdown-menu role="menu" aria-labelledby="single-button">
+                                    <li role="menuitem" ng-show="image.status == 'N/A'"><a ng-click="loadYardstickImage(image.name)">load</a></li>
+                                    <li role="menuitem" ng-show="image.status != 'N/A'"><a ng-click="deleteYardstickImage(image.name)">delete</a></li>
+                                </ul>
+                            </div>
+                        </td>
                     </tr>
-
-
-
                 </table>
             </div>
 
+            <hr>
+            <h4 style="display:inline">Custom Images</h4>
+            <div class="btn-group button-margin" style="float:right;margin-top:-10px;margin-bottom:5px">
+                <button class="btn btn-default" style="width:60px" ngf-select="uploadCustomImage($file, $invalidFiles)" ngf-max-size="2048MB">
+                        <div ng-show="!showloading">Local</div>
+                        <img src="images/loading2.gif" width="25" height="25" ng-if="showloading" />
+                </button>
+                <button class="btn btn-default" style="width:60px" ng-click="openImageDialog()">Url</button>
+            </div>
+            <div>
+                <table class="table table-striped">
 
-
-
-
-
-
-
+                    <tr>
+                        <th>name</th>
+                        <th>description</th>
+                        <th>size</th>
+                        <th>status</th>
+                        <th>time</th>
+                        <th>action</th>
+                    </tr>
+                    <tr ng-repeat="image in customImage">
+                        <td>{{image.name}}</td>
+                        <td>{{image.description}}</td>
+                        <td>{{image.size | number:2}} MB</td>
+                        <td>{{image.status}}</td>
+                        <td>{{image.time}}</td>
+                        <td>
+                            <div class="btn-group" uib-dropdown>
+                                <button id="single-button" type="button" class="btn btn-default btn-sm" uib-dropdown-toggle>
+                                    action<span class="caret"></span>
+                                </button>
+                                <ul class="dropdown-menu" uib-dropdown-menu role="menu" aria-labelledby="single-button">
+                                    <li role="menuitem" ><a ng-click="deleteCustomImage(image.id)">delete</a></li>
+                                </ul>
+                            </div>
+                        </td>
+                    </tr>
+                </table>
+            </div>
 
         </div>
-
-
     </div>
 
 </div>
index cc25429..c11dc10 100755 (executable)
@@ -83,7 +83,7 @@ install_trex()
 {
     TREX_DIR=$INSTALL_BIN_PATH/trex/scripts
     if [ -d "$TREX_DIR" ]; then
-        echo "Trex v2.20 already installed. Make sure it contains PYTHONPATH which is required to run TRex"
+        echo "Trex $TREX_VERSION already installed."
     else
         echo "Build TRex and installing Trex TG in $INSTALL_BIN_PATH/trex"
         rm -rf ${TREX_DOWNLOAD##*/}
@@ -99,9 +99,7 @@ install_trex()
         cd trex/scripts/ko/src/
         make
         make install
-        # workaround trex module issue
-        touch "$REPO_DIR/trex/scripts/automation/trex_control_plane/stl/__init__.py"
-        cp "$REPO_DIR/trex/scripts/dpdk_nic_bind.py" "$INSTALL_BIN_PATH"
+        ln -s $TREX_DIR/automation/trex_control_plane $INSTALL_BIN_PATH/trex_client
         popd
     fi
     echo "Done."
index cf2dbfc..85ed245 100644 (file)
@@ -22,7 +22,13 @@ scenarios:
     tg__1: trafficgen_1.yardstick
     vnf__1: vnf.yardstick
   options:
-    packetsize: 64
+    framesize:
+      private: {64B: 100}
+      public: {64B: 100}
+    flow:
+      src_ip: [{'tg__1': 'xe0'}]
+      dst_ip: [{'tg__1': 'xe1'}]
+      count: 1
     traffic_type: 4
     vnf__1:
       rules: acl_1rule.yaml
index 477bd8f..1b33773 100644 (file)
@@ -22,7 +22,13 @@ scenarios:
     tg__1: trafficgen_1.yardstick
     vnf__1: vnf.yardstick
   options:
-    packetsize: 64
+    framesize:
+      private: {64B: 100}
+      public: {64B: 100}
+    flow:
+      src_ip: [{'tg__1': 'xe0'}]
+      dst_ip: [{'tg__1': 'xe1'}]
+      count: 1
     traffic_type: 4
     rfc2544:
       allowed_drop_rate: 0.0001 - 0.0001
@@ -33,9 +39,6 @@ scenarios:
     type: Iteration
     iterations: 10
     interval: 35
-  traffic_options:
-    flow: "../../traffic_profiles/ipv4_1flow_Packets.yaml"
-    imix: "../../traffic_profiles/imix_voice.yaml"
   ixia_profile: ../../traffic_profiles/acl/acl_ipv4_profile_1flows.ixncfg
 context:
   type: Node
index 6957681..9a16466 100644 (file)
@@ -21,20 +21,24 @@ scenarios:
   nodes:
     tg__1: trafficgen_1.yardstick
     vnf__1: vnf.yardstick
-  tc_options:
+  options:
+    framesize:
+      private: {64B: 100}
+      public: {64B: 100}
+    flow:
+      src_ip: [{'tg__1': 'xe0'}]
+      dst_ip: [{'tg__1': 'xe1'}]
+      count: 1
+    traffic_type: 4
     rfc2544:
       allowed_drop_rate: 0.0001 - 0.0001
-  vnf_options:
-    acl:
+    vnf__1:
       rules: acl_1rule.yaml
-      cfg: acl_config
+      vnf_config: {lb_config: 'SW', lb_count: 1, worker_config: '1C/1T', worker_threads: 1}
   runner:
-    type: Duration
-    duration: 400
+    type: Iteration
+    iterations: 10
     interval: 35
-  traffic_options:
-    flow: ../../traffic_profiles/ipv4_1flow_Packets.yaml
-    imix: ../../traffic_profiles/imix_voice.yaml
 context:
   type: Node
   name: yardstick
index 8d7fe3c..95fad73 100644 (file)
@@ -22,7 +22,13 @@ scenarios:
     tg__1: trafficgen_1.yardstick
     vnf__1: vnf.yardstick
   options:
-    packetsize: 64
+    framesize:
+      private: {64B: 100}
+      public: {64B: 100}
+    flow:
+      src_ip: [{'tg__1': 'xe0'}]
+      dst_ip: [{'tg__1': 'xe1'}]
+      count: 1
     traffic_type: 4
     rfc2544:
       allowed_drop_rate: 0.0001 - 0.0001
@@ -33,9 +39,6 @@ scenarios:
     type: Iteration
     iterations: 10
     interval: 35
-  traffic_options:
-    flow: ../../traffic_profiles/ipv4_1flow_Packets.yaml
-    imix: ../../traffic_profiles/imix_voice.yaml
 context:
   type: Node
   name: yardstick
index 6b21ba5..397f352 100644 (file)
@@ -23,7 +23,13 @@ scenarios:
     vnf__1: vnf.yardstick
     tg__2: trafficgen_2.yardstick
   options:
-    packetsize: 64
+    framesize:
+      private: {64B: 100}
+      public: {64B: 100}
+    flow:
+      src_ip: [{'tg__1': 'xe0'}]
+      dst_ip: [{'tg__1': 'xe1'}]
+      count: 1
     traffic_type: 4
     rfc2544:
       allowed_drop_rate: 0.0001 - 0.0001
@@ -35,9 +41,6 @@ scenarios:
     type: Iteration
     iterations: 10
     interval: 35
-  traffic_options:
-    flow: ../../traffic_profiles/ipv4_1flow_Packets.yaml
-    imix: ../../traffic_profiles/imix_voice.yaml
 context:
   type: Node
   name: yardstick
index df7a909..1fa0b5b 100644 (file)
@@ -23,7 +23,13 @@ scenarios:
     tg__1: trafficgen_1.yardstick
     vnf__1: vnf.yardstick
   options:
-    packetsize: 64
+    framesize:
+      private: {64B: 100}
+      public: {64B: 100}
+    flow:
+      src_ip: [{'tg__1': 'xe0'}]
+      dst_ip: [{'tg__1': 'xe1'}]
+      count: 1
     traffic_type: 4
     rfc2544:
       allowed_drop_rate: 0.0001 - 0.0001
@@ -34,9 +40,6 @@ scenarios:
     type: Iteration
     iterations: 10
     interval: 35
-  traffic_options:
-    flow: ../../traffic_profiles/ipv4_1flow_Packets.yaml
-    imix: ../../traffic_profiles/imix_voice.yaml
 {% endfor %}
 context:
   type: Node
diff --git a/samples/vnf_samples/nsut/acl/tc_baremetal_rfc2544_ipv4_worstcaserules_1flow_64B_packetsize.yaml b/samples/vnf_samples/nsut/acl/tc_baremetal_rfc2544_ipv4_worstcaserules_1flow_64B_packetsize.yaml
deleted file mode 100644 (file)
index 96b3f68..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-# Copyright (c) 2016 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the License);
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an AS IS BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
----
-schema: yardstick:task:0.1
-scenarios:
-- type: NSPerf
-  traffic_profile: ../../traffic_profiles/ipv4_throughput.yaml
-  topology: acl-tg-topology.yaml
-  nodes:
-    tg__1: trafficgen_1.yardstick
-    vnf__1: vnf.yardstick
-  tc_options:
-    rfc2544:
-      allowed_drop_rate: 0.0001 - 0.0001
-  vnf_options:
-    acl:
-      rules: acl_worstcaserules.yaml
-      cfg: acl_config
-  runner:
-    type: Duration
-    duration: 400
-    interval: 35
-  traffic_options:
-    flow: ../../traffic_profiles/ipv4_1flow_Packets.yaml
-    imix: ../../traffic_profiles/imix_voice.yaml
-context:
-  type: Node
-  name: yardstick
-  nfvi_type: baremetal
-  file: /etc/yardstick/nodes/pod.yaml
index 8d7fe3c..95fad73 100644 (file)
@@ -22,7 +22,13 @@ scenarios:
     tg__1: trafficgen_1.yardstick
     vnf__1: vnf.yardstick
   options:
-    packetsize: 64
+    framesize:
+      private: {64B: 100}
+      public: {64B: 100}
+    flow:
+      src_ip: [{'tg__1': 'xe0'}]
+      dst_ip: [{'tg__1': 'xe1'}]
+      count: 1
     traffic_type: 4
     rfc2544:
       allowed_drop_rate: 0.0001 - 0.0001
@@ -33,9 +39,6 @@ scenarios:
     type: Iteration
     iterations: 10
     interval: 35
-  traffic_options:
-    flow: ../../traffic_profiles/ipv4_1flow_Packets.yaml
-    imix: ../../traffic_profiles/imix_voice.yaml
 context:
   type: Node
   name: yardstick
index ab688a2..3ba22ff 100644 (file)
@@ -21,23 +21,24 @@ scenarios:
   nodes:
     tg__1: trafficgen_1.yardstick
     vnf__1: vnf.yardstick
-  tc_options:
+  options:
+    framesize:
+      private: {64B: 100}
+      public: {64B: 100}
+    flow:
+      src_ip: [{'tg__1': 'xe0'}]
+      dst_ip: [{'tg__1': 'xe1'}]
+      count: 1
+    traffic_type: 4
     rfc2544:
       allowed_drop_rate: 0.0001 - 0.0001
-    latency: true
-  vnf_options:
-    acl:
+    vnf__1:
       rules: acl_1rule.yaml
-      cfg: acl_config
-  options:
-    packetsize: 64
+      vnf_config: {lb_config: 'SW', lb_count: 1, worker_config: '1C/1T', worker_threads: 1}
   runner:
     type: Iteration
     iterations: 28
     interval: 35
-  traffic_options:
-    flow: ../../traffic_profiles/ipv4_1flow_Packets.yaml
-    imix: ../../traffic_profiles/imix_voice.yaml
 context:
   type: Node
   name: yardstick
@@ -22,7 +22,13 @@ scenarios:
     tg__1: trafficgen_1.yardstick
     vnf__1: vnf.yardstick
   options:
-    packetsize: 64
+    framesize:
+      private: {64B: 100}
+      public: {64B: 100}
+    flow:
+      src_ip: [{'tg__1': 'xe0'}]
+      dst_ip: [{'tg__1': 'xe1'}]
+      count: 1
     traffic_type: 4
     rfc2544:
       allowed_drop_rate: 0.0001 - 0.0001
@@ -33,23 +39,18 @@ scenarios:
     type: Iteration
     iterations: 10
     interval: 35
-  traffic_options:
-    flow: ../../traffic_profiles/ipv4_1flow_Packets.yaml
-    imix: ../../traffic_profiles/imix_voice.yaml
 context:
   # put node context first, so we don't HEAT deploy if node has errors
   name: yardstick
-  flavor: yardstick-dpdk-flavor
-#  flavor:
-#    name: yardstick-dpdk-flavor
-#    vcpus: 10
-#    ram: 20480
-#    disk: 4
-#    extra_specs:
-#      hw:cpu_sockets: 1
-#      hw:cpu_cores: 10
-#      hw:cpu_threads: 1
-#  #      hw:mem_page_size: large
+  image: yardstick-samplevnfs
+  flavor:
+    vcpus: 10
+    ram: 20480
+    disk: 4
+    extra_specs:
+      hw:cpu_sockets: 1
+      hw:cpu_cores: 10
+      hw:cpu_threads: 1
   user: ubuntu
   placement_groups:
     pgrp1:
@@ -58,25 +59,24 @@ context:
     vnf:
       floating_ip: true
       placement: "pgrp1"
-      image: yardstick-vnfs
     trafficgen_1:
       floating_ip: true
       placement: "pgrp1"
-      image: yardstick-trex
   networks:
     mgmt:
       cidr: '10.0.1.0/24'
-      external_network: "yardstick-public"
     xe0:
       cidr: '10.0.2.0/24'
-      vld_id: public
+      vld_id: public_1
+      gateway_ip: 'null'
 #      port_security_enabled: False
       allowed_address_pairs:
         - ip_address:
             '0.0.0.0/0'
     xe1:
       cidr: '10.0.3.0/24'
-      vld_id: private
+      vld_id: private_1
+      gateway_ip: 'null'
 #      port_security_enabled: False
       allowed_address_pairs:
         - ip_address:
index 3344a1d..998a126 100644 (file)
@@ -21,38 +21,39 @@ scenarios:
   nodes:
     tg__1: trafficgen_1.baremetal
     vnf__1: vnf.yardstick
-  tc_options:
+  options:
+    framesize:
+      private: {64B: 100}
+      public: {64B: 100}
+    flow:
+      src_ip: [{'tg__1': 'xe0'}]
+      dst_ip: [{'tg__1': 'xe1'}]
+      count: 1
+    traffic_type: 4
     rfc2544:
       allowed_drop_rate: 0.0001 - 0.0001
-  vnf_options:
-    acl:
+    vnf__1:
       rules: acl_1rule.yaml
-      cfg: acl_config
+      vnf_config: {lb_config: 'SW', lb_count: 1, worker_config: '1C/1T', worker_threads: 1}
   runner:
-    type: Duration
-    duration: 400
+    type: Iteration
+    iterations: 10
     interval: 35
-  traffic_options:
-    flow: ../../traffic_profiles/ipv4_1flow_Packets.yaml
-    imix: ../../traffic_profiles/imix_voice.yaml
 contexts:
   # put node context first, so we don't HEAT deploy if node has errors
   - name: baremetal
     type: Node
     file: trex-baremetal.yml
   - name: yardstick
-    image: yardstick-acl
-    flavor: yardstick-flavor
-#    flavor:
-#  #    name: yardstick-dpdk-flavor
-#      vcpus: 6
-#      ram: 20480
-#      disk: 4
-#      extra_specs:
-#        hw:cpu_sockets: 1
-#        hw:cpu_cores: 6
-#        hw:cpu_threads: 1
-#  #      hw:mem_page_size: large
+    image: yardstick-samplevnfs
+    flavor:
+      vcpus: 10
+      ram: 20480
+      disk: 4
+      extra_specs:
+        hw:cpu_sockets: 1
+        hw:cpu_cores: 10
+        hw:cpu_threads: 1
     user: ubuntu
     placement_groups:
       pgrp1:
@@ -64,11 +65,17 @@ contexts:
     networks:
       mgmt:
         cidr: '10.0.1.0/24'
-        external_network: "yardstick-public"
       xe0:
         cidr: '10.0.2.0/24'
-        vld_id: public
+        vld_id: public_1
+        gateway_ip: 'null'
+        provider: true
+        physical_network: phystenant1
+        port_security_enabled: False
       xe1:
         cidr: '10.0.3.0/24'
-        vld_id: private
-
+        vld_id: private_1
+        gateway_ip: 'null'
+        provider: true
+        physical_network: phystenant2
+        port_security_enabled: False
index 7e9a589..fc1c3e9 100644 (file)
@@ -22,7 +22,9 @@ scenarios:
     tg__1: trafficgen_1.yardstick
     vnf__1: vnf.yardstick
   options:
-    packetsize: 64
+    framesize:
+      private: {64B: 100}
+      public: {64B: 100}
     traffic_type: 4
     rfc2544:
       allowed_drop_rate: 0.0001 - 0.0001
index 5203e8d..6160ca0 100644 (file)
@@ -22,7 +22,14 @@ scenarios:
     tg__1: trafficgen_1.yardstick
     vnf__1: vnf.yardstick
   options:
-    packetsize: 64
+    framesize:
+      private: {64B: 100}
+      public: {64B: 100}
+    flow:
+      src_ip: [{'tg__1': 'xe0'}]
+      dst_ip: [{'tg__1': 'xe1'}]
+      publicip: ["152.16.40.10"]
+      count: 1
     traffic_type: 4
     rfc2544:
       allowed_drop_rate: 0.0001 - 0.0001
@@ -32,9 +39,6 @@ scenarios:
     type: Iteration
     iterations: 10
     interval: 35
-  traffic_options:
-    flow: "../../traffic_profiles/ipv4_1flow_Packets.yaml"
-    imix: "../../traffic_profiles/imix_voice.yaml"
   ixia_profile: ../../traffic_profiles/cgnapt/cgnat_ipv4_profile_1flows.ixncfg
 context:
   type: Node
index feeacf5..15365b0 100644 (file)
@@ -22,7 +22,14 @@ scenarios:
     tg__1: trafficgen_1.yardstick
     vnf__1: vnf.yardstick
   options:
-    packetsize: 64
+    framesize:
+      private: {64B: 100}
+      public: {64B: 100}
+    flow:
+      src_ip: [{'tg__1': 'xe0'}]
+      dst_ip: [{'tg__1': 'xe1'}]
+      publicip: ["152.16.40.10"]
+      count: 1
     traffic_type: 4
     rfc2544:
       allowed_drop_rate: 0.0001 - 0.0001
@@ -32,9 +39,6 @@ scenarios:
     type: Iteration
     iterations: 10
     interval: 35
-  traffic_options:
-    flow: ../../traffic_profiles/ipv4_1flow_Packets.yaml
-    imix: ../../traffic_profiles/imix_voice.yaml
 context:
   type: Node
   name: yardstick
index b5548d5..c1f5f21 100644 (file)
@@ -23,7 +23,13 @@ scenarios:
     vnf__1: vnf.yardstick
     tg__2: trafficgen_2.yardstick
   options:
-    packetsize: 64
+    framesize:
+      private: {64B: 100}
+      public: {64B: 100}
+    flow:
+      src_ip: [{'tg__1': 'xe0'}]
+      dst_ip: [{'tg__2': 'xe0'}]
+      count: 1
     traffic_type: 4
     rfc2544:
       allowed_drop_rate: 0.0001 - 0.0001
@@ -35,9 +41,6 @@ scenarios:
     type: Iteration
     iterations: 10
     interval: 35
-  traffic_options:
-    flow: ../../traffic_profiles/ipv4_1flow_Packets.yaml
-    imix: ../../traffic_profiles/imix_voice.yaml
 context:
   type: Node
   name: yardstick
index 7d746f0..1bf7df8 100644 (file)
@@ -23,7 +23,14 @@ scenarios:
     tg__1: trafficgen_1.yardstick
     vnf__1: vnf.yardstick
   options:
-    packetsize: 64
+    framesize:
+      private: {64B: 100}
+      public: {64B: 100}
+    flow:
+      src_ip: [{'tg__1': 'xe0'}]
+      dst_ip: [{'tg__1': 'xe1'}]
+      publicip: ["152.16.40.10"]
+      count: 1
     traffic_type: 4
     rfc2544:
       allowed_drop_rate: 0.0001 - 0.0001
@@ -33,9 +40,6 @@ scenarios:
     type: Iteration
     iterations: 10
     interval: 35
-  traffic_options:
-    flow: ../../traffic_profiles/ipv4_1flow_Packets.yaml
-    imix: ../../traffic_profiles/imix_voice.yaml
 {% endfor %}
 context:
   type: Node
index 16d0d08..e8cac4a 100644 (file)
@@ -22,7 +22,14 @@ scenarios:
     tg__1: trafficgen_1.yardstick
     vnf__1: vnf.yardstick
   options:
-    packetsize: 64
+    framesize:
+      private: {64B: 100}
+      public: {64B: 100}
+    flow:
+      src_ip: [{'tg__1': 'xe0'}]
+      dst_ip: [{'tg__1': 'xe1'}]
+      publicip: ["152.16.40.10"]
+      count: 1
     traffic_type: 4
     rfc2544:
       allowed_drop_rate: 0.0001 - 0.0001
@@ -33,9 +40,6 @@ scenarios:
     type: Iteration
     iterations: 28
     interval: 35
-  traffic_options:
-    flow: ../../traffic_profiles/ipv4_1flow_Packets.yaml
-    imix: ../../traffic_profiles/imix_voice.yaml
 context:
   type: Node
   name: yardstick
diff --git a/samples/vnf_samples/nsut/cgnapt/tc_heat_external_rfc2544_ipv4_1flow_64B_trex.yaml b/samples/vnf_samples/nsut/cgnapt/tc_heat_external_rfc2544_ipv4_1flow_64B_trex.yaml
new file mode 100644 (file)
index 0000000..0ad7898
--- /dev/null
@@ -0,0 +1,80 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+schema: yardstick:task:0.1
+scenarios:
+- type: NSPerf
+  traffic_profile: ../../traffic_profiles/ipv4_throughput_cgnapt.yaml
+  topology: cgnapt-vnf-topology.yaml
+  nodes:
+    tg__1: trafficgen_1.baremetal
+    vnf__1: vnf.yardstick
+  options:
+    framesize:
+      private: {64B: 100}
+      public: {64B: 100}
+    flow:
+      src_ip: [{'tg__1': 'xe0'}]
+      dst_ip: [{'tg__1': 'xe1'}]
+      count: 1
+    traffic_type: 4
+    rfc2544:
+      allowed_drop_rate: 0.0001 - 0.0001
+    vnf__1:
+      vnf_config: {lb_config: 'SW', lb_count: 1, worker_config: '1C/1T', worker_threads: 1}
+  runner:
+    type: Iteration
+    iterations: 10
+    interval: 35
+contexts:
+  # put node context first, so we don't HEAT deploy if node has errors
+  - name: baremetal
+    type: Node
+    file: trex-baremetal.yml
+  - name: yardstick
+    image: yardstick-samplevnfs
+    flavor:
+      vcpus: 10
+      ram: 20480
+      disk: 4
+      extra_specs:
+        hw:cpu_sockets: 1
+        hw:cpu_cores: 10
+        hw:cpu_threads: 1
+    user: ubuntu
+    placement_groups:
+      pgrp1:
+        policy: "availability"
+    servers:
+      vnf:
+        floating_ip: true
+        placement: "pgrp1"
+    networks:
+      mgmt:
+        cidr: '10.0.1.0/24'
+      xe0:
+        cidr: '10.0.2.0/24'
+        vld_id: public_1
+        gateway_ip: 'null'
+        provider: true
+        physical_network: phystenant1
+        port_security_enabled: False
+      xe1:
+        cidr: '10.0.3.0/24'
+        vld_id: private_1
+        gateway_ip: 'null'
+        provider: true
+        physical_network: phystenant2
+        port_security_enabled: False
diff --git a/samples/vnf_samples/nsut/cgnapt/tc_heat_rfc2544_ipv4_1flow_64B_trex.yaml b/samples/vnf_samples/nsut/cgnapt/tc_heat_rfc2544_ipv4_1flow_64B_trex.yaml
new file mode 100644 (file)
index 0000000..516c727
--- /dev/null
@@ -0,0 +1,83 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+schema: yardstick:task:0.1
+scenarios:
+- type: NSPerf
+  traffic_profile: ../../traffic_profiles/ipv4_throughput_cgnapt.yaml
+  topology: cgnapt-vnf-topology.yaml
+  nodes:
+    tg__1: trafficgen_1.yardstick
+    vnf__1: vnf.yardstick
+  options:
+    framesize:
+      private: {64B: 100}
+      public: {64B: 100}
+    flow:
+      src_ip: [{'tg__1': 'xe0'}]
+      dst_ip: [{'tg__1': 'xe1'}]
+      count: 1
+    traffic_type: 4
+    rfc2544:
+      allowed_drop_rate: 0.0001 - 0.0001
+    vnf__1:
+      vnf_config: {lb_config: 'SW', lb_count: 1, worker_config: '1C/1T', worker_threads: 1}
+  runner:
+    type: Iteration
+    iterations: 10
+    interval: 35
+context:
+  # put node context first, so we don't HEAT deploy if node has errors
+  name: yardstick
+  image: yardstick-samplevnfs
+  flavor:
+    vcpus: 10
+    ram: 20480
+    disk: 4
+    extra_specs:
+      hw:cpu_sockets: 1
+      hw:cpu_cores: 10
+      hw:cpu_threads: 1
+  user: ubuntu
+  placement_groups:
+    pgrp1:
+      policy: "availability"
+  servers:
+    vnf:
+      floating_ip: true
+      placement: "pgrp1"
+    trafficgen_1:
+      floating_ip: true
+      placement: "pgrp1"
+  networks:
+    mgmt:
+      cidr: '10.0.1.0/24'
+    xe0:
+      cidr: '10.0.2.0/24'
+      vld_id: public_1
+      gateway_ip: 'null'
+#      port_security_enabled: False
+      allowed_address_pairs:
+        - ip_address:
+            '0.0.0.0/0'
+    xe1:
+      cidr: '10.0.3.0/24'
+      vld_id: private_1
+      gateway_ip: 'null'
+#      port_security_enabled: False
+      allowed_address_pairs:
+        - ip_address:
+            '0.0.0.0/0'
+
index a2b73b6..e80e1fb 100644 (file)
@@ -22,7 +22,13 @@ scenarios:
     tg__1: trafficgen_1.yardstick
     vnf__1: vnf.yardstick
   options:
-    packetsize: 64
+    framesize:
+      private: {64B: 100}
+      public: {64B: 100}
+    flow:
+      src_ip: [{'tg__1': 'xe0'}]
+      dst_ip: [{'tg__1': 'xe1'}]
+      count: 1
     traffic_type: 4
     rfc2544:
       allowed_drop_rate: 0.0001 - 0.0001
@@ -30,9 +36,6 @@ scenarios:
     type: Iteration
     iterations: 10
     interval: 35
-  traffic_options:
-    flow: ../../traffic_profiles/ipv4_1flow_Packets.yaml
-    imix: ../../traffic_profiles/imix_voice.yaml
 context:
   type: Node
   name: yardstick
index b184a29..6753645 100644 (file)
@@ -20,7 +20,7 @@ access-list1:
           match-counter: 0
         actions: drop,count
         matches:
-          destination-ipv4-network: 152.16.40.20/24
+          destination-ipv4-network: 152.16.0.0/24
           destination-port-range:
             lower-port: 0
             upper-port: 65535
@@ -38,7 +38,7 @@ access-list1:
           destination-port-range:
             lower-port: 0
             upper-port: 65535
-          source-ipv4-network: 152.16.100.20/24
+          source-ipv4-network: 152.16.0.0/24
           source-port-range:
             lower-port: 0
             upper-port: 65535
index 1347226..e0bd82a 100644 (file)
@@ -22,7 +22,9 @@ scenarios:
     tg__1: trafficgen_1.yardstick
     vnf__1: vnf.yardstick
   options:
-    packetsize: 64
+    framesize:
+      private: {64B: 100}
+      public: {64B: 100}
     traffic_type: 4
     rfc2544:
       allowed_drop_rate: 0.0001 - 0.0001
@@ -37,4 +39,4 @@ context:
   type: Node
   name: yardstick
   nfvi_type: baremetal
-  file: /etc/yardstick/nodes/pod.yaml
+  file: /etc/yardstick/nodes/pod_ixia.yaml
index 71a803d..e07f5f9 100644 (file)
@@ -22,7 +22,13 @@ scenarios:
     tg__1: trafficgen_1.yardstick
     vnf__1: vnf.yardstick
   options:
-    packetsize: 64
+    framesize:
+      private: {64B: 100}
+      public: {64B: 100}
+    flow:
+      src_ip: [{'tg__1': 'xe0'}]
+      dst_ip: [{'tg__1': 'xe1'}]
+      count: 1
     traffic_type: 4
     rfc2544:
       allowed_drop_rate: 0.0001 - 0.0001
@@ -33,9 +39,6 @@ scenarios:
     type: Iteration
     iterations: 10
     interval: 35
-  traffic_options:
-    flow: "../../traffic_profiles/ipv4_1flow_Packets.yaml"
-    imix: "../../traffic_profiles/imix_voice.yaml"
   ixia_profile: ../../traffic_profiles/vfw/vfw_ipv4_profile_1flows.ixncfg
 context:
   type: Node
index 3a17aba..53d4d3d 100644 (file)
@@ -22,7 +22,13 @@ scenarios:
     tg__1: trafficgen_1.yardstick
     vnf__1: vnf.yardstick
   options:
-    packetsize: 64
+    framesize:
+      private: {64B: 100}
+      public: {64B: 100}
+    flow:
+      src_ip: [{'tg__1': 'xe0'}]
+      dst_ip: [{'tg__1': 'xe1'}]
+      count: 1
     traffic_type: 4
     rfc2544:
       allowed_drop_rate: 0.0001 - 0.0001
@@ -33,9 +39,6 @@ scenarios:
     type: Iteration
     iterations: 10
     interval: 35
-  traffic_options:
-    flow: ../../traffic_profiles/ipv4_1flow_Packets.yaml
-    imix: ../../traffic_profiles/imix_voice.yaml
 context:
   type: Node
   name: yardstick
index a92a91e..562575b 100644 (file)
@@ -21,8 +21,15 @@ scenarios:
   nodes:
     tg__1: trafficgen_1.yardstick
     vnf__1: vnf.yardstick
+    tg__2: trafficgen_2.yardstick
   options:
-    packetsize: 64
+    framesize:
+      private: {64B: 100}
+      public: {64B: 100}
+    flow:
+      src_ip: [{'tg__1': 'xe0'}]
+      dst_ip: [{'tg__2': 'xe0'}]
+      count: 1
     traffic_type: 4
     rfc2544:
       allowed_drop_rate: 0.0001 - 0.0001
@@ -34,11 +41,8 @@ scenarios:
     type: Iteration
     iterations: 10
     interval: 35
-  traffic_options:
-    flow: ../../traffic_profiles/ipv4_1flow_Packets.yaml
-    imix: ../../traffic_profiles/imix_voice.yaml
 context:
   type: Node
   name: yardstick
   nfvi_type: baremetal
-  file: /etc/yardstick/nodes/pod.yaml
+  file: /etc/yardstick/nodes/pod_3node.yaml
index ab2791c..db86221 100644 (file)
@@ -23,7 +23,13 @@ scenarios:
     tg__1: trafficgen_1.yardstick
     vnf__1: vnf.yardstick
   options:
-    packetsize: 64
+    framesize:
+      private: {64B: 100}
+      public: {64B: 100}
+    flow:
+      src_ip: [{'tg__1': 'xe0'}]
+      dst_ip: [{'tg__1': 'xe1'}]
+      count: 1
     traffic_type: 4
     rfc2544:
       allowed_drop_rate: 0.0001 - 0.0001
@@ -34,9 +40,6 @@ scenarios:
     type: Iteration
     iterations: 10
     interval: 35
-  traffic_options:
-    flow: ../../traffic_profiles/ipv4_1flow_Packets.yaml
-    imix: ../../traffic_profiles/imix_voice.yaml
 {% endfor %}
 context:
   type: Node
index 1e63300..a138145 100644 (file)
@@ -22,7 +22,13 @@ scenarios:
     tg__1: trafficgen_1.yardstick
     vnf__1: vnf.yardstick
   options:
-    packetsize: 64
+    framesize:
+      private: {64B: 100}
+      public: {64B: 100}
+    flow:
+      src_ip: [{'tg__1': 'xe0'}]
+      dst_ip: [{'tg__1': 'xe1'}]
+      count: 1
     traffic_type: 4
     rfc2544:
       allowed_drop_rate: 0.0001 - 0.0001
@@ -34,9 +40,6 @@ scenarios:
     type: Iteration
     iterations: 28
     interval: 35
-  traffic_options:
-    flow: ../../traffic_profiles/ipv4_1flow_Packets.yaml
-    imix: ../../traffic_profiles/imix_voice.yaml
 context:
   type: Node
   name: yardstick
diff --git a/samples/vnf_samples/nsut/vfw/tc_heat_external_rfc2544_ipv4_1rule_1flow_64B_trex.yaml b/samples/vnf_samples/nsut/vfw/tc_heat_external_rfc2544_ipv4_1rule_1flow_64B_trex.yaml
new file mode 100644 (file)
index 0000000..3e323d9
--- /dev/null
@@ -0,0 +1,81 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+schema: yardstick:task:0.1
+scenarios:
+- type: NSPerf
+  traffic_profile: ../../traffic_profiles/ipv4_throughput.yaml
+  topology: vfw-tg-topology.yaml
+  nodes:
+    tg__1: trafficgen_1.baremetal
+    vnf__1: vnf.yardstick
+  options:
+    framesize:
+      private: {64B: 100}
+      public: {64B: 100}
+    flow:
+      src_ip: [{'tg__1': 'xe0'}]
+      dst_ip: [{'tg__1': 'xe1'}]
+      count: 1
+    traffic_type: 4
+    rfc2544:
+      allowed_drop_rate: 0.0001 - 0.0001
+    vnf__1:
+      rules: acl_1rule.yaml
+      vnf_config: {lb_config: 'SW', lb_count: 1, worker_config: '1C/1T', worker_threads: 1}
+  runner:
+    type: Iteration
+    iterations: 10
+    interval: 35
+contexts:
+  # put node context first, so we don't HEAT deploy if node has errors
+  - name: baremetal
+    type: Node
+    file: trex-baremetal.yml
+  - name: yardstick
+    image: yardstick-samplevnfs
+    flavor:
+      vcpus: 10
+      ram: 20480
+      disk: 4
+      extra_specs:
+        hw:cpu_sockets: 1
+        hw:cpu_cores: 10
+        hw:cpu_threads: 1
+    user: ubuntu
+    placement_groups:
+      pgrp1:
+        policy: "availability"
+    servers:
+      vnf:
+        floating_ip: true
+        placement: "pgrp1"
+    networks:
+      mgmt:
+        cidr: '10.0.1.0/24'
+      xe0:
+        cidr: '10.0.2.0/24'
+        vld_id: public_1
+        gateway_ip: 'null'
+        provider: true
+        physical_network: phystenant1
+        port_security_enabled: False
+      xe1:
+        cidr: '10.0.3.0/24'
+        vld_id: private_1
+        gateway_ip: 'null'
+        provider: true
+        physical_network: phystenant2
+        port_security_enabled: False
diff --git a/samples/vnf_samples/nsut/vfw/tc_heat_rfc2544_ipv4_1rule_1flow_64B_trex.yaml b/samples/vnf_samples/nsut/vfw/tc_heat_rfc2544_ipv4_1rule_1flow_64B_trex.yaml
new file mode 100644 (file)
index 0000000..82e89a2
--- /dev/null
@@ -0,0 +1,84 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+schema: yardstick:task:0.1
+scenarios:
+- type: NSPerf
+  traffic_profile: ../../traffic_profiles/ipv4_throughput.yaml
+  topology: vfw-tg-topology.yaml
+  nodes:
+    tg__1: trafficgen_1.yardstick
+    vnf__1: vnf.yardstick
+  options:
+    framesize:
+      private: {64B: 100}
+      public: {64B: 100}
+    flow:
+      src_ip: [{'tg__1': 'xe0'}]
+      dst_ip: [{'tg__1': 'xe1'}]
+      count: 1
+    traffic_type: 4
+    rfc2544:
+      allowed_drop_rate: 0.0001 - 0.0001
+    vnf__1:
+      rules: acl_1rule.yaml
+      vnf_config: {lb_config: 'SW', lb_count: 1, worker_config: '1C/1T', worker_threads: 1}
+  runner:
+    type: Iteration
+    iterations: 10
+    interval: 35
+context:
+  # put node context first, so we don't HEAT deploy if node has errors
+  name: yardstick
+  image: yardstick-samplevnfs
+  flavor:
+    vcpus: 10
+    ram: 20480
+    disk: 4
+    extra_specs:
+      hw:cpu_sockets: 1
+      hw:cpu_cores: 10
+      hw:cpu_threads: 1
+  user: ubuntu
+  placement_groups:
+    pgrp1:
+      policy: "availability"
+  servers:
+    vnf:
+      floating_ip: true
+      placement: "pgrp1"
+    trafficgen_1:
+      floating_ip: true
+      placement: "pgrp1"
+  networks:
+    mgmt:
+      cidr: '10.0.1.0/24'
+    xe0:
+      cidr: '10.0.2.0/24'
+      vld_id: public_1
+      gateway_ip: 'null'
+#      port_security_enabled: False
+      allowed_address_pairs:
+        - ip_address:
+            '0.0.0.0/0'
+    xe1:
+      cidr: '10.0.3.0/24'
+      vld_id: private_1
+      gateway_ip: 'null'
+#      port_security_enabled: False
+      allowed_address_pairs:
+        - ip_address:
+            '0.0.0.0/0'
+
index 7b25139..16996cb 100644 (file)
@@ -22,16 +22,19 @@ scenarios:
     tg__1: trafficgen_1.yardstick
     vnf__1: vnf.yardstick
   options:
-    packetsize: 64
+    framesize:
+      private: {64B: 100}
+      public: {64B: 100}
+    flow:
+      src_ip: [{'tg__1': 'xe0'}]
+      dst_ip: [{'tg__1': 'xe1'}]
+      count: 1
     traffic_type: 4
     vnf__1:
       cfg: vpe_config
   runner:
     type: Duration
     duration: 4
-  traffic_options:
-    flow: "../../traffic_profiles/ipv4_1flow_Packets_vpe.yaml"
-    imix: "../../traffic_profiles/imix_voice.yaml"
   ixia_profile: ../../traffic_profiles/vpe/HTTP-vPE_IPv4_2Ports.rxf # Need vlan update
 context:
   type: Node
index 4652a62..02346d2 100644 (file)
@@ -22,7 +22,13 @@ scenarios:
     tg__1: trafficgen_1.yardstick
     vnf__1: vnf.yardstick
   options:
-    packetsize: 64
+    framesize:
+      private: {1518B: 100}
+      public: {1518B: 100}
+    flow:
+      src_ip: [{'tg__1': 'xe0'}]
+      dst_ip: [{'tg__1': 'xe1'}]
+      count: 1
     traffic_type: 4
     rfc2544:
       allowed_drop_rate: 0.0001 - 0.0001
@@ -32,9 +38,6 @@ scenarios:
     type: Iteration
     iterations: 10
     interval: 35
-  traffic_options:
-    flow: "../../traffic_profiles/ipv4_1flow_Packets_vpe.yaml"
-    imix: "../../traffic_profiles/imix_storage.yaml"
 context:
   type: Node
   name: yardstick
index bd64a45..a50ba38 100644 (file)
@@ -22,7 +22,13 @@ scenarios:
     tg__1: trafficgen_1.yardstick
     vnf__1: vnf.yardstick
   options:
-    packetsize: 64
+    framesize:
+      private: {64B: 100}
+      public: {64B: 100}
+    flow:
+      src_ip: [{'tg__1': 'xe0'}]
+      dst_ip: [{'tg__1': 'xe1'}]
+      count: 1
     traffic_type: 4
     rfc2544:
       allowed_drop_rate: 0.0001 - 0.0001
@@ -32,9 +38,6 @@ scenarios:
     type: Iteration
     iterations: 10
     interval: 35
-  traffic_options:
-    flow: "../../traffic_profiles/ipv4_1flow_Packets_vpe.yaml"
-    imix: "../../traffic_profiles/imix_voice.yaml"
 context:
   type: Node
   name: yardstick
index 0257886..e0a7493 100644 (file)
@@ -22,7 +22,13 @@ scenarios:
     tg__1: trafficgen_1.yardstick
     vnf__1: vnf.yardstick
   options:
-    packetsize: 64
+    framesize:
+      private: {64B: 100}
+      public: {64B: 100}
+    flow:
+      src_ip: [{'tg__1': 'xe0'}]
+      dst_ip: [{'tg__1': 'xe1'}]
+      count: 1
     traffic_type: 4
     rfc2544:
       allowed_drop_rate: 0.0001 - 0.0001
@@ -32,9 +38,6 @@ scenarios:
     type: Iteration
     iterations: 10
     interval: 35
-  traffic_options:
-    flow: "../../traffic_profiles/ipv4_1flow_Packets_vpe.yaml"
-    imix: "../../traffic_profiles/imix_voice.yaml"
   ixia_profile: ../../traffic_profiles/vpe/vpe_ipv4_profile_1flows.ixncfg
 context:
   type: Node
index 2c48d0e..57c5128 100644 (file)
@@ -21,8 +21,15 @@ scenarios:
   nodes:
     tg__1: trafficgen_1.yardstick
     vnf__1: vnf.yardstick
+    tg__2: trafficgen_2.yardstick
   options:
-    packetsize: 64
+    framesize:
+      private: {64B: 100}
+      public: {64B: 100}
+    flow:
+      src_ip: [{'tg__1': 'xe0'}]
+      dst_ip: [{'tg__2': 'xe0'}]
+      count: 1
     traffic_type: 4
     rfc2544:
       allowed_drop_rate: 0.0001 - 0.0001
@@ -33,9 +40,6 @@ scenarios:
     type: Iteration
     iterations: 10
     interval: 35
-  traffic_options:
-    flow: "../../traffic_profiles/ipv4_1flow_Packets_vpe.yaml"
-    imix: "../../traffic_profiles/imix_voice.yaml"
 context:
   type: Node
   name: yardstick
index 674fa95..6b78574 100644 (file)
@@ -22,7 +22,14 @@ scenarios:
     tg__1: trafficgen_1.yardstick
     vnf__1: vnf.yardstick
   options:
-    packetsize: 64
+    framesize:
+      private: {64B: 5, 128B: 11, 256B: 16, 373B: 10, 570B: 35, 1400B: 10, 1500B: 13}
+      public: {64B: 5, 128B: 3, 256B: 4, 373B: 6, 570B: 8, 1400B: 36, 1500B: 38}
+
+    flow:
+      src_ip: [{'tg__1': 'xe0'}]
+      dst_ip: [{'tg__1': 'xe1'}]
+      count: 1
     traffic_type: 4
     rfc2544:
       allowed_drop_rate: 0.0001 - 0.0001
@@ -32,9 +39,6 @@ scenarios:
     type: Iteration
     iterations: 10
     interval: 35
-  traffic_options:
-    flow: "../../traffic_profiles/ipv4_1flow_Packets_vpe.yaml"
-    imix: "../../traffic_profiles/imix_video.yaml"
 context:
   type: Node
   name: yardstick
diff --git a/samples/vnf_samples/traffic_profiles/imix_storage.yaml b/samples/vnf_samples/traffic_profiles/imix_storage.yaml
deleted file mode 100644 (file)
index 8fd10ec..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright (c) 2016-2017 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#imix definition for storage traffic
-#
-# it is a typical case for testing the synthetic VNF performance.
-#
-#percentage of the packets can be less than 100%
-#the traffic in downstream and upstream direction could be different
-
-schema: "nsb:imix:0.1"
-
-imix:
-  private:
-    imix_small: 0    #ipv4 case - 72B should be 0 ipv6 case - 84B
-    imix_128B: 0
-    imix_256B: 0
-    imix_373B: 0
-    imix_570B: 0
-    imix_1400B: 0
-    imix_1500B: 100
-
-  public:
-    imix_small: 0    #ipv4 case - 72B ipv6 - 84B
-    imix_128B: 0
-    imix_256B: 0
-    imix_373B: 0
-    imix_570B: 0
-    imix_1400B: 0
-    imix_1500B: 100
diff --git a/samples/vnf_samples/traffic_profiles/imix_video.yaml b/samples/vnf_samples/traffic_profiles/imix_video.yaml
deleted file mode 100644 (file)
index 36324bf..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-# Copyright (c) 2016-2017 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#imix definition for video traffic
-#
-# this is a real traffic profile when video/data traffic only is present
-#
-#percentage of the packets can be less than 100%
-#the traffic in downstream and upstream direction could be different
-#
-#small means ipv4 case - 72B should be 0, ipv6 case - 84B
-
-schema: "nsb:imix:0.1"
-
-imix:
-  private:
-    imix_small: 5    #ipv4 case - 72B should be 0 ipv6 case - 84B
-    imix_128B: 11
-    imix_256B: 16
-    imix_373B: 10
-    imix_570B: 35
-    imix_1400B: 10
-    imix_1500B: 13
-
-  public:
-    imix_small: 5    #ipv4 case - 72B ipv6 - 84B
-    imix_128B: 3
-    imix_256B: 4
-    imix_373B: 6
-    imix_570B: 8
-    imix_1400B: 36
-    imix_1500B: 38
diff --git a/samples/vnf_samples/traffic_profiles/imix_voice.yaml b/samples/vnf_samples/traffic_profiles/imix_voice.yaml
deleted file mode 100644 (file)
index b8f8e53..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright (c) 2016-2017 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#imix definition for voice traffic
-#
-# it is a typical case for testing the synthetic VNF performance.
-#
-#percentage of the packets can be less than 100%
-#the traffic in downstream and upstream direction could be different
-
-schema: "nsb:imix:0.1"
-
-imix:
-  private:
-    imix_small: 100    #ipv4 case - 72B should be 0 ipv6 case - 84B
-    imix_128B: 0
-    imix_256B: 0
-    imix_373B: 0
-    imix_570B: 0
-    imix_1400B: 0
-    imix_1500B: 0
-
-  public:
-    imix_small: 100    #ipv4 case - 72B ipv6 - 84B
-    imix_128B: 0
-    imix_256B: 0
-    imix_373B: 0
-    imix_570B: 0
-    imix_1400B: 0
-    imix_1500B: 0
diff --git a/samples/vnf_samples/traffic_profiles/ipv4_1flow_Packets_vpe.yaml b/samples/vnf_samples/traffic_profiles/ipv4_1flow_Packets_vpe.yaml
deleted file mode 100644 (file)
index 8bb913e..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-# Copyright (c) 2016-2017 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-flow:
-    srcip4_range_1: '152.16.0.20'
-    dstip4_range_1: '152.40.0.20'
-    srcip4_range_2: '172.16.0.20'
-    dstip4_range_2: '172.40.0.20'
-    count: 1
index 2854826..98624b1 100644 (file)
@@ -49,41 +49,49 @@ private_1:
       ipv4:
         outer_l2:
             framesize:
-                64B: "{{ get(imix, 'imix.private.imix_small', '0') }}"
-                128B: "{{ get(imix, 'imix.private.imix_128B', '0') }}"
-                256B: "{{ get(imix, 'imix.private.imix_256B', '0') }}"
-                373b: "{{ get(imix, 'imix.private.imix_373B', '0') }}"
-                570B: "{{get(imix, 'imix.private.imix_570B', '0') }}"
-                1400B: "{{get(imix, 'imix.private.imix_1400B', '0') }}"
-                1518B: "{{get(imix, 'imix.private.imix_1500B', '0') }}"
+                64B: "{{ get(imix, 'imix.private.64B', '0') }}"
+                128B: "{{ get(imix, 'imix.private.128B', '0') }}"
+                256B: "{{ get(imix, 'imix.private.256B', '0') }}"
+                373B: "{{ get(imix, 'imix.private.373B', '0') }}"
+                512B: "{{ get(imix, 'imix.private.512B', '0') }}"
+                570B: "{{get(imix, 'imix.private.570B', '0') }}"
+                1400B: "{{get(imix, 'imix.private.1400B', '0') }}"
+                1500B: "{{get(imix, 'imix.private.1500B', '0') }}"
+                1518B: "{{get(imix, 'imix.private.1518B', '0') }}"
 
         outer_l3v4:
             proto: "udp"
-            srcip4: "{{get(flow, 'flow.srcip4_range', '1.1.1.1-1.1.255.255') }}"
-            dstip4: "{{get(flow, 'flow.dstip4_range', '90.90.1.1-90.90.255.255') }}"
+            srcip4: "{{get(flow, 'flow.src_ip0', '1.1.1.1-1.1.255.255') }}"
+            dstip4: "{{get(flow, 'flow.dst_ip0', '90.90.1.1-90.90.255.255') }}"
+            count: "{{get(flow, 'flow.count', '1') }}"
             ttl: 32
             dscp: 0
         outer_l4:
-            srcport: "{{get(flow, 'flow.srcport_range', '1234') }}"
-            dstport: "{{get(flow, 'flow.dstport_range', '2001') }}"
+            srcport: "{{get(flow, 'flow.src_port0', '1234-4321') }}"
+            dstport: "{{get(flow, 'flow.dst_port0', '2001-4001') }}"
+            count: "{{get(flow, 'flow.count', '1') }}"
 public_1:
       ipv4:
         outer_l2:
             framesize:
-                64B: "{{ get(imix, 'imix.private.imix_small', '0') }}"
-                128B: "{{ get(imix, 'imix.private.imix_128B', '0') }}"
-                256B: "{{ get(imix, 'imix.private.imix_256B', '0') }}"
-                373b: "{{ get(imix, 'imix.private.imix_373B', '0') }}"
-                570B: "{{get(imix, 'imix.private.imix_570B', '0') }}"
-                1400B: "{{get(imix, 'imix.private.imix_1400B', '0') }}"
-                1518B: "{{get(imix, 'imix.private.imix_1500B', '0') }}"
+                64B: "{{ get(imix, 'imix.public.64B', '0') }}"
+                128B: "{{ get(imix, 'imix.public.128B', '0') }}"
+                256B: "{{ get(imix, 'imix.public.256B', '0') }}"
+                373B: "{{ get(imix, 'imix.public.373B', '0') }}"
+                512B: "{{ get(imix, 'imix.public.512B', '0') }}"
+                570B: "{{get(imix, 'imix.public.570B', '0') }}"
+                1400B: "{{get(imix, 'imix.public.1400B', '0') }}"
+                1500B: "{{get(imix, 'imix.public.1500B', '0') }}"
+                1518B: "{{get(imix, 'imix.public.1518B', '0') }}"
 
         outer_l3v4:
             proto: "udp"
-            srcip4: "{{get(flow, 'flow.dstip4_range', '90.90.1.1-90.90.255.255') }}"
-            dstip4: "{{get(flow, 'flow.srcip4_range', '1.1.1.1-1.1.255.255') }}"
+            srcip4: "{{get(flow, 'flow.dst_ip0', '90.90.1.1-90.90.255.255') }}"
+            dstip4: "{{get(flow, 'flow.src_ip0', '1.1.1.1-1.1.255.255') }}"
+            count: "{{get(flow, 'flow.count', '1') }}"
             ttl: 32
             dscp: 0
         outer_l4:
-            srcport: "{{get(flow, 'flow.dstport_range', '1234') }}"
-            dstport: "{{get(flow, 'flow.srcport_range', '2001') }}"
+            srcport: "{{get(flow, 'flow.dst_port0', '1234-4321') }}"
+            dstport: "{{get(flow, 'flow.src_port0', '2001-4001') }}"
+            count: "{{get(flow, 'flow.count', '1') }}"
index 2ea8d3c..7283b63 100644 (file)
@@ -49,41 +49,49 @@ private_1:
       ipv4:
         outer_l2:
             framesize:
-                64B: "{{ get(imix, 'imix.private.imix_small', '0') }}"
-                128B: "{{ get(imix, 'imix.private.imix_128B', '0') }}"
-                256B: "{{ get(imix, 'imix.private.imix_256B', '0') }}"
-                373b: "{{ get(imix, 'imix.private.imix_373B', '0') }}"
-                570B: "{{get(imix, 'imix.private.imix_570B', '0') }}"
-                1400B: "{{get(imix, 'imix.private.imix_1400B', '0') }}"
-                1518B: "{{get(imix, 'imix.private.imix_1500B', '0') }}"
+                64B: "{{ get(imix, 'imix.private.64B', '0') }}"
+                128B: "{{ get(imix, 'imix.private.128B', '0') }}"
+                256B: "{{ get(imix, 'imix.private.256B', '0') }}"
+                373b: "{{ get(imix, 'imix.private.373B', '0') }}"
+                512B: "{{ get(imix, 'imix.private.512B', '0') }}"
+                570B: "{{get(imix, 'imix.private.570B', '0') }}"
+                1400B: "{{get(imix, 'imix.private.1400B', '0') }}"
+                1500B: "{{get(imix, 'imix.private.1500B', '0') }}"
+                1518B: "{{get(imix, 'imix.private.1518B', '0') }}"
 
         outer_l3v4:
             proto: "udp"
-            srcip4: "{{get(flow, 'flow.srcip4_range', '10.0.2.1-10.0.2.255') }}"
-            dstip4: "{{get(flow, 'flow.dstip4_range', '10.0.3.1-10.0.3.255') }}"
+            srcip4: "{{get(flow, 'flow.src_ip0', '10.0.2.1-10.0.2.255') }}"
+            dstip4: "{{get(flow, 'flow.dst_ip0', '10.0.3.1-10.0.3.255') }}"
+            count: "{{get(flow, 'flow.count', '1') }}"
             ttl: 32
             dscp: 0
         outer_l4:
-            srcport: "{{get(flow, 'flow.srcport_range', '1234') }}"
-            dstport: "{{get(flow, 'flow.dstport_range', '2001') }}"
+            srcport: "{{get(flow, 'flow.src_port0', '1234-4321') }}"
+            dstport: "{{get(flow, 'flow.dst_port0', '2001-4001') }}"
+            count: "{{get(flow, 'flow.count', '1') }}"
 public_1:
       ipv4:
         outer_l2:
             framesize:
-                64B: "{{ get(imix, 'imix.private.imix_small', '0') }}"
-                128B: "{{ get(imix, 'imix.private.imix_128B', '0') }}"
-                256B: "{{ get(imix, 'imix.private.imix_256B', '0') }}"
-                373b: "{{ get(imix, 'imix.private.imix_373B', '0') }}"
-                570B: "{{get(imix, 'imix.private.imix_570B', '0') }}"
-                1400B: "{{get(imix, 'imix.private.imix_1400B', '0') }}"
-                1518B: "{{get(imix, 'imix.private.imix_1500B', '0') }}"
+                64B: "{{ get(imix, 'imix.public.64B', '0') }}"
+                128B: "{{ get(imix, 'imix.public.128B', '0') }}"
+                256B: "{{ get(imix, 'imix.public.256B', '0') }}"
+                373b: "{{ get(imix, 'imix.public.373B', '0') }}"
+                512B: "{{ get(imix, 'imix.public.512B', '0') }}"
+                570B: "{{get(imix, 'imix.public.570B', '0') }}"
+                1400B: "{{get(imix, 'imix.public.1400B', '0') }}"
+                1500B: "{{get(imix, 'imix.public.1500B', '0') }}"
+                1518B: "{{get(imix, 'imix.public.1518B', '0') }}"
 
         outer_l3v4:
             proto: "udp"
-            srcip4: "{{get(flow, 'flow.dstip4_range', '10.0.3.1-10.0.3.255') }}"
-            dstip4: "{{get(flow, 'flow.srcip4_range', '10.0.2.1-10.0.2.255') }}"
+            srcip4: "{{get(flow, 'flow.dst_ip0', '10.0.3.1-10.0.3.255') }}"
+            dstip4: "{{get(flow, 'flow.public_ip0', '10.0.2.1-10.0.2.255') }}"
+            count: "{{get(flow, 'flow.count', '1') }}"
             ttl: 32
             dscp: 0
         outer_l4:
-            srcport: "{{get(flow, 'flow.dstport_range', '1234') }}"
-            dstport: "{{get(flow, 'flow.srcport_range', '2001') }}"
+            srcport: "{{get(flow, 'flow.dst_port0', '1234-4321') }}"
+            dstport: "{{get(flow, 'flow.src_port0', '2001-4001') }}"
+            count: "{{get(flow, 'flow.count', '1') }}"
index e935bdb..233457e 100644 (file)
@@ -49,13 +49,15 @@ private_1:
       ipv4:
         outer_l2:
             framesize:
-                64B: "{{ get(imix, 'imix.private.imix_small', '0') }}"
-                128B: "{{ get(imix, 'imix.private.imix_128B', '0') }}"
-                256B: "{{ get(imix, 'imix.private.imix_256B', '0') }}"
-                373b: "{{ get(imix, 'imix.private.imix_373B', '0') }}"
-                570B: "{{get(imix, 'imix.private.imix_570B', '0') }}"
-                1400B: "{{get(imix, 'imix.private.imix_1400B', '0') }}"
-                1518B: "{{get(imix, 'imix.private.imix_1500B', '0') }}"
+                64B: "{{ get(imix, 'imix.private.64B', '0') }}"
+                128B: "{{ get(imix, 'imix.private.128B', '0') }}"
+                256B: "{{ get(imix, 'imix.private.256B', '0') }}"
+                373b: "{{ get(imix, 'imix.private.373B', '0') }}"
+                512B: "{{ get(imix, 'imix.private.512B', '0') }}"
+                570B: "{{get(imix, 'imix.private.570B', '0') }}"
+                1400B: "{{get(imix, 'imix.private.1400B', '0') }}"
+                1500B: "{{get(imix, 'imix.private.1500B', '0') }}"
+                1518B: "{{get(imix, 'imix.private.1518B', '0') }}"
 
             QinQ:
                 S-VLAN:
@@ -69,14 +71,14 @@ private_1:
 
         outer_l3v4:
             proto: "tcp"
-            srcip4: "{{get(flow, 'flow.srcip4_range_1', '192.168.0.0-192.168.255.255') }}"
-            dstip4: "{{get(flow, 'flow.dstip4_range_1', '192.16.0.0-192.16.0.31') }}"
+            srcip4: "{{get(flow, 'flow.src_ip0', '192.168.0.0-192.168.255.255') }}"
+            dstip4: "{{get(flow, 'flow.dst_ip0', '192.16.0.0-192.16.0.31') }}"
             ttl: 32
             dscp: 32
 
         outer_l4:
-            srcport: "{{get(flow, 'flow.srcport_range', '0') }}"
-            dstport: "{{get(flow, 'flow.dstport_range', '0') }}"
+            srcport: "{{get(flow, 'flow.src_port0', '0') }}"
+            dstport: "{{get(flow, 'flow.dst_port0', '0') }}"
 public_1:
       ipv4:
         outer_l2:
@@ -91,25 +93,27 @@ public_1:
 
         outer_l3v4:
             proto: "tcp"
-            srcip4: "{{get(flow, 'flow.dstip4_range_1', '192.16.0.0-192.16.0.31') }}"
-            dstip4: "{{get(flow, 'flow.srcip4_range_1', '192.168.0.0-192.168.255.255') }}"
+            srcip4: "{{get(flow, 'flow.dst_ip0', '192.16.0.0-192.16.0.31') }}"
+            dstip4: "{{get(flow, 'flow.src_ip0', '192.168.0.0-192.168.255.255') }}"
             ttl: 32
             dscp: 32
 
         outer_l4:
-            srcport: "{{get(flow, 'flow.dstport_range', '0') }}"
-            dstport: "{{get(flow, 'flow.srcport_range', '0') }}"
+            srcport: "{{get(flow, 'flow.dst_port0', '0') }}"
+            dstport: "{{get(flow, 'flow.src_port0', '0') }}"
 private_2:
       ipv4:
         outer_l2:
             framesize:
-                64B: "{{ get(imix, 'imix.private.imix_small', '0') }}"
-                128B: "{{ get(imix, 'imix.private.imix_128B', '0') }}"
-                256B: "{{ get(imix, 'imix.private.imix_256B', '0') }}"
-                373b: "{{ get(imix, 'imix.private.imix_373B', '0') }}"
-                570B: "{{get(imix, 'imix.private.imix_570B', '0') }}"
-                1400B: "{{get(imix, 'imix.private.imix_1400B', '0') }}"
-                1518B: "{{get(imix, 'imix.private.imix_1500B', '0') }}"
+                64B: "{{ get(imix, 'imix.public.64B', '0') }}"
+                128B: "{{ get(imix, 'imix.public.128B', '0') }}"
+                256B: "{{ get(imix, 'imix.public.256B', '0') }}"
+                373b: "{{ get(imix, 'imix.public.373B', '0') }}"
+                512B: "{{ get(imix, 'imix.public.512B', '0') }}"
+                570B: "{{get(imix, 'imix.public.570B', '0') }}"
+                1400B: "{{get(imix, 'imix.public.1400B', '0') }}"
+                1500B: "{{get(imix, 'imix.public.1500B', '0') }}"
+                1518B: "{{get(imix, 'imix.public.1518B', '0') }}"
 
             QinQ:
                 S-VLAN:
@@ -123,14 +127,14 @@ private_2:
 
         outer_l3v4:
             proto: "tcp"
-            srcip4: "{{get(flow, 'flow.srcip4_range_2', '192.168.0.0-192.168.255.255') }}"
-            dstip4: "{{get(flow, 'flow.dstip4_range_2', '192.16.0.0-192.16.0.31') }}"
+            srcip4: "{{get(flow, 'flow.src_ip1', '192.168.0.0-192.168.255.255') }}"
+            dstip4: "{{get(flow, 'flow.dst_ip1', '192.16.0.0-192.16.0.31') }}"
             ttl: 32
             dscp: 32
 
         outer_l4:
-            srcport: "{{get(flow, 'flow.srcport_range', '0') }}"
-            dstport: "{{get(flow, 'flow.dstport_range', '0') }}"
+            srcport: "{{get(flow, 'flow.src_port1', '0') }}"
+            dstport: "{{get(flow, 'flow.dst_port1', '0') }}"
 public_2:
       ipv4:
         outer_l2:
@@ -145,11 +149,11 @@ public_2:
 
         outer_l3v4:
             proto: "tcp"
-            srcip4: "{{get(flow, 'flow.dstip4_range_2', '192.16.0.0-192.16.0.31') }}"
-            dstip4: "{{get(flow, 'flow.srcip4_range_2', '192.168.0.0-192.168.255.255') }}"
+            srcip4: "{{get(flow, 'flow.dst_ip1', '192.16.0.0-192.16.0.31') }}"
+            dstip4: "{{get(flow, 'flow.src_ip1', '192.168.0.0-192.168.255.255') }}"
             ttl: 32
             dscp: 32
 
         outer_l4:
-            srcport: "{{get(flow, 'flow.dstport_range', '0') }}"
-            dstport: "{{get(flow, 'flow.srcport_range', '0') }}"
+            srcport: "{{get(flow, 'flow.dst_port1', '0') }}"
+            dstport: "{{get(flow, 'flow.src_port1', '0') }}"
index 6b213a5..d7531fc 100644 (file)
@@ -45,16 +45,15 @@ private_1:
 
         outer_l3v4:
             proto: "udp"
-            srcip4: "{{get(flow, 'flow.srcip4_range', '1.1.1.1-1.15.255.255') }}"
-            dstip4: "{{get(flow, 'flow.dstip4_range', '90.90.1.1-90.105.255.255') }}"
+            srcip4: "{{get(flow, 'flow.src_ip0', '1.1.1.1-1.15.255.255') }}"
+            dstip4: "{{get(flow, 'flow.dst_ip0', '90.90.1.1-90.105.255.255') }}"
             count: "{{get(flow, 'flow.count', '1') }}"
             ttl: 32
             dscp: 0
         outer_l4:
-            srcport: "{{get(flow, 'flow.srcport_range', '1234') }}"
-            dstport: "{{get(flow, 'flow.dstport_range', '2001') }}"
+            srcport: "{{get(flow, 'flow.src_port0', '1234') }}"
+            dstport: "{{get(flow, 'flow.dst_port0', '2001') }}"
             count: "{{get(flow, 'flow.count', '1') }}"
-
 public_1:
       ipv4:
         outer_l2:
@@ -69,14 +68,14 @@ public_1:
 
         outer_l3v4:
             proto: "udp"
-            srcip4: "{{get(flow, 'flow.dstip4_range', '1.1.1.1-1.15.255.255') }}"
-            dstip4: "{{get(flow, 'flow.srcip4_range', '90.90.1.1-90.105.255.255') }}"
+            srcip4: "{{get(flow, 'flow.dst_ip0', '1.1.1.1-1.15.255.255') }}"
+            dstip4: "{{get(flow, 'flow.src_ip0', '90.90.1.1-90.105.255.255') }}"
             count: "{{get(flow, 'flow.count', '1') }}"
             ttl: 32
             dscp: 0
         outer_l4:
-            srcport: "{{get(flow, 'flow.srcport_range', '1234') }}"
-            dstport: "{{get(flow, 'flow.dstport_range', '2001') }}"
+            srcport: "{{get(flow, 'flow.src_port0', '1234') }}"
+            dstport: "{{get(flow, 'flow.dst_port0', '2001') }}"
             count: "{{get(flow, 'flow.count', '1') }}"
 private_2:
       ipv4:
@@ -92,14 +91,14 @@ private_2:
 
         outer_l3v4:
             proto: "udp"
-            srcip4: "{{get(flow, 'flow.srcip4_range', '1.1.1.1-1.15.255.255') }}"
-            dstip4: "{{get(flow, 'flow.dstip4_range', '90.90.1.1-90.105.255.255') }}"
+            srcip4: "{{get(flow, 'flow.src_ip1', '1.1.1.1-1.15.255.255') }}"
+            dstip4: "{{get(flow, 'flow.dst_ip1', '90.90.1.1-90.105.255.255') }}"
             count: "{{get(flow, 'flow.count', '1') }}"
             ttl: 32
             dscp: 0
         outer_l4:
-            srcport: "{{get(flow, 'flow.srcport_range', '1234') }}"
-            dstport: "{{get(flow, 'flow.dstport_range', '2001') }}"
+            srcport: "{{get(flow, 'flow.src_port1', '1234') }}"
+            dstport: "{{get(flow, 'flow.dst_port1', '2001') }}"
             count: "{{get(flow, 'flow.count', '1') }}"
 public_2:
       ipv4:
@@ -115,12 +114,12 @@ public_2:
 
         outer_l3v4:
             proto: "udp"
-            srcip4: "{{get(flow, 'flow.dstip4_range', '1.1.1.1-1.15.255.255') }}"
-            dstip4: "{{get(flow, 'flow.srcip4_range', '90.90.1.1-90.105.255.255') }}"
+            srcip4: "{{get(flow, 'flow.dst_ip1', '1.1.1.1-1.15.255.255') }}"
+            dstip4: "{{get(flow, 'flow.src_ip1', '90.90.1.1-90.105.255.255') }}"
             count: "{{get(flow, 'flow.count', '1') }}"
             ttl: 32
             dscp: 0
         outer_l4:
-            srcport: "{{get(flow, 'flow.srcport_range', '1234') }}"
-            dstport: "{{get(flow, 'flow.dstport_range', '2001') }}"
+            srcport: "{{get(flow, 'flow.dst_port1', '1234') }}"
+            dstport: "{{get(flow, 'flow.src_port1', '2001') }}"
             count: "{{get(flow, 'flow.count', '1') }}"
index 4a21a42..7468dbd 100644 (file)
@@ -68,14 +68,16 @@ private_1:
 
         outer_l3v4:
             proto: "tcp"
-            srcip4: "{{get(flow, 'flow.srcip4_range_1', '192.168.0.0-192.168.255.255') }}"
-            dstip4: "{{get(flow, 'flow.dstip4_range_1', '192.16.0.0-192.16.0.31') }}"
+            srcip4: "{{get(flow, 'flow.src_ip0', '192.168.0.0-192.168.255.255') }}"
+            dstip4: "{{get(flow, 'flow.dst_ip0', '192.16.0.0-192.16.0.31') }}"
+            count: "{{get(flow, 'flow.count', '1') }}"
             ttl: 32
             dscp: 32
 
         outer_l4:
-            srcport: "{{get(flow, 'flow.srcport_range', '0') }}"
-            dstport: "{{get(flow, 'flow.dstport_range', '0') }}"
+            srcport: "{{get(flow, 'flow.src_port0', '0') }}"
+            dstport: "{{get(flow, 'flow.dst_port0', '0') }}"
+            count: "{{get(flow, 'flow.count', '1') }}"
 public_1:
       ipv4:
         outer_l2:
@@ -90,14 +92,16 @@ public_1:
 
         outer_l3v4:
             proto: "tcp"
-            srcip4: "{{get(flow, 'flow.dstip4_range_1', '192.16.0.0-192.16.0.31') }}"
-            dstip4: "{{get(flow, 'flow.srcip4_range_1', '192.168.0.0-192.168.255.255') }}"
+            srcip4: "{{get(flow, 'flow.dst_ip0', '192.16.0.0-192.16.0.31') }}"
+            dstip4: "{{get(flow, 'flow.src_ip0', '192.168.0.0-192.168.255.255') }}"
+            count: "{{get(flow, 'flow.count', '1') }}"
             ttl: 32
             dscp: 32
 
         outer_l4:
-            srcport: "{{get(flow, 'flow.dstport_range', '0') }}"
-            dstport: "{{get(flow, 'flow.srcport_range', '0') }}"
+            srcport: "{{get(flow, 'flow.dst_port0', '0') }}"
+            dstport: "{{get(flow, 'flow.src_port0', '0') }}"
+            count: "{{get(flow, 'flow.count', '1') }}"
 private_2:
       ipv4:
         outer_l2:
@@ -122,14 +126,15 @@ private_2:
 
         outer_l3v4:
             proto: "tcp"
-            srcip4: "{{get(flow, 'flow.srcip4_range_2', '192.168.0.0-192.168.255.255') }}"
-            dstip4: "{{get(flow, 'flow.dstip4_range_2', '192.16.0.0-192.16.0.31') }}"
+            srcip4: "{{get(flow, 'flow.srcip1', '192.168.0.0-192.168.255.255') }}"
+            dstip4: "{{get(flow, 'flow.dstip1', '192.16.0.0-192.16.0.31') }}"
             ttl: 32
             dscp: 32
 
         outer_l4:
-            srcport: "{{get(flow, 'flow.srcport_range', '0') }}"
-            dstport: "{{get(flow, 'flow.dstport_range', '0') }}"
+            srcport: "{{get(flow, 'flow.src_port1', '0') }}"
+            dstport: "{{get(flow, 'flow.dst_port1', '0') }}"
+            count: "{{get(flow, 'flow.count', '1') }}"
 public_2:
       ipv4:
         outer_l2:
@@ -144,11 +149,13 @@ public_2:
 
         outer_l3v4:
             proto: "tcp"
-            srcip4: "{{get(flow, 'flow.dstip4_range_2', '192.16.0.0-192.16.0.31') }}"
-            dstip4: "{{get(flow, 'flow.srcip4_range_2', '192.168.0.0-192.168.255.255') }}"
+            srcip4: "{{get(flow, 'flow.dst_ip1', '192.16.0.0-192.16.0.31') }}"
+            dstip4: "{{get(flow, 'flow.src_ip1', '192.168.0.0-192.168.255.255') }}"
+            count: "{{get(flow, 'flow.count', '1') }}"
             ttl: 32
             dscp: 32
 
         outer_l4:
-            srcport: "{{get(flow, 'flow.dstport_range', '0') }}"
-            dstport: "{{get(flow, 'flow.srcport_range', '0') }}"
+            srcport: "{{get(flow, 'flow.dst_port1', '0') }}"
+            dstport: "{{get(flow, 'flow.src_port1', '0') }}"
+            count: "{{get(flow, 'flow.count', '1') }}"
diff --git a/tests/ci/apexlake-verify b/tests/ci/apexlake-verify
deleted file mode 100755 (executable)
index 6a69106..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/bin/bash
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-DPDK_HOST=10.118.36.130
-
-YARDSTICK=/home/user/yardstick
-TESTSUITE=$YARDSTICK/tests/opnfv/test_suites/opnfv_vTC_daily.yaml
-
-: ${INSTALLER_TYPE:='unknown'}
-: ${NODE_NAME:='unknown'}
-: ${DEPLOY_SCENARIO:='unknown'}
-
-commands="
-cd $YARDSTICK
-source /home/user/openrc.dasm
-export INSTALLER_TYPE=$INSTALLER_TYPE
-export NODE_NAME=$NODE_NAME
-export DEPLOY_SCENARIO=$DEPLOY_SCENARIO
-sudo -E yardstick task start --suite $TESTSUITE"
-
-echo "$commands" | ssh -l user $DPDK_HOST 'bash -s'
-exit $?
index 58f5b78..7f8c229 100644 (file)
@@ -18,13 +18,14 @@ description: >
 {% set provider = provider or none %}
 {% set physical_network = physical_network or 'physnet1' %}
 {% set segmentation_id = segmentation_id or none %}
+{% set packetsize = packetsize or 100 %}
 
 scenarios:
 {% for i in range(2) %}
 -
   type: Ping
   options:
-    packetsize: 100
+    packetsize: {{packetsize}}
   host: athena.demo
   target: ares.demo
 
@@ -64,4 +65,4 @@ context:
         {% if segmentation_id %}
       segmentation_id: {{segmentation_id}}
         {% endif %}
-      {% endif %}
\ No newline at end of file
+      {% endif %}
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc006.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc006.yaml
new file mode 100644 (file)
index 0000000..a35629f
--- /dev/null
@@ -0,0 +1,68 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+
+schema: "yardstick:task:0.1"
+description: >
+    Yardstick TC006 config file;
+    Measure volume storage IOPS, throughput and latency using fio with job file.
+
+{% set directory = directory or "/FIO_Test" %}
+{% set provider = provider or none %}
+{% set physical_network = physical_network or 'physnet1' %}
+{% set segmentation_id = segmentation_id or none %}
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+  type: Fio
+  options:
+    job_file: "job_file.ini"
+    directory: {{ directory }}
+
+  host: fio.yardstick-TC006
+
+  runner:
+    type: Iteration
+    iterations: 1
+    interval: 1
+
+  sla:
+    read_bw: 6000
+    read_iops: 1500
+    read_lat: 500.1
+    write_bw: 6000
+    write_iops: 1500
+    write_lat: 500.1
+    action: monitor
+
+context:
+  name: yardstick-TC006
+  image: yardstick-image
+  flavor: yardstick-flavor
+  user: ubuntu
+  servers:
+    fio:
+      floating_ip: true
+      volume:
+        name: test-volume
+        size: 200
+      volume_mountpoint: /dev/vdb
+
+  networks:
+    test:
+      cidr: '10.0.1.0/24'
+      {% if provider == "vlan" %}
+      provider: {{provider}}
+      physical_network: {{physical_network}}
+        {% if segmentation_id %}
+      segmentation_id: {{segmentation_id}}
+        {% endif %}
+      {% endif %}
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc056.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc056.yaml
new file mode 100644 (file)
index 0000000..7f1dc10
--- /dev/null
@@ -0,0 +1,81 @@
+##############################################################################
+# Copyright (c) 2017 14_ykl@tongji.edu.cn and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+
+schema: "yardstick:task:0.1"
+description: >
+    Test case for TC056 :OpenStack Controller Messaging Queue Service High
+    Availability.
+
+{% set file = file or '/etc/yardstick/pod.yaml' %}
+{% set attack_host = attack_host or 'node1' %}
+{% set monitor_time = monitor_time or 10 %}
+{% set monitor_number = monitor_number or 3 %}
+
+scenarios:
+-
+  type: ServiceHA
+  options:
+    attackers:
+    - fault_type: "kill-process"
+      process_name: "rabbitmq-server"
+      host: {{attack_host}}
+
+    monitors:
+    - monitor_type: "openstack-cmd"
+      command_name: "openstack image list"
+      monitor_time: {{monitor_time}}
+      monitor_number: {{monitor_number}}
+      sla:
+        max_outage_time: 5
+
+    - monitor_type: "openstack-cmd"
+      command_name: "openstack network list"
+      monitor_time: {{monitor_time}}
+      monitor_number: {{monitor_number}}
+      sla:
+        max_outage_time: 5
+
+    - monitor_type: "openstack-cmd"
+      command_name: "openstack volume list"
+      monitor_time: {{monitor_time}}
+      monitor_number: {{monitor_number}}
+      sla:
+        max_outage_time: 5
+
+    - monitor_type: "openstack-cmd"
+      command_name: "openstack stack list"
+      monitor_time: {{monitor_time}}
+      monitor_number: {{monitor_number}}
+      sla:
+        max_outage_time: 5
+
+    - monitor_type: "process"
+      process_name: "rabbitmq-server"
+      host: {{attack_host}}
+      monitor_time: 20
+      sla:
+        max_recover_time: 20
+
+  nodes:
+    {{attack_host}}: {{attack_host}}.LF
+
+  runner:
+    type: Duration
+    duration: 1
+  sla:
+    outage_time: 5
+    action: monitor
+
+
+context:
+  type: Node
+  name: LF
+  file: {{file}}
+
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc057.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc057.yaml
new file mode 100644 (file)
index 0000000..322e2bd
--- /dev/null
@@ -0,0 +1,179 @@
+##############################################################################
+# Copyright (c) 2017 14_ykl@tongji.edu.cn and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+
+schema: "yardstick:task:0.1"
+description: >
+    Test case for TC057 :OpenStack Controller Cluster Management Service High
+    Availability;
+    This test case is written by scenario-based HA testing framework.
+
+{% set file = file or '/etc/yardstick/pod.yaml' %}
+{% set vip_mgmt = vip_mgmt or 'vip__management' %}
+{% set vip_vrouter = vip_vrouter or 'vip__vrouter' %}
+{% set attack_host = attack_host or 'node1' %}
+{% set check_host = check_host or 'node2' %}
+{% set monitor_time = monitor_time or 10 %}
+{% set monitor_number = monitor_number or 3 %}
+
+scenarios:
+  -
+    type: "GeneralHA"
+    options:
+      attackers:
+        -
+          fault_type: "general-attacker"
+          host: {{attack_host}}
+          key: "kill-process"
+          attack_key: "kill-corosync"
+          action_parameter:
+            process_name: "corosync"
+
+      monitors:
+        -
+          monitor_type: "openstack-cmd"
+          key: "check-nova-service"
+          command_name: "openstack image list"
+          monitor_time: {{monitor_time}}
+          monitor_number: {{monitor_number}}
+          sla:
+            max_outage_time: 5
+
+        -
+          monitor_type: "openstack-cmd"
+          key: "check-neutron-service"
+          command_name: "openstack network list"
+          monitor_time: {{monitor_time}}
+          monitor_number: {{monitor_number}}
+          sla:
+            max_outage_time: 5
+
+        -
+          monitor_type: "openstack-cmd"
+          key: "check-keystone-service"
+          command_name: "openstack user list"
+          monitor_time: {{monitor_time}}
+          monitor_number: {{monitor_number}}
+          sla:
+            max_outage_time: 5
+
+        -
+          monitor_type: "openstack-cmd"
+          key: "check-heat-service"
+          command_name: "openstack stack list"
+          monitor_time: {{monitor_time}}
+          monitor_number: {{monitor_number}}
+          sla:
+            max_outage_time: 5
+
+      operations:
+        -
+          operation_type: "general-operation"
+          key: "get-mgmt-vip-host"
+          operation_key: "get-vip-host"
+          host: {{check_host}}
+          action_parameter:
+            vip_name: {{vip_mgmt}}
+          return_parameter:
+            all: "$vip_mgmt_host"
+
+        -
+          operation_type: "general-operation"
+          key: "get-router-vip-host"
+          operation_key: "get-vip-host"
+          host: {{check_host}}
+          action_parameter:
+            vip_name: {{vip_vrouter}}
+          return_parameter:
+            all: "$vip_router_host"
+
+      resultCheckers:
+        -
+          checker_type: "general-result-checker"
+          key: "check-rabbitmq-master"
+          checker_key: "pacemaker-resource-checker"
+          host: {{check_host}}
+          parameter:
+            resource_name: "p_rabbitmq-server"
+            resource_host: "$vip_mgmt_host"
+          expectedValue: "Masters"
+          condition: "in"
+
+        -
+          checker_type: "general-result-checker"
+          key: "check-conntrackd-master"
+          checker_key: "pacemaker-resource-checker"
+          host: {{check_host}}
+          parameter:
+            resource_name: "p_conntrackd"
+            resource_host: "$vip_router_host"
+          expectedValue: "Masters"
+          condition: "in"
+
+      steps:
+        -
+          actionKey: "kill-process"
+          actionType: "attacker"
+          index: 1
+
+        -
+          actionKey: "check-nova-service"
+          actionType: "monitor"
+          index: 2
+
+        -
+          actionKey: "check-neutron-service"
+          actionType: "monitor"
+          index: 3
+
+        -
+          actionKey: "check-keystone-service"
+          actionType: "monitor"
+          index: 4
+
+        -
+          actionKey: "check-heat-service"
+          actionType: "monitor"
+          index: 5
+
+        -
+          actionKey: "get-mgmt-vip-host"
+          actionType: "operation"
+          index: 6
+
+        -
+          actionKey: "check-rabbitmq-master"
+          actionType: "resultchecker"
+          index: 7
+
+        -
+          actionKey: "get-router-vip-host"
+          actionType: "operation"
+          index: 8
+
+        -
+          actionKey: "check-conntrackd-master"
+          actionType: "resultchecker"
+          index: 9
+
+
+    nodes:
+      {{attack_host}}: {{attack_host}}.LF
+      {{check_host}}: {{check_host}}.LF
+    runner:
+      type: Duration
+      duration: 1
+    sla:
+      outage_time: 5
+      action: monitor
+
+context:
+  type: Node
+  name: LF
+  file: {{file}}
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc058.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc058.yaml
new file mode 100644 (file)
index 0000000..e9feb97
--- /dev/null
@@ -0,0 +1,111 @@
+##############################################################################
+# Copyright (c) 2017 14_ykl@tongji.edu.cn and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+
+schema: "yardstick:task:0.1"
+description: >
+    Test case for TC058 :OpenStack Controller Virtual Router Service High
+    Availability;
+    This test case is written by scenario-based HA testing framework.
+
+{% set file = file or '/etc/yardstick/pod.yaml' %}
+{% set image = image or 'yardstick-image' %}
+{% set flavor = flavor or 'yardstick-flavor' %}
+{% set attack_host = attack_host or 'node1' %}
+
+scenarios:
+  -
+    type: "GeneralHA"
+    options:
+      attackers:
+        -
+          fault_type: "kill-process"
+          host: {{attack_host}}
+          key: "kill-process"
+          process_name: "neutron-l3-agent"
+
+      monitors:
+        -
+          monitor_type: "process"
+          process_name: "neutron-l3-agent"
+          host: {{attack_host}}
+          key: "monitor-recovery"
+          monitor_time: 20
+          sla:
+            max_recover_time: 20
+
+        -
+          monitor_type: "general-monitor"
+          monitor_key: "ip-status"
+          key: "server-status"
+          monitor_time: 10
+          sla:
+            max_outage_time: 5
+          parameter:
+            ip_address: "$floating_ip"
+
+      operations:
+        -
+          operation_type: "general-operation"
+          key: "get-floatingip"
+          operation_key: "get-floatingip"
+          action_parameter:
+            server_name: "tc058"
+          return_parameter:
+            all: "$floating_ip"
+
+
+      steps:
+        -
+          actionKey: "get-floatingip"
+          actionType: "operation"
+          index: 1
+        -
+          actionKey: "kill-process"
+          actionType: "attacker"
+          index: 2
+
+        -
+          actionKey: "monitor-recovery"
+          actionType: "monitor"
+          index: 3
+
+        -
+          actionKey: "server-status"
+          actionType: "monitor"
+          index: 4
+
+    nodes:
+      {{attack_host}}: {{attack_host}}.LF
+    runner:
+      type: Duration
+      duration: 1
+    sla:
+      outage_time: 5
+      action: monitor
+
+contexts:
+-
+  type: Node
+  name: LF
+  file: {{file}}
+
+-
+  name: demo
+  image: {{image}}
+  flavor: {{flavor}}
+  user: cirros
+
+  servers:
+    tc058:
+      floating_ip: true
+
+  networks:
+    test:
+      cidr: '10.0.1.0/24'
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc078.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc078.yaml
new file mode 100644 (file)
index 0000000..b89f767
--- /dev/null
@@ -0,0 +1,39 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+
+schema: "yardstick:task:0.1"
+description: >
+    Yardstick TC078 config file;
+    Measure CPU performance using SPEC CPU2006;
+
+{% set file = file or "/etc/yardstick/pod.yaml" %}
+
+scenarios:
+-
+  type: SpecCPU2006
+
+  options:
+      benchmark_subset: int
+
+  host: node1.yardstick-TC078
+
+  runner:
+    type: Iteration
+    iterations: 1
+
+context:
+  type: Node
+  name: yardstick-TC078
+  file: {{ file }}
+
+  env:
+    type: ansible
+    setup: spec_cpu2006_install.yaml
+    teardown: spec_cpu2006_uninstall.yaml
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc079.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc079.yaml
new file mode 100644 (file)
index 0000000..9c15acc
--- /dev/null
@@ -0,0 +1,54 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+
+schema: "yardstick:task:0.1"
+description: >
+    Yardstick TC079 config file;
+    measure storage and file system performance using bonnie++;
+
+{% set provider = provider or none %}
+{% set physical_network = physical_network or 'physnet1' %}
+{% set segmentation_id = segmentation_id or none %}
+
+scenarios:
+-
+  type: Bonnie++
+  options:
+    file_size: 1024
+    ram_size: 512
+    test_dir: /tmp
+    concurrency: 1
+
+  host: bonnie.yardstick-TC079
+
+  runner:
+    type: Iteration
+    iterations: 1
+
+context:
+  name: yardstick-TC079
+  image: yardstick-image
+  flavor: yardstick-flavor
+  user: ubuntu
+
+  servers:
+    bonnie:
+      floating_ip: true
+
+  networks:
+    test:
+      cidr: '10.0.1.0/24'
+      {% if provider == "vlan" %}
+      provider: {{provider}}
+      physical_network: {{physical_network}}
+        {% if segmentation_id %}
+      segmentation_id: {{segmentation_id}}
+        {% endif %}
+      {% endif %}
similarity index 94%
rename from samples/container_ping_vm.yaml
rename to tests/opnfv/test_cases/opnfv_yardstick_tc081.yaml
index 4b7b64f..d99757e 100644 (file)
@@ -9,7 +9,7 @@
 
 ---
 # Sample benchmark task config file
-# measure network latency using ping in container
+# measure network latency using ping between container and VM
 
 schema: "yardstick:task:0.1"
 
diff --git a/tests/opnfv/test_suites/opnfv_k8-nosdn-lb-noha_daily.yaml b/tests/opnfv/test_suites/opnfv_k8-nosdn-lb-noha_daily.yaml
new file mode 100644 (file)
index 0000000..08a0758
--- /dev/null
@@ -0,0 +1,18 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+# k8 nosdn lb noha daily task suite
+
+schema: "yardstick:suite:0.1"
+
+name: "k8-nosdn-lb-noha"
+test_cases_dir: "tests/opnfv/test_cases/"
+test_cases:
+-
+  file_name: opnfv_yardstick_tc080.yaml
index e69de29..a468b27 100644 (file)
@@ -0,0 +1,76 @@
+# Copyright (c) 2017 Intel Corporation\r
+#\r
+# Licensed under the Apache License, Version 2.0 (the "License");\r
+# you may not use this file except in compliance with the License.\r
+# You may obtain a copy of the License at\r
+#\r
+#      http://www.apache.org/licenses/LICENSE-2.0\r
+#\r
+# Unless required by applicable law or agreed to in writing, software\r
+# distributed under the License is distributed on an "AS IS" BASIS,\r
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+# See the License for the specific language governing permissions and\r
+# limitations under the License.\r
+\r
+from __future__ import absolute_import\r
+import mock\r
+\r
+\r
+STL_MOCKS = {\r
+    'trex_stl_lib': mock.MagicMock(),\r
+    'trex_stl_lib.base64': mock.MagicMock(),\r
+    'trex_stl_lib.binascii': mock.MagicMock(),\r
+    'trex_stl_lib.collections': mock.MagicMock(),\r
+    'trex_stl_lib.copy': mock.MagicMock(),\r
+    'trex_stl_lib.datetime': mock.MagicMock(),\r
+    'trex_stl_lib.functools': mock.MagicMock(),\r
+    'trex_stl_lib.imp': mock.MagicMock(),\r
+    'trex_stl_lib.inspect': mock.MagicMock(),\r
+    'trex_stl_lib.json': mock.MagicMock(),\r
+    'trex_stl_lib.linecache': mock.MagicMock(),\r
+    'trex_stl_lib.math': mock.MagicMock(),\r
+    'trex_stl_lib.os': mock.MagicMock(),\r
+    'trex_stl_lib.platform': mock.MagicMock(),\r
+    'trex_stl_lib.pprint': mock.MagicMock(),\r
+    'trex_stl_lib.random': mock.MagicMock(),\r
+    'trex_stl_lib.re': mock.MagicMock(),\r
+    'trex_stl_lib.scapy': mock.MagicMock(),\r
+    'trex_stl_lib.socket': mock.MagicMock(),\r
+    'trex_stl_lib.string': mock.MagicMock(),\r
+    'trex_stl_lib.struct': mock.MagicMock(),\r
+    'trex_stl_lib.sys': mock.MagicMock(),\r
+    'trex_stl_lib.threading': mock.MagicMock(),\r
+    'trex_stl_lib.time': mock.MagicMock(),\r
+    'trex_stl_lib.traceback': mock.MagicMock(),\r
+    'trex_stl_lib.trex_stl_async_client': mock.MagicMock(),\r
+    'trex_stl_lib.trex_stl_client': mock.MagicMock(),\r
+    'trex_stl_lib.trex_stl_exceptions': mock.MagicMock(),\r
+    'trex_stl_lib.trex_stl_ext': mock.MagicMock(),\r
+    'trex_stl_lib.trex_stl_jsonrpc_client': mock.MagicMock(),\r
+    'trex_stl_lib.trex_stl_packet_builder_interface': mock.MagicMock(),\r
+    'trex_stl_lib.trex_stl_packet_builder_scapy': mock.MagicMock(),\r
+    'trex_stl_lib.trex_stl_port': mock.MagicMock(),\r
+    'trex_stl_lib.trex_stl_stats': mock.MagicMock(),\r
+    'trex_stl_lib.trex_stl_streams': mock.MagicMock(),\r
+    'trex_stl_lib.trex_stl_types': mock.MagicMock(),\r
+    'trex_stl_lib.types': mock.MagicMock(),\r
+    'trex_stl_lib.utils': mock.MagicMock(),\r
+    'trex_stl_lib.utils.argparse': mock.MagicMock(),\r
+    'trex_stl_lib.utils.collections': mock.MagicMock(),\r
+    'trex_stl_lib.utils.common': mock.MagicMock(),\r
+    'trex_stl_lib.utils.json': mock.MagicMock(),\r
+    'trex_stl_lib.utils.os': mock.MagicMock(),\r
+    'trex_stl_lib.utils.parsing_opts': mock.MagicMock(),\r
+    'trex_stl_lib.utils.pwd': mock.MagicMock(),\r
+    'trex_stl_lib.utils.random': mock.MagicMock(),\r
+    'trex_stl_lib.utils.re': mock.MagicMock(),\r
+    'trex_stl_lib.utils.string': mock.MagicMock(),\r
+    'trex_stl_lib.utils.sys': mock.MagicMock(),\r
+    'trex_stl_lib.utils.text_opts': mock.MagicMock(),\r
+    'trex_stl_lib.utils.text_tables': mock.MagicMock(),\r
+    'trex_stl_lib.utils.texttable': mock.MagicMock(),\r
+    'trex_stl_lib.warnings': mock.MagicMock(),\r
+    'trex_stl_lib.yaml': mock.MagicMock(),\r
+    'trex_stl_lib.zlib': mock.MagicMock(),\r
+    'trex_stl_lib.zmq': mock.MagicMock(),\r
+}\r
diff --git a/tests/unit/benchmark/contexts/standalone/__init__.py b/tests/unit/benchmark/contexts/standalone/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
 # limitations under the License.
 
 from __future__ import absolute_import
+
 import os
-import mock
 import unittest
 
-from yardstick.benchmark.contexts import ovsdpdk
+import mock
+
+from yardstick.benchmark.contexts.standalone import ovsdpdk
 
 NIC_INPUT = {
     'interface': {},
@@ -226,11 +228,11 @@ class OvsdpdkTestCase(unittest.TestCase):
             mock_ovs = mock.Mock()
             ssh_mock.put = mock.Mock()
             ovs_obj.check_output = mock.Mock(return_value=(0, "vm1"))
-            with mock.patch("yardstick.benchmark.contexts.ovsdpdk.time"):
+            with mock.patch("yardstick.benchmark.contexts.standalone.ovsdpdk.time"):
                 self.assertIsNone(ovs_obj.setup_ovs_context(PCIS, NIC_DETAILS, DRIVER))
 
     @mock.patch(
-        'yardstick.benchmark.contexts.ovsdpdk',
+        'yardstick.benchmark.contexts.standalone.ovsdpdk',
         return_value="Domain vm1 created from /tmp/vm_ovs.xml")
     def test_is_vm_created(self, NIC_INPUT):
         with mock.patch("yardstick.ssh.SSH") as ssh:
 # limitations under the License.
 
 from __future__ import absolute_import
+
 import os
-import mock
 import unittest
 
-from yardstick.benchmark.contexts import sriov
+import mock
+
+from yardstick.benchmark.contexts.standalone import sriov
 
 NIC_INPUT = {
     'interface': {},
@@ -185,7 +187,7 @@ class SriovTestCase(unittest.TestCase):
                 nic_details['vf_pci'][i] = sriov_obj.get_vf_datas.return_value
                 vf_pci = [[], []]
                 vf_pci[i] = sriov_obj.get_vf_datas.return_value
-            with mock.patch("yardstick.benchmark.contexts.sriov.time"):
+            with mock.patch("yardstick.benchmark.contexts.standalone.sriov.time"):
                 self.assertIsNotNone(sriov_obj.configure_nics_for_sriov(DRIVER, NIC_DETAILS))
 
     def test_setup_sriov_context(self):
@@ -224,7 +226,7 @@ class SriovTestCase(unittest.TestCase):
                 mock.Mock(return_value=(0, {}, ""))
             ssh_mock.put = mock.Mock()
             sriov_obj.check_output = mock.Mock(return_value=(1, {}))
-            with mock.patch("yardstick.benchmark.contexts.sriov.time"):
+            with mock.patch("yardstick.benchmark.contexts.standalone.sriov.time"):
                 self.assertIsNone(sriov_obj.setup_sriov_context(PCIS, nic_details, DRIVER))
 
     def test_setup_sriov_context_vm_already_present(self):
@@ -263,11 +265,11 @@ class SriovTestCase(unittest.TestCase):
                 mock.Mock(return_value=(0, {}, ""))
             ssh_mock.put = mock.Mock()
             sriov_obj.check_output = mock.Mock(return_value=(0, "vm1"))
-            with mock.patch("yardstick.benchmark.contexts.sriov.time"):
+            with mock.patch("yardstick.benchmark.contexts.standalone.sriov.time"):
                 self.assertIsNone(sriov_obj.setup_sriov_context(PCIS, nic_details, DRIVER))
 
     @mock.patch(
-        'yardstick.benchmark.contexts.sriov',
+        'yardstick.benchmark.contexts.standalone.sriov',
         return_value="Domain vm1 created from /tmp/vm_sriov.xml")
     def test_is_vm_created(self, NIC_INPUT):
         with mock.patch("yardstick.ssh.SSH") as ssh:
index d13e284..b1402a1 100644 (file)
 # Unittest for yardstick.benchmark.contexts.standalone
 
 from __future__ import absolute_import
+
 import os
 import unittest
+
 import mock
 
 from yardstick.benchmark.contexts import standalone
-from yardstick.benchmark.contexts import sriov
-from yardstick.benchmark.contexts import ovsdpdk
+from yardstick.benchmark.contexts.standalone import ovsdpdk, sriov
 
 MOCKS = {
     'yardstick.benchmark.contexts': mock.MagicMock(),
-    'yardstick.benchmark.contexts.sriov': mock.MagicMock(),
-    'yardstick.benchmark.contexts.ovsdpdk': mock.MagicMock(),
+    'yardstick.benchmark.contexts.standalone.sriov': mock.MagicMock(),
+    'yardstick.benchmark.contexts.standalone.ovsdpdk': mock.MagicMock(),
     'yardstick.benchmark.contexts.standalone': mock.MagicMock(),
 }
 
 
-@mock.patch('yardstick.benchmark.contexts.ovsdpdk.time')
+@mock.patch('yardstick.benchmark.contexts.standalone.ovsdpdk.time')
 @mock.patch('yardstick.benchmark.contexts.standalone.time')
-@mock.patch('yardstick.benchmark.contexts.sriov.time')
+@mock.patch('yardstick.benchmark.contexts.standalone.sriov.time')
 class StandaloneContextTestCase(unittest.TestCase):
     NODES_SAMPLE = "nodes_sample_new.yaml"
     NODES_SAMPLE_SRIOV = "nodes_sample_new_sriov.yaml"
@@ -564,7 +565,7 @@ class StandaloneContextTestCase(unittest.TestCase):
         self.assertIsNone(self.test_context.undeploy())
 
     def test_get_nfvi_obj_sriov(self, mock_sriov_time, mock_standlalone_time, mock_ovsdpdk_time):
-        with mock.patch('yardstick.benchmark.contexts.sriov'):
+        with mock.patch('yardstick.benchmark.contexts.standalone.sriov'):
             attrs = {
                 'name': 'sriov',
                 'file': self._get_file_abspath(self.NODES_SAMPLE)
@@ -589,7 +590,7 @@ class StandaloneContextTestCase(unittest.TestCase):
             self.assertIsNotNone(self.test_context.get_nfvi_obj())
 
     def test_get_nfvi_obj_ovs(self, mock_sriov_time, mock_standlalone_time, mock_ovsdpdk_time):
-        with mock.patch('yardstick.benchmark.contexts.ovsdpdk'):
+        with mock.patch('yardstick.benchmark.contexts.standalone.ovsdpdk'):
             attrs = {
                 'name': 'ovs',
                 'file': self._get_file_abspath(self.NODES_SAMPLE_OVSDPDK)
index 9cfe6e1..8fab5a7 100644 (file)
 #
 
 from __future__ import absolute_import
-import unittest
-from contextlib import contextmanager
 
+import unittest
 import mock
 
-STL_MOCKS = {
-    'stl': mock.MagicMock(),
-    'stl.trex_stl_lib': mock.MagicMock(),
-    'stl.trex_stl_lib.base64': mock.MagicMock(),
-    'stl.trex_stl_lib.binascii': mock.MagicMock(),
-    'stl.trex_stl_lib.collections': mock.MagicMock(),
-    'stl.trex_stl_lib.copy': mock.MagicMock(),
-    'stl.trex_stl_lib.datetime': mock.MagicMock(),
-    'stl.trex_stl_lib.functools': mock.MagicMock(),
-    'stl.trex_stl_lib.imp': mock.MagicMock(),
-    'stl.trex_stl_lib.inspect': mock.MagicMock(),
-    'stl.trex_stl_lib.json': mock.MagicMock(),
-    'stl.trex_stl_lib.linecache': mock.MagicMock(),
-    'stl.trex_stl_lib.math': mock.MagicMock(),
-    'stl.trex_stl_lib.os': mock.MagicMock(),
-    'stl.trex_stl_lib.platform': mock.MagicMock(),
-    'stl.trex_stl_lib.pprint': mock.MagicMock(),
-    'stl.trex_stl_lib.random': mock.MagicMock(),
-    'stl.trex_stl_lib.re': mock.MagicMock(),
-    'stl.trex_stl_lib.scapy': mock.MagicMock(),
-    'stl.trex_stl_lib.socket': mock.MagicMock(),
-    'stl.trex_stl_lib.string': mock.MagicMock(),
-    'stl.trex_stl_lib.struct': mock.MagicMock(),
-    'stl.trex_stl_lib.sys': mock.MagicMock(),
-    'stl.trex_stl_lib.threading': mock.MagicMock(),
-    'stl.trex_stl_lib.time': mock.MagicMock(),
-    'stl.trex_stl_lib.traceback': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_async_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_exceptions': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_ext': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_jsonrpc_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_packet_builder_interface': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_packet_builder_scapy': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_port': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_stats': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_streams': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_types': mock.MagicMock(),
-    'stl.trex_stl_lib.types': mock.MagicMock(),
-    'stl.trex_stl_lib.utils': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.argparse': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.collections': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.common': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.json': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.os': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.parsing_opts': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.pwd': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.random': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.re': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.string': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.sys': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.text_opts': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.text_tables': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.texttable': mock.MagicMock(),
-    'stl.trex_stl_lib.warnings': mock.MagicMock(),
-    'stl.trex_stl_lib.yaml': mock.MagicMock(),
-    'stl.trex_stl_lib.zlib': mock.MagicMock(),
-    'stl.trex_stl_lib.zmq': mock.MagicMock(),
-}
+from tests.unit import STL_MOCKS
 
 STLClient = mock.MagicMock()
 stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
index de2170b..244a5e7 100644 (file)
@@ -67,4 +67,5 @@ class ScenarioGeneralTestCase(unittest.TestCase):
         ins.director = mock_obj
         ins.director.data = {}
         ins.run({})
+        ins.pass_flag = True
         ins.teardown()
diff --git a/tests/unit/benchmark/scenarios/lib/test_attach_volume.py b/tests/unit/benchmark/scenarios/lib/test_attach_volume.py
new file mode 100644 (file)
index 0000000..e699240
--- /dev/null
@@ -0,0 +1,33 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import unittest
+import mock
+
+from yardstick.benchmark.scenarios.lib.attach_volume import AttachVolume
+
+
+class AttachVolumeTestCase(unittest.TestCase):
+
+    @mock.patch('yardstick.common.openstack_utils.attach_server_volume')
+    def test_attach_volume(self, mock_attach_server_volume):
+        options = {
+                'volume_id': '123-456-000',
+                'server_id': '000-123-456'
+        }
+        args = {"options": options}
+        obj = AttachVolume(args, {})
+        obj.run({})
+        self.assertTrue(mock_attach_server_volume.called)
+
+def main():
+    unittest.main()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/tests/unit/benchmark/scenarios/lib/test_create_floating_ip.py b/tests/unit/benchmark/scenarios/lib/test_create_floating_ip.py
new file mode 100644 (file)
index 0000000..72dbcd7
--- /dev/null
@@ -0,0 +1,34 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import unittest
+import mock
+
+from yardstick.benchmark.scenarios.lib.create_floating_ip import CreateFloatingIp
+
+
+class CreateFloatingIpTestCase(unittest.TestCase):
+
+    @mock.patch('yardstick.common.openstack_utils.create_floating_ip')
+    @mock.patch('yardstick.common.openstack_utils.get_network_id')
+    @mock.patch('yardstick.common.openstack_utils.get_neutron_client')
+    def test_create_floating_ip(self, mock_create_floating_ip, mock_get_network_id, mock_get_neutron_client):
+        options = {}
+        args = {"options": options}
+        obj = CreateFloatingIp(args, {})
+        obj.run({})
+        self.assertTrue(mock_create_floating_ip.called)
+        self.assertTrue(mock_get_network_id.called)
+        self.assertTrue(mock_get_neutron_client.called)
+
+def main():
+    unittest.main()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/tests/unit/benchmark/scenarios/lib/test_create_keypair.py b/tests/unit/benchmark/scenarios/lib/test_create_keypair.py
new file mode 100644 (file)
index 0000000..99e6b9a
--- /dev/null
@@ -0,0 +1,35 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import unittest
+import mock
+import paramiko
+
+from yardstick.benchmark.scenarios.lib.create_keypair import CreateKeypair
+
+
+class CreateKeypairTestCase(unittest.TestCase):
+
+    @mock.patch('yardstick.common.openstack_utils.create_keypair')
+    def test_create_keypair(self, mock_create_keypair):
+        options = {
+            'key_name': 'yardstick_key',
+            'key_path': '/tmp/yardstick_key'
+        }
+        args = {"options": options}
+        obj = CreateKeypair(args, {})
+        obj.run({})
+        self.assertTrue(mock_create_keypair.called)
+
+
+def main():
+    unittest.main()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/tests/unit/benchmark/scenarios/lib/test_create_network.py b/tests/unit/benchmark/scenarios/lib/test_create_network.py
new file mode 100644 (file)
index 0000000..8e7d8b5
--- /dev/null
@@ -0,0 +1,39 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import unittest
+import mock
+import paramiko
+
+from yardstick.benchmark.scenarios.lib.create_network import CreateNetwork
+
+
+class CreateNetworkTestCase(unittest.TestCase):
+
+    @mock.patch('yardstick.common.openstack_utils.get_neutron_client')
+    @mock.patch('yardstick.common.openstack_utils.create_neutron_net')
+    def test_create_network(self, mock_get_neutron_client, mock_create_neutron_net):
+        options = {
+          'openstack_paras': {
+             'name': 'yardstick_net',
+             'admin_state_up': 'True'
+          }
+        }
+        args = {"options": options}
+        obj = CreateNetwork(args, {})
+        obj.run({})
+        self.assertTrue(mock_get_neutron_client.called)
+        self.assertTrue(mock_create_neutron_net.called)
+
+
+def main():
+    unittest.main()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/tests/unit/benchmark/scenarios/lib/test_create_port.py b/tests/unit/benchmark/scenarios/lib/test_create_port.py
new file mode 100644 (file)
index 0000000..3b2aa22
--- /dev/null
@@ -0,0 +1,36 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import unittest
+import mock
+import paramiko
+
+from yardstick.benchmark.scenarios.lib.create_port import CreatePort
+
+
+class CreatePortTestCase(unittest.TestCase):
+
+    @mock.patch('yardstick.common.openstack_utils.get_neutron_client')
+    def test_create_port(self, mock_get_neutron_client):
+        options = {
+          'openstack_paras': {
+             'name': 'yardstick_port'
+          }
+        }
+        args = {"options": options}
+        obj = CreatePort(args, {})
+        obj.run({})
+        self.assertTrue(mock_get_neutron_client.called)
+
+
+def main():
+    unittest.main()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/tests/unit/benchmark/scenarios/lib/test_create_router.py b/tests/unit/benchmark/scenarios/lib/test_create_router.py
new file mode 100644 (file)
index 0000000..b956a36
--- /dev/null
@@ -0,0 +1,39 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import unittest
+import mock
+import paramiko
+
+from yardstick.benchmark.scenarios.lib.create_router import CreateRouter
+
+
+class CreateRouterTestCase(unittest.TestCase):
+
+    @mock.patch('yardstick.common.openstack_utils.get_neutron_client')
+    @mock.patch('yardstick.common.openstack_utils.create_neutron_router')
+    def test_create_router(self, mock_get_neutron_client, mock_create_neutron_router):
+        options = {
+          'openstack_paras': {
+             'admin_state_up': 'True',
+             'name': 'yardstick_router'
+          }
+        }
+        args = {"options": options}
+        obj = CreateRouter(args, {})
+        obj.run({})
+        self.assertTrue(mock_get_neutron_client.called)
+        self.assertTrue(mock_create_neutron_router.called)
+
+
+def main():
+    unittest.main()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/tests/unit/benchmark/scenarios/lib/test_create_sec_group.py b/tests/unit/benchmark/scenarios/lib/test_create_sec_group.py
new file mode 100644 (file)
index 0000000..b962f7f
--- /dev/null
@@ -0,0 +1,39 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import unittest
+import mock
+import paramiko
+
+from yardstick.benchmark.scenarios.lib.create_sec_group import CreateSecgroup
+
+
+class CreateSecGroupTestCase(unittest.TestCase):
+
+    @mock.patch('yardstick.common.openstack_utils.get_neutron_client')
+    @mock.patch('yardstick.common.openstack_utils.create_security_group_full')
+    def test_create_sec_group(self, mock_get_neutron_client, mock_create_security_group_full):
+        options = {
+          'openstack_paras': {
+             'sg_name': 'yardstick_sec_group',
+             'description': 'security group for yardstick manual VM'
+          }
+        }
+        args = {"options": options}
+        obj = CreateSecgroup(args, {})
+        obj.run({})
+        self.assertTrue(mock_get_neutron_client.called)
+        self.assertTrue(mock_create_security_group_full.called)
+
+
+def main():
+    unittest.main()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/tests/unit/benchmark/scenarios/lib/test_create_subnet.py b/tests/unit/benchmark/scenarios/lib/test_create_subnet.py
new file mode 100644 (file)
index 0000000..0154755
--- /dev/null
@@ -0,0 +1,41 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import unittest
+import mock
+import paramiko
+
+from yardstick.benchmark.scenarios.lib.create_subnet import CreateSubnet
+
+
+class CreateSubnetTestCase(unittest.TestCase):
+
+    @mock.patch('yardstick.common.openstack_utils.get_neutron_client')
+    @mock.patch('yardstick.common.openstack_utils.create_neutron_subnet')
+    def test_create_subnet(self, mock_get_neutron_client, mock_create_neutron_subnet):
+        options = {
+          'openstack_paras': {
+             'network_id': '123-123-123',
+             'name': 'yardstick_subnet',
+             'cidr': '10.10.10.0/24',
+             'ip_version': '4'
+          }
+        }
+        args = {"options": options}
+        obj = CreateSubnet(args, {})
+        obj.run({})
+        self.assertTrue(mock_get_neutron_client.called)
+        self.assertTrue(mock_create_neutron_subnet.called)
+
+
+def main():
+    unittest.main()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/tests/unit/benchmark/scenarios/lib/test_delete_floating_ip.py b/tests/unit/benchmark/scenarios/lib/test_delete_floating_ip.py
new file mode 100644 (file)
index 0000000..7592c80
--- /dev/null
@@ -0,0 +1,36 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import unittest
+import mock
+import paramiko
+
+from yardstick.benchmark.scenarios.lib.delete_floating_ip import DeleteFloatingIp
+
+
+class DeleteFloatingIpTestCase(unittest.TestCase):
+
+    @mock.patch('yardstick.common.openstack_utils.get_nova_client')
+    @mock.patch('yardstick.common.openstack_utils.delete_floating_ip')
+    def test_delete_floating_ip(self, mock_get_nova_client, mock_delete_floating_ip):
+        options = {
+            'floating_ip_id': '123-123-123'
+        }
+        args = {"options": options}
+        obj = DeleteFloatingIp(args, {})
+        obj.run({})
+        self.assertTrue(mock_get_nova_client.called)
+        self.assertTrue(mock_delete_floating_ip.called)
+
+
+def main():
+    unittest.main()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/tests/unit/benchmark/scenarios/lib/test_delete_keypair.py b/tests/unit/benchmark/scenarios/lib/test_delete_keypair.py
new file mode 100644 (file)
index 0000000..9663fe9
--- /dev/null
@@ -0,0 +1,36 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import unittest
+import mock
+import paramiko
+
+from yardstick.benchmark.scenarios.lib.delete_keypair import DeleteKeypair
+
+
+class DeleteKeypairTestCase(unittest.TestCase):
+
+    @mock.patch('yardstick.common.openstack_utils.get_nova_client')
+    @mock.patch('yardstick.common.openstack_utils.delete_keypair')
+    def test_detach_volume(self, mock_get_nova_client, mock_delete_keypair):
+        options = {
+            'key_name': 'yardstick_key'
+        }
+        args = {"options": options}
+        obj = DeleteKeypair(args, {})
+        obj.run({})
+        self.assertTrue(mock_get_nova_client.called)
+        self.assertTrue(mock_delete_keypair.called)
+
+
+def main():
+    unittest.main()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/tests/unit/benchmark/scenarios/lib/test_delete_volume.py b/tests/unit/benchmark/scenarios/lib/test_delete_volume.py
new file mode 100644 (file)
index 0000000..a11d012
--- /dev/null
@@ -0,0 +1,36 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import unittest
+import mock
+import paramiko
+
+from yardstick.benchmark.scenarios.lib.delete_volume import DeleteVolume
+
+
+class DeleteVolumeTestCase(unittest.TestCase):
+
+    @mock.patch('yardstick.common.openstack_utils.get_cinder_client')
+    @mock.patch('yardstick.common.openstack_utils.delete_volume')
+    def test_delete_volume(self, mock_get_cinder_client, mock_delete_volume):
+        options = {
+            'volume_id': '123-123-123'
+        }
+        args = {"options": options}
+        obj = DeleteVolume(args, {})
+        obj.run({})
+        self.assertTrue(mock_get_cinder_client.called)
+        self.assertTrue(mock_delete_volume.called)
+
+
+def main():
+    unittest.main()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/tests/unit/benchmark/scenarios/lib/test_detach_volume.py b/tests/unit/benchmark/scenarios/lib/test_detach_volume.py
new file mode 100644 (file)
index 0000000..0cffcba
--- /dev/null
@@ -0,0 +1,35 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import unittest
+import mock
+import paramiko
+
+from yardstick.benchmark.scenarios.lib.detach_volume import DetachVolume
+
+
+class DetachVolumeTestCase(unittest.TestCase):
+
+    @mock.patch('yardstick.common.openstack_utils.detach_volume')
+    def test_detach_volume(self, mock_detach_volume):
+        options = {
+            'server_id': '321-321-321',
+            'volume_id': '123-123-123'
+        }
+        args = {"options": options}
+        obj = DetachVolume(args, {})
+        obj.run({})
+        self.assertTrue(mock_detach_volume.called)
+
+
+def main():
+    unittest.main()
+
+
+if __name__ == '__main__':
+    main()
index 32ba255..0ca31d4 100644 (file)
@@ -132,7 +132,7 @@ class PktgenTestCase(unittest.TestCase):
         p._iptables_get_result = mock_iptables_result
 
         sample_output = '{"packets_per_second": 9753, "errors": 0, \
-            "packets_sent": 149776, "packetsize": 60, "flows": 110}'
+            "packets_sent": 149776, "packetsize": 60, "flows": 110, "ppm": 3179}'
         mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
 
         p.run(result)
@@ -159,7 +159,7 @@ class PktgenTestCase(unittest.TestCase):
         p._iptables_get_result = mock_iptables_result
 
         sample_output = '{"packets_per_second": 9753, "errors": 0, \
-            "packets_sent": 149776, "packetsize": 60, "flows": 110}'
+            "packets_sent": 149776, "packetsize": 60, "flows": 110, "ppm": 3179}'
         mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
 
         p.run(result)
@@ -648,7 +648,7 @@ class PktgenTestCase(unittest.TestCase):
         p._iptables_get_result = mock_iptables_result
 
         sample_output = '{"packets_per_second": 9753, "errors": 0, \
-            "packets_sent": 149300, "flows": 110}'
+            "packets_sent": 149300, "flows": 110, "ppm": 0}'
         mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
 
         p.run(result)
@@ -693,7 +693,7 @@ class PktgenTestCase(unittest.TestCase):
         p._iptables_get_result = mock_iptables_result
 
         sample_output = '{"packets_per_second": 9753, "errors": 0, \
-            "packets_sent": 149300, "flows": 110}'
+            "packets_sent": 149300, "flows": 110, "ppm": 0}'
         mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
 
         p.run(result)
@@ -730,7 +730,7 @@ class PktgenTestCase(unittest.TestCase):
         p._iptables_get_result = mock_iptables_result
 
         sample_output = '{"packets_per_second": 9753, "errors": 0, \
-            "packets_sent": 149300, "flows": 110}'
+            "packets_sent": 149300, "flows": 110, "ppm": 0}'
         mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
 
         p.run(result)
index 84b42c8..df5047a 100644 (file)
@@ -24,72 +24,14 @@ import errno
 import unittest
 import mock
 
+from tests.unit import STL_MOCKS
 from yardstick.benchmark.scenarios.networking.vnf_generic import \
     SshManager, NetworkServiceTestCase, IncorrectConfig, \
-    IncorrectSetup, open_relative_file
+    open_relative_file
 from yardstick.network_services.collector.subscriber import Collector
 from yardstick.network_services.vnf_generic.vnf.base import \
     GenericTrafficGen, GenericVNF
 
-STL_MOCKS = {
-    'stl': mock.MagicMock(),
-    'stl.trex_stl_lib': mock.MagicMock(),
-    'stl.trex_stl_lib.base64': mock.MagicMock(),
-    'stl.trex_stl_lib.binascii': mock.MagicMock(),
-    'stl.trex_stl_lib.collections': mock.MagicMock(),
-    'stl.trex_stl_lib.copy': mock.MagicMock(),
-    'stl.trex_stl_lib.datetime': mock.MagicMock(),
-    'stl.trex_stl_lib.functools': mock.MagicMock(),
-    'stl.trex_stl_lib.imp': mock.MagicMock(),
-    'stl.trex_stl_lib.inspect': mock.MagicMock(),
-    'stl.trex_stl_lib.json': mock.MagicMock(),
-    'stl.trex_stl_lib.linecache': mock.MagicMock(),
-    'stl.trex_stl_lib.math': mock.MagicMock(),
-    'stl.trex_stl_lib.os': mock.MagicMock(),
-    'stl.trex_stl_lib.platform': mock.MagicMock(),
-    'stl.trex_stl_lib.pprint': mock.MagicMock(),
-    'stl.trex_stl_lib.random': mock.MagicMock(),
-    'stl.trex_stl_lib.re': mock.MagicMock(),
-    'stl.trex_stl_lib.scapy': mock.MagicMock(),
-    'stl.trex_stl_lib.socket': mock.MagicMock(),
-    'stl.trex_stl_lib.string': mock.MagicMock(),
-    'stl.trex_stl_lib.struct': mock.MagicMock(),
-    'stl.trex_stl_lib.sys': mock.MagicMock(),
-    'stl.trex_stl_lib.threading': mock.MagicMock(),
-    'stl.trex_stl_lib.time': mock.MagicMock(),
-    'stl.trex_stl_lib.traceback': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_async_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_exceptions': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_ext': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_jsonrpc_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_packet_builder_interface': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_packet_builder_scapy': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_port': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_stats': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_streams': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_types': mock.MagicMock(),
-    'stl.trex_stl_lib.types': mock.MagicMock(),
-    'stl.trex_stl_lib.utils': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.argparse': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.collections': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.common': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.json': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.os': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.parsing_opts': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.pwd': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.random': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.re': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.string': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.sys': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.text_opts': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.text_tables': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.texttable': mock.MagicMock(),
-    'stl.trex_stl_lib.warnings': mock.MagicMock(),
-    'stl.trex_stl_lib.yaml': mock.MagicMock(),
-    'stl.trex_stl_lib.zlib': mock.MagicMock(),
-    'stl.trex_stl_lib.zmq': mock.MagicMock(),
-}
 
 COMPLETE_TREX_VNFD = {
     'vnfd:vnfd-catalog': {
@@ -375,6 +317,9 @@ class TestNetworkServiceTestCase(unittest.TestCase):
                     'allowed_drop_rate': '0.8 - 1',
                 },
             },
+            'options': {
+                'framesize': {'64B': 100}
+            },
             'runner': {
                 'object': 'NetworkServiceTestCase',
                 'interval': 35,
@@ -414,17 +359,40 @@ class TestNetworkServiceTestCase(unittest.TestCase):
     def test___init__(self):
         assert self.topology
 
+    def test__get_ip_flow_range(self):
+        self.scenario_cfg["traffic_options"]["flow"] = \
+            self._get_file_abspath("ipv4_1flow_Packets_vpe.yaml")
+        result = '152.16.100.1-152.16.100.254'
+        self.assertEqual(result, self.s._get_ip_flow_range({"tg__1": 'xe0'}))
+
     def test___get_traffic_flow(self):
         self.scenario_cfg["traffic_options"]["flow"] = \
             self._get_file_abspath("ipv4_1flow_Packets_vpe.yaml")
-        result = {'flow': {'dstip4_range': '152.40.0.20',
-                           'srcip4_range': '152.16.0.20', 'count': 1}}
+        self.scenario_cfg["options"] = {}
+        self.scenario_cfg['options'] = {
+            'flow': {
+              'src_ip': [
+                {
+                  'tg__1': 'xe0',
+                },
+              ],
+              'dst_ip': [
+                {
+                  'tg__1': 'xe1',
+                },
+              ],
+              'public_ip': ['1.1.1.1'],
+            },
+        }
+        result = {'flow': {'dst_ip0': '152.16.40.1-152.16.40.254',
+                           'src_ip0': '152.16.100.1-152.16.100.254'}}
+
         self.assertEqual(result, self.s._get_traffic_flow())
 
     def test___get_traffic_flow_error(self):
         self.scenario_cfg["traffic_options"]["flow"] = \
             "ipv4_1flow_Packets_vpe.yaml1"
-        self.assertEqual({}, self.s._get_traffic_flow())
+        self.assertEqual({'flow': {}}, self.s._get_traffic_flow())
 
     def test_get_vnf_imp(self):
         vnfd = COMPLETE_TREX_VNFD['vnfd:vnfd-catalog']['vnfd'][0]['class-name']
@@ -471,7 +439,7 @@ class TestNetworkServiceTestCase(unittest.TestCase):
                 mock.Mock(return_value=(1, SYS_CLASS_NET + IP_ADDR_SHOW, ""))
             ssh.from_node.return_value = ssh_mock
 
-            with self.assertRaises(IncorrectSetup):
+            with self.assertRaises(IncorrectConfig):
                 self.s.map_topology_to_infrastructure()
 
     def test_map_topology_to_infrastructure_config_invalid(self):
@@ -586,7 +554,7 @@ class TestNetworkServiceTestCase(unittest.TestCase):
 
     def test___get_traffic_imix_exception(self):
         with mock.patch.dict(self.scenario_cfg["traffic_options"], {'imix': ''}):
-            self.assertEqual({}, self.s._get_traffic_imix())
+            self.assertEqual({'imix': {'64B': 100}}, self.s._get_traffic_imix())
 
     def test__fill_traffic_profile(self):
         with mock.patch.dict("sys.modules", STL_MOCKS):
@@ -694,11 +662,11 @@ class TestNetworkServiceTestCase(unittest.TestCase):
     def test_probe_missing_values(self):
         netdevs = self.SAMPLE_NETDEVS.copy()
         network = {'local_mac': '0a:de:ad:be:ef:f5'}
-        NetworkServiceTestCase._probe_missing_values(netdevs, network, set())
+        NetworkServiceTestCase._probe_missing_values(netdevs, network)
         assert network['vpci'] == '0000:0b:00.0'
 
         network = {'local_mac': '0a:de:ad:be:ef:f4'}
-        NetworkServiceTestCase._probe_missing_values(netdevs, network, set())
+        NetworkServiceTestCase._probe_missing_values(netdevs, network)
         assert network['vpci'] == '0000:00:19.0'
 
     def test_open_relative_path(self):
index 3b9f99b..de5bae2 100644 (file)
@@ -28,8 +28,6 @@ from yardstick.benchmark.scenarios.networking import vsperf_dpdk
 
 @mock.patch('yardstick.benchmark.scenarios.networking.vsperf_dpdk.subprocess')
 @mock.patch('yardstick.benchmark.scenarios.networking.vsperf_dpdk.ssh')
-@mock.patch("yardstick.benchmark.scenarios.networking.vsperf_dpdk.open",
-            mock.mock_open())
 class VsperfDPDKTestCase(unittest.TestCase):
 
     def setUp(self):
index 55e4438..17594b9 100644 (file)
@@ -55,6 +55,20 @@ class FioTestCase(unittest.TestCase):
         self.assertIsNotNone(p.client)
         self.assertEqual(p.setup_done, True)
 
+    def test_fio_job_file_successful_setup(self, mock_ssh):
+
+        options = {
+            'job_file': 'job_file.ini',
+            'directory': '/FIO_Test'
+        }
+        args = {'options': options}
+        p = fio.Fio(args, self.ctx)
+        p.setup()
+
+        mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
+        self.assertIsNotNone(p.client)
+        self.assertEqual(p.setup_done, True)
+
     def test_fio_successful_no_sla(self, mock_ssh):
 
         options = {
index 6632262..923ec4a 100644 (file)
@@ -20,6 +20,7 @@ from itertools import product, chain
 import mock
 from six.moves import configparser
 
+import yardstick
 from yardstick.common import utils
 from yardstick.common import constants
 
@@ -45,47 +46,25 @@ class IterSubclassesTestCase(unittest.TestCase):
         self.assertEqual([B, C, D], list(utils.itersubclasses(A)))
 
 
-class TryAppendModuleTestCase(unittest.TestCase):
-
-    @mock.patch('yardstick.common.utils.importutils')
-    def test_try_append_module_not_in_modules(self, mock_importutils):
-
-        modules = {}
-        name = 'foo'
-        utils.try_append_module(name, modules)
-        mock_importutils.import_module.assert_called_with(name)
-
-    @mock.patch('yardstick.common.utils.importutils')
-    def test_try_append_module_already_in_modules(self, mock_importutils):
-
-        modules = {'foo'}
-        name = 'foo'
-        utils.try_append_module(name, modules)
-        self.assertFalse(mock_importutils.import_module.called)
-
-
 class ImportModulesFromPackageTestCase(unittest.TestCase):
 
     @mock.patch('yardstick.common.utils.os.walk')
-    @mock.patch('yardstick.common.utils.try_append_module')
-    def test_import_modules_from_package_no_mod(self, mock_append, mock_walk):
-
-        sep = os.sep
+    def test_import_modules_from_package_no_mod(self, mock_walk):
+        yardstick_root = os.path.dirname(os.path.dirname(yardstick.__file__))
         mock_walk.return_value = ([
-            ('..' + sep + 'foo', ['bar'], ['__init__.py']),
-            ('..' + sep + 'foo' + sep + 'bar', [], ['baz.txt', 'qux.rst'])
+            (os.path.join(yardstick_root, 'foo'), ['bar'], ['__init__.py']),
+            (os.path.join(yardstick_root, 'foo', 'bar'), [], ['baz.txt', 'qux.rst'])
         ])
 
         utils.import_modules_from_package('foo.bar')
-        self.assertFalse(mock_append.called)
 
     @mock.patch('yardstick.common.utils.os.walk')
     @mock.patch('yardstick.common.utils.importutils')
     def test_import_modules_from_package(self, mock_importutils, mock_walk):
 
-        sep = os.sep
+        yardstick_root = os.path.dirname(os.path.dirname(yardstick.__file__))
         mock_walk.return_value = ([
-            ('foo' + sep + '..' + sep + 'bar', [], ['baz.py'])
+            (os.path.join(yardstick_root, 'foo', os.pardir, 'bar'), [], ['baz.py'])
         ])
 
         utils.import_modules_from_package('foo.bar')
index b896685..608f317 100644 (file)
@@ -198,6 +198,8 @@ class TestMultiPortConfig(unittest.TestCase):
         opnfv_vnf.get_ports_gateway6 = mock.Mock(return_value=u'1.1.1.1')
         opnfv_vnf.get_netmask_gateway6 = mock.Mock(return_value=u'255.255.255.0')
         opnfv_vnf.txrx_pipeline = ''
+        opnfv_vnf.vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+        opnfv_vnf.interfaces = opnfv_vnf.vnfd['vdu'][0]['external-interface']
         opnfv_vnf.rules = ''
         self.assertIsNotNone(opnfv_vnf.generate_rule_config())
         opnfv_vnf.rules = 'new'
index cb26fd0..072f06e 100644 (file)
@@ -108,13 +108,13 @@ class TestResourceProfile(unittest.TestCase):
     def test_get_cpu_data(self):
         reskey = ["", "cpufreq", "cpufreq-0"]
         value = "metric:10"
-        val = self.resource_profile.get_cpu_data(reskey, value)
+        val = self.resource_profile.get_cpu_data(reskey[1], reskey[2], value)
         self.assertIsNotNone(val)
 
     def test_get_cpu_data_error(self):
         reskey = ["", "", ""]
         value = "metric:10"
-        val = self.resource_profile.get_cpu_data(reskey, value)
+        val = self.resource_profile.get_cpu_data(reskey[0], reskey[1], value)
         self.assertEqual(val, ('error', 'Invalid', '', ''))
 
     def test__start_collectd(self):
index 8b44719..8484317 100644 (file)
 #
 
 from __future__ import absolute_import
+
 import unittest
 import mock
 
-STL_MOCKS = {
-    'stl': mock.MagicMock(),
-    'stl.trex_stl_lib': mock.MagicMock(),
-    'stl.trex_stl_lib.base64': mock.MagicMock(),
-    'stl.trex_stl_lib.binascii': mock.MagicMock(),
-    'stl.trex_stl_lib.collections': mock.MagicMock(),
-    'stl.trex_stl_lib.copy': mock.MagicMock(),
-    'stl.trex_stl_lib.datetime': mock.MagicMock(),
-    'stl.trex_stl_lib.functools': mock.MagicMock(),
-    'stl.trex_stl_lib.imp': mock.MagicMock(),
-    'stl.trex_stl_lib.inspect': mock.MagicMock(),
-    'stl.trex_stl_lib.json': mock.MagicMock(),
-    'stl.trex_stl_lib.linecache': mock.MagicMock(),
-    'stl.trex_stl_lib.math': mock.MagicMock(),
-    'stl.trex_stl_lib.os': mock.MagicMock(),
-    'stl.trex_stl_lib.platform': mock.MagicMock(),
-    'stl.trex_stl_lib.pprint': mock.MagicMock(),
-    'stl.trex_stl_lib.random': mock.MagicMock(),
-    'stl.trex_stl_lib.re': mock.MagicMock(),
-    'stl.trex_stl_lib.scapy': mock.MagicMock(),
-    'stl.trex_stl_lib.socket': mock.MagicMock(),
-    'stl.trex_stl_lib.string': mock.MagicMock(),
-    'stl.trex_stl_lib.struct': mock.MagicMock(),
-    'stl.trex_stl_lib.sys': mock.MagicMock(),
-    'stl.trex_stl_lib.threading': mock.MagicMock(),
-    'stl.trex_stl_lib.time': mock.MagicMock(),
-    'stl.trex_stl_lib.traceback': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_async_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_exceptions': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_ext': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_jsonrpc_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_packet_builder_interface': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_packet_builder_scapy': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_port': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_stats': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_streams': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_types': mock.MagicMock(),
-    'stl.trex_stl_lib.types': mock.MagicMock(),
-    'stl.trex_stl_lib.utils': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.argparse': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.collections': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.common': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.json': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.os': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.parsing_opts': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.pwd': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.random': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.re': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.string': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.sys': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.text_opts': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.text_tables': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.texttable': mock.MagicMock(),
-    'stl.trex_stl_lib.warnings': mock.MagicMock(),
-    'stl.trex_stl_lib.yaml': mock.MagicMock(),
-    'stl.trex_stl_lib.zlib': mock.MagicMock(),
-    'stl.trex_stl_lib.zmq': mock.MagicMock(),
-}
+from tests.unit import STL_MOCKS
 
 STLClient = mock.MagicMock()
 stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
index 2e1b6f4..5110439 100644 (file)
 from __future__ import absolute_import
 import unittest
 import mock
-import runpy
 
 from oslo_serialization import jsonutils
 
 from yardstick.network_services.traffic_profile import http_ixload
+from yardstick.network_services.traffic_profile.http_ixload import \
+    join_non_strings, validate_non_string_sequence
+
+
+class TestJoinNonStrings(unittest.TestCase):
+
+    def test_validate_non_string_sequence(self):
+        self.assertEqual(validate_non_string_sequence([1, 2, 3]), [1, 2, 3])
+        self.assertIsNone(validate_non_string_sequence('123'))
+        self.assertIsNone(validate_non_string_sequence(1))
+
+        self.assertEqual(validate_non_string_sequence(1, 2), 2)
+        self.assertEqual(validate_non_string_sequence(1, default=2), 2)
+
+        with self.assertRaises(RuntimeError):
+            validate_non_string_sequence(1, raise_exc=RuntimeError)
+
+    def test_join_non_strings(self):
+        self.assertEqual(join_non_strings(':'), '')
+        self.assertEqual(join_non_strings(':', 'a'), 'a')
+        self.assertEqual(join_non_strings(':', 'a', 2, 'c'), 'a:2:c')
+        self.assertEqual(join_non_strings(':', ['a', 2, 'c']), 'a:2:c')
+        self.assertEqual(join_non_strings(':', 'abc'), 'abc')
 
 
 class TestIxLoadTrafficGen(unittest.TestCase):
index 6dba64a..b2cb9df 100644 (file)
@@ -20,65 +20,7 @@ from __future__ import division
 import unittest
 import mock
 
-STL_MOCKS = {
-    'stl': mock.MagicMock(),
-    'stl.trex_stl_lib': mock.MagicMock(),
-    'stl.trex_stl_lib.base64': mock.MagicMock(),
-    'stl.trex_stl_lib.binascii': mock.MagicMock(),
-    'stl.trex_stl_lib.collections': mock.MagicMock(),
-    'stl.trex_stl_lib.copy': mock.MagicMock(),
-    'stl.trex_stl_lib.datetime': mock.MagicMock(),
-    'stl.trex_stl_lib.functools': mock.MagicMock(),
-    'stl.trex_stl_lib.imp': mock.MagicMock(),
-    'stl.trex_stl_lib.inspect': mock.MagicMock(),
-    'stl.trex_stl_lib.json': mock.MagicMock(),
-    'stl.trex_stl_lib.linecache': mock.MagicMock(),
-    'stl.trex_stl_lib.math': mock.MagicMock(),
-    'stl.trex_stl_lib.os': mock.MagicMock(),
-    'stl.trex_stl_lib.platform': mock.MagicMock(),
-    'stl.trex_stl_lib.pprint': mock.MagicMock(),
-    'stl.trex_stl_lib.random': mock.MagicMock(),
-    'stl.trex_stl_lib.re': mock.MagicMock(),
-    'stl.trex_stl_lib.scapy': mock.MagicMock(),
-    'stl.trex_stl_lib.socket': mock.MagicMock(),
-    'stl.trex_stl_lib.string': mock.MagicMock(),
-    'stl.trex_stl_lib.struct': mock.MagicMock(),
-    'stl.trex_stl_lib.sys': mock.MagicMock(),
-    'stl.trex_stl_lib.threading': mock.MagicMock(),
-    'stl.trex_stl_lib.time': mock.MagicMock(),
-    'stl.trex_stl_lib.traceback': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_async_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_exceptions': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_ext': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_jsonrpc_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_packet_builder_interface': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_packet_builder_scapy': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_port': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_stats': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_streams': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_types': mock.MagicMock(),
-    'stl.trex_stl_lib.types': mock.MagicMock(),
-    'stl.trex_stl_lib.utils': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.argparse': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.collections': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.common': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.json': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.os': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.parsing_opts': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.pwd': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.random': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.re': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.string': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.sys': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.text_opts': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.text_tables': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.texttable': mock.MagicMock(),
-    'stl.trex_stl_lib.warnings': mock.MagicMock(),
-    'stl.trex_stl_lib.yaml': mock.MagicMock(),
-    'stl.trex_stl_lib.zlib': mock.MagicMock(),
-    'stl.trex_stl_lib.zmq': mock.MagicMock(),
-}
+from tests.unit import STL_MOCKS
 
 STLClient = mock.MagicMock()
 stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
index 252c655..be172f2 100644 (file)
 #
 
 from __future__ import absolute_import
-import unittest
 
+import unittest
 import mock
 
-STL_MOCKS = {
-    'stl': mock.MagicMock(),
-    'stl.trex_stl_lib': mock.MagicMock(),
-    'stl.trex_stl_lib.base64': mock.MagicMock(),
-    'stl.trex_stl_lib.binascii': mock.MagicMock(),
-    'stl.trex_stl_lib.collections': mock.MagicMock(),
-    'stl.trex_stl_lib.copy': mock.MagicMock(),
-    'stl.trex_stl_lib.datetime': mock.MagicMock(),
-    'stl.trex_stl_lib.functools': mock.MagicMock(),
-    'stl.trex_stl_lib.imp': mock.MagicMock(),
-    'stl.trex_stl_lib.inspect': mock.MagicMock(),
-    'stl.trex_stl_lib.json': mock.MagicMock(),
-    'stl.trex_stl_lib.linecache': mock.MagicMock(),
-    'stl.trex_stl_lib.math': mock.MagicMock(),
-    'stl.trex_stl_lib.os': mock.MagicMock(),
-    'stl.trex_stl_lib.platform': mock.MagicMock(),
-    'stl.trex_stl_lib.pprint': mock.MagicMock(),
-    'stl.trex_stl_lib.random': mock.MagicMock(),
-    'stl.trex_stl_lib.re': mock.MagicMock(),
-    'stl.trex_stl_lib.scapy': mock.MagicMock(),
-    'stl.trex_stl_lib.socket': mock.MagicMock(),
-    'stl.trex_stl_lib.string': mock.MagicMock(),
-    'stl.trex_stl_lib.struct': mock.MagicMock(),
-    'stl.trex_stl_lib.sys': mock.MagicMock(),
-    'stl.trex_stl_lib.threading': mock.MagicMock(),
-    'stl.trex_stl_lib.time': mock.MagicMock(),
-    'stl.trex_stl_lib.traceback': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_async_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_exceptions': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_ext': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_jsonrpc_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_packet_builder_interface': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_packet_builder_scapy': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_port': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_stats': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_streams': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_types': mock.MagicMock(),
-    'stl.trex_stl_lib.types': mock.MagicMock(),
-    'stl.trex_stl_lib.utils': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.argparse': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.collections': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.common': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.json': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.os': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.parsing_opts': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.pwd': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.random': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.re': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.string': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.sys': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.text_opts': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.text_tables': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.texttable': mock.MagicMock(),
-    'stl.trex_stl_lib.warnings': mock.MagicMock(),
-    'stl.trex_stl_lib.yaml': mock.MagicMock(),
-    'stl.trex_stl_lib.zlib': mock.MagicMock(),
-    'stl.trex_stl_lib.zmq': mock.MagicMock(),
-}
+from tests.unit import STL_MOCKS
 
 STLClient = mock.MagicMock()
 stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
index 74e6121..72b8670 100644 (file)
 #
 
 from __future__ import absolute_import
-import unittest
 
+import unittest
 import mock
 
-STL_MOCKS = {
-    'stl': mock.MagicMock(),
-    'stl.trex_stl_lib': mock.MagicMock(),
-    'stl.trex_stl_lib.base64': mock.MagicMock(),
-    'stl.trex_stl_lib.binascii': mock.MagicMock(),
-    'stl.trex_stl_lib.collections': mock.MagicMock(),
-    'stl.trex_stl_lib.copy': mock.MagicMock(),
-    'stl.trex_stl_lib.datetime': mock.MagicMock(),
-    'stl.trex_stl_lib.functools': mock.MagicMock(),
-    'stl.trex_stl_lib.imp': mock.MagicMock(),
-    'stl.trex_stl_lib.inspect': mock.MagicMock(),
-    'stl.trex_stl_lib.json': mock.MagicMock(),
-    'stl.trex_stl_lib.linecache': mock.MagicMock(),
-    'stl.trex_stl_lib.math': mock.MagicMock(),
-    'stl.trex_stl_lib.os': mock.MagicMock(),
-    'stl.trex_stl_lib.platform': mock.MagicMock(),
-    'stl.trex_stl_lib.pprint': mock.MagicMock(),
-    'stl.trex_stl_lib.random': mock.MagicMock(),
-    'stl.trex_stl_lib.re': mock.MagicMock(),
-    'stl.trex_stl_lib.scapy': mock.MagicMock(),
-    'stl.trex_stl_lib.socket': mock.MagicMock(),
-    'stl.trex_stl_lib.string': mock.MagicMock(),
-    'stl.trex_stl_lib.struct': mock.MagicMock(),
-    'stl.trex_stl_lib.sys': mock.MagicMock(),
-    'stl.trex_stl_lib.threading': mock.MagicMock(),
-    'stl.trex_stl_lib.time': mock.MagicMock(),
-    'stl.trex_stl_lib.traceback': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_async_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_exceptions': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_ext': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_jsonrpc_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_packet_builder_interface': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_packet_builder_scapy': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_port': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_stats': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_streams': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_types': mock.MagicMock(),
-    'stl.trex_stl_lib.types': mock.MagicMock(),
-    'stl.trex_stl_lib.utils': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.argparse': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.collections': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.common': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.json': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.os': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.parsing_opts': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.pwd': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.random': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.re': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.string': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.sys': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.text_opts': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.text_tables': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.texttable': mock.MagicMock(),
-    'stl.trex_stl_lib.warnings': mock.MagicMock(),
-    'stl.trex_stl_lib.yaml': mock.MagicMock(),
-    'stl.trex_stl_lib.zlib': mock.MagicMock(),
-    'stl.trex_stl_lib.zmq': mock.MagicMock(),
-}
+from tests.unit import STL_MOCKS
 
 STLClient = mock.MagicMock()
 stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
index a2ad033..14223da 100644 (file)
 #
 
 from __future__ import absolute_import
-import unittest
 
+import unittest
 import mock
 
-STL_MOCKS = {
-    'stl': mock.MagicMock(),
-    'stl.trex_stl_lib': mock.MagicMock(),
-    'stl.trex_stl_lib.base64': mock.MagicMock(),
-    'stl.trex_stl_lib.binascii': mock.MagicMock(),
-    'stl.trex_stl_lib.collections': mock.MagicMock(),
-    'stl.trex_stl_lib.copy': mock.MagicMock(),
-    'stl.trex_stl_lib.datetime': mock.MagicMock(),
-    'stl.trex_stl_lib.functools': mock.MagicMock(),
-    'stl.trex_stl_lib.imp': mock.MagicMock(),
-    'stl.trex_stl_lib.inspect': mock.MagicMock(),
-    'stl.trex_stl_lib.json': mock.MagicMock(),
-    'stl.trex_stl_lib.linecache': mock.MagicMock(),
-    'stl.trex_stl_lib.math': mock.MagicMock(),
-    'stl.trex_stl_lib.os': mock.MagicMock(),
-    'stl.trex_stl_lib.platform': mock.MagicMock(),
-    'stl.trex_stl_lib.pprint': mock.MagicMock(),
-    'stl.trex_stl_lib.random': mock.MagicMock(),
-    'stl.trex_stl_lib.re': mock.MagicMock(),
-    'stl.trex_stl_lib.scapy': mock.MagicMock(),
-    'stl.trex_stl_lib.socket': mock.MagicMock(),
-    'stl.trex_stl_lib.string': mock.MagicMock(),
-    'stl.trex_stl_lib.struct': mock.MagicMock(),
-    'stl.trex_stl_lib.sys': mock.MagicMock(),
-    'stl.trex_stl_lib.threading': mock.MagicMock(),
-    'stl.trex_stl_lib.time': mock.MagicMock(),
-    'stl.trex_stl_lib.traceback': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_async_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_exceptions': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_ext': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_jsonrpc_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_packet_builder_interface': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_packet_builder_scapy': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_port': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_stats': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_streams': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_types': mock.MagicMock(),
-    'stl.trex_stl_lib.types': mock.MagicMock(),
-    'stl.trex_stl_lib.utils': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.argparse': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.collections': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.common': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.json': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.os': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.parsing_opts': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.pwd': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.random': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.re': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.string': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.sys': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.text_opts': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.text_tables': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.texttable': mock.MagicMock(),
-    'stl.trex_stl_lib.warnings': mock.MagicMock(),
-    'stl.trex_stl_lib.yaml': mock.MagicMock(),
-    'stl.trex_stl_lib.zlib': mock.MagicMock(),
-    'stl.trex_stl_lib.zmq': mock.MagicMock(),
-}
+from tests.unit import STL_MOCKS
 
 STLClient = mock.MagicMock()
 stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
index 19e6ff8..3572987 100644 (file)
 #
 
 from __future__ import absolute_import
-import unittest
 
+import unittest
 import mock
 
-STL_MOCKS = {
-    'stl': mock.MagicMock(),
-    'stl.trex_stl_lib': mock.MagicMock(),
-    'stl.trex_stl_lib.base64': mock.MagicMock(),
-    'stl.trex_stl_lib.binascii': mock.MagicMock(),
-    'stl.trex_stl_lib.collections': mock.MagicMock(),
-    'stl.trex_stl_lib.copy': mock.MagicMock(),
-    'stl.trex_stl_lib.datetime': mock.MagicMock(),
-    'stl.trex_stl_lib.functools': mock.MagicMock(),
-    'stl.trex_stl_lib.imp': mock.MagicMock(),
-    'stl.trex_stl_lib.inspect': mock.MagicMock(),
-    'stl.trex_stl_lib.json': mock.MagicMock(),
-    'stl.trex_stl_lib.linecache': mock.MagicMock(),
-    'stl.trex_stl_lib.math': mock.MagicMock(),
-    'stl.trex_stl_lib.os': mock.MagicMock(),
-    'stl.trex_stl_lib.platform': mock.MagicMock(),
-    'stl.trex_stl_lib.pprint': mock.MagicMock(),
-    'stl.trex_stl_lib.random': mock.MagicMock(),
-    'stl.trex_stl_lib.re': mock.MagicMock(),
-    'stl.trex_stl_lib.scapy': mock.MagicMock(),
-    'stl.trex_stl_lib.socket': mock.MagicMock(),
-    'stl.trex_stl_lib.string': mock.MagicMock(),
-    'stl.trex_stl_lib.struct': mock.MagicMock(),
-    'stl.trex_stl_lib.sys': mock.MagicMock(),
-    'stl.trex_stl_lib.threading': mock.MagicMock(),
-    'stl.trex_stl_lib.time': mock.MagicMock(),
-    'stl.trex_stl_lib.traceback': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_async_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_exceptions': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_ext': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_jsonrpc_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_packet_builder_interface': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_packet_builder_scapy': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_port': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_stats': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_streams': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_types': mock.MagicMock(),
-    'stl.trex_stl_lib.types': mock.MagicMock(),
-    'stl.trex_stl_lib.utils': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.argparse': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.collections': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.common': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.json': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.os': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.parsing_opts': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.pwd': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.random': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.re': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.string': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.sys': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.text_opts': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.text_tables': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.texttable': mock.MagicMock(),
-    'stl.trex_stl_lib.warnings': mock.MagicMock(),
-    'stl.trex_stl_lib.yaml': mock.MagicMock(),
-    'stl.trex_stl_lib.zlib': mock.MagicMock(),
-    'stl.trex_stl_lib.zmq': mock.MagicMock(),
-}
+from tests.unit import STL_MOCKS
 
 STLClient = mock.MagicMock()
 stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
index dcaf43d..aef0b93 100644 (file)
 
 from __future__ import absolute_import
 from __future__ import division
+
 import unittest
 import mock
 
-STL_MOCKS = {
-    'stl': mock.MagicMock(),
-    'stl.trex_stl_lib': mock.MagicMock(),
-    'stl.trex_stl_lib.base64': mock.MagicMock(),
-    'stl.trex_stl_lib.binascii': mock.MagicMock(),
-    'stl.trex_stl_lib.collections': mock.MagicMock(),
-    'stl.trex_stl_lib.copy': mock.MagicMock(),
-    'stl.trex_stl_lib.datetime': mock.MagicMock(),
-    'stl.trex_stl_lib.functools': mock.MagicMock(),
-    'stl.trex_stl_lib.imp': mock.MagicMock(),
-    'stl.trex_stl_lib.inspect': mock.MagicMock(),
-    'stl.trex_stl_lib.json': mock.MagicMock(),
-    'stl.trex_stl_lib.linecache': mock.MagicMock(),
-    'stl.trex_stl_lib.math': mock.MagicMock(),
-    'stl.trex_stl_lib.os': mock.MagicMock(),
-    'stl.trex_stl_lib.platform': mock.MagicMock(),
-    'stl.trex_stl_lib.pprint': mock.MagicMock(),
-    'stl.trex_stl_lib.random': mock.MagicMock(),
-    'stl.trex_stl_lib.re': mock.MagicMock(),
-    'stl.trex_stl_lib.scapy': mock.MagicMock(),
-    'stl.trex_stl_lib.socket': mock.MagicMock(),
-    'stl.trex_stl_lib.string': mock.MagicMock(),
-    'stl.trex_stl_lib.struct': mock.MagicMock(),
-    'stl.trex_stl_lib.sys': mock.MagicMock(),
-    'stl.trex_stl_lib.threading': mock.MagicMock(),
-    'stl.trex_stl_lib.time': mock.MagicMock(),
-    'stl.trex_stl_lib.traceback': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_async_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_exceptions': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_ext': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_jsonrpc_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_packet_builder_interface': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_packet_builder_scapy': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_port': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_stats': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_streams': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_types': mock.MagicMock(),
-    'stl.trex_stl_lib.types': mock.MagicMock(),
-    'stl.trex_stl_lib.utils': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.argparse': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.collections': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.common': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.json': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.os': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.parsing_opts': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.pwd': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.random': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.re': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.string': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.sys': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.text_opts': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.text_tables': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.texttable': mock.MagicMock(),
-    'stl.trex_stl_lib.warnings': mock.MagicMock(),
-    'stl.trex_stl_lib.yaml': mock.MagicMock(),
-    'stl.trex_stl_lib.zlib': mock.MagicMock(),
-    'stl.trex_stl_lib.zmq': mock.MagicMock(),
-}
+from tests.unit import STL_MOCKS
+
 
 STLClient = mock.MagicMock()
 stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
@@ -115,9 +59,9 @@ class TestRFC2544Profile(unittest.TestCase):
                            'outer_l3v4': {'dstip4': '1.1.1.1-1.15.255.255',
                                           'proto': 'udp',
                                           'srcip4': '90.90.1.1-90.105.255.255',
-                                          'dscp': 0, 'ttl': 32},
+                                          'dscp': 0, 'ttl': 32, 'count': 1},
                            'outer_l4': {'srcport': '2001',
-                                        'dsrport': '1234'}}},
+                               'dsrport': '1234', 'count': 1}}},
                'private_1': {'ipv4':
                            {'outer_l2': {'framesize':
                                          {'64B': '100', '1518B': '0',
@@ -127,9 +71,9 @@ class TestRFC2544Profile(unittest.TestCase):
                             'outer_l3v4': {'dstip4': '9.9.1.1-90.105.255.255',
                                            'proto': 'udp',
                                            'srcip4': '1.1.1.1-1.15.255.255',
-                                           'dscp': 0, 'ttl': 32},
+                                           'dscp': 0, 'ttl': 32, 'count': 1},
                             'outer_l4': {'dstport': '2001',
-                                         'srcport': '1234'}}},
+                                'srcport': '1234', 'count': 1}}},
                'schema': 'isb:traffic_profile:0.1'}
 
     def test___init__(self):
index fd769e6..9a78c36 100644 (file)
 #
 
 from __future__ import absolute_import
-import unittest
 
+import unittest
 import mock
 
-STL_MOCKS = {
-    'stl': mock.MagicMock(),
-    'stl.trex_stl_lib': mock.MagicMock(),
-    'stl.trex_stl_lib.base64': mock.MagicMock(),
-    'stl.trex_stl_lib.binascii': mock.MagicMock(),
-    'stl.trex_stl_lib.collections': mock.MagicMock(),
-    'stl.trex_stl_lib.copy': mock.MagicMock(),
-    'stl.trex_stl_lib.datetime': mock.MagicMock(),
-    'stl.trex_stl_lib.functools': mock.MagicMock(),
-    'stl.trex_stl_lib.imp': mock.MagicMock(),
-    'stl.trex_stl_lib.inspect': mock.MagicMock(),
-    'stl.trex_stl_lib.json': mock.MagicMock(),
-    'stl.trex_stl_lib.linecache': mock.MagicMock(),
-    'stl.trex_stl_lib.math': mock.MagicMock(),
-    'stl.trex_stl_lib.os': mock.MagicMock(),
-    'stl.trex_stl_lib.platform': mock.MagicMock(),
-    'stl.trex_stl_lib.pprint': mock.MagicMock(),
-    'stl.trex_stl_lib.random': mock.MagicMock(),
-    'stl.trex_stl_lib.re': mock.MagicMock(),
-    'stl.trex_stl_lib.scapy': mock.MagicMock(),
-    'stl.trex_stl_lib.socket': mock.MagicMock(),
-    'stl.trex_stl_lib.string': mock.MagicMock(),
-    'stl.trex_stl_lib.struct': mock.MagicMock(),
-    'stl.trex_stl_lib.sys': mock.MagicMock(),
-    'stl.trex_stl_lib.threading': mock.MagicMock(),
-    'stl.trex_stl_lib.time': mock.MagicMock(),
-    'stl.trex_stl_lib.traceback': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_async_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_exceptions': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_ext': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_jsonrpc_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_packet_builder_interface': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_packet_builder_scapy': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_port': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_stats': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_streams': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_types': mock.MagicMock(),
-    'stl.trex_stl_lib.types': mock.MagicMock(),
-    'stl.trex_stl_lib.utils': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.argparse': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.collections': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.common': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.json': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.os': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.parsing_opts': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.pwd': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.random': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.re': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.string': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.sys': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.text_opts': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.text_tables': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.texttable': mock.MagicMock(),
-    'stl.trex_stl_lib.warnings': mock.MagicMock(),
-    'stl.trex_stl_lib.yaml': mock.MagicMock(),
-    'stl.trex_stl_lib.zlib': mock.MagicMock(),
-    'stl.trex_stl_lib.zmq': mock.MagicMock(),
-}
+from tests.unit import STL_MOCKS
+
 
 STLClient = mock.MagicMock()
 stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
@@ -117,9 +60,11 @@ class TestTrexProfile(unittest.TestCase):
                                    'outer_l3v4': {'dstip4': '1.1.1.1-1.1.2.2',
                                                   'proto': 'udp',
                                                   'srcip4': '9.9.1.1-90.1.2.2',
-                                                  'dscp': 0, 'ttl': 32},
+                                                  'dscp': 0, 'ttl': 32,
+                                                  'count': 1},
                                    'outer_l4': {'srcport': '2001',
-                                                'dsrport': '1234'}}},
+                                                'dsrport': '1234',
+                                                'count': 1}}},
                'private': {'ipv4':
                            {'outer_l2': {'framesize':
                                          {'64B': '100', '1518B': '0',
@@ -131,9 +76,10 @@ class TestTrexProfile(unittest.TestCase):
                             'outer_l3v4': {'dstip4': '9.9.1.1-90.105.255.255',
                                            'proto': 'udp',
                                            'srcip4': '1.1.1.1-1.15.255.255',
-                                           'dscp': 0, 'ttl': 32},
+                                           'dscp': 0, 'ttl': 32, 'count': 1},
                             'outer_l4': {'dstport': '2001',
-                                         'srcport': '1234'}}},
+                                         'srcport': '1234',
+                                         'count': 1}}},
                'schema': 'isb:traffic_profile:0.1'}
     PROFILE_v6 = {'description': 'Traffic profile to run RFC2544 latency',
                   'name': 'rfc2544',
@@ -149,9 +95,11 @@ class TestTrexProfile(unittest.TestCase):
                                       'outer_l3v4': {'dstip6': '0064:ff9b:0:0:0:0:9810:6414-0064:ff9b:0:0:0:0:9810:6420',
                                                      'proto': 'udp',
                                                      'srcip6': '0064:ff9b:0:0:0:0:9810:2814-0064:ff9b:0:0:0:0:9810:2820',
-                                                     'dscp': 0, 'ttl': 32},
+                                                     'dscp': 0, 'ttl': 32,
+                                                     'count': 1},
                                       'outer_l4': {'srcport': '2001',
-                                                   'dsrport': '1234'}}},
+                                                   'dsrport': '1234',
+                                                   'count': 1}}},
                   'private':
                   {'ipv6': {'outer_l2': {'framesize':
                                          {'64B': '100', '1518B': '0',
@@ -163,9 +111,11 @@ class TestTrexProfile(unittest.TestCase):
                             'outer_l3v4': {'dstip6': '0064:ff9b:0:0:0:0:9810:2814-0064:ff9b:0:0:0:0:9810:2820',
                                            'proto': 'udp',
                                            'srcip6': '0064:ff9b:0:0:0:0:9810:6414-0064:ff9b:0:0:0:0:9810:6420',
-                                           'dscp': 0, 'ttl': 32},
+                                           'dscp': 0, 'ttl': 32,
+                                           'count': 1},
                             'outer_l4': {'dstport': '2001',
-                                         'srcport': '1234'}}},
+                                         'srcport': '1234',
+                                         'count': 1}}},
                   'schema': 'isb:traffic_profile:0.1'}
 
     def test___init__(self):
index a63a59d..7570067 100644 (file)
 #
 
 from __future__ import absolute_import
+
 import unittest
 import mock
 import os
 
+from tests.unit import STL_MOCKS
 
-STL_MOCKS = {
-    'stl': mock.MagicMock(),
-    'stl.trex_stl_lib': mock.MagicMock(),
-    'stl.trex_stl_lib.base64': mock.MagicMock(),
-    'stl.trex_stl_lib.binascii': mock.MagicMock(),
-    'stl.trex_stl_lib.collections': mock.MagicMock(),
-    'stl.trex_stl_lib.copy': mock.MagicMock(),
-    'stl.trex_stl_lib.datetime': mock.MagicMock(),
-    'stl.trex_stl_lib.functools': mock.MagicMock(),
-    'stl.trex_stl_lib.imp': mock.MagicMock(),
-    'stl.trex_stl_lib.inspect': mock.MagicMock(),
-    'stl.trex_stl_lib.json': mock.MagicMock(),
-    'stl.trex_stl_lib.linecache': mock.MagicMock(),
-    'stl.trex_stl_lib.math': mock.MagicMock(),
-    'stl.trex_stl_lib.os': mock.MagicMock(),
-    'stl.trex_stl_lib.platform': mock.MagicMock(),
-    'stl.trex_stl_lib.pprint': mock.MagicMock(),
-    'stl.trex_stl_lib.random': mock.MagicMock(),
-    'stl.trex_stl_lib.re': mock.MagicMock(),
-    'stl.trex_stl_lib.scapy': mock.MagicMock(),
-    'stl.trex_stl_lib.socket': mock.MagicMock(),
-    'stl.trex_stl_lib.string': mock.MagicMock(),
-    'stl.trex_stl_lib.struct': mock.MagicMock(),
-    'stl.trex_stl_lib.sys': mock.MagicMock(),
-    'stl.trex_stl_lib.threading': mock.MagicMock(),
-    'stl.trex_stl_lib.time': mock.MagicMock(),
-    'stl.trex_stl_lib.traceback': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_async_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_exceptions': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_ext': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_jsonrpc_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_packet_builder_interface': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_packet_builder_scapy': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_port': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_stats': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_streams': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_types': mock.MagicMock(),
-    'stl.trex_stl_lib.types': mock.MagicMock(),
-    'stl.trex_stl_lib.utils': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.argparse': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.collections': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.common': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.json': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.os': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.parsing_opts': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.pwd': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.random': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.re': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.string': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.sys': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.text_opts': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.text_tables': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.texttable': mock.MagicMock(),
-    'stl.trex_stl_lib.warnings': mock.MagicMock(),
-    'stl.trex_stl_lib.yaml': mock.MagicMock(),
-    'stl.trex_stl_lib.zlib': mock.MagicMock(),
-    'stl.trex_stl_lib.zmq': mock.MagicMock(),
-}
 
 STLClient = mock.MagicMock()
 stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
index bf226d2..f214d66 100644 (file)
@@ -19,68 +19,10 @@ from __future__ import absolute_import
 
 import os
 import unittest
-
 import mock
 
-STL_MOCKS = {
-    'stl': mock.MagicMock(),
-    'stl.trex_stl_lib': mock.MagicMock(),
-    'stl.trex_stl_lib.base64': mock.MagicMock(),
-    'stl.trex_stl_lib.binascii': mock.MagicMock(),
-    'stl.trex_stl_lib.collections': mock.MagicMock(),
-    'stl.trex_stl_lib.copy': mock.MagicMock(),
-    'stl.trex_stl_lib.datetime': mock.MagicMock(),
-    'stl.trex_stl_lib.functools': mock.MagicMock(),
-    'stl.trex_stl_lib.imp': mock.MagicMock(),
-    'stl.trex_stl_lib.inspect': mock.MagicMock(),
-    'stl.trex_stl_lib.json': mock.MagicMock(),
-    'stl.trex_stl_lib.linecache': mock.MagicMock(),
-    'stl.trex_stl_lib.math': mock.MagicMock(),
-    'stl.trex_stl_lib.os': mock.MagicMock(),
-    'stl.trex_stl_lib.platform': mock.MagicMock(),
-    'stl.trex_stl_lib.pprint': mock.MagicMock(),
-    'stl.trex_stl_lib.random': mock.MagicMock(),
-    'stl.trex_stl_lib.re': mock.MagicMock(),
-    'stl.trex_stl_lib.scapy': mock.MagicMock(),
-    'stl.trex_stl_lib.socket': mock.MagicMock(),
-    'stl.trex_stl_lib.string': mock.MagicMock(),
-    'stl.trex_stl_lib.struct': mock.MagicMock(),
-    'stl.trex_stl_lib.sys': mock.MagicMock(),
-    'stl.trex_stl_lib.threading': mock.MagicMock(),
-    'stl.trex_stl_lib.time': mock.MagicMock(),
-    'stl.trex_stl_lib.traceback': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_async_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_exceptions': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_ext': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_jsonrpc_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_packet_builder_interface': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_packet_builder_scapy': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_port': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_stats': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_streams': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_types': mock.MagicMock(),
-    'stl.trex_stl_lib.types': mock.MagicMock(),
-    'stl.trex_stl_lib.utils': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.argparse': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.collections': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.common': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.json': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.os': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.parsing_opts': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.pwd': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.random': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.re': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.string': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.sys': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.text_opts': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.text_tables': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.texttable': mock.MagicMock(),
-    'stl.trex_stl_lib.warnings': mock.MagicMock(),
-    'stl.trex_stl_lib.yaml': mock.MagicMock(),
-    'stl.trex_stl_lib.zlib': mock.MagicMock(),
-    'stl.trex_stl_lib.zmq': mock.MagicMock(),
-}
+from tests.unit import STL_MOCKS
+
 
 STLClient = mock.MagicMock()
 stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
index 53481dd..b74e5d9 100644 (file)
 #
 
 from __future__ import absolute_import
+
 import unittest
 from contextlib import contextmanager
-
 import mock
 
-STL_MOCKS = {
-    'stl': mock.MagicMock(),
-    'stl.trex_stl_lib': mock.MagicMock(),
-    'stl.trex_stl_lib.base64': mock.MagicMock(),
-    'stl.trex_stl_lib.binascii': mock.MagicMock(),
-    'stl.trex_stl_lib.collections': mock.MagicMock(),
-    'stl.trex_stl_lib.copy': mock.MagicMock(),
-    'stl.trex_stl_lib.datetime': mock.MagicMock(),
-    'stl.trex_stl_lib.functools': mock.MagicMock(),
-    'stl.trex_stl_lib.imp': mock.MagicMock(),
-    'stl.trex_stl_lib.inspect': mock.MagicMock(),
-    'stl.trex_stl_lib.json': mock.MagicMock(),
-    'stl.trex_stl_lib.linecache': mock.MagicMock(),
-    'stl.trex_stl_lib.math': mock.MagicMock(),
-    'stl.trex_stl_lib.os': mock.MagicMock(),
-    'stl.trex_stl_lib.platform': mock.MagicMock(),
-    'stl.trex_stl_lib.pprint': mock.MagicMock(),
-    'stl.trex_stl_lib.random': mock.MagicMock(),
-    'stl.trex_stl_lib.re': mock.MagicMock(),
-    'stl.trex_stl_lib.scapy': mock.MagicMock(),
-    'stl.trex_stl_lib.socket': mock.MagicMock(),
-    'stl.trex_stl_lib.string': mock.MagicMock(),
-    'stl.trex_stl_lib.struct': mock.MagicMock(),
-    'stl.trex_stl_lib.sys': mock.MagicMock(),
-    'stl.trex_stl_lib.threading': mock.MagicMock(),
-    'stl.trex_stl_lib.time': mock.MagicMock(),
-    'stl.trex_stl_lib.traceback': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_async_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_exceptions': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_ext': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_jsonrpc_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_packet_builder_interface': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_packet_builder_scapy': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_port': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_stats': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_streams': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_types': mock.MagicMock(),
-    'stl.trex_stl_lib.types': mock.MagicMock(),
-    'stl.trex_stl_lib.utils': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.argparse': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.collections': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.common': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.json': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.os': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.parsing_opts': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.pwd': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.random': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.re': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.string': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.sys': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.text_opts': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.text_tables': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.texttable': mock.MagicMock(),
-    'stl.trex_stl_lib.warnings': mock.MagicMock(),
-    'stl.trex_stl_lib.yaml': mock.MagicMock(),
-    'stl.trex_stl_lib.zlib': mock.MagicMock(),
-    'stl.trex_stl_lib.zmq': mock.MagicMock(),
-}
+from tests.unit import STL_MOCKS
+
 
 STLClient = mock.MagicMock()
 stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
index 90ec3f3..98eccae 100644 (file)
@@ -23,69 +23,10 @@ import unittest
 from collections import OrderedDict
 from itertools import repeat, chain
 from contextlib import contextmanager
-
 import mock
 
+from tests.unit import STL_MOCKS
 
-STL_MOCKS = {
-    'stl': mock.MagicMock(),
-    'stl.trex_stl_lib': mock.MagicMock(),
-    'stl.trex_stl_lib.base64': mock.MagicMock(),
-    'stl.trex_stl_lib.binascii': mock.MagicMock(),
-    'stl.trex_stl_lib.collections': mock.MagicMock(),
-    'stl.trex_stl_lib.copy': mock.MagicMock(),
-    'stl.trex_stl_lib.datetime': mock.MagicMock(),
-    'stl.trex_stl_lib.functools': mock.MagicMock(),
-    'stl.trex_stl_lib.imp': mock.MagicMock(),
-    'stl.trex_stl_lib.inspect': mock.MagicMock(),
-    'stl.trex_stl_lib.json': mock.MagicMock(),
-    'stl.trex_stl_lib.linecache': mock.MagicMock(),
-    'stl.trex_stl_lib.math': mock.MagicMock(),
-    'stl.trex_stl_lib.os': mock.MagicMock(),
-    'stl.trex_stl_lib.platform': mock.MagicMock(),
-    'stl.trex_stl_lib.pprint': mock.MagicMock(),
-    'stl.trex_stl_lib.random': mock.MagicMock(),
-    'stl.trex_stl_lib.re': mock.MagicMock(),
-    'stl.trex_stl_lib.scapy': mock.MagicMock(),
-    'stl.trex_stl_lib.socket': mock.MagicMock(),
-    'stl.trex_stl_lib.string': mock.MagicMock(),
-    'stl.trex_stl_lib.struct': mock.MagicMock(),
-    'stl.trex_stl_lib.sys': mock.MagicMock(),
-    'stl.trex_stl_lib.threading': mock.MagicMock(),
-    'stl.trex_stl_lib.time': mock.MagicMock(),
-    'stl.trex_stl_lib.traceback': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_async_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_exceptions': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_ext': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_jsonrpc_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_packet_builder_interface': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_packet_builder_scapy': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_port': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_stats': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_streams': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_types': mock.MagicMock(),
-    'stl.trex_stl_lib.types': mock.MagicMock(),
-    'stl.trex_stl_lib.utils': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.argparse': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.collections': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.common': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.json': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.os': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.parsing_opts': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.pwd': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.random': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.re': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.string': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.sys': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.text_opts': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.text_tables': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.texttable': mock.MagicMock(),
-    'stl.trex_stl_lib.warnings': mock.MagicMock(),
-    'stl.trex_stl_lib.yaml': mock.MagicMock(),
-    'stl.trex_stl_lib.zlib': mock.MagicMock(),
-    'stl.trex_stl_lib.zmq': mock.MagicMock(),
-}
 
 STLClient = mock.MagicMock()
 stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
index 453100b..c727cb7 100644 (file)
@@ -19,71 +19,13 @@ from __future__ import absolute_import
 
 import os
 import unittest
-
 import mock
 from copy import deepcopy
 
-SSH_HELPER = 'yardstick.network_services.vnf_generic.vnf.sample_vnf.VnfSshHelper'
+from tests.unit import STL_MOCKS
 
-STL_MOCKS = {
-    'stl': mock.MagicMock(),
-    'stl.trex_stl_lib': mock.MagicMock(),
-    'stl.trex_stl_lib.base64': mock.MagicMock(),
-    'stl.trex_stl_lib.binascii': mock.MagicMock(),
-    'stl.trex_stl_lib.collections': mock.MagicMock(),
-    'stl.trex_stl_lib.copy': mock.MagicMock(),
-    'stl.trex_stl_lib.datetime': mock.MagicMock(),
-    'stl.trex_stl_lib.functools': mock.MagicMock(),
-    'stl.trex_stl_lib.imp': mock.MagicMock(),
-    'stl.trex_stl_lib.inspect': mock.MagicMock(),
-    'stl.trex_stl_lib.json': mock.MagicMock(),
-    'stl.trex_stl_lib.linecache': mock.MagicMock(),
-    'stl.trex_stl_lib.math': mock.MagicMock(),
-    'stl.trex_stl_lib.os': mock.MagicMock(),
-    'stl.trex_stl_lib.platform': mock.MagicMock(),
-    'stl.trex_stl_lib.pprint': mock.MagicMock(),
-    'stl.trex_stl_lib.random': mock.MagicMock(),
-    'stl.trex_stl_lib.re': mock.MagicMock(),
-    'stl.trex_stl_lib.scapy': mock.MagicMock(),
-    'stl.trex_stl_lib.socket': mock.MagicMock(),
-    'stl.trex_stl_lib.string': mock.MagicMock(),
-    'stl.trex_stl_lib.struct': mock.MagicMock(),
-    'stl.trex_stl_lib.sys': mock.MagicMock(),
-    'stl.trex_stl_lib.threading': mock.MagicMock(),
-    'stl.trex_stl_lib.time': mock.MagicMock(),
-    'stl.trex_stl_lib.traceback': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_async_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_exceptions': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_ext': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_jsonrpc_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_packet_builder_interface': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_packet_builder_scapy': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_port': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_stats': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_streams': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_types': mock.MagicMock(),
-    'stl.trex_stl_lib.types': mock.MagicMock(),
-    'stl.trex_stl_lib.utils': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.argparse': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.collections': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.common': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.json': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.os': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.parsing_opts': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.pwd': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.random': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.re': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.string': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.sys': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.text_opts': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.text_tables': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.texttable': mock.MagicMock(),
-    'stl.trex_stl_lib.warnings': mock.MagicMock(),
-    'stl.trex_stl_lib.yaml': mock.MagicMock(),
-    'stl.trex_stl_lib.zlib': mock.MagicMock(),
-    'stl.trex_stl_lib.zmq': mock.MagicMock(),
-}
+
+SSH_HELPER = 'yardstick.network_services.vnf_generic.vnf.sample_vnf.VnfSshHelper'
 
 STLClient = mock.MagicMock()
 stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
index 07a862a..455e44e 100644 (file)
 # Unittest for yardstick.network_services.vnf_generic.vnf.sample_vnf
 
 from __future__ import absolute_import
+
 import unittest
 import mock
 from copy import deepcopy
 
 from tests.unit.network_services.vnf_generic.vnf.test_base import mock_ssh
+from tests.unit import STL_MOCKS
 from yardstick.benchmark.contexts.base import Context
 from yardstick.network_services.nfvi.resource import ResourceProfile
 from yardstick.network_services.traffic_profile.base import TrafficProfile
@@ -34,66 +36,6 @@ class MockError(BaseException):
     pass
 
 
-STL_MOCKS = {
-    'stl': mock.MagicMock(),
-    'stl.trex_stl_lib': mock.MagicMock(),
-    'stl.trex_stl_lib.base64': mock.MagicMock(),
-    'stl.trex_stl_lib.binascii': mock.MagicMock(),
-    'stl.trex_stl_lib.collections': mock.MagicMock(),
-    'stl.trex_stl_lib.copy': mock.MagicMock(),
-    'stl.trex_stl_lib.datetime': mock.MagicMock(),
-    'stl.trex_stl_lib.functools': mock.MagicMock(),
-    'stl.trex_stl_lib.imp': mock.MagicMock(),
-    'stl.trex_stl_lib.inspect': mock.MagicMock(),
-    'stl.trex_stl_lib.json': mock.MagicMock(),
-    'stl.trex_stl_lib.linecache': mock.MagicMock(),
-    'stl.trex_stl_lib.math': mock.MagicMock(),
-    'stl.trex_stl_lib.os': mock.MagicMock(),
-    'stl.trex_stl_lib.platform': mock.MagicMock(),
-    'stl.trex_stl_lib.pprint': mock.MagicMock(),
-    'stl.trex_stl_lib.random': mock.MagicMock(),
-    'stl.trex_stl_lib.re': mock.MagicMock(),
-    'stl.trex_stl_lib.scapy': mock.MagicMock(),
-    'stl.trex_stl_lib.socket': mock.MagicMock(),
-    'stl.trex_stl_lib.string': mock.MagicMock(),
-    'stl.trex_stl_lib.struct': mock.MagicMock(),
-    'stl.trex_stl_lib.sys': mock.MagicMock(),
-    'stl.trex_stl_lib.threading': mock.MagicMock(),
-    'stl.trex_stl_lib.time': mock.MagicMock(),
-    'stl.trex_stl_lib.traceback': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_async_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_exceptions': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_ext': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_jsonrpc_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_packet_builder_interface': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_packet_builder_scapy': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_port': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_stats': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_streams': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_types': mock.MagicMock(),
-    'stl.trex_stl_lib.types': mock.MagicMock(),
-    'stl.trex_stl_lib.utils': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.argparse': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.collections': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.common': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.json': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.os': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.parsing_opts': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.pwd': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.random': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.re': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.string': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.sys': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.text_opts': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.text_tables': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.texttable': mock.MagicMock(),
-    'stl.trex_stl_lib.warnings': mock.MagicMock(),
-    'stl.trex_stl_lib.yaml': mock.MagicMock(),
-    'stl.trex_stl_lib.zlib': mock.MagicMock(),
-    'stl.trex_stl_lib.zmq': mock.MagicMock(),
-}
-
 STLClient = mock.MagicMock()
 stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
 stl_patch.start()
index cda4412..5c81aa8 100644 (file)
 #
 
 from __future__ import absolute_import
+
 import unittest
 import mock
 import subprocess
 
-STL_MOCKS = {
-    'stl': mock.MagicMock(),
-    'stl.trex_stl_lib': mock.MagicMock(),
-    'stl.trex_stl_lib.base64': mock.MagicMock(),
-    'stl.trex_stl_lib.binascii': mock.MagicMock(),
-    'stl.trex_stl_lib.collections': mock.MagicMock(),
-    'stl.trex_stl_lib.copy': mock.MagicMock(),
-    'stl.trex_stl_lib.datetime': mock.MagicMock(),
-    'stl.trex_stl_lib.functools': mock.MagicMock(),
-    'stl.trex_stl_lib.imp': mock.MagicMock(),
-    'stl.trex_stl_lib.inspect': mock.MagicMock(),
-    'stl.trex_stl_lib.json': mock.MagicMock(),
-    'stl.trex_stl_lib.linecache': mock.MagicMock(),
-    'stl.trex_stl_lib.math': mock.MagicMock(),
-    'stl.trex_stl_lib.os': mock.MagicMock(),
-    'stl.trex_stl_lib.platform': mock.MagicMock(),
-    'stl.trex_stl_lib.pprint': mock.MagicMock(),
-    'stl.trex_stl_lib.random': mock.MagicMock(),
-    'stl.trex_stl_lib.re': mock.MagicMock(),
-    'stl.trex_stl_lib.scapy': mock.MagicMock(),
-    'stl.trex_stl_lib.socket': mock.MagicMock(),
-    'stl.trex_stl_lib.string': mock.MagicMock(),
-    'stl.trex_stl_lib.struct': mock.MagicMock(),
-    'stl.trex_stl_lib.sys': mock.MagicMock(),
-    'stl.trex_stl_lib.threading': mock.MagicMock(),
-    'stl.trex_stl_lib.time': mock.MagicMock(),
-    'stl.trex_stl_lib.traceback': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_async_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_exceptions': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_ext': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_jsonrpc_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_packet_builder_interface': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_packet_builder_scapy': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_port': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_stats': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_streams': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_types': mock.MagicMock(),
-    'stl.trex_stl_lib.types': mock.MagicMock(),
-    'stl.trex_stl_lib.utils': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.argparse': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.collections': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.common': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.json': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.os': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.parsing_opts': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.pwd': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.random': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.re': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.string': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.sys': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.text_opts': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.text_tables': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.texttable': mock.MagicMock(),
-    'stl.trex_stl_lib.warnings': mock.MagicMock(),
-    'stl.trex_stl_lib.yaml': mock.MagicMock(),
-    'stl.trex_stl_lib.zlib': mock.MagicMock(),
-    'stl.trex_stl_lib.zmq': mock.MagicMock(),
-}
+from tests.unit import STL_MOCKS
+
 
 STLClient = mock.MagicMock()
 stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
index 949bfb3..45bbfae 100644 (file)
 #
 
 from __future__ import absolute_import
+
 import unittest
 import mock
 from multiprocessing import Queue
 
 from tests.unit.network_services.vnf_generic.vnf.test_base import mock_ssh
+from tests.unit import STL_MOCKS
 
 SSH_HELPER = "yardstick.network_services.vnf_generic.vnf.sample_vnf.VnfSshHelper"
 
-STL_MOCKS = {
-    'stl': mock.MagicMock(),
-    'stl.trex_stl_lib': mock.MagicMock(),
-    'stl.trex_stl_lib.base64': mock.MagicMock(),
-    'stl.trex_stl_lib.binascii': mock.MagicMock(),
-    'stl.trex_stl_lib.collections': mock.MagicMock(),
-    'stl.trex_stl_lib.copy': mock.MagicMock(),
-    'stl.trex_stl_lib.datetime': mock.MagicMock(),
-    'stl.trex_stl_lib.functools': mock.MagicMock(),
-    'stl.trex_stl_lib.imp': mock.MagicMock(),
-    'stl.trex_stl_lib.inspect': mock.MagicMock(),
-    'stl.trex_stl_lib.json': mock.MagicMock(),
-    'stl.trex_stl_lib.linecache': mock.MagicMock(),
-    'stl.trex_stl_lib.math': mock.MagicMock(),
-    'stl.trex_stl_lib.os': mock.MagicMock(),
-    'stl.trex_stl_lib.platform': mock.MagicMock(),
-    'stl.trex_stl_lib.pprint': mock.MagicMock(),
-    'stl.trex_stl_lib.random': mock.MagicMock(),
-    'stl.trex_stl_lib.re': mock.MagicMock(),
-    'stl.trex_stl_lib.scapy': mock.MagicMock(),
-    'stl.trex_stl_lib.socket': mock.MagicMock(),
-    'stl.trex_stl_lib.string': mock.MagicMock(),
-    'stl.trex_stl_lib.struct': mock.MagicMock(),
-    'stl.trex_stl_lib.sys': mock.MagicMock(),
-    'stl.trex_stl_lib.threading': mock.MagicMock(),
-    'stl.trex_stl_lib.time': mock.MagicMock(),
-    'stl.trex_stl_lib.traceback': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_async_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_exceptions': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_ext': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_jsonrpc_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_packet_builder_interface': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_packet_builder_scapy': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_port': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_stats': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_streams': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_types': mock.MagicMock(),
-    'stl.trex_stl_lib.types': mock.MagicMock(),
-    'stl.trex_stl_lib.utils': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.argparse': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.collections': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.common': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.json': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.os': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.parsing_opts': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.pwd': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.random': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.re': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.string': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.sys': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.text_opts': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.text_tables': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.texttable': mock.MagicMock(),
-    'stl.trex_stl_lib.warnings': mock.MagicMock(),
-    'stl.trex_stl_lib.yaml': mock.MagicMock(),
-    'stl.trex_stl_lib.zlib': mock.MagicMock(),
-    'stl.trex_stl_lib.zmq': mock.MagicMock(),
-}
-
 STLClient = mock.MagicMock()
 stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
 stl_patch.start()
index 1a01b9e..12abadf 100644 (file)
 #
 
 from __future__ import absolute_import
+
 import unittest
 import mock
 
 from tests.unit.network_services.vnf_generic.vnf.test_base import mock_ssh
+from tests.unit import STL_MOCKS
 
 
 SSH_HELPER = 'yardstick.network_services.vnf_generic.vnf.sample_vnf.VnfSshHelper'
 NAME = 'vnf__1'
 
-
-STL_MOCKS = {
-    'stl': mock.MagicMock(),
-    'stl.trex_stl_lib': mock.MagicMock(),
-    'stl.trex_stl_lib.base64': mock.MagicMock(),
-    'stl.trex_stl_lib.binascii': mock.MagicMock(),
-    'stl.trex_stl_lib.collections': mock.MagicMock(),
-    'stl.trex_stl_lib.copy': mock.MagicMock(),
-    'stl.trex_stl_lib.datetime': mock.MagicMock(),
-    'stl.trex_stl_lib.functools': mock.MagicMock(),
-    'stl.trex_stl_lib.imp': mock.MagicMock(),
-    'stl.trex_stl_lib.inspect': mock.MagicMock(),
-    'stl.trex_stl_lib.json': mock.MagicMock(),
-    'stl.trex_stl_lib.linecache': mock.MagicMock(),
-    'stl.trex_stl_lib.math': mock.MagicMock(),
-    'stl.trex_stl_lib.os': mock.MagicMock(),
-    'stl.trex_stl_lib.platform': mock.MagicMock(),
-    'stl.trex_stl_lib.pprint': mock.MagicMock(),
-    'stl.trex_stl_lib.random': mock.MagicMock(),
-    'stl.trex_stl_lib.re': mock.MagicMock(),
-    'stl.trex_stl_lib.scapy': mock.MagicMock(),
-    'stl.trex_stl_lib.socket': mock.MagicMock(),
-    'stl.trex_stl_lib.string': mock.MagicMock(),
-    'stl.trex_stl_lib.struct': mock.MagicMock(),
-    'stl.trex_stl_lib.sys': mock.MagicMock(),
-    'stl.trex_stl_lib.threading': mock.MagicMock(),
-    'stl.trex_stl_lib.time': mock.MagicMock(),
-    'stl.trex_stl_lib.traceback': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_async_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_exceptions': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_ext': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_jsonrpc_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_packet_builder_interface': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_packet_builder_scapy': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_port': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_stats': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_streams': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_types': mock.MagicMock(),
-    'stl.trex_stl_lib.types': mock.MagicMock(),
-    'stl.trex_stl_lib.utils': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.argparse': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.collections': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.common': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.json': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.os': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.parsing_opts': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.pwd': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.random': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.re': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.string': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.sys': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.text_opts': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.text_tables': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.texttable': mock.MagicMock(),
-    'stl.trex_stl_lib.warnings': mock.MagicMock(),
-    'stl.trex_stl_lib.yaml': mock.MagicMock(),
-    'stl.trex_stl_lib.zlib': mock.MagicMock(),
-    'stl.trex_stl_lib.zmq': mock.MagicMock(),
-}
-
 STLClient = mock.MagicMock()
 stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
 stl_patch.start()
index 8f7f057..ca8150c 100644 (file)
 #
 
 from __future__ import absolute_import
+
 import os
 import unittest
 import mock
-STL_MOCKS = {
-    'stl': mock.MagicMock(),
-    'stl.trex_stl_lib': mock.MagicMock(),
-    'stl.trex_stl_lib.base64': mock.MagicMock(),
-    'stl.trex_stl_lib.binascii': mock.MagicMock(),
-    'stl.trex_stl_lib.collections': mock.MagicMock(),
-    'stl.trex_stl_lib.copy': mock.MagicMock(),
-    'stl.trex_stl_lib.datetime': mock.MagicMock(),
-    'stl.trex_stl_lib.functools': mock.MagicMock(),
-    'stl.trex_stl_lib.imp': mock.MagicMock(),
-    'stl.trex_stl_lib.inspect': mock.MagicMock(),
-    'stl.trex_stl_lib.json': mock.MagicMock(),
-    'stl.trex_stl_lib.linecache': mock.MagicMock(),
-    'stl.trex_stl_lib.math': mock.MagicMock(),
-    'stl.trex_stl_lib.os': mock.MagicMock(),
-    'stl.trex_stl_lib.platform': mock.MagicMock(),
-    'stl.trex_stl_lib.pprint': mock.MagicMock(),
-    'stl.trex_stl_lib.random': mock.MagicMock(),
-    'stl.trex_stl_lib.re': mock.MagicMock(),
-    'stl.trex_stl_lib.scapy': mock.MagicMock(),
-    'stl.trex_stl_lib.socket': mock.MagicMock(),
-    'stl.trex_stl_lib.string': mock.MagicMock(),
-    'stl.trex_stl_lib.struct': mock.MagicMock(),
-    'stl.trex_stl_lib.sys': mock.MagicMock(),
-    'stl.trex_stl_lib.threading': mock.MagicMock(),
-    'stl.trex_stl_lib.time': mock.MagicMock(),
-    'stl.trex_stl_lib.traceback': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_async_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_exceptions': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_ext': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_jsonrpc_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_packet_builder_interface': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_packet_builder_scapy': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_port': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_stats': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_streams': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_types': mock.MagicMock(),
-    'stl.trex_stl_lib.types': mock.MagicMock(),
-    'stl.trex_stl_lib.utils': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.argparse': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.collections': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.common': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.json': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.os': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.parsing_opts': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.pwd': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.random': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.re': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.string': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.sys': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.text_opts': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.text_tables': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.texttable': mock.MagicMock(),
-    'stl.trex_stl_lib.warnings': mock.MagicMock(),
-    'stl.trex_stl_lib.yaml': mock.MagicMock(),
-    'stl.trex_stl_lib.zlib': mock.MagicMock(),
-    'stl.trex_stl_lib.zmq': mock.MagicMock(),
-}
+
+from tests.unit import STL_MOCKS
+
 
 STLClient = mock.MagicMock()
 stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
index 7dc3038..ad8c649 100644 (file)
 #
 
 from __future__ import absolute_import
+
 import unittest
 import mock
 
-SSH_HELPER = "yardstick.ssh.SSH"
+from tests.unit import STL_MOCKS
 
-STL_MOCKS = {
-    'stl': mock.MagicMock(),
-    'stl.trex_stl_lib': mock.MagicMock(),
-    'stl.trex_stl_lib.base64': mock.MagicMock(),
-    'stl.trex_stl_lib.binascii': mock.MagicMock(),
-    'stl.trex_stl_lib.collections': mock.MagicMock(),
-    'stl.trex_stl_lib.copy': mock.MagicMock(),
-    'stl.trex_stl_lib.datetime': mock.MagicMock(),
-    'stl.trex_stl_lib.functools': mock.MagicMock(),
-    'stl.trex_stl_lib.imp': mock.MagicMock(),
-    'stl.trex_stl_lib.inspect': mock.MagicMock(),
-    'stl.trex_stl_lib.json': mock.MagicMock(),
-    'stl.trex_stl_lib.linecache': mock.MagicMock(),
-    'stl.trex_stl_lib.math': mock.MagicMock(),
-    'stl.trex_stl_lib.os': mock.MagicMock(),
-    'stl.trex_stl_lib.platform': mock.MagicMock(),
-    'stl.trex_stl_lib.pprint': mock.MagicMock(),
-    'stl.trex_stl_lib.random': mock.MagicMock(),
-    'stl.trex_stl_lib.re': mock.MagicMock(),
-    'stl.trex_stl_lib.scapy': mock.MagicMock(),
-    'stl.trex_stl_lib.socket': mock.MagicMock(),
-    'stl.trex_stl_lib.string': mock.MagicMock(),
-    'stl.trex_stl_lib.struct': mock.MagicMock(),
-    'stl.trex_stl_lib.sys': mock.MagicMock(),
-    'stl.trex_stl_lib.threading': mock.MagicMock(),
-    'stl.trex_stl_lib.time': mock.MagicMock(),
-    'stl.trex_stl_lib.traceback': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_async_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_exceptions': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_ext': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_jsonrpc_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_packet_builder_interface': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_packet_builder_scapy': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_port': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_stats': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_streams': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_types': mock.MagicMock(),
-    'stl.trex_stl_lib.types': mock.MagicMock(),
-    'stl.trex_stl_lib.utils': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.argparse': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.collections': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.common': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.json': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.os': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.parsing_opts': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.pwd': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.random': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.re': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.string': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.sys': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.text_opts': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.text_tables': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.texttable': mock.MagicMock(),
-    'stl.trex_stl_lib.warnings': mock.MagicMock(),
-    'stl.trex_stl_lib.yaml': mock.MagicMock(),
-    'stl.trex_stl_lib.zlib': mock.MagicMock(),
-    'stl.trex_stl_lib.zmq': mock.MagicMock(),
-}
+
+SSH_HELPER = "yardstick.ssh.SSH"
 
 STLClient = mock.MagicMock()
 stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
@@ -289,7 +233,7 @@ class TestTrexTrafficGenRFC(unittest.TestCase):
     def test_collect_kpi(self, ssh):
         mock_ssh(ssh)
         trex_traffic_gen = TrexTrafficGenRFC('vnf1', self.VNFD_0)
-        self.assertIsNone(trex_traffic_gen.collect_kpi())
+        self.assertEqual(trex_traffic_gen.collect_kpi(), {})
 
     @mock.patch(SSH_HELPER)
     def test_listen_traffic(self, ssh):
index 6fb5d08..65370df 100644 (file)
 #
 
 from __future__ import absolute_import
+
 import unittest
 import mock
 
 from tests.unit.network_services.vnf_generic.vnf.test_base import mock_ssh
+from tests.unit import STL_MOCKS
 
 
 NAME = 'vnf_1'
 
-STL_MOCKS = {
-    'stl': mock.MagicMock(),
-    'stl.trex_stl_lib': mock.MagicMock(),
-    'stl.trex_stl_lib.base64': mock.MagicMock(),
-    'stl.trex_stl_lib.binascii': mock.MagicMock(),
-    'stl.trex_stl_lib.collections': mock.MagicMock(),
-    'stl.trex_stl_lib.copy': mock.MagicMock(),
-    'stl.trex_stl_lib.datetime': mock.MagicMock(),
-    'stl.trex_stl_lib.functools': mock.MagicMock(),
-    'stl.trex_stl_lib.imp': mock.MagicMock(),
-    'stl.trex_stl_lib.inspect': mock.MagicMock(),
-    'stl.trex_stl_lib.json': mock.MagicMock(),
-    'stl.trex_stl_lib.linecache': mock.MagicMock(),
-    'stl.trex_stl_lib.math': mock.MagicMock(),
-    'stl.trex_stl_lib.os': mock.MagicMock(),
-    'stl.trex_stl_lib.platform': mock.MagicMock(),
-    'stl.trex_stl_lib.pprint': mock.MagicMock(),
-    'stl.trex_stl_lib.random': mock.MagicMock(),
-    'stl.trex_stl_lib.re': mock.MagicMock(),
-    'stl.trex_stl_lib.scapy': mock.MagicMock(),
-    'stl.trex_stl_lib.socket': mock.MagicMock(),
-    'stl.trex_stl_lib.string': mock.MagicMock(),
-    'stl.trex_stl_lib.struct': mock.MagicMock(),
-    'stl.trex_stl_lib.sys': mock.MagicMock(),
-    'stl.trex_stl_lib.threading': mock.MagicMock(),
-    'stl.trex_stl_lib.time': mock.MagicMock(),
-    'stl.trex_stl_lib.traceback': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_async_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_exceptions': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_ext': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_jsonrpc_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_packet_builder_interface': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_packet_builder_scapy': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_port': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_stats': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_streams': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_types': mock.MagicMock(),
-    'stl.trex_stl_lib.types': mock.MagicMock(),
-    'stl.trex_stl_lib.utils': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.argparse': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.collections': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.common': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.json': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.os': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.parsing_opts': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.pwd': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.random': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.re': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.string': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.sys': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.text_opts': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.text_tables': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.texttable': mock.MagicMock(),
-    'stl.trex_stl_lib.warnings': mock.MagicMock(),
-    'stl.trex_stl_lib.yaml': mock.MagicMock(),
-    'stl.trex_stl_lib.zlib': mock.MagicMock(),
-    'stl.trex_stl_lib.zmq': mock.MagicMock(),
-}
-
 STLClient = mock.MagicMock()
 stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
 stl_patch.start()
index 08bf06b..f0d75d5 100644 (file)
 #
 
 from __future__ import absolute_import
+
 import unittest
 import mock
 import os
 
+from tests.unit import STL_MOCKS
 
-STL_MOCKS = {
-    'stl': mock.MagicMock(),
-    'stl.trex_stl_lib': mock.MagicMock(),
-    'stl.trex_stl_lib.base64': mock.MagicMock(),
-    'stl.trex_stl_lib.binascii': mock.MagicMock(),
-    'stl.trex_stl_lib.collections': mock.MagicMock(),
-    'stl.trex_stl_lib.copy': mock.MagicMock(),
-    'stl.trex_stl_lib.datetime': mock.MagicMock(),
-    'stl.trex_stl_lib.functools': mock.MagicMock(),
-    'stl.trex_stl_lib.imp': mock.MagicMock(),
-    'stl.trex_stl_lib.inspect': mock.MagicMock(),
-    'stl.trex_stl_lib.json': mock.MagicMock(),
-    'stl.trex_stl_lib.linecache': mock.MagicMock(),
-    'stl.trex_stl_lib.math': mock.MagicMock(),
-    'stl.trex_stl_lib.os': mock.MagicMock(),
-    'stl.trex_stl_lib.platform': mock.MagicMock(),
-    'stl.trex_stl_lib.pprint': mock.MagicMock(),
-    'stl.trex_stl_lib.random': mock.MagicMock(),
-    'stl.trex_stl_lib.re': mock.MagicMock(),
-    'stl.trex_stl_lib.scapy': mock.MagicMock(),
-    'stl.trex_stl_lib.socket': mock.MagicMock(),
-    'stl.trex_stl_lib.string': mock.MagicMock(),
-    'stl.trex_stl_lib.struct': mock.MagicMock(),
-    'stl.trex_stl_lib.sys': mock.MagicMock(),
-    'stl.trex_stl_lib.threading': mock.MagicMock(),
-    'stl.trex_stl_lib.time': mock.MagicMock(),
-    'stl.trex_stl_lib.traceback': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_async_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_exceptions': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_ext': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_jsonrpc_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_packet_builder_interface': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_packet_builder_scapy': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_port': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_stats': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_streams': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_types': mock.MagicMock(),
-    'stl.trex_stl_lib.types': mock.MagicMock(),
-    'stl.trex_stl_lib.utils': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.argparse': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.collections': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.common': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.json': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.os': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.parsing_opts': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.pwd': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.random': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.re': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.string': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.sys': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.text_opts': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.text_tables': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.texttable': mock.MagicMock(),
-    'stl.trex_stl_lib.warnings': mock.MagicMock(),
-    'stl.trex_stl_lib.yaml': mock.MagicMock(),
-    'stl.trex_stl_lib.zlib': mock.MagicMock(),
-    'stl.trex_stl_lib.zmq': mock.MagicMock(),
-}
 
 STLClient = mock.MagicMock()
 stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
index d817b16..7dae89f 100644 (file)
 #
 
 from __future__ import absolute_import
+
 import unittest
 import mock
 import os
 
-STL_MOCKS = {
-    'stl': mock.MagicMock(),
-    'stl.trex_stl_lib': mock.MagicMock(),
-    'stl.trex_stl_lib.base64': mock.MagicMock(),
-    'stl.trex_stl_lib.binascii': mock.MagicMock(),
-    'stl.trex_stl_lib.collections': mock.MagicMock(),
-    'stl.trex_stl_lib.copy': mock.MagicMock(),
-    'stl.trex_stl_lib.datetime': mock.MagicMock(),
-    'stl.trex_stl_lib.functools': mock.MagicMock(),
-    'stl.trex_stl_lib.imp': mock.MagicMock(),
-    'stl.trex_stl_lib.inspect': mock.MagicMock(),
-    'stl.trex_stl_lib.json': mock.MagicMock(),
-    'stl.trex_stl_lib.linecache': mock.MagicMock(),
-    'stl.trex_stl_lib.math': mock.MagicMock(),
-    'stl.trex_stl_lib.os': mock.MagicMock(),
-    'stl.trex_stl_lib.platform': mock.MagicMock(),
-    'stl.trex_stl_lib.pprint': mock.MagicMock(),
-    'stl.trex_stl_lib.random': mock.MagicMock(),
-    'stl.trex_stl_lib.re': mock.MagicMock(),
-    'stl.trex_stl_lib.scapy': mock.MagicMock(),
-    'stl.trex_stl_lib.socket': mock.MagicMock(),
-    'stl.trex_stl_lib.string': mock.MagicMock(),
-    'stl.trex_stl_lib.struct': mock.MagicMock(),
-    'stl.trex_stl_lib.sys': mock.MagicMock(),
-    'stl.trex_stl_lib.threading': mock.MagicMock(),
-    'stl.trex_stl_lib.time': mock.MagicMock(),
-    'stl.trex_stl_lib.traceback': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_async_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_exceptions': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_ext': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_jsonrpc_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_packet_builder_interface': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_packet_builder_scapy': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_port': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_stats': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_streams': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_types': mock.MagicMock(),
-    'stl.trex_stl_lib.types': mock.MagicMock(),
-    'stl.trex_stl_lib.utils': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.argparse': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.collections': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.common': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.json': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.os': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.parsing_opts': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.pwd': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.random': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.re': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.string': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.sys': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.text_opts': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.text_tables': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.texttable': mock.MagicMock(),
-    'stl.trex_stl_lib.warnings': mock.MagicMock(),
-    'stl.trex_stl_lib.yaml': mock.MagicMock(),
-    'stl.trex_stl_lib.zlib': mock.MagicMock(),
-    'stl.trex_stl_lib.zmq': mock.MagicMock(),
-}
+from tests.unit import STL_MOCKS
+
 
 STLClient = mock.MagicMock()
 stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
index 80b4a51..5e66390 100644 (file)
 #
 
 from __future__ import absolute_import
+import six.moves.configparser as configparser
 
 import os
 import unittest
-
-import six.moves.configparser as configparser
 import mock
 from multiprocessing import Process, Queue
 
+from tests.unit import STL_MOCKS
 from yardstick.network_services.vnf_generic.vnf.base import QueueFileWrapper
 
-SSH_HELPER = 'yardstick.network_services.vnf_generic.vnf.sample_vnf.VnfSshHelper'
 
-STL_MOCKS = {
-    'stl': mock.MagicMock(),
-    'stl.trex_stl_lib': mock.MagicMock(),
-    'stl.trex_stl_lib.base64': mock.MagicMock(),
-    'stl.trex_stl_lib.binascii': mock.MagicMock(),
-    'stl.trex_stl_lib.collections': mock.MagicMock(),
-    'stl.trex_stl_lib.copy': mock.MagicMock(),
-    'stl.trex_stl_lib.datetime': mock.MagicMock(),
-    'stl.trex_stl_lib.functools': mock.MagicMock(),
-    'stl.trex_stl_lib.imp': mock.MagicMock(),
-    'stl.trex_stl_lib.inspect': mock.MagicMock(),
-    'stl.trex_stl_lib.json': mock.MagicMock(),
-    'stl.trex_stl_lib.linecache': mock.MagicMock(),
-    'stl.trex_stl_lib.math': mock.MagicMock(),
-    'stl.trex_stl_lib.os': mock.MagicMock(),
-    'stl.trex_stl_lib.platform': mock.MagicMock(),
-    'stl.trex_stl_lib.pprint': mock.MagicMock(),
-    'stl.trex_stl_lib.random': mock.MagicMock(),
-    'stl.trex_stl_lib.re': mock.MagicMock(),
-    'stl.trex_stl_lib.scapy': mock.MagicMock(),
-    'stl.trex_stl_lib.socket': mock.MagicMock(),
-    'stl.trex_stl_lib.string': mock.MagicMock(),
-    'stl.trex_stl_lib.struct': mock.MagicMock(),
-    'stl.trex_stl_lib.sys': mock.MagicMock(),
-    'stl.trex_stl_lib.threading': mock.MagicMock(),
-    'stl.trex_stl_lib.time': mock.MagicMock(),
-    'stl.trex_stl_lib.traceback': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_async_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_exceptions': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_ext': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_jsonrpc_client': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_packet_builder_interface': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_packet_builder_scapy': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_port': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_stats': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_streams': mock.MagicMock(),
-    'stl.trex_stl_lib.trex_stl_types': mock.MagicMock(),
-    'stl.trex_stl_lib.types': mock.MagicMock(),
-    'stl.trex_stl_lib.utils': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.argparse': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.collections': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.common': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.json': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.os': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.parsing_opts': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.pwd': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.random': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.re': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.string': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.sys': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.text_opts': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.text_tables': mock.MagicMock(),
-    'stl.trex_stl_lib.utils.texttable': mock.MagicMock(),
-    'stl.trex_stl_lib.warnings': mock.MagicMock(),
-    'stl.trex_stl_lib.yaml': mock.MagicMock(),
-    'stl.trex_stl_lib.zlib': mock.MagicMock(),
-    'stl.trex_stl_lib.zmq': mock.MagicMock(),
-}
+SSH_HELPER = 'yardstick.network_services.vnf_generic.vnf.sample_vnf.VnfSshHelper'
 
 STLClient = mock.MagicMock()
 stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
index c8d53e3..e52c107 100644 (file)
@@ -163,7 +163,8 @@ class HeatContext(Context):
                                  network.physical_network,
                                  network.provider,
                                  network.segmentation_id,
-                                 network.port_security_enabled)
+                                 network.port_security_enabled,
+                                 network.network_type)
             template.add_subnet(network.subnet_stack_name, network.stack_name,
                                 network.subnet_cidr,
                                 network.enable_dhcp,
index aa144ab..ee7ea7d 100644 (file)
@@ -40,3 +40,7 @@ stress-cpu:
 block-io:
   inject_script: ha_tools/disk/block_io.bash
   recovery_script: ha_tools/disk/recovery_disk_io.bash
+
+kill-corosync:
+  inject_script: ha_tools/fault_process_kill.bash
+  recovery_script: ha_tools/node/reboot_node.bash
\ No newline at end of file
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/node/reboot_node.bash b/yardstick/benchmark/scenarios/availability/ha_tools/node/reboot_node.bash
new file mode 100644 (file)
index 0000000..1ee8c9c
--- /dev/null
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+##############################################################################
+# (c) OPNFV, Yin Kanglin and others.
+# 14_ykl@tongji.edu.cn
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# reboot node
+
+reboot
\ No newline at end of file
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/nova/get_server_floatingip.bash b/yardstick/benchmark/scenarios/availability/ha_tools/nova/get_server_floatingip.bash
new file mode 100644 (file)
index 0000000..78dd276
--- /dev/null
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+##############################################################################
+# (c) OPNFV, Yin Kanglin and others.
+# 14_ykl@tongji.edu.cn
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# get floating ip of a server
+# parameter: $1 - server name
+
+set -e
+
+if [ $OS_INSECURE ] && [ "$(echo $OS_INSECURE | tr '[:upper:]' '[:lower:]')" = "true" ]; then
+    SECURE="--insecure"
+else
+    SECURE=""
+fi
+
+openstack ${SECURE} server list -f value | grep $1 | awk '{print $5}'
\ No newline at end of file
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/nova/list_servers.bash b/yardstick/benchmark/scenarios/availability/ha_tools/nova/list_servers.bash
new file mode 100644 (file)
index 0000000..0f67c02
--- /dev/null
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+##############################################################################
+# (c) OPNFV, Yin Kanglin and others.
+# 14_ykl@tongji.edu.cn
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# list servers
+
+set -e
+
+if [ $OS_INSECURE ] && [ "$(echo $OS_INSECURE | tr '[:upper:]' '[:lower:]')" = "true" ]; then
+    SECURE="--insecure"
+else
+    SECURE=""
+fi
+
+openstack ${SECURE} server list
\ No newline at end of file
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/pacemaker/get_resource_status.bash b/yardstick/benchmark/scenarios/availability/ha_tools/pacemaker/get_resource_status.bash
new file mode 100644 (file)
index 0000000..68707cf
--- /dev/null
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+##############################################################################
+# (c) OPNFV, Yin Kanglin and others.
+# 14_ykl@tongji.edu.cn
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# get pacemaker resource status
+
+pcs resource show
\ No newline at end of file
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/pacemaker/get_resource_status_host.bash b/yardstick/benchmark/scenarios/availability/ha_tools/pacemaker/get_resource_status_host.bash
new file mode 100644 (file)
index 0000000..7a02ccf
--- /dev/null
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+##############################################################################
+# (c) OPNFV, Yin Kanglin and others.
+# 14_ykl@tongji.edu.cn
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# get pacemaker resource status of hosts
+# parameter: $1 - resource name $2 status
+
+pcs resource show | grep $1 -A 3 | grep $2
\ No newline at end of file
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/pacemaker/get_vip_host.bash b/yardstick/benchmark/scenarios/availability/ha_tools/pacemaker/get_vip_host.bash
new file mode 100644 (file)
index 0000000..f4870fd
--- /dev/null
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+##############################################################################
+# (c) OPNFV, Yin Kanglin and others.
+# 14_ykl@tongji.edu.cn
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# get vip host in pacemaker
+# parameter: $1 - virtual ip name
+
+pcs resource show| grep -w $1 | awk '{print $4}'
\ No newline at end of file
index 1c39385..dc51691 100644 (file)
@@ -25,4 +25,13 @@ swift-download-file:
 
 nova-create-flavor:
   action_script: ha_tools/nova/create_flavor.bash
-  rollback_script: ha_tools/nova/delete_flavor.bash
\ No newline at end of file
+  rollback_script: ha_tools/nova/delete_flavor.bash
+
+get-floatingip:
+  action_script: ha_tools/nova/get_server_floatingip.bash
+  rollback_script: ha_tools/nova/list_servers.bash
+
+get-vip-host:
+  action_script: ha_tools/pacemaker/get_vip_host.bash
+  rollback_script: ha_tools/pacemaker/get_resource_status.bash
+
index 0494a71..451cc0f 100644 (file)
@@ -18,4 +18,6 @@ service-checker:
 nova-instance-checker:
   verify_script: ha_tools/nova/show_instances.bash
 nova-flavor-checker:
-  verify_script: ha_tools/nova/show_flavors.bash
\ No newline at end of file
+  verify_script: ha_tools/nova/show_flavors.bash
+pacemaker-resource-checker:
+  verify_script: ha_tools/pacemaker/get_resource_status_host.bash
\ No newline at end of file
index 17ad79f..c7ed1d6 100644 (file)
@@ -26,6 +26,7 @@ class ScenarioGeneral(base.Scenario):
         self.scenario_cfg = scenario_cfg
         self.context_cfg = context_cfg
         self.intermediate_variables = {}
+        self.pass_flag = True
 
     def setup(self):
         self.director = Director(self.scenario_cfg, self.context_cfg)
@@ -63,6 +64,7 @@ class ScenarioGeneral(base.Scenario):
             if v == 0:
                 result['sla_pass'] = 0
                 verify_result = False
+                self.pass_flag = False
                 LOG.info(
                     "\033[92m The service process not found in the host \
 envrioment, the HA test case NOT pass")
@@ -74,9 +76,12 @@ envrioment, the HA test case NOT pass")
                 "the HA test case PASS! \033[0m")
         else:
             result['sla_pass'] = 0
+            self.pass_flag = False
             LOG.info(
                 "\033[91m Aoh, the HA test case FAIL,"
                 "please check the detail debug information! \033[0m")
 
     def teardown(self):
         self.director.knockoff()
+
+        assert self.pass_flag, "The HA test case NOT passed"
index 2f0012e..d0f5e9e 100755 (executable)
@@ -29,6 +29,7 @@ class ServiceHA(base.Scenario):
         self.context_cfg = context_cfg
         self.setup_done = False
         self.data = {}
+        self.pass_flag = True
 
     def setup(self):
         """scenario setup"""
@@ -73,6 +74,7 @@ class ServiceHA(base.Scenario):
         for k, v in self.data.items():
             if v == 0:
                 result['sla_pass'] = 0
+                self.pass_flag = False
                 LOG.info("The service process not found in the host envrioment, \
 the HA test case NOT pass")
                 return
@@ -81,6 +83,7 @@ the HA test case NOT pass")
             LOG.info("The HA test case PASS the SLA")
         else:
             result['sla_pass'] = 0
+            self.pass_flag = False
         assert sla_pass is True, "The HA test case NOT pass the SLA"
 
         return
@@ -90,6 +93,8 @@ the HA test case NOT pass")
         for attacker in self.attackers:
             attacker.recover()
 
+        assert self.pass_flag, "The HA test case NOT passed"
+
 
 def _test():    # pragma: no cover
     """internal test function"""
index 6fef622..d288fcb 100644 (file)
@@ -51,6 +51,8 @@ def build_shell_command(param_config, remote=True, intermediate_variables=None):
 
 
 def read_stdout_item(stdout, key):
+    if key == "all":
+        return stdout
     for item in stdout.splitlines():
         if key in item:
             attributes = item.split("|")
index 68741a9..d49638f 100644 (file)
@@ -27,12 +27,11 @@ run_capacity()
     # Number of logical cores
     THREAD=$(grep 'processor' /proc/cpuinfo | sort -u | wc -l)
     # Total memory size
-    MEMORY=$(grep 'MemTotal' /proc/meminfo | sort -u)
-    ME=$(echo $MEMORY | awk '/ /{printf "%s %s", $2, $3}')
+    MEMORY=$(grep 'MemTotal' /proc/meminfo | sort -u | awk '{print $2}')
+
     # Cache size per CPU
-    CACHE=$(grep 'cache size' /proc/cpuinfo | sort -u)
-    CA=$(echo $CACHE | awk '/ /{printf "%s", $4}')
-    CACHES=$[$CA * $CPU]
+    CACHE=$(grep 'cache size' /proc/cpuinfo | sort -u | awk '{print $4}')
+    CACHES=$[$CACHE * $CPU]
     HT_Value=$[$HT_Para * $CORES]
     if [ $HT_Value -eq $THREAD ]; then
         HT_OPEN=1
@@ -48,8 +47,8 @@ output_json()
         \"Cpu_number\":\"$CPU\", \
         \"Core_number\":\"$CORES\", \
         \"Thread_number\":\"$THREAD\", \
-        \"Memory_size\": \"$ME\", \
-        \"Cache_size\": \"$CACHES KB\", \
+        \"Memory_size\": \"$MEMORY\", \
+        \"Cache_size\": \"$CACHES\", \
         \"HT_Open\": \"$HT_OPEN\" \
     }"
 }
index cee87a5..6c0446b 100644 (file)
@@ -41,10 +41,18 @@ class QemuMigrate(base.Scenario):
 
     def _put_files(self, client):
         setup_options = self.scenario_cfg["setup_options"]
+        rpm_dir = setup_options["rpm_dir"]
         script_dir = setup_options["script_dir"]
+        image_dir = setup_options["image_dir"]
+        LOG.debug("Send RPMs from %s to workspace %s",
+                  rpm_dir, self.WORKSPACE)
+        client.put(rpm_dir, self.WORKSPACE, recursive=True)
         LOG.debug("Send scripts from %s to workspace %s",
                   script_dir, self.WORKSPACE)
         client.put(script_dir, self.WORKSPACE, recursive=True)
+        LOG.debug("Send guest image from %s to workspace %s",
+                  image_dir, self.WORKSPACE)
+        client.put(image_dir, self.WORKSPACE, recursive=True)
 
     def _run_setup_cmd(self, client, cmd):
         LOG.debug("Run cmd: %s", cmd)
@@ -143,10 +151,17 @@ def _test():    # pragma: no cover
         "qmp_sock_dst": "/tmp/qmp-sock-dst",
         "max_down_time": 0.10
     }
+    sla = {
+        "max_totaltime": 10,
+        "max_downtime": 0.10,
+        "max_setuptime": 0.50,
+    }
     args = {
-        "options": options
+        "options": options,
+        "sla": sla
     }
     result = {}
+
     migrate = QemuMigrate(args, ctx)
     migrate.run(result)
     print(result)
diff --git a/yardstick/benchmark/scenarios/lib/attach_volume.py b/yardstick/benchmark/scenarios/lib/attach_volume.py
new file mode 100644 (file)
index 0000000..8812496
--- /dev/null
@@ -0,0 +1,53 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+from __future__ import print_function
+from __future__ import absolute_import
+
+import logging
+
+from yardstick.benchmark.scenarios import base
+import yardstick.common.openstack_utils as op_utils
+
+LOG = logging.getLogger(__name__)
+
+
+class AttachVolume(base.Scenario):
+    """Attach a volume to an instance"""
+
+    __scenario_type__ = "AttachVolume"
+
+    def __init__(self, scenario_cfg, context_cfg):
+        self.scenario_cfg = scenario_cfg
+        self.context_cfg = context_cfg
+        self.options = self.scenario_cfg['options']
+
+        self.server_id = self.options.get("server_id", "TestServer")
+        self.volume_id = self.options.get("volume_id", None)
+
+        self.setup_done = False
+
+    def setup(self):
+        """scenario setup"""
+
+        self.setup_done = True
+
+    def run(self, result):
+        """execute the test"""
+
+        if not self.setup_done:
+            self.setup()
+
+        status = op_utils.attach_server_volume(self.server_id,
+                                               self.volume_id)
+
+        if status:
+            LOG.info("Attach volume to server successful!")
+        else:
+            LOG.info("Attach volume to server failed!")
diff --git a/yardstick/benchmark/scenarios/lib/create_floating_ip.py b/yardstick/benchmark/scenarios/lib/create_floating_ip.py
new file mode 100644 (file)
index 0000000..328566d
--- /dev/null
@@ -0,0 +1,60 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+from __future__ import print_function
+from __future__ import absolute_import
+
+import logging
+import os
+
+from yardstick.benchmark.scenarios import base
+import yardstick.common.openstack_utils as op_utils
+
+LOG = logging.getLogger(__name__)
+
+
+class CreateFloatingIp(base.Scenario):
+    """Create an OpenStack floating ip"""
+
+    __scenario_type__ = "CreateFloatingIp"
+
+    def __init__(self, scenario_cfg, context_cfg):
+        self.scenario_cfg = scenario_cfg
+        self.context_cfg = context_cfg
+        self.ext_net_id = os.getenv("EXTERNAL_NETWORK", "external")
+
+        self.neutron_client = op_utils.get_neutron_client()
+        self.setup_done = False
+
+    def setup(self):
+        """scenario setup"""
+
+        self.setup_done = True
+
+    def run(self, result):
+        """execute the test"""
+
+        if not self.setup_done:
+            self.setup()
+
+        net_id = op_utils.get_network_id(self.neutron_client, self.ext_net_id)
+        floating_info = op_utils.create_floating_ip(self.neutron_client,
+                                                    extnet_id=net_id)
+        if floating_info:
+            LOG.info("Creating floating ip successful!")
+        else:
+            LOG.error("Creating floating ip failed!")
+
+        try:
+            keys = self.scenario_cfg.get('output', '').split()
+        except KeyError:
+            pass
+        else:
+            values = [floating_info["fip_id"], floating_info["fip_addr"]]
+            return self._push_to_outputs(keys, values)
diff --git a/yardstick/benchmark/scenarios/lib/create_keypair.py b/yardstick/benchmark/scenarios/lib/create_keypair.py
new file mode 100644 (file)
index 0000000..2185bfa
--- /dev/null
@@ -0,0 +1,71 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+from __future__ import print_function
+from __future__ import absolute_import
+
+import logging
+import paramiko
+
+from yardstick.benchmark.scenarios import base
+import yardstick.common.openstack_utils as op_utils
+
+LOG = logging.getLogger(__name__)
+
+
+class CreateKeypair(base.Scenario):
+    """Create an OpenStack keypair"""
+
+    __scenario_type__ = "CreateKeypair"
+
+    def __init__(self, scenario_cfg, context_cfg):
+        self.scenario_cfg = scenario_cfg
+        self.context_cfg = context_cfg
+        self.options = self.scenario_cfg['options']
+
+        self.key_name = self.options.get("key_name", "yardstick_key")
+        self.key_filename = self.options.get("key_path", "/tmp/yardstick_key")
+
+        self.setup_done = False
+
+    def setup(self):
+        """scenario setup"""
+
+        self.setup_done = True
+
+    def run(self, result):
+        """execute the test"""
+
+        if not self.setup_done:
+            self.setup()
+
+        rsa_key = paramiko.RSAKey.generate(bits=2048, progress_func=None)
+        rsa_key.write_private_key_file(self.key_filename)
+        print("Writing %s ..." % self.key_filename)
+        with open(self.key_filename + ".pub", "w") as pubkey_file:
+            pubkey_file.write(
+                "%s %s\n" % (rsa_key.get_name(), rsa_key.get_base64()))
+        del rsa_key
+
+        keypair = op_utils.create_keypair(self.key_name,
+                                          self.key_filename + ".pub")
+
+        if keypair:
+            result.update({"keypair_create": 1})
+            LOG.info("Create keypair successful!")
+        else:
+            result.update({"keypair_create": 0})
+            LOG.info("Create keypair failed!")
+        try:
+            keys = self.scenario_cfg.get('output', '').split()
+        except KeyError:
+            pass
+        else:
+            values = [keypair.id]
+            return self._push_to_outputs(keys, values)
diff --git a/yardstick/benchmark/scenarios/lib/create_network.py b/yardstick/benchmark/scenarios/lib/create_network.py
new file mode 100644 (file)
index 0000000..cffff13
--- /dev/null
@@ -0,0 +1,64 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+from __future__ import print_function
+from __future__ import absolute_import
+
+import logging
+
+from yardstick.benchmark.scenarios import base
+import yardstick.common.openstack_utils as op_utils
+
+LOG = logging.getLogger(__name__)
+
+
+class CreateNetwork(base.Scenario):
+    """Create an OpenStack network"""
+
+    __scenario_type__ = "CreateNetwork"
+
+    def __init__(self, scenario_cfg, context_cfg):
+        self.scenario_cfg = scenario_cfg
+        self.context_cfg = context_cfg
+        self.options = self.scenario_cfg['options']
+
+        self.openstack = self.options.get("openstack_paras", None)
+
+        self.neutron_client = op_utils.get_neutron_client()
+
+        self.setup_done = False
+
+    def setup(self):
+        """scenario setup"""
+
+        self.setup_done = True
+
+    def run(self, result):
+        """execute the test"""
+
+        if not self.setup_done:
+            self.setup()
+
+        openstack_paras = {'network': self.openstack}
+        network_id = op_utils.create_neutron_net(self.neutron_client,
+                                                 openstack_paras)
+        if network_id:
+            result.update({"network_create": 1})
+            LOG.info("Create network successful!")
+        else:
+            result.update({"network_create": 0})
+            LOG.error("Create network failed!")
+
+        try:
+            keys = self.scenario_cfg.get('output', '').split()
+        except KeyError:
+            pass
+        else:
+            values = [network_id]
+            return self._push_to_outputs(keys, values)
diff --git a/yardstick/benchmark/scenarios/lib/create_port.py b/yardstick/benchmark/scenarios/lib/create_port.py
new file mode 100644 (file)
index 0000000..6a3a23a
--- /dev/null
@@ -0,0 +1,66 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+from __future__ import print_function
+from __future__ import absolute_import
+
+import logging
+
+from yardstick.benchmark.scenarios import base
+import yardstick.common.openstack_utils as op_utils
+
+LOG = logging.getLogger(__name__)
+
+
+class CreatePort(base.Scenario):
+    """Create an OpenStack port"""
+
+    __scenario_type__ = "CreatePort"
+
+    def __init__(self, scenario_cfg, context_cfg):
+        self.scenario_cfg = scenario_cfg
+        self.context_cfg = context_cfg
+        self.options = self.scenario_cfg['options']
+
+        self.openstack = self.options.get("openstack_paras", None)
+
+        self.neutron_client = op_utils.get_neutron_client()
+
+        self.setup_done = False
+
+    def setup(self):
+        """scenario setup"""
+
+        self.setup_done = True
+
+    def run(self, result):
+        """execute the test"""
+
+        if not self.setup_done:
+            self.setup()
+
+        openstack_paras = {'port': self.openstack}
+        port = self.neutron_client.create_port(openstack_paras)
+
+        if port:
+            result.update({"Port_Create": 1})
+            LOG.info("Create Port successful!")
+        else:
+            result.update({"Port_Create": 0})
+            LOG.error("Create Port failed!")
+
+        check_result = port['port']['id']
+
+        try:
+            keys = self.scenario_cfg.get('output', '').split()
+        except KeyError:
+            pass
+        else:
+            values = [check_result]
+            return self._push_to_outputs(keys, values)
diff --git a/yardstick/benchmark/scenarios/lib/create_router.py b/yardstick/benchmark/scenarios/lib/create_router.py
new file mode 100644 (file)
index 0000000..9aa57eb
--- /dev/null
@@ -0,0 +1,66 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+from __future__ import print_function
+from __future__ import absolute_import
+
+import logging
+
+from yardstick.benchmark.scenarios import base
+import yardstick.common.openstack_utils as op_utils
+
+LOG = logging.getLogger(__name__)
+
+
+class CreateRouter(base.Scenario):
+    """Create an OpenStack router"""
+
+    __scenario_type__ = "CreateRouter"
+
+    def __init__(self, scenario_cfg, context_cfg):
+        self.scenario_cfg = scenario_cfg
+        self.context_cfg = context_cfg
+        self.options = self.scenario_cfg['options']
+
+        self.openstack = self.options.get("openstack_paras", None)
+
+        self.neutron_client = op_utils.get_neutron_client()
+
+        self.setup_done = False
+
+    def setup(self):
+        """scenario setup"""
+
+        self.setup_done = True
+
+    def run(self, result):
+        """execute the test"""
+
+        if not self.setup_done:
+            self.setup()
+
+        openstack_paras = {'router': self.openstack}
+        router_id = op_utils.create_neutron_router(self.neutron_client,
+                                                   openstack_paras)
+        if router_id:
+            result.update({"network_create": 1})
+            LOG.info("Create router successful!")
+        else:
+            result.update({"network_create": 0})
+            LOG.error("Create router failed!")
+
+        check_result = router_id
+
+        try:
+            keys = self.scenario_cfg.get('output', '').split()
+        except KeyError:
+            pass
+        else:
+            values = [check_result]
+            return self._push_to_outputs(keys, values)
diff --git a/yardstick/benchmark/scenarios/lib/create_sec_group.py b/yardstick/benchmark/scenarios/lib/create_sec_group.py
new file mode 100644 (file)
index 0000000..3d1aec9
--- /dev/null
@@ -0,0 +1,65 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+from __future__ import print_function
+from __future__ import absolute_import
+
+import logging
+
+from yardstick.benchmark.scenarios import base
+import yardstick.common.openstack_utils as op_utils
+
+LOG = logging.getLogger(__name__)
+
+
+class CreateSecgroup(base.Scenario):
+    """Create an OpenStack security group"""
+
+    __scenario_type__ = "CreateSecgroup"
+
+    def __init__(self, scenario_cfg, context_cfg):
+        self.scenario_cfg = scenario_cfg
+        self.context_cfg = context_cfg
+        self.options = self.scenario_cfg['options']
+
+        self.sg_name = self.options.get("sg_name", "yardstick_sec_group")
+        self.description = self.options.get("description", None)
+        self.neutron_client = op_utils.get_neutron_client()
+
+        self.setup_done = False
+
+    def setup(self):
+        """scenario setup"""
+
+        self.setup_done = True
+
+    def run(self, result):
+        """execute the test"""
+
+        if not self.setup_done:
+            self.setup()
+
+        sg_id = op_utils.create_security_group_full(self.neutron_client,
+                                                    sg_name=self.sg_name,
+                                                    sg_description=self.description)
+
+        if sg_id:
+            result.update({"sg_create": 1})
+            LOG.info("Create security group successful!")
+        else:
+            result.update({"sg_create": 0})
+            LOG.error("Create security group failed!")
+
+        try:
+            keys = self.scenario_cfg.get('output', '').split()
+        except KeyError:
+            pass
+        else:
+            values = [sg_id]
+            return self._push_to_outputs(keys, values)
index 45c0bfd..273b004 100644 (file)
@@ -59,8 +59,10 @@ class CreateServer(base.Scenario):
         vm = op_utils.create_instance_and_wait_for_active(self.openstack)
 
         if vm:
+            result.update({"instance_create": 1})
             LOG.info("Create server successful!")
         else:
+            result.update({"instance_create": 0})
             LOG.error("Create server failed!")
 
         try:
diff --git a/yardstick/benchmark/scenarios/lib/create_subnet.py b/yardstick/benchmark/scenarios/lib/create_subnet.py
new file mode 100644 (file)
index 0000000..c34af8a
--- /dev/null
@@ -0,0 +1,66 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+from __future__ import print_function
+from __future__ import absolute_import
+
+import logging
+
+from yardstick.benchmark.scenarios import base
+import yardstick.common.openstack_utils as op_utils
+
+LOG = logging.getLogger(__name__)
+
+
+class CreateSubnet(base.Scenario):
+    """Create an OpenStack subnet"""
+
+    __scenario_type__ = "CreateSubnet"
+
+    def __init__(self, scenario_cfg, context_cfg):
+        self.scenario_cfg = scenario_cfg
+        self.context_cfg = context_cfg
+        self.options = self.scenario_cfg['options']
+
+        self.openstack = self.options.get("openstack_paras", None)
+
+        self.neutron_client = op_utils.get_neutron_client()
+
+        self.setup_done = False
+
+    def setup(self):
+        """scenario setup"""
+
+        self.setup_done = True
+
+    def run(self, result):
+        """execute the test"""
+
+        if not self.setup_done:
+            self.setup()
+
+        openstack_paras = {'subnets': [self.openstack]}
+        subnet_id = op_utils.create_neutron_subnet(self.neutron_client,
+                                                   openstack_paras)
+        if subnet_id:
+            result.update({"subnet_create": 1})
+            LOG.info("Create subnet successful!")
+        else:
+            result.update({"subnet_create": 0})
+            LOG.error("Create subnet failed!")
+
+        check_result = subnet_id
+
+        try:
+            keys = self.scenario_cfg.get('output', '').split()
+        except KeyError:
+            pass
+        else:
+            values = [check_result]
+            return self._push_to_outputs(keys, values)
diff --git a/yardstick/benchmark/scenarios/lib/delete_floating_ip.py b/yardstick/benchmark/scenarios/lib/delete_floating_ip.py
new file mode 100644 (file)
index 0000000..4314952
--- /dev/null
@@ -0,0 +1,54 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+from __future__ import print_function
+from __future__ import absolute_import
+
+import logging
+
+from yardstick.benchmark.scenarios import base
+import yardstick.common.openstack_utils as op_utils
+
+LOG = logging.getLogger(__name__)
+
+
+class DeleteFloatingIp(base.Scenario):
+    """Release a floating IP back to the external network pool"""
+
+    __scenario_type__ = "DeleteFloatingIp"
+
+    def __init__(self, scenario_cfg, context_cfg):
+        self.scenario_cfg = scenario_cfg
+        self.context_cfg = context_cfg
+        self.options = self.scenario_cfg['options']
+
+        # id of the floating ip to release, taken from the scenario options
+        self.floating_ip_id = self.options.get("floating_ip_id")
+        self.nova_client = op_utils.get_nova_client()
+        self.setup_done = False
+
+    def setup(self):
+        """one-time scenario preparation"""
+
+        self.setup_done = True
+
+    def run(self, result):
+        """delete the floating ip and record success (1) or failure (0)"""
+
+        if not self.setup_done:
+            self.setup()
+
+        status = op_utils.delete_floating_ip(
+            nova_client=self.nova_client,
+            floatingip_id=self.floating_ip_id)
+        if status:
+            result.update({"delete_floating_ip": 1})
+            LOG.info("Delete floating ip successful!")
+        else:
+            result.update({"delete_floating_ip": 0})
+            LOG.error("Delete floating ip failed!")
diff --git a/yardstick/benchmark/scenarios/lib/delete_keypair.py b/yardstick/benchmark/scenarios/lib/delete_keypair.py
new file mode 100644 (file)
index 0000000..1351399
--- /dev/null
@@ -0,0 +1,56 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+from __future__ import print_function
+from __future__ import absolute_import
+
+import logging
+
+from yardstick.benchmark.scenarios import base
+import yardstick.common.openstack_utils as op_utils
+
+LOG = logging.getLogger(__name__)
+
+
+class DeleteKeypair(base.Scenario):
+    """Delete an OpenStack keypair"""
+
+    __scenario_type__ = "DeleteKeypair"
+
+    def __init__(self, scenario_cfg, context_cfg):
+        self.scenario_cfg = scenario_cfg
+        self.context_cfg = context_cfg
+        self.options = self.scenario_cfg['options']
+
+        self.key_name = self.options.get("key_name", "yardstick_key")
+
+        self.nova_client = op_utils.get_nova_client()
+
+        self.setup_done = False
+
+    def setup(self):
+        """scenario setup"""
+
+        self.setup_done = True
+
+    def run(self, result):
+        """execute the test"""
+
+        if not self.setup_done:
+            self.setup()
+
+        status = op_utils.delete_keypair(self.nova_client,
+                                         self.key_name)
+
+        if status:
+            result.update({"delete_keypair": 1})
+            LOG.info("Delete keypair successful!")
+        else:
+            result.update({"delete_keypair": 0})
+            LOG.error("Delete keypair failed!")
diff --git a/yardstick/benchmark/scenarios/lib/delete_volume.py b/yardstick/benchmark/scenarios/lib/delete_volume.py
new file mode 100644 (file)
index 0000000..ea2b858
--- /dev/null
@@ -0,0 +1,55 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+from __future__ import print_function
+from __future__ import absolute_import
+
+import logging
+
+from yardstick.benchmark.scenarios import base
+import yardstick.common.openstack_utils as op_utils
+
+LOG = logging.getLogger(__name__)
+
+
+class DeleteVolume(base.Scenario):
+    """Delete an OpenStack volume"""
+
+    __scenario_type__ = "DeleteVolume"
+
+    def __init__(self, scenario_cfg, context_cfg):
+        self.scenario_cfg = scenario_cfg
+        self.context_cfg = context_cfg
+        self.options = self.scenario_cfg['options']
+
+        self.volume_id = self.options.get("volume_id", None)
+
+        self.cinder_client = op_utils.get_cinder_client()
+
+        self.setup_done = False
+
+    def setup(self):
+        """scenario setup"""
+
+        self.setup_done = True
+
+    def run(self, result):
+        """execute the test"""
+
+        if not self.setup_done:
+            self.setup()
+
+        status = op_utils.delete_volume(self.cinder_client, self.volume_id)
+
+        if status:
+            result.update({"delete_volume": 1})
+            LOG.info("Delete volume successful!")
+        else:
+            result.update({"delete_volume": 0})
+            LOG.error("Delete volume failed!")
diff --git a/yardstick/benchmark/scenarios/lib/detach_volume.py b/yardstick/benchmark/scenarios/lib/detach_volume.py
new file mode 100644 (file)
index 0000000..0b02a3a
--- /dev/null
@@ -0,0 +1,54 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+from __future__ import print_function
+from __future__ import absolute_import
+
+import logging
+
+from yardstick.benchmark.scenarios import base
+import yardstick.common.openstack_utils as op_utils
+
+LOG = logging.getLogger(__name__)
+
+
+class DetachVolume(base.Scenario):
+    """Detach a volume from an instance"""
+
+    __scenario_type__ = "DetachVolume"
+
+    def __init__(self, scenario_cfg, context_cfg):
+        self.scenario_cfg = scenario_cfg
+        self.context_cfg = context_cfg
+        self.options = self.scenario_cfg['options']
+
+        self.server_id = self.options.get("server_id", "TestServer")
+        self.volume_id = self.options.get("volume_id", None)
+
+        self.setup_done = False
+
+    def setup(self):
+        """scenario setup"""
+
+        self.setup_done = True
+
+    def run(self, result):
+        """execute the test"""
+
+        if not self.setup_done:
+            self.setup()
+
+        status = op_utils.detach_volume(self.server_id, self.volume_id)
+
+        if status:
+            result.update({"detach_volume": 1})
+            LOG.info("Detach volume from server successful!")
+        else:
+            result.update({"detach_volume": 0})
+            LOG.error("Detach volume from server failed!")
index 1e0a5fc..a9e7aa6 100644 (file)
@@ -11,6 +11,7 @@ from __future__ import print_function
 
 import os
 import logging
+import math
 
 import pkg_resources
 from oslo_serialization import jsonutils
@@ -357,15 +358,15 @@ class Pktgen(base.Scenario):
 
         result.update(jsonutils.loads(stdout))
 
-        result['packets_received'] = self._iptables_get_result()
+        received = result['packets_received'] = self._iptables_get_result()
+        sent = result['packets_sent']
         result['packetsize'] = packetsize
+        # 1000000.0 forces true division (Python 2/3 consistent); ceil rounds any loss up
+        ppm = math.ceil(1000000.0 * (sent - received) / sent)
+
+        result['ppm'] = ppm
 
         if "sla" in self.scenario_cfg:
-            sent = result['packets_sent']
-            received = result['packets_received']
-            ppm = 1000000 * (sent - received) / sent
-            # if ppm is 1, then 11 out of 10 million is no pass
-            ppm += (sent - received) % sent > 0
             LOG.debug("Lost packets %d - Lost ppm %d", (sent - received), ppm)
             sla_max_ppm = int(self.scenario_cfg["sla"]["max_ppm"])
             assert ppm <= sla_max_ppm, "ppm %d > sla_max_ppm %d; " \
index 599835d..4510bcf 100644 (file)
 """ NSPerf specific scenario definition """
 
 from __future__ import absolute_import
-import logging
 
+import logging
 import errno
-import os
 
+import ipaddress
+import os
+import sys
 import re
 from itertools import chain
+
+import six
 from operator import itemgetter
 from collections import defaultdict
 
@@ -31,8 +35,10 @@ from yardstick.network_services.collector.subscriber import Collector
 from yardstick.network_services.vnf_generic import vnfdgen
 from yardstick.network_services.vnf_generic.vnf.base import GenericVNF
 from yardstick.network_services.traffic_profile.base import TrafficProfile
+from yardstick.network_services.utils import get_nsb_option
 from yardstick import ssh
 
+
 LOG = logging.getLogger(__name__)
 
 
@@ -126,19 +132,50 @@ class NetworkServiceTestCase(base.Scenario):
         self.collector = None
         self.traffic_profile = None
 
+    def _get_ip_flow_range(self, ip_start_range):
+
+        node_name, range_or_interface = next(iter(ip_start_range.items()), (None, '0.0.0.0'))
+        if node_name is not None:
+            node = self.context_cfg["nodes"].get(node_name, {})
+            try:
+                # the ip_range is the interface name
+                interface = node.get("interfaces", {})[range_or_interface]
+            except KeyError:
+                ip = "0.0.0.0"
+                mask = "255.255.255.0"
+            else:
+                ip = interface["local_ip"]
+                # we can't default these values, they must both exist to be valid
+                mask = interface["netmask"]
+
+            ipaddr = ipaddress.ip_network(six.text_type('{}/{}'.format(ip, mask)), strict=False)
+            hosts = list(ipaddr.hosts())
+            ip_addr_range = "{}-{}".format(hosts[0], hosts[-1])
+        else:
+            # we are manually specifying the range
+            ip_addr_range = range_or_interface
+        return ip_addr_range
+
     def _get_traffic_flow(self):
+        flow = {}
         try:
-            with open(self.scenario_cfg["traffic_options"]["flow"]) as fflow:
-                flow = yaml_load(fflow)
-        except (KeyError, IOError, OSError):
+            fflow = self.scenario_cfg["options"]["flow"]
+            for index, src in enumerate(fflow.get("src_ip", [])):
+                flow["src_ip{}".format(index)] = self._get_ip_flow_range(src)
+
+            for index, dst in enumerate(fflow.get("dst_ip", [])):
+                flow["dst_ip{}".format(index)] = self._get_ip_flow_range(dst)
+
+            for index, publicip in enumerate(fflow.get("publicip", [])):
+                flow["public_ip{}".format(index)] = publicip
+        except KeyError:
             flow = {}
-        return flow
+        return {"flow": flow}
 
     def _get_traffic_imix(self):
         try:
-            with open(self.scenario_cfg["traffic_options"]["imix"]) as fimix:
-                imix = yaml_load(fimix)
-        except (KeyError, IOError, OSError):
+            imix = {"imix": self.scenario_cfg['options']['framesize']}
+        except KeyError:
             imix = {}
         return imix
 
@@ -265,8 +302,25 @@ class NetworkServiceTestCase(base.Scenario):
         for dpdk_port_num, netdev in enumerate(s):
             netdev['dpdk_port_num'] = dpdk_port_num
 
+    def _probe_netdevs(self, node, node_dict):
+        # Probe a node over SSH for its network devices; caches the parsed
+        # result on node_dict['netdevs'] and returns it ({} if no connection).
+        cmd = "PATH=$PATH:/sbin:/usr/sbin ip addr show"
+        netdevs = {}
+        with SshManager(node_dict) as conn:
+            if conn:
+                exit_status = conn.execute(cmd)[0]
+                if exit_status != 0:
+                    raise IncorrectSetup("Node's %s lacks ip tool." % node)
+                exit_status, stdout, _ = conn.execute(
+                    self.FIND_NETDEVICE_STRING)
+                if exit_status != 0:
+                    raise IncorrectSetup(
+                        "Cannot find netdev info in sysfs on node %s" % node)
+                netdevs = node_dict['netdevs'] = self.parse_netdev_info(stdout)
+        return netdevs
+
     @classmethod
-    def _probe_missing_values(cls, netdevs, network, missing):
+    def _probe_missing_values(cls, netdevs, network):
+
         mac_lower = network['local_mac'].lower()
         for netdev in netdevs.values():
             if netdev['address'].lower() != mac_lower:
@@ -288,36 +342,30 @@ class NetworkServiceTestCase(base.Scenario):
         """
         for node, node_dict in self.context_cfg["nodes"].items():
 
-            cmd = "PATH=$PATH:/sbin:/usr/sbin ip addr show"
-            with SshManager(node_dict) as conn:
-                exit_status = conn.execute(cmd)[0]
-                if exit_status != 0:
-                    raise IncorrectSetup("Node's %s lacks ip tool." % node)
-                exit_status, stdout, _ = conn.execute(
-                    self.FIND_NETDEVICE_STRING)
-                if exit_status != 0:
-                    raise IncorrectSetup(
-                        "Cannot find netdev info in sysfs" % node)
-                netdevs = node_dict['netdevs'] = self.parse_netdev_info(
-                    stdout)
-
-                for network in node_dict["interfaces"].values():
-                    missing = self.TOPOLOGY_REQUIRED_KEYS.difference(network)
-                    if not missing:
-                        continue
-
-                    try:
-                        self._probe_missing_values(netdevs, network,
-                                                   missing)
-                    except KeyError:
-                        pass
-                    else:
-                        missing = self.TOPOLOGY_REQUIRED_KEYS.difference(
-                            network)
-                    if missing:
-                        raise IncorrectConfig(
-                            "Require interface fields '%s' not found, topology file "
-                            "corrupted" % ', '.join(missing))
+            for network in node_dict["interfaces"].values():
+                missing = self.TOPOLOGY_REQUIRED_KEYS.difference(network)
+                if not missing:
+                    continue
+
+                # only ssh probe if there are missing values
+                # ssh probe won't work on Ixia, so we had better define all our values
+                try:
+                    netdevs = self._probe_netdevs(node, node_dict)
+                except (SSHError, SSHTimeout):
+                    raise IncorrectConfig(
+                        "Unable to probe missing interface fields '%s', on node %s "
+                        "SSH Error" % (', '.join(missing), node))
+                try:
+                    self._probe_missing_values(netdevs, network)
+                except KeyError:
+                    pass
+                else:
+                    missing = self.TOPOLOGY_REQUIRED_KEYS.difference(
+                        network)
+                if missing:
+                    raise IncorrectConfig(
+                        "Require interface fields '%s' not found, topology file "
+                        "corrupted" % ', '.join(missing))
 
         # 3. Use topology file to find connections & resolve dest address
         self._resolve_topology()
@@ -393,6 +441,9 @@ printf "%s/driver:" $1 ; basename $(readlink -s $1/device/driver); } \
         :param context_cfg:
         :return:
         """
+        trex_lib_path = get_nsb_option('trex_client_lib')
+        sys.path[:] = list(chain([trex_lib_path], (x for x in sys.path if x != trex_lib_path)))
+
         if scenario_cfg is None:
             scenario_cfg = self.scenario_cfg
 
@@ -440,7 +491,6 @@ printf "%s/driver:" $1 ; basename $(readlink -s $1/device/driver); } \
             for vnf in chain(traffic_runners, non_traffic_runners):
                 LOG.info("Instantiating %s", vnf.name)
                 vnf.instantiate(self.scenario_cfg, self.context_cfg)
-            for vnf in chain(traffic_runners, non_traffic_runners):
                 LOG.info("Waiting for %s to instantiate", vnf.name)
                 vnf.wait_for_instantiate()
         except RuntimeError:
@@ -473,7 +523,7 @@ printf "%s/driver:" $1 ; basename $(readlink -s $1/device/driver); } \
         for vnf in self.vnfs:
             # Result example:
             # {"VNF1: { "tput" : [1000, 999] }, "VNF2": { "latency": 100 }}
-            LOG.debug("vnf")
+            LOG.debug("collect KPI for %s", vnf.name)
             result.update(self.collector.get_kpi(vnf))
 
     def teardown(self):
index b99e342..98fe269 100644 (file)
@@ -28,6 +28,14 @@ class Fio(base.Scenario):
         type:    string
         unit:    na
         default: /home/ubuntu/data.raw
+    job_file - fio job configuration file
+        type:    string
+        unit:    na
+        default: None
+    directory - mount directory for the test volume
+        type:    string
+        unit:    na
+        default: None
     bs - block size used for the io units
         type:    int
         unit:    bytes
@@ -71,20 +79,42 @@ class Fio(base.Scenario):
     def __init__(self, scenario_cfg, context_cfg):
         self.scenario_cfg = scenario_cfg
         self.context_cfg = context_cfg
+        self.options = self.scenario_cfg["options"]
         self.setup_done = False
 
     def setup(self):
         """scenario setup"""
-        self.target_script = pkg_resources.resource_filename(
-            "yardstick.benchmark.scenarios.storage",
-            Fio.TARGET_SCRIPT)
         host = self.context_cfg["host"]
 
         self.client = ssh.SSH.from_node(host, defaults={"user": "root"})
         self.client.wait(timeout=600)
 
-        # copy script to host
-        self.client._put_file_shell(self.target_script, '~/fio.sh')
+        self.job_file = self.options.get("job_file", None)
+
+        if self.job_file:
+            self.job_file_script = pkg_resources.resource_filename(
+                "yardstick.resources", 'files/' + self.job_file)
+
+            # copy script to host
+            self.client._put_file_shell(self.job_file_script, '~/job_file.ini')
+
+        else:
+            self.target_script = pkg_resources.resource_filename(
+                "yardstick.benchmark.scenarios.storage", Fio.TARGET_SCRIPT)
+
+            # copy script to host
+            self.client._put_file_shell(self.target_script, '~/fio.sh')
+
+        mount_dir = self.options.get("directory", None)
+
+        if mount_dir:
+            LOG.debug("Formating volume...")
+            self.client.execute("sudo mkfs.ext4 /dev/vdb")
+            cmd = "sudo mkdir %s" % mount_dir
+            self.client.execute(cmd)
+            LOG.debug("Mounting volume at: %s", mount_dir)
+            cmd = "sudo mount /dev/vdb %s" % mount_dir
+            self.client.execute(cmd)
 
         self.setup_done = True
 
@@ -92,57 +122,69 @@ class Fio(base.Scenario):
         """execute the benchmark"""
         default_args = "-ioengine=libaio -group_reporting -time_based -time_based " \
             "--output-format=json"
+        timeout = 3600
 
         if not self.setup_done:
             self.setup()
 
-        options = self.scenario_cfg["options"]
-        filename = options.get("filename", "/home/ubuntu/data.raw")
-        bs = options.get("bs", "4k")
-        iodepth = options.get("iodepth", "1")
-        rw = options.get("rw", "write")
-        ramp_time = options.get("ramp_time", 20)
-        size = options.get("size", "1g")
-        direct = options.get("direct", "1")
-        numjobs = options.get("numjobs", "1")
-        rwmixwrite = options.get("rwmixwrite", 50)
-        name = "yardstick-fio"
-        # if run by a duration runner
-        duration_time = self.scenario_cfg["runner"].get("duration", None) \
-            if "runner" in self.scenario_cfg else None
-        # if run by an arithmetic runner
-        arithmetic_time = options.get("duration", None)
-        if duration_time:
-            runtime = duration_time
-        elif arithmetic_time:
-            runtime = arithmetic_time
+        if self.job_file:
+            cmd = "sudo fio job_file.ini --output-format=json"
         else:
-            runtime = 30
+            filename = self.options.get("filename", "/home/ubuntu/data.raw")
+            bs = self.options.get("bs", "4k")
+            iodepth = self.options.get("iodepth", "1")
+            rw = self.options.get("rw", "write")
+            ramp_time = self.options.get("ramp_time", 20)
+            size = self.options.get("size", "1g")
+            direct = self.options.get("direct", "1")
+            numjobs = self.options.get("numjobs", "1")
+            rwmixwrite = self.options.get("rwmixwrite", 50)
+            name = "yardstick-fio"
+            # if run by a duration runner
+            duration_time = self.scenario_cfg["runner"].get("duration", None) \
+                if "runner" in self.scenario_cfg else None
+            # if run by an arithmetic runner
+            arithmetic_time = self.options.get("duration", None)
+            if duration_time:
+                runtime = duration_time
+            elif arithmetic_time:
+                runtime = arithmetic_time
+            else:
+                runtime = 30
+            # Set timeout, so that the cmd execution does not exit incorrectly
+            # when the test runs for a long time
+            timeout = int(ramp_time) + int(runtime) + 600
+
+            cmd_args = "-filename=%s -direct=%s -bs=%s -iodepth=%s -rw=%s -rwmixwrite=%s " \
+                       "-size=%s -ramp_time=%s -numjobs=%s -runtime=%s -name=%s %s" \
+                       % (filename, direct, bs, iodepth, rw, rwmixwrite, size, ramp_time, numjobs,
+                          runtime, name, default_args)
+            cmd = "sudo bash fio.sh %s %s" % (filename, cmd_args)
 
-        cmd_args = "-filename=%s -direct=%s -bs=%s -iodepth=%s -rw=%s -rwmixwrite=%s " \
-                   "-size=%s -ramp_time=%s -numjobs=%s -runtime=%s -name=%s %s" \
-                   % (filename, direct, bs, iodepth, rw, rwmixwrite, size, ramp_time, numjobs,
-                      runtime, name, default_args)
-        cmd = "sudo bash fio.sh %s %s" % (filename, cmd_args)
         LOG.debug("Executing command: %s", cmd)
-        # Set timeout, so that the cmd execution does not exit incorrectly
-        # when the test run time is last long
-        timeout = int(ramp_time) + int(runtime) + 600
         status, stdout, stderr = self.client.execute(cmd, timeout=timeout)
         if status:
             raise RuntimeError(stderr)
 
         raw_data = jsonutils.loads(stdout)
 
-        # The bandwidth unit is KB/s, and latency unit is us
-        if rw in ["read", "randread", "rw", "randrw"]:
+        if self.job_file:
             result["read_bw"] = raw_data["jobs"][0]["read"]["bw"]
             result["read_iops"] = raw_data["jobs"][0]["read"]["iops"]
             result["read_lat"] = raw_data["jobs"][0]["read"]["lat"]["mean"]
-        if rw in ["write", "randwrite", "rw", "randrw"]:
             result["write_bw"] = raw_data["jobs"][0]["write"]["bw"]
             result["write_iops"] = raw_data["jobs"][0]["write"]["iops"]
             result["write_lat"] = raw_data["jobs"][0]["write"]["lat"]["mean"]
+        else:
+            # The bandwidth unit is KB/s, and latency unit is us
+            if rw in ["read", "randread", "rw", "randrw"]:
+                result["read_bw"] = raw_data["jobs"][0]["read"]["bw"]
+                result["read_iops"] = raw_data["jobs"][0]["read"]["iops"]
+                result["read_lat"] = raw_data["jobs"][0]["read"]["lat"]["mean"]
+            if rw in ["write", "randwrite", "rw", "randrw"]:
+                result["write_bw"] = raw_data["jobs"][0]["write"]["bw"]
+                result["write_iops"] = raw_data["jobs"][0]["write"]["iops"]
+                result["write_lat"] = raw_data["jobs"][0]["write"]["lat"]["mean"]
 
         if "sla" in self.scenario_cfg:
             sla_error = ""
index f963782..96db6e1 100644 (file)
@@ -17,7 +17,7 @@ OUTPUT_FILE=/tmp/storagecapacity-out.log
 # run disk_size test
 run_disk_size()
 {
-    fdisk -l | grep '^Disk.*bytes$' | awk -F [:,\ ] '{print $2,$7}' > $OUTPUT_FILE
+    fdisk -l | grep '^Disk.*bytes' | awk -F [:,\ ] '{print $2,$7}' > $OUTPUT_FILE
 }
 
 # write the disk size to stdout in json format
@@ -35,7 +35,7 @@ output_disk_size()
 run_block_size()
 {
     echo -n "" > $OUTPUT_FILE
-    blkdevices=`fdisk -l | grep '^Disk.*bytes$' | awk -F [:,\ ] '{print $2}'`
+    blkdevices=`fdisk -l | grep '^Disk.*bytes' | awk -F [:,\ ] '{print $2}'`
     blkdevices=($blkdevices)
     for bd in "${blkdevices[@]}";do
         blk_size=`blockdev --getbsz $bd`
index e5e8497..b416f42 100644 (file)
@@ -59,6 +59,7 @@ if not SERVER_IP:
 
 # dir
 CONF_DIR = get_param('dir.conf', '/etc/yardstick')
+IMAGE_DIR = get_param('dir.images', '/home/opnfv/images/')
 REPOS_DIR = get_param('dir.repos', '/home/opnfv/repos/yardstick')
 RELENG_DIR = get_param('dir.releng', '/home/opnfv/repos/releng')
 LOG_DIR = get_param('dir.log', '/tmp/yardstick/')
index d86aee1..c862a6b 100644 (file)
@@ -11,6 +11,7 @@ from __future__ import absolute_import
 
 import os
 import time
+import sys
 import logging
 
 from keystoneauth1 import loading
@@ -264,6 +265,15 @@ def create_aggregate_with_host(nova_client, aggregate_name, av_zone,
         return True
 
 
+def create_keypair(nova_client, name, key_path=None):    # pragma: no cover
+    try:
+        with open(key_path) as fpubkey:
+            keypair = get_nova_client().keypairs.create(name=name, public_key=fpubkey.read())
+            return keypair
+    except Exception:
+        log.exception("Error [create_keypair(nova_client)]")
+
+
 def create_instance(json_body):    # pragma: no cover
     try:
         return get_nova_client().servers.create(**json_body)
@@ -290,6 +300,17 @@ def create_instance_and_wait_for_active(json_body):    # pragma: no cover
     return None
 
 
+def attach_server_volume(server_id, volume_id, device=None):    # pragma: no cover
+    try:
+        get_nova_client().volumes.create_server_volume(server_id, volume_id, device)
+    except Exception:
+        log.exception("Error [attach_server_volume(nova_client, '%s', '%s')]",
+                      server_id, volume_id)
+        return False
+    else:
+        return True
+
+
 def delete_instance(nova_client, instance_id):      # pragma: no cover
     try:
         nova_client.servers.force_delete(instance_id)
@@ -403,6 +424,15 @@ def delete_flavor(flavor_id):    # pragma: no cover
         return True
 
 
+def delete_keypair(nova_client, key):     # pragma: no cover
+    try:
+        nova_client.keypairs.delete(key=key)
+        return True
+    except Exception:
+        log.exception("Error [delete_keypair(nova_client)]")
+        return False
+
+
 # *********************************************
 #   NEUTRON
 # *********************************************
@@ -417,6 +447,171 @@ def get_port_id_by_ip(neutron_client, ip_address):      # pragma: no cover
         'fixed_ips') if j['ip_address'] == ip_address), None)
 
 
+def create_neutron_net(neutron_client, json_body):      # pragma: no cover
+    try:
+        network = neutron_client.create_network(body=json_body)
+        return network['network']['id']
+    except Exception:
+        log.error("Error [create_neutron_net(neutron_client)]")
+        raise Exception("operation error")
+        return None
+
+
+def create_neutron_subnet(neutron_client, json_body):      # pragma: no cover
+    try:
+        subnet = neutron_client.create_subnet(body=json_body)
+        return subnet['subnets'][0]['id']
+    except Exception:
+        log.error("Error [create_neutron_subnet")
+        raise Exception("operation error")
+        return None
+
+
+def create_neutron_router(neutron_client, json_body):      # pragma: no cover
+    try:
+        router = neutron_client.create_router(json_body)
+        return router['router']['id']
+    except Exception:
+        log.error("Error [create_neutron_router(neutron_client)]")
+        raise Exception("operation error")
+        return None
+
+
+def create_floating_ip(neutron_client, extnet_id):      # pragma: no cover
+    props = {'floating_network_id': extnet_id}
+    try:
+        ip_json = neutron_client.create_floatingip({'floatingip': props})
+        fip_addr = ip_json['floatingip']['floating_ip_address']
+        fip_id = ip_json['floatingip']['id']
+    except Exception:
+        log.error("Error [create_floating_ip(neutron_client)]")
+        return None
+    return {'fip_addr': fip_addr, 'fip_id': fip_id}
+
+
+def delete_floating_ip(nova_client, floatingip_id):      # pragma: no cover
+    try:
+        nova_client.floating_ips.delete(floatingip_id)
+        return True
+    except Exception:
+        log.error("Error [delete_floating_ip(nova_client, '%s')]" % floatingip_id)
+        return False
+
+
+def get_security_groups(neutron_client):      # pragma: no cover
+    try:
+        security_groups = neutron_client.list_security_groups()[
+            'security_groups']
+        return security_groups
+    except Exception:
+        log.error("Error [get_security_groups(neutron_client)]")
+        return None
+
+
+def get_security_group_id(neutron_client, sg_name):      # pragma: no cover
+    security_groups = get_security_groups(neutron_client)
+    id = ''
+    for sg in security_groups:
+        if sg['name'] == sg_name:
+            id = sg['id']
+            break
+    return id
+
+
+def create_security_group(neutron_client, sg_name, sg_description):      # pragma: no cover
+    json_body = {'security_group': {'name': sg_name,
+                                    'description': sg_description}}
+    try:
+        secgroup = neutron_client.create_security_group(json_body)
+        return secgroup['security_group']
+    except Exception:
+        log.error("Error [create_security_group(neutron_client, '%s', "
+                  "'%s')]" % (sg_name, sg_description))
+        return None
+
+
+def create_secgroup_rule(neutron_client, sg_id, direction, protocol,
+                         port_range_min=None, port_range_max=None,
+                         **json_body):      # pragma: no cover
+    # We create a security group in 2 steps
+    # 1 - we check the format and set the json body accordingly
+    # 2 - we call neturon client to create the security group
+
+    # Format check
+    json_body.update({'security_group_rule': {'direction': direction,
+                     'security_group_id': sg_id, 'protocol': protocol}})
+    # parameters may be
+    # - both None => we do nothing
+    # - both Not None => we add them to the json description
+    # but one cannot be None is the other is not None
+    if (port_range_min is not None and port_range_max is not None):
+        # add port_range in json description
+        json_body['security_group_rule']['port_range_min'] = port_range_min
+        json_body['security_group_rule']['port_range_max'] = port_range_max
+        log.debug("Security_group format set (port range included)")
+    else:
+        # either both port range are set to None => do nothing
+        # or one is set but not the other => log it and return False
+        if port_range_min is None and port_range_max is None:
+            log.debug("Security_group format set (no port range mentioned)")
+        else:
+            log.error("Bad security group format."
+                      "One of the port range is not properly set:"
+                      "range min: {},"
+                      "range max: {}".format(port_range_min,
+                                             port_range_max))
+            return False
+
+    # Create security group using neutron client
+    try:
+        neutron_client.create_security_group_rule(json_body)
+        return True
+    except Exception:
+        log.exception("Impossible to create_security_group_rule,"
+                      "security group rule probably already exists")
+        return False
+
+
+def create_security_group_full(neutron_client,
+                               sg_name, sg_description):      # pragma: no cover
+    sg_id = get_security_group_id(neutron_client, sg_name)
+    if sg_id != '':
+        log.info("Using existing security group '%s'..." % sg_name)
+    else:
+        log.info("Creating security group  '%s'..." % sg_name)
+        SECGROUP = create_security_group(neutron_client,
+                                         sg_name,
+                                         sg_description)
+        if not SECGROUP:
+            log.error("Failed to create the security group...")
+            return None
+
+        sg_id = SECGROUP['id']
+
+        log.debug("Security group '%s' with ID=%s created successfully."
+                  % (SECGROUP['name'], sg_id))
+
+        log.debug("Adding ICMP rules in security group '%s'..."
+                  % sg_name)
+        if not create_secgroup_rule(neutron_client, sg_id,
+                                    'ingress', 'icmp'):
+            log.error("Failed to create the security group rule...")
+            return None
+
+        log.debug("Adding SSH rules in security group '%s'..."
+                  % sg_name)
+        if not create_secgroup_rule(
+                neutron_client, sg_id, 'ingress', 'tcp', '22', '22'):
+            log.error("Failed to create the security group rule...")
+            return None
+
+        if not create_secgroup_rule(
+                neutron_client, sg_id, 'egress', 'tcp', '22', '22'):
+            log.error("Failed to create the security group rule...")
+            return None
+    return sg_id
+
+
 # *********************************************
 #   GLANCE
 # *********************************************
@@ -491,3 +686,33 @@ def create_volume(cinder_client, volume_name, volume_size,
         log.exception("Error [create_volume(cinder_client, %s)]",
                       (volume_name, volume_size))
         return None
+
+
+def delete_volume(cinder_client, volume_id, forced=False):      # pragma: no cover
+    try:
+        if forced:
+            try:
+                cinder_client.volumes.detach(volume_id)
+            except:
+                log.error(sys.exc_info()[0])
+            cinder_client.volumes.force_delete(volume_id)
+        else:
+            while True:
+                volume = get_cinder_client().volumes.get(volume_id)
+                if volume.status.lower() == 'available':
+                    break
+            cinder_client.volumes.delete(volume_id)
+        return True
+    except Exception:
+        log.exception("Error [delete_volume(cinder_client, '%s')]" % volume_id)
+        return False
+
+
+def detach_volume(server_id, volume_id):      # pragma: no cover
+    try:
+        get_nova_client().volumes.delete_server_volume(server_id, volume_id)
+        return True
+    except Exception:
+        log.exception("Error [detach_server_volume(nova_client, '%s', '%s')]",
+                      server_id, volume_id)
+        return False
index c7ae9c1..1d7ea07 100644 (file)
@@ -70,26 +70,26 @@ def itersubclasses(cls, _seen=None):
                 yield sub
 
 
-def try_append_module(name, modules):
-    if name not in modules:
-        modules[name] = importutils.import_module(name)
-
-
 def import_modules_from_package(package):
     """Import modules from package and append into sys.modules
 
     :param: package - Full package name. For example: rally.deploy.engines
     """
-    path = [os.path.dirname(yardstick.__file__), ".."] + package.split(".")
-    path = os.path.join(*path)
+    yardstick_root = os.path.dirname(os.path.dirname(yardstick.__file__))
+    path = os.path.join(yardstick_root, *package.split("."))
     for root, dirs, files in os.walk(path):
-        for filename in files:
-            if filename.startswith("__") or not filename.endswith(".py"):
-                continue
-            new_package = ".".join(root.split(os.sep)).split("....")[1]
-            module_name = "%s.%s" % (new_package, filename[:-3])
+        matches = (filename for filename in files if filename.endswith(".py") and
+                   not filename.startswith("__"))
+        new_package = os.path.relpath(root, yardstick_root).replace(os.sep, ".")
+        module_names = set(
+            ("{}.{}".format(new_package, filename.rsplit(".py", 1)[0]) for filename in matches))
+        # find modules which haven't already been imported
+        missing_modules = module_names.difference(sys.modules)
+        logger.debug("importing %s", missing_modules)
+        # we have already checked for already imported modules, so we don't need to check again
+        for module_name in missing_modules:
             try:
-                try_append_module(module_name, sys.modules)
+                sys.modules[module_name] = importutils.import_module(module_name)
             except ImportError:
                 logger.exception("unable to import %s", module_name)
 
index a5ba6c3..8c21754 100644 (file)
@@ -13,6 +13,9 @@
 # limitations under the License.
 
 
+import io
+
+
 class CpuSysCores(object):
 
     def __init__(self, connection=""):
@@ -20,8 +23,9 @@ class CpuSysCores(object):
         self.connection = connection
 
     def _open_cpuinfo(self):
-        lines = []
-        lines = self.connection.execute("cat /proc/cpuinfo")[1].split(u'\n')
+        cpuinfo = io.BytesIO()
+        self.connection.get_file_obj("/proc/cpuinfo", cpuinfo)
+        lines = cpuinfo.getvalue().decode('utf-8').splitlines()
         return lines
 
     def _get_core_details(self, lines):
index dbaa47c..ae54510 100644 (file)
@@ -36,13 +36,13 @@ link {0} up
 
 ACTION_TEMPLATE = """\
 p action add {0} accept
-p action add {0} fwd
+p action add {0} fwd {0}
 p action add {0} count
 """
 
 FW_ACTION_TEMPLATE = """\
 p action add {0} accept
-p action add {0} fwd
+p action add {0} fwd {0}
 p action add {0} count
 p action add {0} conntrack
 """
@@ -87,9 +87,18 @@ class MultiPortConfig(object):
         return default
 
     @staticmethod
-    def make_ip_addr(ip, mask_len):
+    def make_ip_addr(ip, mask):
+        """
+        :param ip: ip address
+        :type ip: str
+        :param mask: /24 prefix of 255.255.255.0 netmask
+        :type mask: str
+        :return: interface
+        :rtype: IPv4Interface
+        """
+
         try:
-            return ipaddress.ip_interface(six.text_type('/'.join([ip, mask_len])))
+            return ipaddress.ip_interface(six.text_type('/'.join([ip, mask])))
         except (TypeError, ValueError):
             # None so we can skip later
             return None
@@ -213,7 +222,7 @@ class MultiPortConfig(object):
             return
 
         try:
-            self.start_core = 'h{}'.format(int(self.start_core))
+            self.start_core = '{}h'.format(int(self.start_core))
         except ValueError:
             self.start_core = int(self.start_core[:-1]) + 1
 
@@ -279,18 +288,19 @@ class MultiPortConfig(object):
             for port in port_pair:
                 port_num = int(port[-1])
                 interface = self.interfaces[port_num]
-                # port0_ip = ipaddress.ip_interface(six.text_type(
-                #     "%s/%s" % (interface["virtual-interface"]["local_ip"],
-                #                interface["virtual-interface"]["netmask"])))
+                # We must use the dst because we are on the VNF and we need to
+                # reach the TG.
                 dst_port0_ip = \
                     ipaddress.ip_interface(six.text_type(
                         "%s/%s" % (interface["virtual-interface"]["dst_ip"],
                                    interface["virtual-interface"]["netmask"])))
                 arp_vars = {
-                    "port0_dst_ip_hex": ip_to_hex(dst_port0_ip.ip.exploded),
+                    "port0_dst_ip_hex": ip_to_hex(dst_port0_ip.network.network_address.exploded),
                     "port0_netmask_hex": ip_to_hex(dst_port0_ip.network.netmask.exploded),
+                    # this is the port num that contains port0 subnet and next_hop_ip_hex
                     "port_num": port_num,
                     # next hop is dst in this case
+                    # must be within subnet
                     "next_hop_ip_hex": ip_to_hex(dst_port0_ip.ip.exploded),
                 }
                 arp_config.append(arp_route_tbl_tmpl.format(**arp_vars))
@@ -302,20 +312,25 @@ class MultiPortConfig(object):
         self.swq += self.lb_count
         swq_out_str = self.make_range_str('SWQ{}', self.swq, offset=self.lb_count)
         self.swq += self.lb_count
-        mac_iter = (self.interfaces[int(x[-1])]['virtual-interface']['local_mac']
-                    for port_pair in self.port_pair_list for x in port_pair)
+        # ports_mac_list is disabled for some reason
+        # mac_iter = (self.interfaces[int(x[-1])]['virtual-interface']['local_mac']
+        #             for port_pair in self.port_pair_list for x in port_pair)
         pktq_in_iter = ('RXQ{}'.format(float(x[0][-1])) for x in self.port_pair_list)
 
         arpicmp_data = {
             'core': self.gen_core(self.start_core),
             'pktq_in': swq_in_str,
             'pktq_out': swq_out_str,
-            'ports_mac_list': ' '.join(mac_iter),
+            # we need to disable ports_mac_list?
+            # it looks like ports_mac_list is no longer required
+            # 'ports_mac_list': ' '.join(mac_iter),
             'pktq_in_prv': ' '.join(pktq_in_iter),
             'prv_to_pub_map': self.set_priv_to_pub_mapping(),
             'arp_route_tbl': self.generate_arp_route_tbl(),
-            # can't use empty string, defaul to ()
-            'nd_route_tbl': "()",
+            # nd_route_tbl must be set or we get segfault on random OpenStack IPv6 traffic
+            # 'nd_route_tbl': "(0064:ff9b:0:0:0:0:9810:6414,120,0,0064:ff9b:0:0:0:0:9810:6414)"
+            # safe default?  route discard prefix to localhost
+            'nd_route_tbl': "(0100::,64,0,::1)"
         }
         self.pktq_out_os = swq_out_str.split(' ')
         # why?
@@ -520,12 +535,13 @@ class MultiPortConfig(object):
         arp_config = []
         for port_pair in self.port_pair_list:
             for port in port_pair:
-                gateway = self.get_ports_gateway(port)
-                # omit entries with no gateway
-                if not gateway:
-                    continue
+                # ignore gateway, always use TG IP
+                # gateway = self.get_ports_gateway(port)
                 dst_mac = self.interfaces[int(port[-1])]["virtual-interface"]["dst_mac"]
-                arp_config.append((port[-1], gateway, dst_mac, self.txrx_pipeline))
+                dst_ip = self.interfaces[int(port[-1])]["virtual-interface"]["dst_ip"]
+                # arp_config.append((port[-1], gateway, dst_mac, self.txrx_pipeline))
+                # so dst_mac is the TG dest mac, so we need TG dest IP.
+                arp_config.append((port[-1], dst_ip, dst_mac, self.txrx_pipeline))
 
         return '\n'.join(('p {3} arpadd {0} {1} {2}'.format(*values) for values in arp_config))
 
@@ -533,12 +549,12 @@ class MultiPortConfig(object):
         arp_config6 = []
         for port_pair in self.port_pair_list:
             for port in port_pair:
-                gateway6 = self.get_ports_gateway6(port)
-                # omit entries with no gateway
-                if not gateway6:
-                    continue
+                # ignore gateway, always use TG IP
+                # gateway6 = self.get_ports_gateway6(port)
                 dst_mac6 = self.interfaces[int(port[-1])]["virtual-interface"]["dst_mac"]
-                arp_config6.append((port[-1], gateway6, dst_mac6, self.txrx_pipeline))
+                dst_ip6 = self.interfaces[int(port[-1])]["virtual-interface"]["dst_ip"]
+                # arp_config6.append((port[-1], gateway6, dst_mac6, self.txrx_pipeline))
+                arp_config6.append((port[-1], dst_ip6, dst_mac6, self.txrx_pipeline))
 
         return '\n'.join(('p {3} arpadd {0} {1} {2}'.format(*values) for values in arp_config6))
 
@@ -556,13 +572,17 @@ class MultiPortConfig(object):
         return ''.join((template.format(port) for port in port_list))
 
     def get_ip_from_port(self, port):
-        return self.make_ip_addr(self.get_ports_gateway(port), self.get_netmask_gateway(port))
+        # we can't use gateway because in OpenStack gateways interfere with floating ip routing
+        # return self.make_ip_addr(self.get_ports_gateway(port), self.get_netmask_gateway(port))
+        ip = self.interfaces[port]["virtual-interface"]["local_ip"]
+        netmask = self.interfaces[port]["virtual-interface"]["netmask"]
+        return self.make_ip_addr(ip, netmask)
 
-    def get_ip_and_prefixlen_from_ip_of_port(self, port):
+    def get_network_and_prefixlen_from_ip_of_port(self, port):
         ip_addr = self.get_ip_from_port(port)
         # handle cases with no gateway
         if ip_addr:
-            return ip_addr.ip.exploded, ip_addr.network.prefixlen
+            return ip_addr.network.network_address.exploded, ip_addr.network.prefixlen
         else:
             return None, None
 
@@ -576,25 +596,25 @@ class MultiPortConfig(object):
             src_port = int(port_pair[0][-1])
             dst_port = int(port_pair[1][-1])
 
-            src_ip, src_prefix_len = self.get_ip_and_prefixlen_from_ip_of_port(port_pair[0])
-            dst_ip, dst_prefix_len = self.get_ip_and_prefixlen_from_ip_of_port(port_pair[1])
-            # ignore entires with empty values
-            if all((src_ip, src_prefix_len, dst_ip, dst_prefix_len)):
-                new_rules.append((cmd, self.txrx_pipeline, src_ip, src_prefix_len,
-                                  dst_ip, dst_prefix_len, dst_port))
-                new_rules.append((cmd, self.txrx_pipeline, dst_ip, dst_prefix_len,
-                                  src_ip, src_prefix_len, src_port))
-
-            src_ip = self.get_ports_gateway6(port_pair[0])
-            src_prefix_len = self.get_netmask_gateway6(port_pair[0])
-            dst_ip = self.get_ports_gateway6(port_pair[1])
-            dst_prefix_len = self.get_netmask_gateway6(port_pair[0])
+            src_net, src_prefix_len = self.get_network_and_prefixlen_from_ip_of_port(src_port)
+            dst_net, dst_prefix_len = self.get_network_and_prefixlen_from_ip_of_port(dst_port)
             # ignore entires with empty values
-            if all((src_ip, src_prefix_len, dst_ip, dst_prefix_len)):
-                new_ipv6_rules.append((cmd, self.txrx_pipeline, src_ip, src_prefix_len,
-                                       dst_ip, dst_prefix_len, dst_port))
-                new_ipv6_rules.append((cmd, self.txrx_pipeline, dst_ip, dst_prefix_len,
-                                       src_ip, src_prefix_len, src_port))
+            if all((src_net, src_prefix_len, dst_net, dst_prefix_len)):
+                new_rules.append((cmd, self.txrx_pipeline, src_net, src_prefix_len,
+                                  dst_net, dst_prefix_len, dst_port))
+                new_rules.append((cmd, self.txrx_pipeline, dst_net, dst_prefix_len,
+                                  src_net, src_prefix_len, src_port))
+
+            # src_net = self.get_ports_gateway6(port_pair[0])
+            # src_prefix_len = self.get_netmask_gateway6(port_pair[0])
+            # dst_net = self.get_ports_gateway6(port_pair[1])
+            # dst_prefix_len = self.get_netmask_gateway6(port_pair[0])
+            # # ignore entires with empty values
+            # if all((src_net, src_prefix_len, dst_net, dst_prefix_len)):
+            #     new_ipv6_rules.append((cmd, self.txrx_pipeline, src_net, src_prefix_len,
+            #                            dst_net, dst_prefix_len, dst_port))
+            #     new_ipv6_rules.append((cmd, self.txrx_pipeline, dst_net, dst_prefix_len,
+            #                            src_net, src_prefix_len, src_port))
 
         acl_apply = "\np %s applyruleset" % cmd
         new_rules_config = '\n'.join(pattern.format(*values) for values
@@ -607,7 +627,9 @@ class MultiPortConfig(object):
         script_data = {
             'link_config': self.generate_link_config(),
             'arp_config': self.generate_arp_config(),
-            'arp_config6': self.generate_arp_config6(),
+            # disable IPv6 for now
+            # 'arp_config6': self.generate_arp_config6(),
+            'arp_config6': "",
             'actions': '',
             'rules': '',
         }
index ce09b65..48bcd31 100644 (file)
@@ -27,7 +27,7 @@ from oslo_config import cfg
 
 from yardstick import ssh
 from yardstick.network_services.nfvi.collectd import AmqpConsumer
-from yardstick.network_services.utils import provision_tool
+from yardstick.network_services.utils import get_nsb_option
 
 LOG = logging.getLogger(__name__)
 
@@ -73,18 +73,18 @@ class ResourceProfile(object):
 
     @classmethod
     def parse_simple_resource(cls, key, value):
-        return {'/'.join(key): value.split(":")[1]}
+        reskey = "/".join(rkey for rkey in key if "nsb_stats" not in rkey)
+        return {reskey: value.split(":")[1]}
 
     @classmethod
-    def get_cpu_data(cls, key_split, value):
+    def get_cpu_data(cls, res_key0, res_key1, value):
         """ Get cpu topology of the host """
         pattern = r"-(\d+)"
-        if "cpufreq" in key_split[0]:
-            metric = key_split[0]
-            source = key_split[1]
+
+        if 'cpufreq' in res_key0:
+            metric, source = res_key0, res_key1
         else:
-            metric = key_split[1]
-            source = key_split[0]
+            metric, source = res_key1, res_key0
 
         match = re.search(pattern, source, re.MULTILINE)
         if not match:
@@ -128,7 +128,8 @@ class ResourceProfile(object):
             res_key1 = next(res_key_iter)
 
             if "cpu" in res_key0 or "intel_rdt" in res_key0:
-                cpu_key, name, metric, testcase = self.get_cpu_data(key_split, value)
+                cpu_key, name, metric, testcase = \
+                    self.get_cpu_data(res_key0, res_key1, value)
                 if cpu_key in core_list:
                     result["cpu"].setdefault(cpu_key, {}).update({name: metric})
 
@@ -136,16 +137,16 @@ class ResourceProfile(object):
                 result["memory"].update({res_key1: value.split(":")[0]})
 
             elif "hugepages" in res_key0:
-                result["hugepages"].update(self.parse_hugepages(key, value))
+                result["hugepages"].update(self.parse_hugepages(key_split, value))
 
             elif "dpdkstat" in res_key0:
-                result["dpdkstat"].update(self.parse_dpdkstat(key, value))
+                result["dpdkstat"].update(self.parse_dpdkstat(key_split, value))
 
             elif "virt" in res_key1:
-                result["virt"].update(self.parse_virt(key, value))
+                result["virt"].update(self.parse_virt(key_split, value))
 
             elif "ovs_stats" in res_key0:
-                result["ovs_stats"].update(self.parse_ovs_stats(key, value))
+                result["ovs_stats"].update(self.parse_ovs_stats(key_split, value))
 
         result["timestamp"] = testcase
 
@@ -153,13 +154,16 @@ class ResourceProfile(object):
 
     def amqp_process_for_nfvi_kpi(self):
         """ amqp collect and return nfvi kpis """
-        if self.amqp_client is None:
+        if self.amqp_client is None and self.enable:
             self.amqp_client = \
                 multiprocessing.Process(target=self.run_collectd_amqp)
             self.amqp_client.start()
 
     def amqp_collect_nfvi_kpi(self):
         """ amqp collect and return nfvi kpis """
+        if not self.enable:
+            return {}
+
         metric = {}
         while not self._queue.empty():
             metric.update(self._queue.get())
@@ -192,12 +196,21 @@ class ResourceProfile(object):
         self._provide_config_file(bin_path, 'collectd.conf', kwargs)
 
     def _start_collectd(self, connection, bin_path):
-        LOG.debug("Starting collectd to collect NFVi stats")
-        # temp disable
-        return
         connection.execute('sudo pkill -9 collectd')
-        collectd = os.path.join(bin_path, "collectd.sh")
-        provision_tool(connection, collectd)
+        bin_path = get_nsb_option("bin_path")
+        collectd_path = os.path.join(bin_path, "collectd", "collectd")
+        exit_status = connection.execute("which %s > /dev/null 2>&1" % collectd_path)[0]
+        if exit_status != 0:
+            LOG.warning("%s is not present disabling", collectd_path)
+            # disable auto-provisioning because it requires Internet access
+            # collectd_installer = os.path.join(bin_path, "collectd.sh")
+            # provision_tool(connection, collectd)
+            # http_proxy = os.environ.get('http_proxy', '')
+            # https_proxy = os.environ.get('https_proxy', '')
+            # connection.execute("sudo %s '%s' '%s'" % (
+            #     collectd_installer, http_proxy, https_proxy))
+            return
+        LOG.debug("Starting collectd to collect NFVi stats")
         self._prepare_collectd_conf(bin_path)
 
         # Reset amqp queue
@@ -209,15 +222,8 @@ class ResourceProfile(object):
         connection.execute("sudo rabbitmqctl start_app")
         connection.execute("sudo service rabbitmq-server restart")
 
-        # Run collectd
-
-        http_proxy = os.environ.get('http_proxy', '')
-        https_proxy = os.environ.get('https_proxy', '')
-        connection.execute("sudo %s '%s' '%s'" %
-                           (collectd, http_proxy, https_proxy))
         LOG.debug("Start collectd service.....")
-        connection.execute(
-            "sudo %s" % os.path.join(bin_path, "collectd", "collectd"))
+        connection.execute("sudo %s" % collectd_path)
         LOG.debug("Done")
 
     def initiate_systemagent(self, bin_path):
index ebc1e61..b7cd037 100644 (file)
 from __future__ import absolute_import
 
 from yardstick.network_services.traffic_profile.base import TrafficProfile
-from stl.trex_stl_lib.trex_stl_streams import STLTXCont
-from stl.trex_stl_lib.trex_stl_client import STLStream
-from stl.trex_stl_lib.trex_stl_packet_builder_scapy import STLPktBuilder
-from stl.trex_stl_lib import api as Pkt
+from trex_stl_lib.trex_stl_streams import STLTXCont
+from trex_stl_lib.trex_stl_client import STLStream
+from trex_stl_lib.trex_stl_packet_builder_scapy import STLPktBuilder
+from trex_stl_lib import api as Pkt
 
 
 class FixedProfile(TrafficProfile):
index 8a4f97f..3480565 100644 (file)
@@ -18,6 +18,7 @@ from __future__ import print_function
 import sys
 import os
 import logging
+import collections
 
 # ixload uses its own py2. So importing jsonutils fails. So adding below
 # workaround to support call from yardstick
@@ -26,8 +27,16 @@ try:
 except ImportError:
     import json as jsonutils
 
-from yardstick.common.utils import join_non_strings
-from yardstick.common.utils import ErrorClass
+
+class ErrorClass(object):
+
+    def __init__(self, *args, **kwargs):
+        if 'test' not in kwargs:
+            raise RuntimeError
+
+    def __getattr__(self, item):
+        raise AttributeError
+
 
 try:
     from IxLoad import IxLoad, StatCollectorUtils
@@ -80,11 +89,25 @@ Incoming stats: Time interval: %s
 """
 
 
+def validate_non_string_sequence(value, default=None, raise_exc=None):
+    if isinstance(value, collections.Sequence) and not isinstance(value, str):
+        return value
+    if raise_exc:
+        raise raise_exc
+    return default
+
+
+def join_non_strings(separator, *non_strings):
+    try:
+        non_strings = validate_non_string_sequence(non_strings[0], raise_exc=RuntimeError)
+    except (IndexError, RuntimeError):
+        pass
+    return str(separator).join(str(non_string) for non_string in non_strings)
+
+
 class IXLOADHttpTest(object):
 
     def __init__(self, test_input):
-        self.test_input = jsonutils.loads(test_input)
-        self.parse_run_test()
         self.ix_load = None
         self.stat_utils = None
         self.remote_server = None
@@ -94,6 +117,8 @@ class IXLOADHttpTest(object):
         self.chassis = None
         self.card = None
         self.ports_to_reassign = None
+        self.test_input = jsonutils.loads(test_input)
+        self.parse_run_test()
 
     @staticmethod
     def format_ports_for_reassignment(ports):
@@ -291,4 +316,5 @@ def main(args):
         ixload_obj.start_http_test()
 
 if __name__ == '__main__':
+    LOG.info("Start http_ixload test")
     main(sys.argv)
index b07bc9d..a3b8036 100644 (file)
@@ -17,9 +17,9 @@ from __future__ import absolute_import
 from __future__ import division
 import logging
 
-from stl.trex_stl_lib.trex_stl_client import STLStream
-from stl.trex_stl_lib.trex_stl_streams import STLFlowLatencyStats
-from stl.trex_stl_lib.trex_stl_streams import STLTXCont
+from trex_stl_lib.trex_stl_client import STLStream
+from trex_stl_lib.trex_stl_streams import STLFlowLatencyStats
+from trex_stl_lib.trex_stl_streams import STLTXCont
 
 from yardstick.network_services.traffic_profile.traffic_profile \
     import TrexProfile
index 3e1f8d8..7bbe892 100644 (file)
@@ -21,16 +21,17 @@ from random import SystemRandom
 import six
 
 from yardstick.network_services.traffic_profile.base import TrafficProfile
-from stl.trex_stl_lib.trex_stl_client import STLStream
-from stl.trex_stl_lib.trex_stl_streams import STLFlowLatencyStats
-from stl.trex_stl_lib.trex_stl_streams import STLTXCont
-from stl.trex_stl_lib.trex_stl_streams import STLProfile
-from stl.trex_stl_lib.trex_stl_packet_builder_scapy import STLVmWrFlowVar
-from stl.trex_stl_lib.trex_stl_packet_builder_scapy import STLVmFlowVar
-from stl.trex_stl_lib.trex_stl_packet_builder_scapy import STLPktBuilder
-from stl.trex_stl_lib.trex_stl_packet_builder_scapy import STLScVmRaw
-from stl.trex_stl_lib.trex_stl_packet_builder_scapy import STLVmFixIpv4
-from stl.trex_stl_lib import api as Pkt
+from trex_stl_lib.trex_stl_client import STLStream
+from trex_stl_lib.trex_stl_streams import STLFlowLatencyStats
+from trex_stl_lib.trex_stl_streams import STLTXCont
+from trex_stl_lib.trex_stl_streams import STLProfile
+from trex_stl_lib.trex_stl_packet_builder_scapy import STLVmWrFlowVar
+from trex_stl_lib.trex_stl_packet_builder_scapy import STLVmFlowVarRepeatableRandom
+from trex_stl_lib.trex_stl_packet_builder_scapy import STLVmFlowVar
+from trex_stl_lib.trex_stl_packet_builder_scapy import STLPktBuilder
+from trex_stl_lib.trex_stl_packet_builder_scapy import STLScVmRaw
+from trex_stl_lib.trex_stl_packet_builder_scapy import STLVmFixIpv4
+from trex_stl_lib import api as Pkt
 
 
 class TrexProfile(TrafficProfile):
@@ -132,7 +133,7 @@ class TrexProfile(TrafficProfile):
                                                 pkt_offset='Ether.dst')
             self.vm_flow_vars.append(stl_vm_wr_flow_var)
 
-    def set_src_ip4(self, src_ip4):
+    def set_src_ip4(self, src_ip4, count=1):
         """ set source ipv4 address fields """
         src_ips = src_ip4.split('-')
         min_value = src_ips[0]
@@ -141,12 +142,12 @@ class TrexProfile(TrafficProfile):
             src_ip4 = min_value
             self._set_ip_fields(src=src_ip4)
         else:
-            stl_vm_flow_var = STLVmFlowVar(name="ip4_src",
-                                           min_value=min_value,
-                                           max_value=max_value,
-                                           size=4,
-                                           op='random',
-                                           step=1)
+            stl_vm_flow_var = STLVmFlowVarRepeatableRandom(name="ip4_src",
+                                                           min_value=min_value,
+                                                           max_value=max_value,
+                                                           size=4,
+                                                           limit=int(count),
+                                                           seed=0x1235)
             self.vm_flow_vars.append(stl_vm_flow_var)
             stl_vm_wr_flow_var = STLVmWrFlowVar(fv_name='ip4_src',
                                                 pkt_offset='IP.src')
@@ -154,7 +155,7 @@ class TrexProfile(TrafficProfile):
             stl_vm_fix_ipv4 = STLVmFixIpv4(offset="IP")
             self.vm_flow_vars.append(stl_vm_fix_ipv4)
 
-    def set_dst_ip4(self, dst_ip4):
+    def set_dst_ip4(self, dst_ip4, count=1):
         """ set destination ipv4 address fields """
         dst_ips = dst_ip4.split('-')
         min_value = dst_ips[0]
@@ -163,12 +164,12 @@ class TrexProfile(TrafficProfile):
             dst_ip4 = min_value
             self._set_ip_fields(dst=dst_ip4)
         else:
-            stl_vm_flow_var = STLVmFlowVar(name="dst_ip4",
-                                           min_value=min_value,
-                                           max_value=max_value,
-                                           size=4,
-                                           op='random',
-                                           step=1)
+            stl_vm_flow_var = STLVmFlowVarRepeatableRandom(name="dst_ip4",
+                                                           min_value=min_value,
+                                                           max_value=max_value,
+                                                           size=4,
+                                                           limit=int(count),
+                                                           seed=0x1235)
             self.vm_flow_vars.append(stl_vm_flow_var)
             stl_vm_wr_flow_var = STLVmWrFlowVar(fv_name='dst_ip4',
                                                 pkt_offset='IP.dst')
@@ -240,7 +241,7 @@ class TrexProfile(TrafficProfile):
                                                 pkt_offset='IP.tos')
             self.vm_flow_vars.append(stl_vm_wr_flow_var)
 
-    def set_src_port(self, src_port):
+    def set_src_port(self, src_port, count=1):
         """ set packet source port """
         src_ports = str(src_port).split('-')
         min_value = int(src_ports[0])
@@ -250,18 +251,18 @@ class TrexProfile(TrafficProfile):
             self._set_udp_fields(sport=src_port)
         else:
             max_value = int(src_ports[1])
-            stl_vm_flow_var = STLVmFlowVar(name="port_src",
-                                           min_value=min_value,
-                                           max_value=max_value,
-                                           size=2,
-                                           op='random',
-                                           step=1)
+            stl_vm_flow_var = STLVmFlowVarRepeatableRandom(name="port_src",
+                                                           min_value=min_value,
+                                                           max_value=max_value,
+                                                           size=2,
+                                                           limit=int(count),
+                                                           seed=0x1235)
             self.vm_flow_vars.append(stl_vm_flow_var)
             stl_vm_wr_flow_var = STLVmWrFlowVar(fv_name='port_src',
                                                 pkt_offset=self.udp_sport)
             self.vm_flow_vars.append(stl_vm_wr_flow_var)
 
-    def set_dst_port(self, dst_port):
+    def set_dst_port(self, dst_port, count=1):
         """ set packet destnation port """
         dst_ports = str(dst_port).split('-')
         min_value = int(dst_ports[0])
@@ -271,12 +272,13 @@ class TrexProfile(TrafficProfile):
             self._set_udp_fields(dport=dst_port)
         else:
             max_value = int(dst_ports[1])
-            stl_vm_flow_var = STLVmFlowVar(name="port_dst",
-                                           min_value=min_value,
-                                           max_value=max_value,
-                                           size=2,
-                                           op='random',
-                                           step=1)
+            stl_vm_flow_var = \
+                STLVmFlowVarRepeatableRandom(name="port_dst",
+                                             min_value=min_value,
+                                             max_value=max_value,
+                                             size=2,
+                                             limit=int(count),
+                                             seed=0x1235)
             self.vm_flow_vars.append(stl_vm_flow_var)
             stl_vm_wr_flow_var = STLVmWrFlowVar(fv_name='port_dst',
                                                 pkt_offset=self.udp_dport)
@@ -335,9 +337,9 @@ class TrexProfile(TrafficProfile):
         if 'dscp' in outer_l3v4:
             self.set_dscp(outer_l3v4['dscp'])
         if 'srcip4' in outer_l3v4:
-            self.set_src_ip4(outer_l3v4['srcip4'])
+            self.set_src_ip4(outer_l3v4['srcip4'], outer_l3v4['count'])
         if 'dstip4' in outer_l3v4:
-            self.set_dst_ip4(outer_l3v4['dstip4'])
+            self.set_dst_ip4(outer_l3v4['dstip4'], outer_l3v4['count'])
 
     def set_outer_l3v6_fields(self, outer_l3v6):
         """ setup outer l3v6 fields from traffic profile """
@@ -367,9 +369,9 @@ class TrexProfile(TrafficProfile):
     def set_outer_l4_fields(self, outer_l4):
         """ setup outer l4 fields from traffic profile """
         if 'srcport' in outer_l4:
-            self.set_src_port(outer_l4['srcport'])
+            self.set_src_port(outer_l4['srcport'], outer_l4['count'])
         if 'dstport' in outer_l4:
-            self.set_dst_port(outer_l4['dstport'])
+            self.set_dst_port(outer_l4['dstport'], outer_l4['count'])
 
     def generate_imix_data(self, packet_definition):
         """ generate packet size for a given traffic profile """
index 0264bbc..d52e27c 100644 (file)
@@ -30,7 +30,10 @@ OPTS = [
                help='bin_path for VNFs location.'),
     cfg.StrOpt('trex_path',
                default=os.path.join(NSB_ROOT, 'trex/scripts'),
-               help='trex automation lib pathh.'),
+               help='trex automation lib path.'),
+    cfg.StrOpt('trex_client_lib',
+               default=os.path.join(NSB_ROOT, 'trex_client/stl'),
+               help='trex python library path.'),
 ]
 CONF.register_opts(OPTS, group="nsb")
 
index 0434f6a..7a75683 100644 (file)
@@ -37,9 +37,9 @@ from yardstick.network_services.vnf_generic.vnf.base import QueueFileWrapper
 from yardstick.network_services.vnf_generic.vnf.base import GenericTrafficGen
 from yardstick.network_services.utils import get_nsb_option
 
-from stl.trex_stl_lib.trex_stl_client import STLClient
-from stl.trex_stl_lib.trex_stl_client import LoggerApi
-from stl.trex_stl_lib.trex_stl_exceptions import STLError
+from trex_stl_lib.trex_stl_client import STLClient
+from trex_stl_lib.trex_stl_client import LoggerApi
+from trex_stl_lib.trex_stl_exceptions import STLError
 
 from yardstick.ssh import AutoConnectSSH
 
index c504900..353d31f 100644 (file)
@@ -46,7 +46,7 @@ IXLOAD_CONFIG_TEMPLATE = '''\
     },
     "remote_server": "%s",
     "result_dir": "%s",
-    "ixload_cfg": '"C:/Results/%s"
+    "ixload_cfg": "C:/Results/%s"
 }'''
 
 IXLOAD_CMD = "{ixloadpy} {http_ixload} {args}"
@@ -130,7 +130,7 @@ class IxLoadTrafficGen(SampleVNFTrafficGen):
         for interface in self.vnfd_helper.interfaces:
             vpci_list = interface['virtual-interface']["vpci"].split(":")
             card = vpci_list[0]
-            ports.append(vpci_list[1])
+            ports.append(str(vpci_list[1]))
 
         for csv_file in glob.iglob(self.ssh_helper.join_bin_path('*.csv')):
             os.unlink(csv_file)
@@ -142,6 +142,7 @@ class IxLoadTrafficGen(SampleVNFTrafficGen):
             os.path.basename(self.resource_helper.resource_file_name))
 
         http_ixload_path = os.path.join(VNF_PATH, "../../traffic_profile")
+
         cmd = IXLOAD_CMD.format(
             ixloadpy=os.path.join(ixia_config["py_bin_path"], "ixloadpython"),
             http_ixload=os.path.join(http_ixload_path, "http_ixload.py"),
@@ -168,7 +169,10 @@ class IxLoadTrafficGen(SampleVNFTrafficGen):
 
     def instantiate(self, scenario_cfg, context_cfg):
         super(IxLoadTrafficGen, self).instantiate(scenario_cfg, context_cfg)
-        self.done = False
+
+    def wait_for_instantiate(self):
+        # not needed for Ixload
+        pass
 
     def terminate(self):
         call(["pkill", "-9", "http_ixload.py"])
index 4abe060..78d2bd8 100644 (file)
@@ -63,8 +63,8 @@ class IxiaResourceHelper(ClientResourceHelper):
 
     def _build_ports(self):
         # self.generate_port_pairs(self.topology)
-        self.priv_ports = [int(x[0][-1]) for x in self.tg_port_pairs]
-        self.pub_ports = [int(x[1][-1]) for x in self.tg_port_pairs]
+        self.priv_ports = [int(x[0][2:]) for x in self.tg_port_pairs]
+        self.pub_ports = [int(x[1][2:]) for x in self.tg_port_pairs]
         self.my_ports = list(set(self.priv_ports).union(set(self.pub_ports)))
 
     def get_stats(self, *args, **kwargs):
index 5480608..15c9c0e 100644 (file)
@@ -52,8 +52,8 @@ class TrexRfcResourceHelper(TrexResourceHelper):
     def _build_ports(self):
         self.tg_port_pairs, self.networks = MultiPortConfig.get_port_pairs(
             self.vnfd_helper.interfaces)
-        self.priv_ports = [int(x[0][-1]) for x in self.tg_port_pairs]
-        self.pub_ports = [int(x[1][-1]) for x in self.tg_port_pairs]
+        self.priv_ports = [int(x[0][2:]) for x in self.tg_port_pairs]
+        self.pub_ports = [int(x[1][2:]) for x in self.tg_port_pairs]
         self.my_ports = list(set(chain(self.priv_ports, self.pub_ports)))
 
     def _run_traffic_once(self, traffic_profile):
@@ -98,7 +98,7 @@ class TrexRfcResourceHelper(TrexResourceHelper):
 
     def collect_kpi(self):
         self.rfc2544_helper.iteration.value += 1
-        super(TrexRfcResourceHelper, self).collect_kpi()
+        return super(TrexRfcResourceHelper, self).collect_kpi()
 
 
 class TrexTrafficGenRFC(TrexTrafficGen):
index c21a474..8c7b1e4 100644 (file)
@@ -265,7 +265,7 @@ name (i.e. %s).\
             self.resources[name]['properties']['mountpoint'] = mountpoint
 
     def add_network(self, name, physical_network='physnet1', provider=None,
-                    segmentation_id=None, port_security_enabled=None):
+                    segmentation_id=None, port_security_enabled=None, network_type=None):
         """add to the template a Neutron Net"""
         log.debug("adding Neutron::Net '%s'", name)
         if provider is None:
@@ -280,12 +280,14 @@ name (i.e. %s).\
                 'type': 'OS::Neutron::ProviderNet',
                 'properties': {
                     'name': name,
-                    'network_type': 'vlan',
+                    'network_type': 'flat' if network_type is None else network_type,
                     'physical_network': physical_network,
                 },
             }
             if segmentation_id:
                 self.resources[name]['properties']['segmentation_id'] = segmentation_id
+                if network_type is None:
+                    self.resources[name]['properties']['network_type'] = 'vlan'
         # if port security is not defined then don't add to template:
         # some deployments don't have port security plugin installed
         if port_security_enabled is not None:
index 8ac3eaa..a024cf6 100644 (file)
@@ -423,6 +423,12 @@ class SSH(object):
             if mode is not None:
                 sftp.chmod(remotepath, mode)
 
+    def get_file_obj(self, remotepath, file_obj):
+        client = self._get_client()
+
+        with client.open_sftp() as sftp:
+            sftp.getfo(remotepath, file_obj)
+
 
 class AutoConnectSSH(SSH):
 
@@ -471,6 +477,10 @@ class AutoConnectSSH(SSH):
         self._connect()
         return super(AutoConnectSSH, self).put_file_obj(file_obj, remote_path, mode)
 
+    def get_file_obj(self, remote_path, file_obj):
+        self._connect()
+        return super(AutoConnectSSH, self).get_file_obj(remote_path, file_obj)
+
     def provision_tool(self, tool_path, tool_file=None):
         self._connect()
         return super(AutoConnectSSH, self).provision_tool(tool_path, tool_file)