Merge "Add pod.yaml files for Apex"
author: Ross Brattain <ross.b.brattain@intel.com>
Tue, 8 May 2018 08:57:04 +0000 (08:57 +0000)
committer: Gerrit Code Review <gerrit@opnfv.org>
Tue, 8 May 2018 08:57:04 +0000 (08:57 +0000)
223 files changed:
INFO
INFO.yaml
ansible/build_yardstick_image.yml
ansible/image_uploaders/upload_yardstick_image.yml
ansible/infra_deploy.yml
ansible/install.yaml [new file with mode: 0644]
ansible/multi_port_baremetal_ixia_correlated_test.yaml
ansible/multi_port_baremetal_ixia_test.yaml
ansible/nsb_setup.yml
ansible/roles/add_repos_jumphost/tasks/Debian.yml [new file with mode: 0644]
ansible/roles/add_repos_jumphost/tasks/main.yml [moved from ansible/install_dependencies.yml with 81% similarity]
ansible/roles/add_repos_jumphost/vars/main.yml [new file with mode: 0644]
ansible/roles/configure_gui/tasks/main.yml [new file with mode: 0644]
ansible/roles/configure_nginx/tasks/main.yml [new file with mode: 0644]
ansible/roles/configure_nginx/templates/yardstick.conf.j2 [new file with mode: 0644]
ansible/roles/configure_rabbitmq/tasks/main.yml [new file with mode: 0644]
ansible/roles/configure_uwsgi/tasks/main.yml [new file with mode: 0644]
ansible/roles/configure_uwsgi/templates/yardstick.ini.j2 [new file with mode: 0644]
ansible/roles/docker/tasks/Debian.yml
ansible/roles/docker/vars/main.yml
ansible/roles/download_collectd/defaults/main.yml
ansible/roles/download_dpdk/defaults/main.yml
ansible/roles/download_pktgen/defaults/main.yml [new file with mode: 0644]
ansible/roles/download_pktgen/tasks/main.yml [new file with mode: 0644]
ansible/roles/download_trex/tasks/main.yml
ansible/roles/infra_check_requirements/tasks/main.yml
ansible/roles/infra_create_vms/tasks/configure_vm.yml
ansible/roles/infra_deploy_openstack/tasks/configure_kolla.yml [new file with mode: 0644]
ansible/roles/infra_deploy_openstack/tasks/configure_openstack.yml [new file with mode: 0644]
ansible/roles/infra_deploy_openstack/tasks/install_kolla.yml [new file with mode: 0644]
ansible/roles/infra_deploy_openstack/tasks/main.yml [new file with mode: 0644]
ansible/roles/infra_deploy_openstack/tasks/rampup_openstack.yml [new file with mode: 0644]
ansible/roles/infra_deploy_openstack/templates/multinode.j2 [new file with mode: 0644]
ansible/roles/infra_deploy_openstack/vars/main.yml [new file with mode: 0644]
ansible/roles/infra_destroy_previous_configuration/tasks/delete_network.yml
ansible/roles/infra_destroy_previous_configuration/tasks/delete_vm.yml
ansible/roles/infra_destroy_previous_configuration/tasks/main.yml
ansible/roles/infra_prepare_vms/tasks/main.yml [new file with mode: 0644]
ansible/roles/infra_rampup_stack_nodes/tasks/configure_docker.yml [new file with mode: 0644]
ansible/roles/infra_rampup_stack_nodes/tasks/install_packets.yml [new file with mode: 0644]
ansible/roles/infra_rampup_stack_nodes/tasks/main.yml [new file with mode: 0644]
ansible/roles/infra_rampup_stack_nodes/tasks/update_conf_files.yml [new file with mode: 0644]
ansible/roles/infra_rampup_stack_nodes/tasks/update_keys.yml [new file with mode: 0644]
ansible/roles/infra_rampup_stack_nodes/vars/main.yml [new file with mode: 0644]
ansible/roles/install_civetweb/defaults/main.yml
ansible/roles/install_dependencies_jumphost/tasks/Debian.yml [new file with mode: 0755]
ansible/roles/install_dependencies_jumphost/tasks/RedHat.yml [moved from ansible/roles/install_dependencies/tasks/RedHat.yml with 90% similarity]
ansible/roles/install_dependencies_jumphost/tasks/Suse.yml [moved from ansible/roles/install_dependencies/tasks/Debian.yml with 70% similarity, mode: 0644]
ansible/roles/install_dependencies_jumphost/tasks/main.yml [moved from ansible/roles/install_dependencies/tasks/main.yml with 100% similarity]
ansible/roles/install_dpdk/tasks/main.yml
ansible/roles/install_dpdk/vars/main.yml
ansible/roles/install_dpdk_shared/tasks/Debian.yml
ansible/roles/install_dpdk_shared/tasks/main.yml
ansible/roles/install_dpdk_shared/vars/main.yml
ansible/roles/install_image_dependencies/defaults/main.yml
ansible/roles/install_pktgen/tasks/main.yml [new file with mode: 0644]
ansible/roles/install_yardstick/tasks/main.yml [new file with mode: 0644]
ansible/roles/install_yardstick/tasks/regular_install.yml [new file with mode: 0644]
ansible/roles/install_yardstick/tasks/virtual_install.yml [new file with mode: 0644]
ansible/standalone_ovs_scale_out_ixia_correlated_test.yaml
ansible/standalone_ovs_scale_out_ixia_test.yaml
ansible/standalone_sriov_scale_out_ixia_correlated_test.yaml
ansible/standalone_sriov_scale_out_ixia_test.yaml
ansible/ubuntu_server_cloudimg_modify_samplevnfs.yml
api/resources/v1/env.py
api/resources/v1/testsuites.py
api/resources/v2/images.py
dashboard/opnfv_yardstick_tc056.json [new file with mode: 0644]
dashboard/opnfv_yardstick_tc058.json [new file with mode: 0644]
docker/Dockerfile
docker/Dockerfile.aarch64.patch
docker/supervisor.sh
docs/release/release-notes/release-notes.rst
docs/testing/user/userguide/01-introduction.rst
docs/testing/user/userguide/03-architecture.rst
docs/testing/user/userguide/04-installation.rst
docs/testing/user/userguide/05-operation.rst [new file with mode: 0644]
docs/testing/user/userguide/06-yardstick-plugin.rst [moved from docs/testing/user/userguide/05-yardstick_plugin.rst with 75% similarity]
docs/testing/user/userguide/07-result-store-InfluxDB.rst [moved from docs/testing/user/userguide/06-result-store-InfluxDB.rst with 83% similarity]
docs/testing/user/userguide/08-grafana.rst [moved from docs/testing/user/userguide/07-grafana.rst with 95% similarity]
docs/testing/user/userguide/09-api.rst [moved from docs/testing/user/userguide/08-api.rst with 84% similarity]
docs/testing/user/userguide/10-yardstick-user-interface.rst [moved from docs/testing/user/userguide/09-yardstick_user_interface.rst with 94% similarity]
docs/testing/user/userguide/11-vtc-overview.rst [moved from docs/testing/user/userguide/10-vtc-overview.rst with 90% similarity]
docs/testing/user/userguide/12-nsb-overview.rst [moved from docs/testing/user/userguide/11-nsb-overview.rst with 93% similarity]
docs/testing/user/userguide/13-nsb-installation.rst [moved from docs/testing/user/userguide/12-nsb_installation.rst with 80% similarity]
docs/testing/user/userguide/14-nsb-operation.rst [moved from docs/testing/user/userguide/13-nsb_operation.rst with 63% similarity]
docs/testing/user/userguide/15-list-of-tcs.rst
docs/testing/user/userguide/code/pod_ixia.yaml [new file with mode: 0644]
docs/testing/user/userguide/index.rst
docs/testing/user/userguide/opnfv_yardstick_tc050.rst
docs/testing/user/userguide/opnfv_yardstick_tc087.rst [new file with mode: 0644]
docs/testing/user/userguide/opnfv_yardstick_tc090.rst [new file with mode: 0644]
docs/testing/user/userguide/opnfv_yardstick_tc091.rst [new file with mode: 0644]
etc/infra/infra_deploy_multi.yaml.sample [new file with mode: 0644]
etc/infra/infra_deploy_one.yaml.sample [moved from etc/infra/infra_deploy.yaml.sample with 51% similarity]
etc/infra/infra_deploy_two.yaml.sample [new file with mode: 0644]
etc/yardstick/nodes/pod.yaml.nsb.sample.ixia
etc/yardstick/nodes/standalone/ixia_correlated_template.yaml
etc/yardstick/nodes/standalone/ixia_template.yaml
install.sh
requirements.txt
samples/dummy-no-context.yaml
samples/parser.yaml
samples/storperf.yaml
samples/vnf_samples/vnf_descriptors/ixia_rfc2544_tpl.yaml
samples/vnf_samples/vnf_descriptors/tg_ixload.yaml
samples/vnf_samples/vnf_descriptors/tg_ixload_4port.yaml
tests/ci/load_images.sh
tests/opnfv/test_cases/opnfv_yardstick_tc040.yaml
tests/opnfv/test_cases/opnfv_yardstick_tc042.yaml
tests/opnfv/test_cases/opnfv_yardstick_tc050.yaml
tests/opnfv/test_cases/opnfv_yardstick_tc074.yaml
tests/opnfv/test_cases/opnfv_yardstick_tc090.yaml [new file with mode: 0644]
tests/opnfv/test_cases/opnfv_yardstick_tc091.yaml [new file with mode: 0644]
tests/opnfv/test_suites/opnfv_k8-canal-lb-noha_daily.yaml [moved from tests/opnfv/test_suites/opnfv_vTC_daily.yaml with 64% similarity]
tests/opnfv/test_suites/opnfv_k8-multus-lb-noha_daily.yaml [moved from tests/opnfv/test_suites/opnfv_vTC_weekly.yaml with 64% similarity]
tests/opnfv/test_suites/opnfv_k8-multus-nofeature-noha_daily.yaml [new file with mode: 0644]
tests/opnfv/test_suites/opnfv_k8-nosdn-nofeature-ha_daily.yaml [new file with mode: 0644]
tests/opnfv/test_suites/opnfv_k8-nosdn-nofeature-noha_daily.yaml
tests/opnfv/test_suites/opnfv_k8-nosdn-stor4nfv-ha_daily.yaml [new file with mode: 0644]
tests/opnfv/test_suites/opnfv_k8-nosdn-stor4nfv-noha_daily.yaml [new file with mode: 0644]
tests/opnfv/test_suites/opnfv_k8-ocl-lb-noha_daily.yaml [new file with mode: 0644]
tests/opnfv/test_suites/opnfv_k8-sriov-cni-nofeature-noha_daily.yaml [new file with mode: 0644]
tests/opnfv/test_suites/opnfv_os-nosdn-calipso-noha_daily.yaml [new file with mode: 0644]
tests/opnfv/test_suites/opnfv_os-nosdn-fdio-noha_daily.yaml
tests/opnfv/test_suites/opnfv_os-nosdn-kvm_ovs_dpdk-ha_daily.yaml
tests/opnfv/test_suites/opnfv_os-nosdn-kvm_ovs_dpdk-noha_daily.yaml
tests/opnfv/test_suites/opnfv_os-nosdn-nofeature-ha_daily.yaml
tests/opnfv/test_suites/opnfv_os-odl-ovs_dpdk-ha_daily.yaml [new file with mode: 0644]
tests/opnfv/test_suites/opnfv_os-odl-ovs_dpdk-noha_daily.yaml [new file with mode: 0644]
tests/opnfv/test_suites/opnfv_os-odl-ovs_offload-noha_daily.yaml [new file with mode: 0644]
tests/opnfv/test_suites/opnfv_os-odl-sriov-noha_daily.yaml [new file with mode: 0644]
tests/opnfv/test_suites/opnfv_os-odl_l2-fdio-noha_daily.yaml
tests/unit/network_services/helpers/test_samplevnf_helper.py
tests/unit/network_services/nfvi/test_resource.py
tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py
tools/virt_ci_rampup.sh
yardstick/benchmark/contexts/base.py
yardstick/benchmark/contexts/heat.py
yardstick/benchmark/contexts/standalone/model.py
yardstick/benchmark/contexts/standalone/ovs_dpdk.py
yardstick/benchmark/contexts/standalone/sriov.py
yardstick/benchmark/core/task.py
yardstick/benchmark/runners/base.py
yardstick/benchmark/runners/duration.py
yardstick/benchmark/runners/iteration.py
yardstick/benchmark/scenarios/availability/scenario_general.py
yardstick/benchmark/scenarios/availability/serviceha.py
yardstick/benchmark/scenarios/base.py
yardstick/benchmark/scenarios/compute/unixbench_benchmark.bash
yardstick/benchmark/scenarios/lib/attach_volume.py
yardstick/benchmark/scenarios/lib/create_floating_ip.py
yardstick/benchmark/scenarios/lib/create_keypair.py
yardstick/benchmark/scenarios/lib/create_sec_group.py
yardstick/benchmark/scenarios/lib/create_server.py
yardstick/benchmark/scenarios/lib/delete_floating_ip.py
yardstick/benchmark/scenarios/lib/delete_keypair.py
yardstick/benchmark/scenarios/lib/delete_network.py
yardstick/benchmark/scenarios/lib/delete_router_interface.py
yardstick/benchmark/scenarios/lib/delete_server.py
yardstick/benchmark/scenarios/lib/get_flavor.py
yardstick/benchmark/scenarios/lib/get_server.py
yardstick/benchmark/scenarios/networking/pktgen_dpdk.py
yardstick/benchmark/scenarios/networking/pktgen_dpdk_latency_benchmark.bash
yardstick/benchmark/scenarios/networking/testpmd_fwd.bash
yardstick/benchmark/scenarios/networking/vnf_generic.py
yardstick/cmd/commands/task.py
yardstick/common/ansible_common.py
yardstick/common/constants.py
yardstick/common/exceptions.py
yardstick/common/messaging/__init__.py [new file with mode: 0644]
yardstick/common/messaging/consumer.py [new file with mode: 0644]
yardstick/common/messaging/payloads.py [new file with mode: 0644]
yardstick/common/messaging/producer.py [new file with mode: 0644]
yardstick/common/openstack_utils.py
yardstick/common/utils.py
yardstick/network_services/libs/ixia_libs/IxNet/IxNet.py
yardstick/network_services/nfvi/resource.py
yardstick/network_services/vnf_generic/vnf/sample_vnf.py
yardstick/orchestrator/heat.py
yardstick/tests/functional/common/messaging/__init__.py [moved from tests/unit/network_services/collector/__init__.py with 100% similarity]
yardstick/tests/functional/common/messaging/test_messaging.py [new file with mode: 0644]
yardstick/tests/integration/dummy-scenario-heat-context.yaml [new file with mode: 0644]
yardstick/tests/unit/apiserver/resources/v1/__init__.py [moved from tests/unit/network_services/libs/__init__.py with 100% similarity]
yardstick/tests/unit/apiserver/resources/v1/test_testsuites.py [new file with mode: 0644]
yardstick/tests/unit/apiserver/resources/v2/__init__.py [moved from tests/unit/network_services/libs/ixia_libs/__init__.py with 100% similarity]
yardstick/tests/unit/apiserver/resources/v2/test_images.py [new file with mode: 0644]
yardstick/tests/unit/benchmark/contexts/standalone/test_model.py
yardstick/tests/unit/benchmark/contexts/standalone/test_ovs_dpdk.py
yardstick/tests/unit/benchmark/contexts/standalone/test_sriov.py
yardstick/tests/unit/benchmark/contexts/test_base.py
yardstick/tests/unit/benchmark/contexts/test_heat.py
yardstick/tests/unit/benchmark/scenarios/availability/test_scenario_general.py
yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py
yardstick/tests/unit/benchmark/scenarios/lib/test_attach_volume.py
yardstick/tests/unit/benchmark/scenarios/lib/test_create_floating_ip.py
yardstick/tests/unit/benchmark/scenarios/lib/test_create_keypair.py
yardstick/tests/unit/benchmark/scenarios/lib/test_create_sec_group.py
yardstick/tests/unit/benchmark/scenarios/lib/test_create_server.py
yardstick/tests/unit/benchmark/scenarios/lib/test_delete_floating_ip.py
yardstick/tests/unit/benchmark/scenarios/lib/test_delete_keypair.py
yardstick/tests/unit/benchmark/scenarios/lib/test_delete_network.py
yardstick/tests/unit/benchmark/scenarios/lib/test_delete_router_interface.py
yardstick/tests/unit/benchmark/scenarios/lib/test_delete_server.py
yardstick/tests/unit/benchmark/scenarios/lib/test_get_flavor.py
yardstick/tests/unit/benchmark/scenarios/lib/test_get_server.py
yardstick/tests/unit/benchmark/scenarios/test_base.py
yardstick/tests/unit/common/messaging/__init__.py [new file with mode: 0644]
yardstick/tests/unit/common/messaging/test_consumer.py [new file with mode: 0644]
yardstick/tests/unit/common/messaging/test_payloads.py [new file with mode: 0644]
yardstick/tests/unit/common/messaging/test_producer.py [new file with mode: 0644]
yardstick/tests/unit/common/test_openstack_utils.py
yardstick/tests/unit/common/test_utils.py
yardstick/tests/unit/network_services/__init__.py [new file with mode: 0644]
yardstick/tests/unit/network_services/collector/__init__.py [new file with mode: 0644]
yardstick/tests/unit/network_services/collector/test_publisher.py [moved from tests/unit/network_services/collector/test_publisher.py with 91% similarity]
yardstick/tests/unit/network_services/collector/test_subscriber.py [moved from tests/unit/network_services/collector/test_subscriber.py with 94% similarity]
yardstick/tests/unit/network_services/libs/__init__.py [new file with mode: 0644]
yardstick/tests/unit/network_services/libs/ixia_libs/__init__.py [new file with mode: 0644]
yardstick/tests/unit/network_services/libs/ixia_libs/test_IxNet.py [moved from tests/unit/network_services/libs/ixia_libs/test_IxNet.py with 97% similarity]
yardstick/tests/unit/network_services/test_utils.py [moved from tests/unit/network_services/test_utils.py with 99% similarity]
yardstick/tests/unit/network_services/test_yang_model.py [moved from tests/unit/network_services/test_yang_model.py with 96% similarity]
yardstick/tests/unit/orchestrator/test_heat.py

diff --git a/INFO b/INFO
index 1a49af2..9bcc292 100644 (file)
--- a/INFO
+++ b/INFO
@@ -11,7 +11,6 @@ IRC: #opnfv-yardstick
 Repository: yardstick
 
 Committers:
-jorgen.w.karlsson@ericsson.com
 jean.gaoliang@huawei.com
 lvjing5@huawei.com
 wu.zhihui1@zte.com.cn
index 730cd4a..677c470 100644 (file)
--- a/INFO.yaml
+++ b/INFO.yaml
@@ -34,10 +34,6 @@ repositories:
     - 'yardstick'
 committers:
     - <<: *opnfv_yardstick_ptl
-    - name: 'Jörgen Karlsson'
-      email: 'jorgen.w.karlsson@ericsson.com'
-      company: 'ericsson.com'
-      id: 'jnon'
     - name: 'Kubi'
       email: 'jean.gaoliang@huawei.com'
       company: 'huawei.com'
index c9b6e74..072c12c 100644 (file)
@@ -59,7 +59,7 @@
     - set_fact:
         raw_imgfile: "{{ workspace }}/{{ raw_imgfile_basename }}"
 
-  # cleanup non-lxd
+    # cleanup non-lxd
     - name: unmount all old mount points
       mount:
         name: "{{ item }}"
       command: kpartx -dv "{{ raw_imgfile }}"
       ignore_errors: true
 
+    - name: Debug dump loop devices
+      command: losetup -a
+      ignore_errors: true
+
+    - name: delete loop devices for image file
+      # use this because kpartx -dv will fail if raw_imgfile was deleted
+      # but in theory we could have deleted file still attached to loopback device?
+      # use grep because of // and awk
+      shell: losetup -O NAME,BACK-FILE | grep "{{ raw_imgfile_basename }}" | awk '{ print $1 }' | xargs -l1 losetup -v -d
+      ignore_errors: true
+
+    - name: Debug dump loop devices again
+      command: losetup -a
+      ignore_errors: true
+
     - name: delete {{ raw_imgfile }}
       file:
         path: "{{ raw_imgfile }}"
       tags: mknod_devices
 
     - name: find first partition device
-#      command: kpartx -l "{{ loop_device }}"
       command: kpartx -l "{{ raw_imgfile }}"
       register: kpartx_res
 
index 9364c52..e61e2ab 100644 (file)
     - get_url:
         url: "{{ CLOUD_IMG_URL }}"
         dest: "{{ CLOUD_IMAGE }}"
-
-    - name: Extra cloud image kernel
-    - unarchive:
-        asdf:
+#
+#    - name: Extra cloud image kernel
+#    - unarchive:
 
     - os_image:
         name: yardstick-{{ release }}-kernel
index 4ad21af..8cf5dff 100644 (file)
 # limitations under the License.
 ---
 - hosts: jumphost
+  vars:
+    rs_file: "{{ RS_FILE }}"
+    clean_up: "{{ CLEAN_UP | default(False) }}" # If True, all VMs, networks and disk images will be deleted
+
+  tasks:
+  - set_fact:
+      proxy_host: "{{ lookup('env', 'http_proxy') | urlsplit('hostname') }}"
+      proxy_proto: "{{ lookup('env', 'http_proxy') | urlsplit('scheme') }}"
+      proxy_port: "{{ lookup('env', 'http_proxy') | urlsplit('port') }}"
+
+  - set_fact:
+      proxy_host_ip: "{{ lookup('dig', proxy_host) }}"
 
   roles:
-    - infra_check_requirements
     - infra_destroy_previous_configuration
+    - infra_check_requirements
     - infra_create_network
     - infra_create_vms
+    - infra_prepare_vms
+
+- hosts: deploy,regular,yardstickG
+  gather_facts: no
+  become: yes
+
+  roles:
+  - infra_rampup_stack_nodes
+
+
+- hosts: deploy
+  become: yes
+  environment: "{{ proxy_env }}"
+
+  roles:
+  - infra_deploy_openstack
diff --git a/ansible/install.yaml b/ansible/install.yaml
new file mode 100644 (file)
index 0000000..afffbed
--- /dev/null
@@ -0,0 +1,42 @@
+---
+- hosts: localhost
+
+  vars:
+    arch_amd64: "amd64"
+    arch_arm64: "arm64"
+    inst_mode_container: "container"
+    inst_mode_baremetal: "baremetal"
+    ubuntu_archive:
+      amd64: "http://archive.ubuntu.com/ubuntu/"
+      arm64: "http://ports.ubuntu.com/ubuntu-ports/"
+    installation_mode: "{{ INSTALLATION_MODE | default('baremetal') }}"
+    yardstick_dir: "{{ YARDSTICK_DIR | default('/home/opnfv/repos/yardstick') }}"
+    virtual_environment: "{{ VIRTUAL_ENVIRONMENT | default(False) }}"
+    nsb_dir: "{{ NSB_DIR | default('/opt/nsb_bin/') }}"
+
+  pre_tasks:
+
+    - name: Create NSB binaries directory, accessible to any user
+      file:
+        path: "{{ nsb_dir }}"
+        state: directory
+        owner: root
+        mode: 0777
+
+  roles:
+    - add_repos_jumphost
+    - install_dependencies_jumphost
+    - install_yardstick
+    - configure_uwsgi
+    - configure_nginx
+    - download_trex
+    - install_trex
+    - configure_rabbitmq
+
+  post_tasks:
+
+    - service:
+        name: nginx
+        state: restarted
+
+    - shell: uwsgi -i /etc/yardstick/yardstick.ini
index ba92b5c..0d22318 100644 (file)
@@ -42,7 +42,6 @@
         lib_path: "/opt/ixia/ixos-api/8.01.0.2/lib/ixTcl1.0"
         root_dir: "/opt/ixia/ixos-api/8.01.0.2/"
         py_bin_path: "/opt/ixia/ixload/8.01.106.3/bin/"
-        py_lib_path: "/opt/ixia/ixnetwork/8.01.1029.14/lib/PythonApi"
         dut_result_dir: "/mnt/results"
         version: "8.01.106.3"
       pcis:
index 52bc40b..d2dfaa3 100644 (file)
@@ -42,7 +42,6 @@
         lib_path: "/opt/ixia/ixos-api/8.01.0.2/lib/ixTcl1.0"
         root_dir: "/opt/ixia/ixos-api/8.01.0.2/"
         py_bin_path: "/opt/ixia/ixload/8.01.106.3/bin/"
-        py_lib_path: "/opt/ixia/ixnetwork/8.01.1029.14/lib/PythonApi"
         dut_result_dir: "/mnt/results"
         version: "8.01.106.3"
       pcis:
index 98a59f9..0149054 100644 (file)
@@ -22,7 +22,7 @@
   environment:
     "{{ proxy_env }}"
   roles:
-    - install_dependencies
+    - install_dependencies_jumphost
     - docker
 
 - name: "handle all openstack stuff when: openrc_file is defined"
diff --git a/ansible/roles/add_repos_jumphost/tasks/Debian.yml b/ansible/roles/add_repos_jumphost/tasks/Debian.yml
new file mode 100644 (file)
index 0000000..626f0b0
--- /dev/null
@@ -0,0 +1,81 @@
+# Copyright (c) 2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+# Arguments needed: arch_arm64, arch_amd64, ubuntu_archive
+
+- name: Set the repositories architecture name
+  set_fact:
+    arch: "{{ arch_arm64 if ansible_architecture == 'aarch64' else arch_amd64 }}"
+    extra_arch: "{{ arch_amd64 if ansible_architecture == 'aarch64' else arch_arm64 }}"
+
+- name: Define the repositories names
+  set_fact:
+    repo: "{{ ubuntu_archive[arch] }}"
+    extra_repo: "{{ ubuntu_archive[extra_arch] }}"
+
+- name: Add architecture to the default repository list
+  replace:
+    path: "{{ sources_list_file }}"
+    regexp: '(^deb\s+)([^\[].*)$'
+    replace: 'deb [arch={{ arch }}] \2'
+
+- name: Remove support for source repositories
+  replace:
+    path: "{{ sources_list_file }}"
+    regexp: "^deb-src "
+    replace: "# deb-src "
+
+- name: Add extra architecture
+  command: "dpkg --add-architecture {{ extra_arch }}"
+
+- name: Define the default release version
+  copy:
+    dest: "{{ default_distro_file }}"
+    content: 'APT::Default-Release "{{ ansible_distribution_release }}";'
+
+- name: Remove extra repository file
+  file:
+    path: "{{ repo_file }}"
+    state: absent
+  ignore_errors: yes
+
+- name: Add extra repository file
+  file:
+    path: "{{ repo_file }}"
+    state: touch
+
+- name: Add the repository for qemu_static_user/xenial
+  blockinfile:
+    path: "{{ repo_file }}"
+    marker: "MARKER"
+    content: |
+      deb [arch={{ arch }}] {{ repo }} xenial-updates universe
+  when: ansible_distribution_release != "xenial"
+
+- name: Add extra architecture repositories if installing in container
+  blockinfile:
+    path: "{{ repo_file }}"
+    marker: "MARKER"
+    content: |
+      deb [arch={{ extra_arch }}] {{ extra_repo }} {{ ansible_distribution_release }} main universe multiverse restricted
+      deb [arch={{ extra_arch }}] {{ extra_repo }} {{ ansible_distribution_release }}-updates main universe multiverse restricted
+      deb [arch={{ extra_arch }}] {{ extra_repo }} {{ ansible_distribution_release }}-security main universe multiverse restricted
+      deb [arch={{ extra_arch }}] {{ extra_repo }} {{ ansible_distribution_release }}-proposed main universe multiverse restricted
+  when: installation_mode == "container"
+
+- name: Remove the marker
+  lineinfile:
+    dest: "{{ repo_file }}"
+    state: absent
+    regexp: "MARKER"
similarity index 81%
rename from ansible/install_dependencies.yml
rename to ansible/roles/add_repos_jumphost/tasks/main.yml
index 1c7d201..f50fd9f 100644 (file)
@@ -1,4 +1,4 @@
-# Copyright (c) 2017 Intel Corporation.
+# Copyright (c) 2018 Intel Corporation.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -12,8 +12,5 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 ---
-- name: install yardstick dependencies
-  hosts: all
-
-  roles:
-    - install_dependencies
+- include: "{{ ansible_os_family }}.yml"
+  when: ansible_os_family == "Debian"
diff --git a/ansible/roles/add_repos_jumphost/vars/main.yml b/ansible/roles/add_repos_jumphost/vars/main.yml
new file mode 100644 (file)
index 0000000..30e4447
--- /dev/null
@@ -0,0 +1,17 @@
+# Copyright (c) 2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+repo_file: "/etc/apt/sources.list.d/yardstick.list"
+sources_list_file: "/etc/apt/sources.list"
+default_distro_file: "/etc/apt/apt.conf.d/default-distro"
diff --git a/ansible/roles/configure_gui/tasks/main.yml b/ansible/roles/configure_gui/tasks/main.yml
new file mode 100644 (file)
index 0000000..846a9cb
--- /dev/null
@@ -0,0 +1,33 @@
+# Copyright (c) 2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: Define variables
+  set_fact:
+    gui_dir: "{{ yardstick_dir }}/gui/"
+
+- name: Run gui.sh
+  shell:
+    cmd: /bin/bash gui.sh
+    chdir: "{{ gui_dir }}"
+
+- name: Create nginx/yardstick directory
+  file:
+    path: /etc/nginx/yardstick
+    state: directory
+    recurse: yes
+
+- name: Move dist to /etc/nginx/yardstick/gui
+  shell:
+    cmd: mv dist /etc/nginx/yardstick/gui
+    chdir: "{{ gui_dir }}"
diff --git a/ansible/roles/configure_nginx/tasks/main.yml b/ansible/roles/configure_nginx/tasks/main.yml
new file mode 100644 (file)
index 0000000..37b0527
--- /dev/null
@@ -0,0 +1,33 @@
+# Copyright (c) 2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: Define variables
+  set_fact:
+    socket_file: "{{ socket_file|default('/var/run/yardstick.sock') }}"
+
+- name: Make sure conf.d directory exists
+  file:
+    path: /etc/nginx/conf.d
+    state: directory
+
+- name: Create the nginx config file
+  template:
+    src: yardstick.conf.j2
+    dest: "/etc/nginx/conf.d/yardstick.conf"
+
+- name: Configure ports if RedHat OS
+  shell: |
+    semanage port -m -t http_port_t  -p tcp 5000
+    semanage port -m -t http_port_t  -p udp 5000
+  when: ansible_os_family == "RedHat"
\ No newline at end of file
diff --git a/ansible/roles/configure_nginx/templates/yardstick.conf.j2 b/ansible/roles/configure_nginx/templates/yardstick.conf.j2
new file mode 100644 (file)
index 0000000..484096c
--- /dev/null
@@ -0,0 +1,18 @@
+server {
+    listen 5000;
+    server_name localhost;
+    index index.htm index.html;
+    location / {
+        include uwsgi_params;
+        client_max_body_size    2000m;
+        uwsgi_pass unix://{{ socket_file }};
+    }
+
+    location /gui/ {
+        alias /etc/nginx/yardstick/gui/;
+    }
+
+    location /report/ {
+        alias /tmp/;
+    }
+}
diff --git a/ansible/roles/configure_rabbitmq/tasks/main.yml b/ansible/roles/configure_rabbitmq/tasks/main.yml
new file mode 100644 (file)
index 0000000..3ad60c1
--- /dev/null
@@ -0,0 +1,30 @@
+# Copyright (c) 2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: Restart rabbitmq
+  service:
+    name: rabbitmq-server
+    state: restarted
+
+- name: rabbitmqctl start_app
+  shell: rabbitmqctl start_app
+
+- name: Configure rabbitmq
+  rabbitmq_user:
+    user: yardstick
+    password: yardstick
+    configure_priv: .*
+    read_priv: .*
+    write_priv: .*
+    state: present
diff --git a/ansible/roles/configure_uwsgi/tasks/main.yml b/ansible/roles/configure_uwsgi/tasks/main.yml
new file mode 100644 (file)
index 0000000..6a22446
--- /dev/null
@@ -0,0 +1,45 @@
+# Copyright (c) 2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: Define variables
+  set_fact:
+    config_dir: "/etc/yardstick/"
+    log_dir: "/var/log/yardstick/"
+    socket_file: "/var/run/yardstick.sock"
+
+- name: Create UWSGI config directory
+  file:
+    path: "/etc/yardstick"
+    state: directory
+    owner: root
+    mode: 0755
+
+- name: Create API log directory
+  file:
+    path: "{{ log_dir }}"
+    state: directory
+    owner: root
+    mode: 0777
+
+- name: Create the socket for communicating
+  file:
+    path: "{{ socket_file }}"
+    state: touch
+    owner: root
+    mode: 0644
+
+- name: Create the UWSGI config file
+  template:
+    src: yardstick.ini.j2
+    dest: "{{ config_dir }}yardstick.ini"
diff --git a/ansible/roles/configure_uwsgi/templates/yardstick.ini.j2 b/ansible/roles/configure_uwsgi/templates/yardstick.ini.j2
new file mode 100644 (file)
index 0000000..c049daf
--- /dev/null
@@ -0,0 +1,18 @@
+[uwsgi]
+master = true
+debug = true
+chdir = {{ yardstick_dir }}api
+module = server
+plugins = python
+processes = 10
+threads = 5
+async = true
+max-requests = 5000
+chmod-socket = 666
+callable = app_wrapper
+enable-threads = true
+close-on-exec = 1
+daemonize = {{ log_dir }}uwsgi.log
+socket = {{ socket_file }}
+{# If virtual environment, we need to add:
+   virtualenv = <virtual_env> #}
\ No newline at end of file
index cf41287..7f998de 100644 (file)
 # See the License for the specific language governing permissions and
 # limitations under the License.
 ---
-  - name: add Ubuntu docker repo
-    apt_repository: repo='deb [trusted=yes] {{ ubuntu_docker_url }} ubuntu-{{ ansible_distribution_release }} main' state=present
-
-  - name: ensure correct docker version
-    action: "{{ ansible_pkg_mgr }} name={{ item }} state=present force=yes"
-    with_items: "{{ docker_packages[ansible_os_family] }}"
-
-  - name: remove Ubuntu docker repo
-    apt_repository:
-      repo: 'deb [trusted=yes] {{ ubuntu_docker_url }} ubuntu-{{ ansible_distribution_release }} main'
-      state: absent
-      update_cache: no
+  - name: Install docker.io
+    action: "{{ ansible_pkg_mgr }} name=docker.io state=present force=yes"
index 8b50774..a735d52 100644 (file)
@@ -16,5 +16,3 @@ docker_project_url: https://yum.dockerproject.org
 docker_packages:
   "RedHat":
     - docker-engine-1.13.1
-  "Debian":
-    - docker-engine=1.13.1*
index 9beaeb8..3f56323 100644 (file)
@@ -1,4 +1,4 @@
 ---
 collectd_url: "https://github.com/collectd/collectd.git"
 collectd_dest: "{{ clone_dest }}/collectd"
-collectd_version: "c870991a2d614e51c03c0da76e9aef997343551b"
+collectd_version: "collectd-5.8"
index fb42ed4..d548280 100644 (file)
@@ -1,10 +1,14 @@
 ---
 dpdk_version: "17.02"
-dpdk_url: "http://dpdk.org/browse/dpdk/snapshot/dpdk-{{ dpdk_version }}.zip"
+dpdk_url: "http://dpdk.org/browse/dpdk/snapshot/dpdk-{{ dpdk_version }}.tar.gz"
 dpdk_file: "{{ dpdk_url|basename }}"
-dpdk_unarchive: "{{ dpdk_file|regex_replace('[.]zip$', '') }}"
+dpdk_unarchive: "{{ dpdk_file|regex_replace('[.]tar[.]gz$', '') }}"
 dpdk_dest: "{{ clone_dest }}/"
+# Note: DPDK 17.08, 17.11 and 18.02 are currently unsupported due to PROX build issues
 dpdk_sha256s:
-  "17.02": "sha256:c675285d5eab8e7e8537a10bdae6f03c603caa80cb4bf5b055ddd482e3b7d67f"
-  "16.04": "sha256:65e587af439b21abf54c644f86ba5ce7ea65657b3272480dcd78b769d2aecac3"
-  "16.07": "sha256:46fee52f9b3ff54df869414c69f122aea8f744de5ed5f8fb275a22cc9cefe094"
+  "16.07": "sha256:d876e4b2a7101f28e7e345d3c88e66afe877d15f0159c19c5bc5bc26b7b7d788"
+  "17.02": "sha256:b07b546e910095174bdb6152bb0d7ce057cc4b79aaa74771aeee4e8a7219fb38"
+  "17.05": "sha256:763bfb7e1765efcc949e79d645dc9f1ebd16591431ba0db5ce22becd928dcd0a"
+  "17.08": "sha256:3a08addbff45c636538514e9a5838fb91ea557661a4c071e03a9a6987d46e5b6" #unsupported
+  "17.11": "sha256:77a727bb3834549985f291409c9a77a1e8be1c9329ce4c3eb19a22d1461022e4" #unsupported
+  "18.02": "sha256:f1210310fd5f01a3babe3a09d9b3e5a9db791c2ec6ecfbf94ade9f893a0632b8" #unsupported
diff --git a/ansible/roles/download_pktgen/defaults/main.yml b/ansible/roles/download_pktgen/defaults/main.yml
new file mode 100644 (file)
index 0000000..b598e1d
--- /dev/null
@@ -0,0 +1,8 @@
+---
+pktgen_version: "3.2.12"
+pktgen_url: "http://dpdk.org/browse/apps/pktgen-dpdk/snapshot/pktgen-{{ pktgen_version }}.zip"
+pktgen_file: "{{ pktgen_url|basename }}"
+pktgen_unarchive: "{{ pktgen_file|regex_replace('[.]zip$', '') }}"
+pktgen_dest: "{{ clone_dest }}/"
+pktgen_sha256s:
+  "3.2.12": "sha256:a20aeb677fb847c0871acabb6e8f965ba3485e92f92e86e0bb5dc11c787b11d8"
diff --git a/ansible/roles/download_pktgen/tasks/main.yml b/ansible/roles/download_pktgen/tasks/main.yml
new file mode 100644 (file)
index 0000000..ff71f20
--- /dev/null
@@ -0,0 +1,38 @@
+# Copyright (c) 2018 Huawei Technologies Co.,Ltd and others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- file:
+    path: "{{ pktgen_dest }}"
+    state: directory
+
+- name: fetch pktgen
+  get_url:
+    url: "{{ pktgen_url }}"
+    dest: "{{ pktgen_dest }}"
+    validate_certs: False
+    checksum: "{{ pktgen_sha256s[pktgen_version] }}"
+
+- unarchive:
+    src: "{{ pktgen_dest }}/{{ pktgen_file }}"
+    dest: "{{ pktgen_dest }}/"
+    copy: no
+
+- name: cleanup tar file to save space
+  file:
+      path: "{{ pktgen_dest }}/{{ pktgen_file }}"
+      state: absent
+
+- set_fact:
+    pktgen_path: "{{ pktgen_dest }}/{{ pktgen_unarchive }}"
+
index baa964f..9df67d9 100644 (file)
 # See the License for the specific language governing permissions and
 # limitations under the License.
 ---
+- name: make sure trex_dest exists
+  file:
+    path: "{{ trex_dest }}"
+    state: directory
+
 - name: fetch Trex
   get_url:
     url: "{{ trex_url }}"
index a11bc56..991bd73 100644 (file)
 # See the License for the specific language governing permissions and
 # limitations under the License.
 ---
+- name: Reread system properties
+  setup:
+
 - name: Include
   include_vars:
-    file: "{{rs_file}}"
+    file: "{{ rs_file }}"
     name: infra_deploy_vars
 
 - name: Store total CPU, RAM, Disk requested resources
   set_fact:
-    vcpu_t: "{{item.vcpus|int + vcpu_t|int}}"
-    vram_t: "{{item.ram|int + vram_t|int}}"
-    disk_t: "{{item.disk|int + disk_t|int}}"
-  with_items: "{{infra_deploy_vars.nodes}}"
+    vcpu_t: "{{ item.vcpus|int + vcpu_t | int }}"
+    vram_t: "{{ item.ram|int + vram_t | int }}"
+    disk_t: "{{ item.disk|int + disk_t | int }}"
+  with_items: "{{ infra_deploy_vars.nodes }}"
 
 - name: Fail if not enough RAM
   fail:
     msg: "Failed, not enough RAM, required: {{ vram_t }}, available {{ ansible_memory_mb.nocache.free }}"
-  when: ansible_memory_mb.nocache.free < vram_t|int
+  when: ansible_memory_mb.nocache.free < vram_t | int
 
 - name: Fail if not enough CPU
   fail:
     msg: "Failed, not enough CPU, required: {{ vcpu_t }}, available {{ ansible_processor_vcpus }}"
-  when: ansible_processor_vcpus < vcpu_t|int
+  when: ansible_processor_vcpus < vcpu_t | int
 
 - name: Define default network counter
   set_fact:
 
 - name: Increment counter for every default network detected
   set_fact:
-    num_default_network_detected: "{{ num_default_network_detected|int + 1 }}"
+    num_default_network_detected: "{{ num_default_network_detected | int + 1 }}"
   when:
     - item.default_gateway is defined
     - item.default_gateway == True
-  with_items: "{{infra_deploy_vars.networks}}"
+  with_items: "{{ infra_deploy_vars.networks }}"
 
 - name: Fail if more than 1 or 0 default networks
   fail:
     msg: "Failed, there must be 1 default network: {{ num_default_network_detected }} detected"
-  when: num_default_network_detected|int != 1
+  when: num_default_network_detected | int != 1
 
 - name: Fail if not enough Disk space
   set_fact:
-    disk_avail: "{% for mount in ansible_mounts if mount.mount == '/' %}{{ (mount.size_available/1024/1024)|int }}{% endfor %}"
+    disk_avail: "{% for mount in ansible_mounts if mount.mount == '/' %}{{ (mount.size_available/1024/1024) | int }}{% endfor %}"
 - fail:
     msg: "Failed, not enough disk space, required {{ disk_t }}, available: {{ disk_avail }}"
-  when: disk_avail|int < disk_t|int
+  when: disk_avail|int < disk_t | int
+
+- set_fact:
+    ostack_nodes: "{{ ostack_nodes | default([]) + [item.openstack_node] }}"
+  when: item.openstack_node is defined
+  with_items: "{{ infra_deploy_vars.nodes }}"
+
+# all-in-one node type must be controller, multinode requires at least one controller and one compute node
+- fail:
+    msg: "OpenStack node types currently supported: controller, compute. Check input VMs file."
+  when: ostack_nodes is undefined or ostack_nodes | length < 1
+
+- fail:
+    msg: "In all-in-one configuration OpenStack node type must be controller."
+  when: ostack_nodes | length == 1 and 'controller' not in ostack_nodes
+
+- fail:
+    msg: "At least one controller and one compute node expected when total number of OpenStack nodes is more than one."
+  when: ostack_nodes | length > 1 and not ('compute' in ostack_nodes and 'controller' in ostack_nodes)
index c20a0b1..a6a5e06 100644 (file)
@@ -47,8 +47,6 @@
       output:
         all: ">> /var/log/cloud-init.log"
       ssh_pwauth: True
-      bootcmd:
-        - echo 127.0.0.1 {{ node_item.hostname }} >> /etc/hosts
       users:
           - name: {{ node_item.user }}
             lock-passwd: False
diff --git a/ansible/roles/infra_deploy_openstack/tasks/configure_kolla.yml b/ansible/roles/infra_deploy_openstack/tasks/configure_kolla.yml
new file mode 100644 (file)
index 0000000..9713c0d
--- /dev/null
@@ -0,0 +1,40 @@
+# Copyright (c) 2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: Modify globals.yml
+  replace:
+    path: /etc/kolla/globals.yml
+    regexp: "{{ item.find }}"
+    replace: "{{ item.insert_after }}"
+  with_items:
+    - { find: '^#kolla_base_distro:.*', insert_after: 'kolla_base_distro: "ubuntu"' }
+    - { find: '^#kolla_install_type:.*', insert_after: 'kolla_install_type: "source"' }
+    - { find: '^#openstack_release:.*', insert_after: 'openstack_release: "pike"' }
+    - { find: 'kolla_internal_vip_address:.*', insert_after: 'kolla_internal_vip_address: "{{ deployvm_ip }}"' }
+    - { find: '^#network_interface:.*', insert_after: 'network_interface: "{{ hostvars[ansible_host].ansible_default_ipv4.interface }}"' }
+    - { find: '^#neutron_external_interface:.*', insert_after: 'neutron_external_interface: "{{ neutron_iface }}"' }
+    - { find: '^#enable_haproxy:.*', insert_after: 'enable_haproxy: "no"'}
+    - { find: '^#enable_heat:.*' , insert_after: 'enable_heat: "yes"'}
+    - { find: '^#docker_registry:.*', insert_after: 'docker_registry: "{{ ansible_host }}:4000"' }
+
+- name: Generate multinode from inventory
+  template:
+    src: templates/multinode.j2
+    dest: "{{ git_repos_path + 'multinode' }}"
+
+- set_fact:
+    path2multinode: "{{ git_repos_path + kolla_ans_path + '/ansible/inventory/multinode' }}"
+
+- name: Append rest groups to multinode file
+  shell: line=`grep -n '\[deployment\]' {{ path2multinode }} | cut -d ':' -f1` && tail -n +$line {{ path2multinode }} >> "{{ git_repos_path + 'multinode' }}"
diff --git a/ansible/roles/infra_deploy_openstack/tasks/configure_openstack.yml b/ansible/roles/infra_deploy_openstack/tasks/configure_openstack.yml
new file mode 100644 (file)
index 0000000..3963cb6
--- /dev/null
@@ -0,0 +1,67 @@
+# Copyright (c) 2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: Create folders
+  file:
+    path: "{{ item }}"
+    state: directory
+  with_items:
+    - /etc/kolla/config/nova
+    - /etc/kolla/config/neutron
+
+- set_fact:
+    filter_ops: RetryFilter,AvailabilityZoneFilter,RamFilter,DiskFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,SameHostFilter
+
+- name: Configure OpenStack Nova
+  copy:
+    content: |
+      [filter_scheduler]
+      enabled_filters = {{ filter_ops }}
+      [libvirt]
+      cpu_mode = host-passthrough
+    dest: /etc/kolla/config/nova.conf
+
+- name: Configure OpenStack Neutron
+  copy:
+    content: |
+      [DEFAULT]
+      service_plugins=neutron.services.l3_router.l3_router_plugin.L3RouterPlugin
+      [securitygroup]
+      firewall_driver = neutron.agent.firewall.NoopFirewallDriver
+      [ml2]
+      extension_drivers=port_security
+      [agent]
+      extensions=port_security
+    dest: /etc/kolla/config/neutron.conf
+
+- name: Configure OpenStack ml2_plugin.ini
+  copy:
+    content: |
+      [ml2]
+      tenant_network_types = vxlan
+      extension_drivers = port_security
+      type_drivers = vlan,flat,local,vxlan
+      mechanism_drivers = openvswitch
+      [ml2_type_flat]
+      flat_networks = physnet1
+      [ml2_type_vlan]
+      network_vlan_ranges = physnet1
+      [securitygroup]
+      firewall_driver = iptables_hybrid
+      [ovs]
+      datapath_type = system
+      bridge_mappings = physnet1:br-ex
+      tunnel_bridge = br-tun
+      local_ip = {{ deployvm_ip }}
+    dest: /etc/kolla/config/neutron/ml2_conf.ini
diff --git a/ansible/roles/infra_deploy_openstack/tasks/install_kolla.yml b/ansible/roles/infra_deploy_openstack/tasks/install_kolla.yml
new file mode 100644 (file)
index 0000000..38c163c
--- /dev/null
@@ -0,0 +1,54 @@
+# Copyright (c) 2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: Download kolla and kolla-ansible from git repos
+  git:
+    repo: "{{ item.repo }}"
+    dest: "{{ git_repos_path + item.dir }}"
+    version: stable/pike
+  with_items:
+    - { dir: "{{ kolla_path }}", repo: 'https://git.openstack.org/openstack/kolla'}
+    - { dir: "{{ kolla_ans_path }}", repo: 'https://git.openstack.org/openstack/kolla-ansible' }
+
+- name: Copy kolla-ansible password.yml and globals.yml
+  shell: cp -r "{{ git_repos_path + kolla_ans_path + '/etc/kolla/' }}" /etc/
+
+- name: Copy kolla-ansible all-in-one, multinode
+  shell: cp * "{{ git_repos_path }}"
+  args:
+    chdir: "{{ git_repos_path + kolla_ans_path + '/ansible/inventory/' }}"
+
+- name: Install requirements
+  pip:
+    chdir: "{{ item[0] }}"
+    requirements: "{{ item[1] }}"
+  with_nested:
+    - [ "{{ git_repos_path + kolla_path }}", "{{ git_repos_path + kolla_ans_path }}" ]
+    - [ 'requirements.txt', 'test-requirements.txt' ]
+
+- name: pip install .
+  pip:
+    chdir: "{{ item }}"
+    name: '.'
+  with_items:
+    - "{{ git_repos_path + kolla_path }}"
+    -  "{{ git_repos_path + kolla_ans_path }}"
+
+- name: Run setup.py
+  shell: "python setup.py install"
+  args:
+    chdir: "{{ item }}"
+  with_items:
+    - "{{ git_repos_path + kolla_path }}"
+    -  "{{ git_repos_path + kolla_ans_path }}"
diff --git a/ansible/roles/infra_deploy_openstack/tasks/main.yml b/ansible/roles/infra_deploy_openstack/tasks/main.yml
new file mode 100644 (file)
index 0000000..ba5d5bc
--- /dev/null
@@ -0,0 +1,125 @@
+# Copyright (c) 2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+# This script is based on https://docs.openstack.org/kolla-ansible/pike/user/quickstart.html
+- name: Include variables
+  include_vars:
+    file: "{{ rs_file }}"
+    name: infra_deploy_vars
+
+- set_fact:
+    traffic_ip: "{{ item.interfaces[1].ip }}"
+  when: item.hostname == ansible_host
+  with_items: "{{ infra_deploy_vars.nodes }}"
+
+- name: Get neutron iface
+  set_fact:
+    neutron_iface: "{{ item }}"
+  when:
+    - hostvars[ansible_host]['ansible_' + item.replace('-', '_')].ipv4 is defined
+    - hostvars[ansible_host]['ansible_' + item.replace('-', '_')].ipv4.address is defined
+    - hostvars[ansible_host]['ansible_' + item.replace('-', '_')].ipv4.address == traffic_ip
+  with_items: "{{ hostvars[ansible_host].ansible_interfaces }}"
+
+- name: Create a registry container
+  docker_container:
+    name: registry
+    image: registry:2
+    restart_policy: always
+    ports:
+    - "4000:5000"
+
+- name: Download and install Kolla
+  include_tasks: install_kolla.yml
+
+- name: Configure Kolla
+  include_tasks: configure_kolla.yml
+
+- name: Configure Open Stack
+  include_tasks: configure_openstack.yml
+
+- name: Ramp up Open Stack
+  include_tasks: rampup_openstack.yml
+
+- name: Update admin-openrc.sh
+  lineinfile:
+    path: /etc/kolla/admin-openrc.sh
+    regexp: "{{ item.find }}"
+    line: "{{ item.add }}"
+  with_items:
+   - { find: 'EXTERNAL_NETWORK', add: 'export EXTERNAL_NETWORK=public' }
+   - { find: 'OS_AUTH_TYPE', add: 'export OS_AUTH_TYPE=password' }
+
+- name: Copy env file
+  shell: cp /etc/kolla/admin-openrc.sh /tmp/admin-openrc.yaml
+
+- name: Rework as env vars
+  replace:
+    path: /tmp/admin-openrc.yaml
+    regexp: 'export\s+(.*)=(.*)'
+    replace: '\1: \2'
+
+- name: Download OpenStack env file
+  fetch:
+    src: /tmp/admin-openrc.yaml
+    dest: /tmp/
+    flat: yes
+
+- include_vars:
+    file: /tmp/admin-openrc.yaml
+    name: ostack_env
+
+- name: Re-assign IP address
+  shell: ip address show {{ neutron_iface }} | awk '/inet/ {print $2}'
+  when: neutron_iface is defined
+  register: ip_netmask
+
+- shell: >
+    ip addr del dev {{ neutron_iface }} {{ ip_netmask.stdout }} &&
+    ip addr add dev br-ex {{ infra_deploy_vars.networks[1].host_ip }}/{{ ip_netmask.stdout_lines[0].split('/')[1] }}
+  when:
+    - neutron_iface is defined
+    - ip_netmask.stdout | length > 0
+
+- name: Create external network
+  os_network:
+    name: public
+    external: yes
+    provider_physical_network: physnet1
+    provider_network_type: flat
+  environment:
+    - no_proxy: "{{ lookup('env', 'no_proxy') + ',' + ansible_host + ',' + hostvars[ansible_host].ansible_default_ipv4.address }}"
+    - "{{ ostack_env }}"
+
+- name: Create sub-network
+  os_subnet:
+    name: public-subnet
+    network_name: public
+    cidr: "{{ ip_netmask.stdout }}"
+    allocation_pool_start: "{{ infra_deploy_vars.networks[1].dhcp_ip_start }}"
+    allocation_pool_end: "{{ infra_deploy_vars.networks[1].dhcp_ip_stop }}"
+    gateway_ip: "{{ infra_deploy_vars.networks[1].host_ip }}"
+    enable_dhcp: no
+  environment:
+    - no_proxy: "{{ lookup('env', 'no_proxy') + ',' + ansible_host + ',' + hostvars[ansible_host].ansible_default_ipv4.address }}"
+    - "{{ ostack_env }}"
+
+- name: Upload OpenStack env file to Yardstick VM
+  copy:
+    src: /etc/kolla/admin-openrc.sh
+    dest: '/tmp/admin-openrc.sh'
+  delegate_to: "{{ item }}"
+  when: "groups['yardstickG'] is defined"
+  with_items:
+    - "{{ groups['yardstickG'] }}"
diff --git a/ansible/roles/infra_deploy_openstack/tasks/rampup_openstack.yml b/ansible/roles/infra_deploy_openstack/tasks/rampup_openstack.yml
new file mode 100644 (file)
index 0000000..c75bec6
--- /dev/null
@@ -0,0 +1,43 @@
+# Copyright (c) 2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: Generate passwords
+  shell: kolla-genpwd
+
+- name: Generate the kolla-build.conf
+  shell: tox -e genconfig
+  args:
+    chdir: "{{ git_repos_path + kolla_path }}"
+
+- set_fact:
+    target: "{{ git_repos_path + 'all-in-one' }}"
+
+- set_fact:
+    target: "{{ git_repos_path + 'multinode' }}"
+  when: "groups['ostack'] | length > 1"
+
+- name: Run kolla-ansible precheck
+  shell: kolla-ansible prechecks -i "{{ target }}"
+
+- name: Build kolla-ansible
+  shell: kolla-build -b ubuntu -t source --profile default --tag pike --registry {{ ansible_host }}:4000 --push
+
+- name: Pull images from registry
+  shell: kolla-ansible pull -i "{{ target }}"
+
+- name: Run kolla-ansible deploy
+  shell: kolla-ansible deploy -i "{{ target }}"
+
+- name: Create an openrc file
+  shell: kolla-ansible post-deploy
diff --git a/ansible/roles/infra_deploy_openstack/templates/multinode.j2 b/ansible/roles/infra_deploy_openstack/templates/multinode.j2
new file mode 100644 (file)
index 0000000..57f87b5
--- /dev/null
@@ -0,0 +1,39 @@
+{% set control_dict = {} %}
+{% set compute_dict = {} %}
+{% for host in groups['ostack'] %}
+{% if  hostvars[host].node_type is defined and hostvars[host].node_type == 'controller' %}
+{% set control_dict = control_dict.update({hostvars[host].ansible_host: hostvars[host].ansible_default_ipv4.interface}) %}
+{% endif %}
+{% endfor %}
+{% for host in groups['ostack'] %}
+{% if  hostvars[host].node_type is defined and hostvars[host].node_type == 'compute' %}
+{% for iface in hostvars[host].ansible_interfaces %}
+{%- if ((hostvars[host]['ansible_' + iface.replace('-', '_')].ipv4 is defined) and
+        (hostvars[host]['ansible_' + iface.replace('-', '_')].ipv4.address is defined) and
+        (hostvars[host]['ansible_' + iface.replace('-', '_')].ipv4.address == hostvars[host].secondary_ip)) -%}
+{% set compute_dict = compute_dict.update({hostvars[host].ansible_host: iface}) %}
+{% endif %}
+{% endfor %}
+{% endif %}
+{% endfor %}
+{% macro print_node(in_dict, iface_str='', cnt=1) %}
+{%- for host, iface in in_dict | dictsort -%}
+{% if loop.index <= cnt %}
+{% if iface_str %}
+{{ host }} ansible_ssh_user={{ hostvars[host].ansible_user }} ansible_private_key_file=/root/.ssh/id_rsa ansible_become=True {{ iface_str }}={{ iface }}
+{% else %}
+{{ host }} ansible_ssh_user={{ hostvars[host].ansible_user }} ansible_private_key_file=/root/.ssh/id_rsa ansible_become=True
+{% endif %}
+{% endif %}
+{% endfor %}
+{% endmacro %}
+[control]
+{{ print_node(control_dict, iface_str='network_interface', cnt=control_dict | length) }}
+[compute]
+{{ print_node(compute_dict, iface_str='network_interface', cnt=compute_dict | length) }}
+[network]
+{{ print_node(control_dict, iface_str='', cnt=control_dict | length) }}
+[monitoring]
+{{ print_node(control_dict) }}
+[storage]
+{{ print_node(control_dict, iface_str='', cnt=control_dict | length) }}
diff --git a/ansible/roles/infra_deploy_openstack/vars/main.yml b/ansible/roles/infra_deploy_openstack/vars/main.yml
new file mode 100644 (file)
index 0000000..bbea568
--- /dev/null
@@ -0,0 +1,18 @@
+# Copyright (c) 2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+kolla_path: "{{ 'https://git.openstack.org/openstack/kolla' | urlsplit('path') | basename }}"
+kolla_ans_path: "{{ 'https://git.openstack.org/openstack/kolla-ansible' | urlsplit('path') | basename }}"
+deployvm_ip: "{{ hostvars[ansible_host].host_ip }}"
+git_repos_path: '/tmp/repos/'
index 314ee30..5e61633 100644 (file)
 ---
 - name: Destroy old networks created by virt
   virt_net:
-    name: "{{ network_item.name }}"
+    name: "{{ network_item }}"
     command: destroy
-  when: network_item.name in virt_nets.list_nets
+  when: clean_up | bool or network_item in deploy_nets
 
-# Ignoring erros as network can be created without being defined.
+# Ignoring errors as network can be created without being defined.
 # This can happen if a user manually creates a network using the virsh command.
 # If the network is not defined the undefine code will throw an error.
 - name: Undefine old networks defined by virt
   virt_net:
-    name: "{{ network_item.name }}"
+    name: "{{ network_item }}"
     command: undefine
-  when: network_item.name in virt_nets.list_nets
+  when: clean_up | bool or network_item in deploy_nets
   ignore_errors: yes
 
 - name: Check if "ovs-vsctl" command is present
   ignore_errors: yes
 
 - name: Destroy OVS bridge if it exists
-  command: ovs-vsctl --if-exists -- del-br "{{ network_item.name }}"
-  when: ovs_vsctl_present.rc == 0
+  command: ovs-vsctl --if-exists -- del-br "{{ network_item }}"
+  when:
+    - ovs_vsctl_present.rc == 0
+    - clean_up | bool or network_item in deploy_nets
+  ignore_errors: yes
 
 - name: Check if linux bridge is present
-  stat: path="{{ '/sys/class/net/'+network_item.name+'/brif/' }}"
+  stat: path="{{ '/sys/class/net/' + network_item + '/brif/' }}"
   register: check_linux_bridge
 
 - name: Remove linux bridge if it exists
   shell: |
-    ifconfig "{{ network_item.name }}" down
-    brctl delbr "{{ network_item.name }}"
-  when: check_linux_bridge.stat.exists
+    ifconfig "{{ network_item }}" down
+    brctl delbr "{{ network_item }}"
+  when:
+    - check_linux_bridge.stat.exists
+    - clean_up | bool or network_item in deploy_nets
index 5e43ee8..91e9493 100644 (file)
 - name: Destroy old VMs
   virt:
     command: destroy
-    name: "{{ node_item.hostname }}"
-  when: node_item.hostname in virt_vms.list_vms
+    name: "{{ vmhost_item }}"
+  when: clean_up | bool or vmhost_item in deploy_vms
   ignore_errors: yes
 
 # Ignore errors as VM can be running while undefined
 - name: Undefine old VMs
   virt:
     command: undefine
-    name: "{{ node_item.hostname }}"
-  when: node_item.hostname in virt_vms.list_vms
+    name: "{{ vmhost_item }}"
+  when: clean_up | bool or vmhost_item in deploy_vms
   ignore_errors: yes
index e6c2c02..6c4aa33 100644 (file)
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 ---
-- name: Include
+- name: Include input file
   include_vars:
     file: "{{ rs_file }}"
     name: infra_deploy_vars
   virt: command=list_vms
   register: virt_vms
 
+- set_fact:
+    deploy_vms: "{{ deploy_vms | default([]) + [item.hostname] }}"
+  with_items: "{{ infra_deploy_vars.nodes }}"
+
+- name: Define old disk images to delete
+  shell: virsh domblklist {{ item }} | awk '/\/.*/ { print $2 }'
+  when: clean_up | bool or item in deploy_vms
+  with_items: "{{ virt_vms.list_vms }}"
+  register: virt_img
+
+- set_fact:
+    images: "{{ images | default([]) + item.stdout_lines }}"
+  when: item.stdout_lines is defined and item.stdout_lines | length > 0
+  with_items: "{{ virt_img.results }}"
+
 - name: Destroy old VMs
   include_tasks: delete_vm.yml
-  extra_vars: "{{ virt_vms }}"
   loop_control:
-    loop_var: node_item
-  with_items: "{{ infra_deploy_vars.nodes }}"
+    loop_var: vmhost_item
+  with_items: "{{ virt_vms.list_vms }}"
+
+- set_fact:
+    deploy_nets: "{{ deploy_nets | default([]) + [item.name] }}"
+  with_items: "{{ infra_deploy_vars.networks }}"
 
 - name: Delete old networks
   include_tasks: delete_network.yml
-  extra_vars: "{{ virt_nets }}"
   loop_control:
     loop_var: network_item
-  with_items: "{{ infra_deploy_vars.networks }}"
+  with_items: "{{ virt_nets.list_nets }}"
+
+- name: Delete old disk images
+  file:
+    path: "{{ item }}"
+    state: absent
+  when: images is defined and images | length > 0
+  with_items: "{{ images }}"
diff --git a/ansible/roles/infra_prepare_vms/tasks/main.yml b/ansible/roles/infra_prepare_vms/tasks/main.yml
new file mode 100644 (file)
index 0000000..d7ed085
--- /dev/null
@@ -0,0 +1,105 @@
+# Copyright (c) 2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: Include input file
+  include_vars:
+    file: "{{ rs_file }}"
+    name: infra_deploy_vars
+
+- name: Install setuptools
+  apt:
+    name: python-setuptools
+
+- name: Install pip
+  shell: easy_install pip
+  environment: "{{ proxy_env }}"
+
+- name: Install dependency for dns dig
+  pip:
+    name: dnspython
+    state: latest
+
+- set_fact:
+    block_str: "{{ block_str | default('') + item.interfaces[0].ip + ' ' + item.hostname + '\n'}}"
+  with_items: "{{ infra_deploy_vars.nodes }}"
+
+- name: Delete hosts between markers
+  blockinfile:
+    path: /etc/hosts
+    marker: "# {mark} generated hosts file"
+    content: ""
+
+- name: Update /etc/hosts
+  blockinfile:
+    path: /etc/hosts
+    block: |
+      {{ block_str }}
+    marker: "# {mark} generated hosts file"
+
+- name: Clear known hosts
+  shell: >
+    ssh-keygen -f /root/.ssh/known_hosts -R "{{ item.interfaces[0].ip }}";
+    ssh-keygen -f /root/.ssh/known_hosts -R "{{ item.hostname }}"
+  with_items: "{{ infra_deploy_vars.nodes }}"
+
+- set_fact:
+    controllers: "{{ controllers | default([]) + [item.hostname] }}"
+  when:
+    - item.openstack_node is defined
+    - item.openstack_node == 'controller'
+  with_items: "{{ infra_deploy_vars.nodes }}"
+
+- name: Add host controller as deploy
+  add_host:
+    hostname: "{{ item.hostname }}"
+    host_ip: "{{ item.interfaces[0].ip }}"
+    groups: deploy, ostack
+    ansible_host: "{{ item.hostname }}"
+    ansible_user: "{{ item.user }}"
+    ansible_ssh_pass: "{{ item.password }}"
+    node_type: "{{ item.openstack_node }}"
+    secondary_ip: "{{ item.interfaces[1].ip }}"
+  when: item.hostname == controllers[0]
+  with_items: "{{ infra_deploy_vars.nodes }}"
+
+- name: Add hosts others as controller, compute
+  add_host:
+    hostname: "{{ item.hostname }}"
+    host_ip: "{{ item.interfaces[0].ip }}"
+    groups: regular,ostack
+    ansible_host: "{{ item.hostname }}"
+    ansible_user: "{{ item.user }}"
+    ansible_ssh_pass: "{{ item.password }}"
+    node_type: "{{ item.openstack_node }}"
+    secondary_ip: "{{ item.interfaces[1].ip }}"
+  when:
+    - item.openstack_node is defined
+    - item.openstack_node == 'controller' or item.openstack_node == 'compute'
+    - item.hostname != controllers[0]
+  with_items: "{{ infra_deploy_vars.nodes }}"
+
+- name: Add yardstick host to group
+  add_host:
+    hostname: "{{ item.hostname }}"
+    host_ip: "{{ item.interfaces[0].ip }}"
+    groups: yardstickG
+    ansible_host: "{{ item.hostname }}"
+    ansible_user: "{{ item.user }}"
+    ansible_ssh_pass: "{{ item.password }}"
+    secondary_ip: "{{ item.interfaces[1].ip }}"
+  when: item.hostname == 'yardstickvm'
+  with_items: "{{ infra_deploy_vars.nodes }}"
+
+- name: Workaround, not all VMs are ready by that time
+  pause: seconds=20
diff --git a/ansible/roles/infra_rampup_stack_nodes/tasks/configure_docker.yml b/ansible/roles/infra_rampup_stack_nodes/tasks/configure_docker.yml
new file mode 100644 (file)
index 0000000..a6ae00e
--- /dev/null
@@ -0,0 +1,48 @@
+# Copyright (c) 2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- file:
+    path: /lib/systemd/system/docker.service.d
+    state: directory
+
+- copy:
+    content: |
+      [Service]
+      MountFlags=shared
+    dest: /lib/systemd/system/docker.service.d/kolla.conf
+
+- set_fact:
+    ostack_hosts: "{{ ostack_hosts | default([]) + [hostvars[item].ansible_host] }}"
+  with_items: "{{ groups['ostack'] }}"
+
+- name: Create proxy configuration for docker
+  copy:
+    content: |
+      [Service]
+      Environment="HTTP_PROXY={{ lookup('env', 'http_proxy') }}"
+      Environment="HTTPS_PROXY={{ lookup('env', 'https_proxy') }}"
+      Environment="FTP_PROXY={{ lookup('env', 'ftp_proxy') }}"
+      Environment="NO_PROXY={{ lookup('env', 'no_proxy') }},{{ hostvars[ansible_host].ansible_default_ipv4.address }},{{ ostack_hosts | join(',') }}"
+    dest: /lib/systemd/system/docker.service.d/http-proxy.conf
+
+- name: Update /etc/default/docker
+  lineinfile:
+    path: /etc/default/docker
+    line: 'DOCKER_OPTS="--dns {{ hostvars[ansible_host].ansible_default_ipv4.gateway }} --insecure-registry {{ deploy_host }}:4000"'
+
+- name: reload restart docker
+  systemd:
+    state: restarted
+    daemon_reload: yes
+    name: docker
diff --git a/ansible/roles/infra_rampup_stack_nodes/tasks/install_packets.yml b/ansible/roles/infra_rampup_stack_nodes/tasks/install_packets.yml
new file mode 100644 (file)
index 0000000..d22e815
--- /dev/null
@@ -0,0 +1,85 @@
+# Copyright (c) 2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: Update apt cache
+  apt:
+    update_cache: yes
+    upgrade: yes
+    cache_valid_time: 36000
+  environment: "{{ proxy_env }}"
+
+- name: Install packets
+  apt:
+    name: "{{ item }}"
+  with_items:
+    - python-tox
+    - python-dev
+    - libffi-dev
+    - libssl-dev
+    - python3-dev
+    - ethtool
+    - ipmitool
+    - git
+    - ntp
+    - apparmor-utils
+    - docker.io
+    - libvirt-bin
+    - python-setuptools
+    - build-essential
+  environment: "{{ proxy_env }}"
+
+- name: Install pip
+  shell: easy_install pip
+  environment: "{{ proxy_env }}"
+
+- name: Update pip ansible docker
+  pip:
+    name: "{{ item }}"
+    state: latest
+  with_items:
+    - ansible
+    - docker
+    - tox
+    - shade
+  environment: "{{ proxy_env }}"
+
+- name: Remove conflicting packages
+  apt:
+    name: "{{ item }}"
+    state: absent
+  with_items:
+    - lxd
+    - lxc
+
+- name: Stop and disable libvirt
+  systemd:
+    state: stopped
+    enabled: no
+    name: libvirt-bin.service
+
+- name: Stop and disable apparmor service
+  systemd:
+    name: apparmor
+    state: stopped
+    enabled: no
+
+- name: Get stat of libvirtd apparmor profile
+  stat:
+    path: /etc/apparmor.d/disable/usr.sbin.libvirtd
+  register: apparmor_libvirtd_profile
+
+- name: Remove apparmor profile for libvirt
+  shell: ln -s /etc/apparmor.d/usr.sbin.libvirtd /etc/apparmor.d/disable/ && apparmor_parser -R /etc/apparmor.d/usr.sbin.libvirtd
+  when:
+    - apparmor_libvirtd_profile.stat.exists == False
diff --git a/ansible/roles/infra_rampup_stack_nodes/tasks/main.yml b/ansible/roles/infra_rampup_stack_nodes/tasks/main.yml
new file mode 100644 (file)
index 0000000..65d5e59
--- /dev/null
@@ -0,0 +1,39 @@
+# Copyright (c) 2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+# Configure proxy and install python to support ansible
+- name: Create apt.conf proxy config
+  raw: >
+    echo 'Acquire::http::proxy "{{ hostvars[groups['jumphost'][0]].proxy_proto + '://' + hostvars[groups['jumphost'][0]].proxy_host_ip + ':' + hostvars[groups['jumphost'][0]].proxy_port }}";'
+    > /etc/apt/apt.conf.d/22proxy
+
+- name: Install python which is required to run ansible modules
+  raw: apt-get update && apt-get install -y python
+
+- name: Gather facts
+  setup:
+
+- name: Update configuration files
+  include_tasks: update_conf_files.yml
+
+- name: Install packets
+  include_tasks: install_packets.yml
+  when: ansible_hostname in groups['ostack']
+
+- name: Configure docker settings
+  include_tasks: configure_docker.yml
+  when: ansible_hostname in groups['ostack']
+
+- name: generate and apply SSH keys
+  include_tasks: update_keys.yml
diff --git a/ansible/roles/infra_rampup_stack_nodes/tasks/update_conf_files.yml b/ansible/roles/infra_rampup_stack_nodes/tasks/update_conf_files.yml
new file mode 100644 (file)
index 0000000..424fb54
--- /dev/null
@@ -0,0 +1,69 @@
+# Copyright (c) 2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: Set hostname
+  shell: hostname {{ ansible_hostname }}
+
+- name: Delete hosts between markers
+  blockinfile:
+    path: /etc/hosts
+    marker: "# {mark} generated hosts file"
+    content: ""
+
+- set_fact:
+    block_str: "{{ block_str | default('') + hostvars[item].host_ip + ' ' + hostvars[item].ansible_host + '\n'}}"
+  with_items: "{{ groups['ostack'] }}"
+
+- name: Update /etc/hosts
+  blockinfile:
+    path: /etc/hosts
+    block: |
+      {{ block_str }}
+    marker: "# {mark} generated hosts file"
+
+- name: Update /etc/hosts
+  lineinfile:
+    path: /etc/hosts
+    regexp: ".*{{ hostvars[groups['jumphost'][0]].proxy_host }}.*"
+    line: "{{ hostvars[groups['jumphost'][0]].proxy_host_ip }} {{ hostvars[groups['jumphost'][0]].proxy_host }}"
+
+- name: Turn off IPv6
+  lineinfile:
+    path: /etc/sysctl.conf
+    regexp: '^{{ item }}.*'
+    line: "{{ item }} = 1"
+  with_items:
+    - 'net.ipv6.conf.all.disable_ipv6'
+    - 'net.ipv6.conf.default.disable_ipv6'
+    - 'net.ipv6.conf.lo.disable_ipv6'
+
+- name: Update IP configuration
+  shell: sysctl -p
+
+- name: Update resolv.conf
+  shell: echo "{{ 'nameserver ' + hostvars[ansible_host].ansible_default_ipv4.gateway }}" > /etc/resolvconf/resolv.conf.d/base
+
+- name: Update name servers
+  shell: resolvconf -u
+
+- name: Update /etc/environment
+  lineinfile:
+    path: /etc/environment
+    regexp: "{{ item.find }}"
+    line: "{{ item.add }}"
+  with_items:
+   - { find: 'http_proxy=', add: "{{ 'export http_proxy=' + lookup('env', 'http_proxy') }}" }
+   - { find: 'https_proxy=', add: "{{ 'export https_proxy=' + lookup('env', 'https_proxy') }}" }
+   - { find: 'ftp_proxy=', add: "{{ 'export ftp_proxy=' + lookup('env', 'ftp_proxy') }}" }
+   - { find: 'no_proxy=', add: "{{ 'export no_proxy=' + lookup('env', 'no_proxy') + ',' + ansible_host + ',' + hostvars[ansible_host].ansible_default_ipv4.address }}" }
diff --git a/ansible/roles/infra_rampup_stack_nodes/tasks/update_keys.yml b/ansible/roles/infra_rampup_stack_nodes/tasks/update_keys.yml
new file mode 100644 (file)
index 0000000..816f7cb
--- /dev/null
@@ -0,0 +1,48 @@
+# Copyright (c) 2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: Generate keys
+  user:
+    name: "{{ hostvars[ansible_host].ansible_user }}"
+    generate_ssh_key: yes
+    state: present
+    ssh_key_file: "/root/.ssh/id_rsa"
+
+- name: Get remote files
+  fetch:
+    src: "/root/.ssh/id_rsa.pub"
+    dest: "/tmp"
+
+- name: Update authorized_key
+  authorized_key:
+    key: "{{ lookup('file', '/tmp/{{ hostvars[item].ansible_host }}/root/.ssh/id_rsa.pub') }}"
+    state: present
+    user: "{{ hostvars[item].ansible_user }}"
+  with_items:
+    - "{{ groups['ostack'] }}"
+    - "{{ groups['yardstickG'] }}"
+
+- name: Make sure the known hosts file exists
+  file:
+    path: "{{ ssh_known_hosts_file }}"
+    state: touch
+
+- name: Add key to known hosts
+  known_hosts:
+    name: "{{ hostvars[item].ansible_host }}"
+    key: "{{ lookup('pipe', 'ssh-keyscan -t rsa {{ hostvars[item].ansible_host }}') }}"
+    path: "{{ ssh_known_hosts_file }}"
+  with_items:
+    - "{{ groups['ostack'] }}"
+    - "{{ groups['yardstickG'] }}"
diff --git a/ansible/roles/infra_rampup_stack_nodes/vars/main.yml b/ansible/roles/infra_rampup_stack_nodes/vars/main.yml
new file mode 100644 (file)
index 0000000..252eb86
--- /dev/null
@@ -0,0 +1,16 @@
+# Copyright (c) 2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+ssh_known_hosts_file: "/root/.ssh/known_hosts"
+deploy_host: "{{ hostvars[groups['deploy'][0]].ansible_host }}"
index 2235fe1..ed5ab27 100644 (file)
@@ -15,6 +15,7 @@
 civetweb_dest: "{{ clone_dest }}/civetweb"
 civetweb_build_dependencies:
   Debian:
+    - libjson-c-dev=0.11-4ubuntu2
     - libjson0
     - libjson0-dev
     - libssl-dev
diff --git a/ansible/roles/install_dependencies_jumphost/tasks/Debian.yml b/ansible/roles/install_dependencies_jumphost/tasks/Debian.yml
new file mode 100755 (executable)
index 0000000..9baf7e5
--- /dev/null
@@ -0,0 +1,76 @@
+# Copyright (c) 2017 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: Update repositories
+  apt:
+    update_cache: yes
+
+- name: Install core packages
+  action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+  with_items:
+    - wget
+    - curl
+    - screen
+    - procps
+    - socat
+    - sshpass
+    - sudo
+    - vim
+    - libffi-dev
+    - libfuse-dev
+    - libssl-dev
+    - libxft-dev
+    - libxml2-dev
+    - libxss-dev
+    - libxslt-dev
+    - libxslt1-dev
+    - libzmq-dev
+    - qemu-user-static
+    - qemu-utils
+    - kpartx
+    - python
+    - python-setuptools
+    - python-dev
+    - python-pip
+    - python-libvirt
+    - python-virtualenv
+    - bridge-utils
+    - ebtables
+    - openssl
+    - ccze
+    - nginx-full
+    - uwsgi
+    - uwsgi-plugin-python
+    - supervisor
+    - lsof
+    - nodejs
+    - npm
+    - rabbitmq-server
+
+- name: Install libc6:arm64 package
+  action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
+  with_items:
+    - libc6:arm64
+  when:
+    - arch is defined
+    - arch != arch_arm64
+    - installation_mode == inst_mode_container
+
+- name: Remove dependencies that are no longer required
+  apt:
+    update_cache: yes
+
+- name: Remove useless packages from the cache
+  apt:
+    autoclean: yes
     - python-setuptools
     - libffi-devel
     - python-devel
-    - kpartx
-
+    - nodejs
+    - npm
+    - gcc
+    - lsof
+    - procps
+    - bridge-utils
+    - ebtables
+    - openssl
+    - python-virtualenv
+    - ccze
old mode 100755 (executable)
new mode 100644 (file)
similarity index 70%
rename from ansible/roles/install_dependencies/tasks/Debian.yml
rename to ansible/roles/install_dependencies_jumphost/tasks/Suse.yml
index bba6fb1..af53c9c
@@ -1,4 +1,4 @@
-# Copyright (c) 2017 Intel Corporation.
+# Copyright (c) 2018 Intel Corporation.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # See the License for the specific language governing permissions and
 # limitations under the License.
 ---
+- name: Install EPEL if needed
+  action: "{{ ansible_pkg_mgr }} name=epel-release state=present"
+  when: ansible_distribution in ['RedHat', 'CentOS', 'ScientificLinux']
+
 - name: Install core packages
   action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
   with_items:
-    - python-minimal
+    - deltarpm
     - wget
+    - expect
     - curl
     - screen
-    - procps
     - git
     - socat
     - sshpass
-    - libxslt1-dev
-    - libffi-dev
-    - libfuse-dev
     - qemu-kvm
-    - qemu-user-static
-    - qemu-utils
     - kpartx
-    - libvirt0
-    - python-libvirt
+    - libxslt-devel
+    - libffi-devel
+    - openssl-devel
+    - nginx
+    - uwsgi
+    - python-setuptools
+    - libffi-devel
+    - python-devel
+    - nodejs
+    - npm
+    - gcc
+    - lsof
+    - procps
     - bridge-utils
     - ebtables
     - openssl
-    - libssl-dev
-    - python-dev
     - python-virtualenv
     - ccze
-    - libxml2-dev
-    - libxslt-dev
-    - libzmq-dev
-    - nginx-full
-    - uwsgi
-    - uwsgi-plugin-python
-    - supervisor
-    - python-setuptools
-    - lsof
index e82ad83..5bcfb50 100644 (file)
 
 - name: copy dpdk-devbind.py to correct location
   copy:
-    src: "{{ dpdk_devbind[dpdk_version] }}"
+    src: "{{ dpdk_devbind_usertools if dpdk_version|float >= 17.02 else dpdk_devbind_tools }}"
     dest: "{{ INSTALL_BIN_PATH }}/dpdk-devbind.py"
     remote_src: yes
     force: yes
index 45bcc33..957f47e 100644 (file)
@@ -1,9 +1,10 @@
 ---
-dpdk_make_arch: x86_64-native-linuxapp-gcc
+dpdk_make_archs:
+ "amd64": "x86_64-native-linuxapp-gcc"
+ "arm64": "arm64-native-linuxapp-gcc"
+dpdk_make_arch: "{{ dpdk_make_archs[YARD_IMG_ARCH] }}"
 dpdk_module_dir: "/lib/modules/{{ dpdk_kernel }}/extra"
 hugetable_mount: /mnt/huge
-dpdk_devbind:
-  "16.07": "{{ dpdk_path }}/tools/dpdk-devbind.py"
-  "17.02": "{{ dpdk_path }}/usertools/dpdk-devbind.py"
-  "17.04": "{{ dpdk_path }}/usertools/dpdk-devbind.py"
+dpdk_devbind_tools: "{{ dpdk_path }}/tools/dpdk-devbind.py"
+dpdk_devbind_usertools: "{{ dpdk_path }}/usertools/dpdk-devbind.py"
 dpdk_pmd_path: /usr/lib/dpdk-pmd/
index c77e4f9..641d8f9 100755 (executable)
@@ -16,6 +16,7 @@
   action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
   with_items:
     - libpcap-dev
+    - libnuma-dev
 
 - name: Install kernel headers
   action: "{{ ansible_pkg_mgr }} name=linux-headers-{{ dpdk_kernel }} state=present"
index 65954be..94b9215 100644 (file)
   set_fact:
     RTE_KERNELDIR: "/lib/modules/{{ dpdk_kernel }}/build"
 
+# make clean must be run here because DPDK-shared is a copy of the DPDK
+# directory in which the make command has already been run
+# no T= target for clean
+- command: make -j {{ ansible_processor_vcpus }} clean O={{ dpdk_make_arch }}
+  args:
+    chdir: "{{ dpdk_shared_path }}"
+  environment:
+    RTE_KERNELDIR: "{{ RTE_KERNELDIR }}"
+
 - command: make -j {{ ansible_processor_vcpus }} config  T={{ dpdk_make_arch }} O={{ dpdk_make_arch }}
   args:
     chdir: "{{ dpdk_shared_path }}"
     regexp: '^CONFIG_RTE_EAL_PMD_PATH=""'
     line: 'CONFIG_RTE_EAL_PMD_PATH="{{ dpdk_pmd_path }}"'
 
-  # no T= target for clean
-- command: make -j {{ ansible_processor_vcpus }} clean O={{ dpdk_make_arch }}
-  args:
-    chdir: "{{ dpdk_shared_path }}"
-  environment:
-    RTE_KERNELDIR: "{{ RTE_KERNELDIR }}"
-
 # TODO: disable ASLR
 
 - command: make -j {{ ansible_processor_vcpus }}
index 45bcc33..b663ced 100644 (file)
@@ -1,9 +1,8 @@
 ---
-dpdk_make_arch: x86_64-native-linuxapp-gcc
+dpdk_make_archs:
+ "amd64": "x86_64-native-linuxapp-gcc"
+ "arm64": "arm64-native-linuxapp-gcc"
+dpdk_make_arch: "{{ dpdk_make_archs[YARD_IMG_ARCH] }}"
 dpdk_module_dir: "/lib/modules/{{ dpdk_kernel }}/extra"
 hugetable_mount: /mnt/huge
-dpdk_devbind:
-  "16.07": "{{ dpdk_path }}/tools/dpdk-devbind.py"
-  "17.02": "{{ dpdk_path }}/usertools/dpdk-devbind.py"
-  "17.04": "{{ dpdk_path }}/usertools/dpdk-devbind.py"
 dpdk_pmd_path: /usr/lib/dpdk-pmd/
index f0b5321..7a3f5fa 100644 (file)
@@ -28,6 +28,7 @@ install_dependencies:
     # for IxLoad
     - libxft-dev
     - libxss-dev
+    - expect
   RedHat:
     - bc
     - fio
diff --git a/ansible/roles/install_pktgen/tasks/main.yml b/ansible/roles/install_pktgen/tasks/main.yml
new file mode 100644 (file)
index 0000000..294c779
--- /dev/null
@@ -0,0 +1,25 @@
+# Copyright (c) 2018 Huawei Technologies Co.,Ltd and others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: set build env vars
+  set_fact:
+      build_env_vars:
+        RTE_SDK: "{{ RTE_SDK }}"
+        RTE_TARGET: "{{ RTE_TARGET }}"
+
+- name: "make pktgen"
+  command: make
+  args:
+    chdir: "{{ pktgen_path }}"
+  environment: "{{ build_env_vars }}"
diff --git a/ansible/roles/install_yardstick/tasks/main.yml b/ansible/roles/install_yardstick/tasks/main.yml
new file mode 100644 (file)
index 0000000..ee1b837
--- /dev/null
@@ -0,0 +1,46 @@
+# Copyright (c) 2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+# Arguments needed: map_min_addr_file, yardstick_dir
+
+- name: Define variables
+  set_fact:
+    map_min_addr_file: "/etc/sysctl.d/mmap_min_addr.conf"
+
+- name: Remove the kernel minimum virtual address restriction that a process is allowed to mmap
+  copy:
+    dest: "{{ map_min_addr_file }}"
+    content: "vm.mmap_min_addr = 0\n"
+
+- name: Config git SSL
+  git_config:
+    name: http.sslVerify
+    scope: global
+    value: False
+
+# There is a bug with the easy install ansible module in suse linux.
+# Until this is fixed the shell command must be used
+- name: Install pip
+  shell: easy_install -U pip
+#    easy_install:
+#    name: pip
+#    state: latest
+
+- name: install yardstick without virtual environment
+  include_tasks: regular_install.yml
+  when: virtual_environment == False
+
+- name: install yardstick with virtual environment
+  include_tasks: virtual_install.yml
+  when: virtual_environment == True
diff --git a/ansible/roles/install_yardstick/tasks/regular_install.yml b/ansible/roles/install_yardstick/tasks/regular_install.yml
new file mode 100644 (file)
index 0000000..4a9925a
--- /dev/null
@@ -0,0 +1,22 @@
+# Copyright (c) 2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: Install Yardstick requirements
+  pip:
+    requirements: "{{ yardstick_dir }}/requirements.txt"
+
+- name: Install Yardstick code
+  pip:
+    name: "{{ yardstick_dir }}/."
+    extra_args: -e
diff --git a/ansible/roles/install_yardstick/tasks/virtual_install.yml b/ansible/roles/install_yardstick/tasks/virtual_install.yml
new file mode 100644 (file)
index 0000000..8545acb
--- /dev/null
@@ -0,0 +1,25 @@
+# Copyright (c) 2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: Install Yardstick requirements
+  pip:
+    requirements: "{{ yardstick_dir }}/requirements.txt"
+    virtualenv: "{{ yardstick_dir }}/virtualenv"
+
+- name: Install Yardstick code
+  pip:
+    name: "{{ yardstick_dir }}/."
+    extra_args: -e
+    virtualenv: "{{ yardstick_dir }}/virtualenv"
+
index 5166765..b54ea9b 100644 (file)
       user: ""
       password: ""
       key_filename: ~
-      tg_config: 
+      tg_config:
         ixchassis: "1.1.1.127" #ixia chassis ip
         tcl_port: "8009" # tcl server port
         lib_path: "/opt/ixia/ixos-api/8.01.0.2/lib/ixTcl1.0"
         root_dir: "/opt/ixia/ixos-api/8.01.0.2/"
         py_bin_path: "/opt/ixia/ixload/8.01.106.3/bin/"
-        py_lib_path: "/opt/ixia/ixnetwork/8.01.1029.14/lib/PythonApi"
         dut_result_dir: "/mnt/results"
         version: "8.01.106.3"
       pcis:
index ff66537..cae3734 100644 (file)
@@ -60,7 +60,6 @@
         lib_path: "/opt/ixia/ixos-api/8.01.0.2/lib/ixTcl1.0"
         root_dir: "/opt/ixia/ixos-api/8.01.0.2/"
         py_bin_path: "/opt/ixia/ixload/8.01.106.3/bin/"
-        py_lib_path: "/opt/ixia/ixnetwork/8.01.1029.14/lib/PythonApi"
         dut_result_dir: "/mnt/results"
         version: "8.01.106.3"
       pcis:
index 45a4a49..0e3a0af 100644 (file)
       user: ""
       password: ""
       key_filename: ~
-      tg_config: 
+      tg_config:
         ixchassis: "1.1.1.127" #ixia chassis ip
         tcl_port: "8009" # tcl server port
         lib_path: "/opt/ixia/ixos-api/8.01.0.2/lib/ixTcl1.0"
         root_dir: "/opt/ixia/ixos-api/8.01.0.2/"
         py_bin_path: "/opt/ixia/ixload/8.01.106.3/bin/"
-        py_lib_path: "/opt/ixia/ixnetwork/8.01.1029.14/lib/PythonApi"
         dut_result_dir: "/mnt/results"
         version: "8.01.106.3"
       pcis:
index 659dbef..8fb09d9 100644 (file)
@@ -49,7 +49,6 @@
         lib_path: "/opt/ixia/ixos-api/8.01.0.2/lib/ixTcl1.0"
         root_dir: "/opt/ixia/ixos-api/8.01.0.2/"
         py_bin_path: "/opt/ixia/ixload/8.01.106.3/bin/"
-        py_lib_path: "/opt/ixia/ixnetwork/8.01.1029.14/lib/PythonApi"
         dut_result_dir: "/mnt/results"
         version: "8.01.106.3"
       pcis:
index 85afa2a..7aa6c8c 100644 (file)
@@ -40,6 +40,8 @@
     - install_dpdk
     - download_trex
     - install_trex
+    - download_pktgen
+    - install_pktgen
     - download_civetweb
     - install_civetweb
     - download_samplevnfs
index 75c981a..6c9eb83 100644 (file)
@@ -10,12 +10,16 @@ from __future__ import absolute_import
 
 import errno
 import logging
+
+import ipaddress
 import os
 import subprocess
 import threading
 import time
 import uuid
 import glob
+
+import six
 import yaml
 import collections
 
@@ -269,6 +273,8 @@ class V1Env(ApiResource):
                     LOG.info('Openrc file not found')
                     installer_ip = os.environ.get('INSTALLER_IP',
                                                   '192.168.200.2')
+                    # validate that installer_ip is a well-formed IPv4 address
+                    installer_ip = str(ipaddress.IPv4Address(six.u(installer_ip)))
                     installer_type = os.environ.get('INSTALLER_TYPE', 'compass')
                     LOG.info('Getting openrc file from %s', installer_type)
                     self._get_remote_rc_file(rc_file,
index 5f72c2e..3e14670 100644 (file)
@@ -20,6 +20,7 @@ from yardstick.common.utils import result_handler
 from yardstick.benchmark.core import Param
 from yardstick.benchmark.core.task import Task
 from api.swagger import models
+from api.database.v1.handlers import TasksHandler
 
 LOG = logging.getLogger(__name__)
 LOG.setLevel(logging.DEBUG)
@@ -58,7 +59,7 @@ class V1Testsuite(ApiResource):
         task_args.update(args.get('opts', {}))
 
         param = Param(task_args)
-        task_thread = TaskThread(Task().start, param)
+        task_thread = TaskThread(Task().start, param, TasksHandler())
         task_thread.start()
 
         return result_handler(consts.API_SUCCESS, {'task_id': task_id})
index 0c36a0a..c3e5ee7 100644 (file)
@@ -18,8 +18,7 @@ from api.database.v2.handlers import V2ImageHandler
 from api.database.v2.handlers import V2EnvironmentHandler
 from yardstick.common.utils import result_handler
 from yardstick.common.utils import source_env
-from yardstick.common.utils import change_obj_to_dict
-from yardstick.common.openstack_utils import get_nova_client
+from yardstick.common import openstack_utils
 from yardstick.common.openstack_utils import get_glance_client
 from yardstick.common import constants as consts
 
@@ -47,39 +46,21 @@ class V2Images(ApiResource):
     def get(self):
         try:
             source_env(consts.OPENRC)
-        except Exception:
+        except OSError:
             return result_handler(consts.API_ERROR, 'source openrc error')
 
-        nova_client = get_nova_client()
-        try:
-            images_list = nova_client.images.list()
-        except Exception:
+        image_list = openstack_utils.list_images()
+
+        if image_list is False:
             return result_handler(consts.API_ERROR, 'get images error')
-        else:
-            images = {i.name: self.get_info(change_obj_to_dict(i)) for i in images_list}
+
+        images = {i.name: format_image_info(i) for i in image_list}
 
         return result_handler(consts.API_SUCCESS, {'status': 1, 'images': images})
 
     def post(self):
         return self._dispatch_post()
 
-    def get_info(self, data):
-        try:
-            size = data['OS-EXT-IMG-SIZE:size']
-        except KeyError:
-            size = None
-        else:
-            size = float(size) / 1024 / 1024
-
-        result = {
-            'name': data.get('name', ''),
-            'discription': data.get('description', ''),
-            'size': size,
-            'status': data.get('status'),
-            'time': data.get('updated')
-        }
-        return result
-
     def load_image(self, args):
         try:
             image_name = args['name']
@@ -268,7 +249,7 @@ class V2Images(ApiResource):
         r = requests.head(url)
         try:
             file_size = int(r.headers['content-length'])
-        except Exception:
+        except (TypeError, ValueError):
             return
 
         with open(path, 'wb') as f:
@@ -303,14 +284,13 @@ class V2Image(ApiResource):
         except ValueError:
             return result_handler(consts.API_ERROR, 'no such image id')
 
-        nova_client = get_nova_client()
-        images = nova_client.images.list()
+        images = openstack_utils.list_images()
         try:
             image = next((i for i in images if i.name == image.name))
         except StopIteration:
             pass
 
-        return_image = self.get_info(change_obj_to_dict(image))
+        return_image = format_image_info(image)
         return_image['id'] = image_id
 
         return result_handler(consts.API_SUCCESS, {'image': return_image})
@@ -349,19 +329,16 @@ class V2Image(ApiResource):
 
         return result_handler(consts.API_SUCCESS, {'image': image_id})
 
-    def get_info(self, data):
-        try:
-            size = data['OS-EXT-IMG-SIZE:size']
-        except KeyError:
-            size = None
-        else:
-            size = float(size) / 1024 / 1024
-
-        result = {
-            'name': data.get('name', ''),
-            'description': data.get('description', ''),
-            'size': size,
-            'status': data.get('status'),
-            'time': data.get('updated')
-        }
-        return result
+
+def format_image_info(image):
+    image_dict = {}
+
+    if image is None:
+        return image_dict
+
+    image_dict['name'] = image.name
+    image_dict['size'] = float(image.size) / 1024 / 1024
+    image_dict['status'] = image.status.upper()
+    image_dict['time'] = image.updated_at
+
+    return image_dict
diff --git a/dashboard/opnfv_yardstick_tc056.json b/dashboard/opnfv_yardstick_tc056.json
new file mode 100644 (file)
index 0000000..5d7f2b0
--- /dev/null
@@ -0,0 +1,301 @@
+{
+  "__inputs": [
+    {
+      "name": "DS_YARDSTICK",
+      "label": "yardstick",
+      "description": "",
+      "type": "datasource",
+      "pluginId": "influxdb",
+      "pluginName": "InfluxDB"
+    }
+  ],
+  "__requires": [
+    {
+      "type": "grafana",
+      "id": "grafana",
+      "name": "Grafana",
+      "version": "4.4.3"
+    },
+    {
+      "type": "panel",
+      "id": "graph",
+      "name": "Graph",
+      "version": ""
+    },
+    {
+      "type": "datasource",
+      "id": "influxdb",
+      "name": "InfluxDB",
+      "version": "1.0.0"
+    },
+    {
+      "type": "panel",
+      "id": "singlestat",
+      "name": "Singlestat",
+      "version": ""
+    }
+  ],
+  "annotations": {
+    "list": []
+  },
+  "editable": true,
+  "gnetId": null,
+  "graphTooltip": 0,
+  "hideControls": false,
+  "id": null,
+  "links": [],
+  "refresh": "1m",
+  "rows": [
+    {
+      "collapse": false,
+      "height": 340,
+      "panels": [
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "yardstick",
+          "description": "",
+          "fill": 1,
+          "id": 1,
+          "legend": {
+            "avg": false,
+            "current": false,
+            "max": false,
+            "min": false,
+            "show": true,
+            "total": false,
+            "values": false
+          },
+          "lines": true,
+          "linewidth": 1,
+          "links": [],
+          "nullPointMode": "null",
+          "percentage": false,
+          "pointradius": 5,
+          "points": true,
+          "renderer": "flot",
+          "seriesOverrides": [],
+          "spaceLength": 10,
+          "span": 10,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "alias": "",
+              "dsType": "influxdb",
+              "groupBy": [],
+              "measurement": "opnfv_yardstick_tc056",
+              "orderByTime": "ASC",
+              "policy": "default",
+              "query": "SELECT \"openstack-user-list_outage_time\" FROM \"opnfv_yardstick_tc056\" WHERE $timeFilter",
+              "rawQuery": false,
+              "refId": "A",
+              "resultFormat": "time_series",
+              "select": [
+                [
+                  {
+                    "params": [
+                      "openstack-volume-list_outage_time"
+                    ],
+                    "type": "field"
+                  }
+                ]
+              ],
+              "tags": []
+            }
+          ],
+          "thresholds": [
+            {
+              "colorMode": "critical",
+              "fill": true,
+              "line": true,
+              "op": "gt",
+              "value": 5
+            },
+            {
+              "colorMode": "ok",
+              "fill": true,
+              "line": true,
+              "op": "lt",
+              "value": 5
+            }
+          ],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "command monitor (outage time)",
+          "tooltip": {
+            "shared": true,
+            "sort": 0,
+            "value_type": "individual"
+          },
+          "type": "graph",
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "yaxes": [
+            {
+              "format": "s",
+              "label": "",
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            },
+            {
+              "format": "short",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            }
+          ]
+        },
+        {
+          "cacheTimeout": null,
+          "colorBackground": true,
+          "colorValue": false,
+          "colors": [
+            "rgba(245, 54, 54, 0.9)",
+            "rgba(237, 129, 40, 0.89)",
+            "rgba(50, 172, 45, 0.97)"
+          ],
+          "datasource": "yardstick",
+          "format": "none",
+          "gauge": {
+            "maxValue": 100,
+            "minValue": 0,
+            "show": false,
+            "thresholdLabels": false,
+            "thresholdMarkers": true
+          },
+          "id": 4,
+          "interval": null,
+          "links": [],
+          "mappingType": 1,
+          "mappingTypes": [
+            {
+              "name": "value to text",
+              "value": 1
+            },
+            {
+              "name": "range to text",
+              "value": 2
+            }
+          ],
+          "maxDataPoints": 100,
+          "nullPointMode": "connected",
+          "nullText": null,
+          "postfix": "",
+          "postfixFontSize": "50%",
+          "prefix": "",
+          "prefixFontSize": "50%",
+          "rangeMaps": [
+            {
+              "from": "null",
+              "text": "N/A",
+              "to": "null"
+            }
+          ],
+          "span": 2,
+          "sparkline": {
+            "fillColor": "rgba(31, 118, 189, 0.18)",
+            "full": false,
+            "lineColor": "rgb(31, 120, 193)",
+            "show": false
+          },
+          "tableColumn": "",
+          "targets": [
+            {
+              "alias": "",
+              "dsType": "influxdb",
+              "groupBy": [],
+              "measurement": "opnfv_yardstick_tc056",
+              "orderByTime": "ASC",
+              "policy": "default",
+              "query": "SELECT \"sla_pass\" FROM \"opnfv_yardstick_tc056\" WHERE $timeFilter",
+              "rawQuery": false,
+              "refId": "A",
+              "resultFormat": "time_series",
+              "select": [
+                [
+                  {
+                    "params": [
+                      "sla_pass"
+                    ],
+                    "type": "field"
+                  }
+                ]
+              ],
+              "tags": []
+            }
+          ],
+          "thresholds": "0.5,1",
+          "title": "SLA PASS/FAIL",
+          "type": "singlestat",
+          "valueFontSize": "80%",
+          "valueMaps": [
+            {
+              "op": "=",
+              "text": "N/A",
+              "value": "null"
+            }
+          ],
+          "valueName": "avg"
+        }
+      ],
+      "repeat": null,
+      "repeatIteration": null,
+      "repeatRowId": null,
+      "showTitle": false,
+      "title": "Dashboard Row",
+      "titleSize": "h6"
+    }
+  ],
+  "schemaVersion": 14,
+  "style": "dark",
+  "tags": [
+    "HA"
+  ],
+  "templating": {
+    "list": []
+  },
+  "time": {
+    "from": "now/d",
+    "to": "now/d"
+  },
+  "timepicker": {
+    "refresh_intervals": [
+      "5s",
+      "10s",
+      "30s",
+      "1m",
+      "5m",
+      "15m",
+      "30m",
+      "1h",
+      "2h",
+      "1d"
+    ],
+    "time_options": [
+      "5m",
+      "15m",
+      "1h",
+      "6h",
+      "12h",
+      "24h",
+      "2d",
+      "7d",
+      "30d"
+    ]
+  },
+  "timezone": "",
+  "title": "opnfv_yardstick_tc056",
+  "version": 2
+}
diff --git a/dashboard/opnfv_yardstick_tc058.json b/dashboard/opnfv_yardstick_tc058.json
new file mode 100644 (file)
index 0000000..55b5a5f
--- /dev/null
@@ -0,0 +1,265 @@
+{
+  "annotations": {
+    "list": []
+  },
+  "editable": true,
+  "gnetId": null,
+  "graphTooltip": 0,
+  "hideControls": false,
+  "id": 33,
+  "links": [],
+  "refresh": "1m",
+  "rows": [
+    {
+      "collapse": false,
+      "height": 343,
+      "panels": [
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": "yardstick",
+          "description": "",
+          "fill": 1,
+          "id": 1,
+          "legend": {
+            "avg": false,
+            "current": false,
+            "max": false,
+            "min": false,
+            "show": true,
+            "total": false,
+            "values": false
+          },
+          "lines": true,
+          "linewidth": 1,
+          "links": [],
+          "nullPointMode": "null",
+          "percentage": false,
+          "pointradius": 5,
+          "points": true,
+          "renderer": "flot",
+          "seriesOverrides": [],
+          "spaceLength": 10,
+          "span": 9,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "alias": "",
+              "dsType": "influxdb",
+              "groupBy": [],
+              "measurement": "opnfv_yardstick_tc058",
+              "orderByTime": "ASC",
+              "policy": "default",
+              "query": "SELECT \"server-status_outage_time\" FROM \"opnfv_yardstick_tc058\" WHERE $timeFilter",
+              "rawQuery": false,
+              "refId": "A",
+              "resultFormat": "time_series",
+              "select": [
+                [
+                  {
+                    "params": [
+                      "server-status_outage_time"
+                    ],
+                    "type": "field"
+                  }
+                ]
+              ],
+              "tags": []
+            }
+          ],
+          "thresholds": [
+            {
+              "colorMode": "critical",
+              "fill": true,
+              "line": true,
+              "op": "gt",
+              "value": 5
+            },
+            {
+              "colorMode": "ok",
+              "fill": true,
+              "line": true,
+              "op": "lt",
+              "value": 5
+            }
+          ],
+          "timeFrom": null,
+          "timeShift": null,
+          "title": "Server Status outage time",
+          "tooltip": {
+            "shared": true,
+            "sort": 0,
+            "value_type": "individual"
+          },
+          "type": "graph",
+          "xaxis": {
+            "buckets": null,
+            "mode": "time",
+            "name": null,
+            "show": true,
+            "values": []
+          },
+          "yaxes": [
+            {
+              "format": "s",
+              "label": "",
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": true
+            },
+            {
+              "format": "short",
+              "label": null,
+              "logBase": 1,
+              "max": null,
+              "min": null,
+              "show": false
+            }
+          ]
+        },
+        {
+          "cacheTimeout": null,
+          "colorBackground": true,
+          "colorValue": false,
+          "colors": [
+            "rgba(245, 54, 54, 0.9)",
+            "rgba(237, 129, 40, 0.89)",
+            "rgba(50, 172, 45, 0.97)"
+          ],
+          "datasource": "yardstick",
+          "format": "short",
+          "gauge": {
+            "maxValue": 100,
+            "minValue": 0,
+            "show": false,
+            "thresholdLabels": false,
+            "thresholdMarkers": true
+          },
+          "id": 4,
+          "interval": null,
+          "links": [],
+          "mappingType": 1,
+          "mappingTypes": [
+            {
+              "name": "value to text",
+              "value": 1
+            },
+            {
+              "name": "range to text",
+              "value": 2
+            }
+          ],
+          "maxDataPoints": 100,
+          "nullPointMode": "connected",
+          "nullText": null,
+          "postfix": "",
+          "postfixFontSize": "50%",
+          "prefix": "",
+          "prefixFontSize": "50%",
+          "rangeMaps": [
+            {
+              "from": "null",
+              "text": "N/A",
+              "to": "null"
+            }
+          ],
+          "span": 3,
+          "sparkline": {
+            "fillColor": "rgba(31, 118, 189, 0.18)",
+            "full": false,
+            "lineColor": "rgb(31, 120, 193)",
+            "show": false
+          },
+          "tableColumn": "",
+          "targets": [
+            {
+              "alias": "",
+              "dsType": "influxdb",
+              "groupBy": [],
+              "measurement": "opnfv_yardstick_tc058",
+              "orderByTime": "ASC",
+              "policy": "default",
+              "query": "SELECT \"sla_pass\" FROM \"opnfv_yardstick_tc058\" WHERE $timeFilter",
+              "rawQuery": false,
+              "refId": "A",
+              "resultFormat": "time_series",
+              "select": [
+                [
+                  {
+                    "params": [
+                      "sla_pass"
+                    ],
+                    "type": "field"
+                  }
+                ]
+              ],
+              "tags": []
+            }
+          ],
+          "thresholds": "0.5,1",
+          "title": "SLA PASS/FAIL",
+          "type": "singlestat",
+          "valueFontSize": "80%",
+          "valueMaps": [
+            {
+              "op": "=",
+              "text": "N/A",
+              "value": "null"
+            }
+          ],
+          "valueName": "avg"
+        }
+      ],
+      "repeat": null,
+      "repeatIteration": null,
+      "repeatRowId": null,
+      "showTitle": false,
+      "title": "Dashboard Row",
+      "titleSize": "h6"
+    }
+  ],
+  "schemaVersion": 14,
+  "style": "dark",
+  "tags": [
+    "HA"
+  ],
+  "templating": {
+    "list": []
+  },
+  "time": {
+    "from": "2018-03-26T09:00:00.000Z",
+    "to": "2018-03-28T08:59:59.998Z"
+  },
+  "timepicker": {
+    "refresh_intervals": [
+      "5s",
+      "10s",
+      "30s",
+      "1m",
+      "5m",
+      "15m",
+      "30m",
+      "1h",
+      "2h",
+      "1d"
+    ],
+    "time_options": [
+      "5m",
+      "15m",
+      "1h",
+      "6h",
+      "12h",
+      "24h",
+      "2d",
+      "7d",
+      "30d"
+    ]
+  },
+  "timezone": "",
+  "title": "opnfv_yardstick_tc058",
+  "version": 8
+}
index 959315c..5813f02 100644 (file)
@@ -20,34 +20,41 @@ ENV REPOS_DIR="/home/opnfv/repos" \
 # Set work directory
 
 # Yardstick repo
-ENV YARDSTICK_REPO_DIR="${REPOS_DIR}/yardstick" \
+ENV YARDSTICK_REPO_DIR="${REPOS_DIR}/yardstick/" \
+    RELENG_REPO_DIR="${REPOS_DIR}/releng" \
     STORPERF_REPO_DIR="${REPOS_DIR}/storperf"
 
-RUN apt-get update && apt-get install -y git python-setuptools python-pip && apt-get -y autoremove && apt-get clean
+RUN apt-get update && apt-get install -y git python python-setuptools python-pip && apt-get -y autoremove && apt-get clean
 RUN easy_install -U setuptools==30.0.0
-RUN pip install appdirs==1.4.0 pyopenssl==17.5.0 python-openstackclient==3.11.0 python-heatclient==1.11.0
+RUN pip install appdirs==1.4.0 pyopenssl==17.5.0 python-openstackclient==3.11.0 python-heatclient==1.11.0 ansible==2.4.2
 
 RUN mkdir -p ${REPOS_DIR}
 
 RUN git config --global http.sslVerify false
+#For developers: To test your changes you must comment out the git clone for ${YARDSTICK_REPO_DIR}.
+#You must also uncomment the RUN and COPY commands below.
+#You must run docker build from your yardstick directory on the host.
 RUN git clone --depth 1 -b $BRANCH https://gerrit.opnfv.org/gerrit/yardstick ${YARDSTICK_REPO_DIR}
+#RUN mkdir ${YARDSTICK_REPO_DIR}
+#COPY ./ ${YARDSTICK_REPO_DIR}
+RUN git clone --depth 1 https://gerrit.opnfv.org/gerrit/releng ${RELENG_REPO_DIR}
 RUN git clone --depth 1 -b $BRANCH https://gerrit.opnfv.org/gerrit/storperf ${STORPERF_REPO_DIR}
 
-WORKDIR ${YARDSTICK_REPO_DIR}
-RUN ${YARDSTICK_REPO_DIR}/install.sh
+RUN ansible-playbook -c local -vvv -e INSTALLATION_MODE="container" ${YARDSTICK_REPO_DIR}/ansible/install.yaml
+
 RUN ${YARDSTICK_REPO_DIR}/docker/supervisor.sh
 
 RUN echo "daemon off;" >> /etc/nginx/nginx.conf
-
-EXPOSE 5000
+# nginx=5000, rabbitmq=5672
+EXPOSE 5000 5672
 
 ADD http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img ${IMAGE_DIR}
 ADD http://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img ${IMAGE_DIR}
 
-COPY ./exec_tests.sh /usr/local/bin/
+COPY ./docker/exec_tests.sh /usr/local/bin/
 
-ENV NSB_DIR="/opt/nsb_bin" \
-    PYTHONPATH="${PYTHONPATH}:${NSB_DIR}/trex_client:${NSB_DIR}/trex_client/stl"
+ENV NSB_DIR="/opt/nsb_bin"
+ENV PYTHONPATH="${PYTHONPATH}:${NSB_DIR}/trex_client:${NSB_DIR}/trex_client/stl"
 
 WORKDIR ${REPOS_DIR}
 CMD ["/usr/bin/supervisord"]
index a224329..720a399 100644 (file)
@@ -1,24 +1,17 @@
 From: Cristina Pauna <cristina.pauna@enea.com>
-Date: Thu, 11 Jan 2018 19:06:26 +0200
-Subject: [PATCH] Patch for Yardstick AARCH64 Docker file
+Date: Mon, 30 Apr 2018 14:09:00 +0300
+Subject: [PATCH] [PATCH] Patch for Yardstick AARCH64 Docker file
 
 Signed-off-by: Cristina Pauna <cristina.pauna@enea.com>
 Signed-off-by: Alexandru Nemes <alexandru.nemes@enea.com>
 ---
- docker/Dockerfile | 13 +++++++------
- 1 file changed, 7 insertions(+), 6 deletions(-)
+ docker/Dockerfile | 12 +++++++-----
+ 1 file changed, 7 insertions(+), 5 deletions(-)
 
 diff --git a/docker/Dockerfile b/docker/Dockerfile
-index 2ee5b4c..23e5ea5 100644
+index fed9f9bd..9654b5dc 100644
 --- a/docker/Dockerfile
 +++ b/docker/Dockerfile
-@@ -1,5 +1,5 @@
- ##############################################################################
--# Copyright (c) 2015 Ericsson AB and others.
-+# Copyright (c) 2017 Enea AB and others.
- #
- # All rights reserved. This program and the accompanying materials
- # are made available under the terms of the Apache License, Version 2.0
 @@ -7,9 +7,9 @@
  # http://www.apache.org/licenses/LICENSE-2.0
  ##############################################################################
@@ -31,24 +24,25 @@ index 2ee5b4c..23e5ea5 100644
 
  ARG BRANCH=master
 
-@@ -24,7 +24,8 @@ ENV YARDSTICK_REPO_DIR="${REPOS_DIR}/yardstick" \
+@@ -24,7 +24,9 @@ ENV YARDSTICK_REPO_DIR="${REPOS_DIR}/yardstick/" \
      RELENG_REPO_DIR="${REPOS_DIR}/releng" \
      STORPERF_REPO_DIR="${REPOS_DIR}/storperf"
 
--RUN apt-get update && apt-get install -y git python-setuptools python-pip && apt-get -y autoremove && apt-get clean
-+RUN apt-get update && apt-get install -y git python-setuptools python-pip && apt-get -y autoremove && \
+-RUN apt-get update && apt-get install -y git python python-setuptools python-pip && apt-get -y autoremove && apt-get clean
++RUN apt-get update && apt-get install -y git python python-setuptools python-pip && apt-get -y autoremove && \
 +    apt-get install -y libssl-dev && apt-get -y install libffi-dev && apt-get clean
++
  RUN easy_install -U setuptools==30.0.0
- RUN pip install appdirs==1.4.0 pyopenssl==17.5.0 python-openstackclient==3.11.0 python-heatclient==1.11.0
-
-@@ -43,8 +44,8 @@ RUN echo "daemon off;" >> /etc/nginx/nginx.conf
+ RUN pip install appdirs==1.4.0 pyopenssl==17.5.0 python-openstackclient==3.11.0 python-heatclient==1.11.0 ansible==2.4.2
 
- EXPOSE 5000
+@@ -45,8 +47,8 @@ RUN echo "daemon off;" >> /etc/nginx/nginx.conf
+ # nginx=5000, rabbitmq=5672
+ EXPOSE 5000 5672
 
 -ADD http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img ${IMAGE_DIR}
 -ADD http://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img ${IMAGE_DIR}
 +ADD http://download.cirros-cloud.net/daily/20161201/cirros-d161201-aarch64-disk.img ${IMAGE_DIR}
 +ADD http://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-arm64-disk1.img ${IMAGE_DIR}
 
- COPY ./exec_tests.sh /usr/local/bin/
+ COPY ./docker/exec_tests.sh /usr/local/bin/
 
index b67de22..44e34fb 100755 (executable)
@@ -11,7 +11,7 @@
 # nginx service start when boot
 supervisor_config='/etc/supervisor/conf.d/yardstick.conf'
 
-if [[ ! -e "${supervisor_config}" ]];then
+if [[ ! -e "${supervisor_config}" ]]; then
     cat << EOF > "${supervisor_config}"
 [supervisord]
 nodaemon = true
@@ -22,5 +22,8 @@ command = service nginx restart
 [program:yardstick_uwsgi]
 directory = /etc/yardstick
 command = uwsgi -i yardstick.ini
+
+[program:rabbitmq]
+command = service rabbitmq-server restart
 EOF
 fi
index 4ebf0ec..6598a27 100644 (file)
@@ -1,7 +1,8 @@
+=======
 License
 =======
 
-OPNFV Euphrates release note for Yardstick Docs
+OPNFV Fraser release note for Yardstick Docs
 are licensed under a Creative Commons Attribution 4.0 International License.
 You should have received a copy of the license along with this.
 If not, see <http://creativecommons.org/licenses/by/4.0/>.
@@ -9,8 +10,9 @@ If not, see <http://creativecommons.org/licenses/by/4.0/>.
 The *Yardstick framework*, the *Yardstick test cases* are open-source software,
  licensed under the terms of the Apache License, Version 2.0.
 
-OPNFV Euphrates Release Note for Yardstick
-==========================================
+=======================================
+OPNFV Fraser Release Note for Yardstick
+=======================================
 
 .. toctree::
    :maxdepth: 2
@@ -23,50 +25,43 @@ OPNFV Euphrates Release Note for Yardstick
 
 
 Abstract
---------
+========
 
 This document describes the release note of Yardstick project.
 
 
 Version History
----------------
+===============
 +-------------------+-----------+---------------------------------+
 | *Date*            | *Version* | *Comment*                       |
 |                   |           |                                 |
 +-------------------+-----------+---------------------------------+
-| December 15, 2017 | 5.1.0     | Yardstick for Euphrates release |
-|                   |           |                                 |
-+-------------------+-----------+---------------------------------+
-| October 20, 2017  | 5.0.0     | Yardstick for Euphrates release |
+| April 27, 2018    | 6.0.0     | Yardstick for Fraser release    |
 |                   |           |                                 |
 +-------------------+-----------+---------------------------------+
 
 
 Important Notes
----------------
+===============
 
 The software delivered in the OPNFV Yardstick_ Project, comprising the
-*Yardstick framework*, the *Yardstick test cases* and the experimental
-framework *Apex Lake* is a realization of the methodology in ETSI-ISG
-NFV-TST001_.
+*Yardstick framework*, and the *Yardstick test cases* is a realization of
+the methodology in ETSI-ISG NFV-TST001_.
 
 The *Yardstick* framework is *installer*, *infrastructure* and *application*
 independent.
 
-OPNFV Euphrates Release
------------------------
+OPNFV Fraser Release
+====================
 
-This Euphrates release provides *Yardstick* as a framework for NFVI testing
+This Fraser release provides *Yardstick* as a framework for NFVI testing
 and OPNFV feature testing, automated in the OPNFV CI pipeline, including:
 
 * Documentation generated with Sphinx
 
   * User Guide
-
   * Developer Guide
-
   * Release notes (this document)
-
   * Results
 
 * Automated Yardstick test suite (daily, weekly)
@@ -84,39 +79,29 @@ and OPNFV feature testing, automated in the OPNFV CI pipeline, including:
 
 * Yardstick plug-in configuration yaml files, plug-in install/remove scripts
 
-For Euphrates release, the *Yardstick framework* is used for the following
+For Fraser release, the *Yardstick framework* is used for the following
 testing:
 
 * OPNFV platform testing - generic test cases to measure the categories:
 
   * Compute
-
   * Network
-
   * Storage
 
-* OPNFV platform network service benchmarking(NSB)
+* OPNFV platform network service benchmarking (NSB)
 
   * NSB
 
 * Test cases for the following OPNFV Projects:
 
   * Container4NFV
-
   * High Availability
-
   * IPv6
-
   * KVM
-
   * Parser
-
   * StorPerf
-
   * VSperf
 
-  * virtual Traffic Classifier
-
 The *Yardstick framework* is developed in the OPNFV community, by the
 Yardstick_ team.
 
@@ -126,49 +111,47 @@ Yardstick_ team.
 
 
 Release Data
-------------
+============
 
 +--------------------------------+-----------------------+
 | **Project**                    | Yardstick             |
 |                                |                       |
 +--------------------------------+-----------------------+
-| **Repo/tag**                   | yardstick/opnfv-5.1.0 |
+| **Repo/tag**                   | yardstick/opnfv-6.0.0 |
 |                                |                       |
 +--------------------------------+-----------------------+
-| **Yardstick Docker image tag** | opnfv-5.1.0           |
+| **Yardstick Docker image tag** | opnfv-6.0.0           |
 |                                |                       |
 +--------------------------------+-----------------------+
-| **Release designation**        | Euphrates             |
+| **Release designation**        | Fraser                |
 |                                |                       |
 +--------------------------------+-----------------------+
-| **Release date**               | December 15, 2017     |
+| **Release date**               | April 27, 2018        |
 |                                |                       |
 +--------------------------------+-----------------------+
-| **Purpose of the delivery**    | OPNFV Euphrates 5.1.0 |
+| **Purpose of the delivery**    | OPNFV Fraser 6.0.0    |
 |                                |                       |
 +--------------------------------+-----------------------+
 
 
 Deliverables
-------------
+============
 
 Documents
-^^^^^^^^^
+---------
 
- - User Guide: http://docs.opnfv.org/en/stable-euphrates/submodules/yardstick/docs/testing/user/userguide/index.html
+ - User Guide: http://docs.opnfv.org/en/stable-fraser/submodules/yardstick/docs/testing/user/userguide/index.html
 
- - Developer Guide: http://docs.opnfv.org/en/stable-euphrates/submodules/yardstick/docs/testing/developer/devguide/index.html
+ - Developer Guide: http://docs.opnfv.org/en/stable-fraser/submodules/yardstick/docs/testing/developer/devguide/index.html
 
 
 Software Deliverables
-^^^^^^^^^^^^^^^^^^^^^
-
+---------------------
 
- - The Yardstick Docker image: https://hub.docker.com/r/opnfv/yardstick (tag: opnfv-5.1.0)
+ - The Yardstick Docker image: https://hub.docker.com/r/opnfv/yardstick (tag: opnfv-6.0.0)
 
-
-New Contexts
-############
+List of Contexts
+^^^^^^^^^^^^^^^^
 
 +--------------+-------------------------------------------+
 | **Context**  | **Description**                           |
@@ -188,31 +171,40 @@ New Contexts
 +--------------+-------------------------------------------+
 
 
-New Runners
-###########
-
-+--------------+-------------------------------------------------------+
-| **Runner**   | **Description**                                       |
-|              |                                                       |
-+--------------+-------------------------------------------------------+
-| *Arithmetic* | Steps every run arithmetically according to specified |
-|              | input value                                           |
-|              |                                                       |
-+--------------+-------------------------------------------------------+
-| *Duration*   | Runs for a specified period of time                   |
-|              |                                                       |
-+--------------+-------------------------------------------------------+
-| *Iteration*  | Runs for a specified number of iterations             |
-|              |                                                       |
-+--------------+-------------------------------------------------------+
-| *Sequence*   | Selects input value to a scenario from an input file  |
-|              | and runs all entries sequentially                     |
-|              |                                                       |
-+--------------+-------------------------------------------------------+
-
-
-New Scenarios
-#############
+List of Runners
+^^^^^^^^^^^^^^^
+
+Note: Yardstick Fraser 6.0.0 adds two new Runners, "Dynamictp" and "Search".
+
++---------------+-------------------------------------------------------+
+| **Runner**    | **Description**                                       |
+|               |                                                       |
++---------------+-------------------------------------------------------+
+| *Arithmetic*  | Steps every run arithmetically according to specified |
+|               | input value                                           |
+|               |                                                       |
++---------------+-------------------------------------------------------+
+| *Duration*    | Runs for a specified period of time                   |
+|               |                                                       |
++---------------+-------------------------------------------------------+
+| *Iteration*   | Runs for a specified number of iterations             |
+|               |                                                       |
++---------------+-------------------------------------------------------+
+| *Sequence*    | Selects input value to a scenario from an input file  |
+|               | and runs all entries sequentially                     |
+|               |                                                       |
++---------------+-------------------------------------------------------+
+| **Dynamictp** | A runner that searches for the max throughput with    |
+|               | binary search                                         |
+|               |                                                       |
++---------------+-------------------------------------------------------+
+| **Search**    | A runner that runs a specific time before it returns  |
+|               |                                                       |
++---------------+-------------------------------------------------------+
+
+
+List of Scenarios
+^^^^^^^^^^^^^^^^^
 
 +----------------+-----------------------------------------------------+
 | **Category**   | **Delivered**                                       |
@@ -234,224 +226,138 @@ New Scenarios
 |                |                                                     |
 +----------------+-----------------------------------------------------+
 | *Compute*      | * cpuload                                           |
-|                |                                                     |
 |                | * cyclictest                                        |
-|                |                                                     |
 |                | * lmbench                                           |
-|                |                                                     |
 |                | * lmbench_cache                                     |
-|                |                                                     |
 |                | * perf                                              |
-|                |                                                     |
 |                | * unixbench                                         |
-|                |                                                     |
 |                | * ramspeed                                          |
-|                |                                                     |
 |                | * cachestat                                         |
-|                |                                                     |
 |                | * memeoryload                                       |
-|                |                                                     |
 |                | * computecapacity                                   |
-|                |                                                     |
 |                | * SpecCPU2006                                       |
 |                |                                                     |
 +----------------+-----------------------------------------------------+
 | *Networking*   | * iperf3                                            |
-|                |                                                     |
 |                | * netperf                                           |
-|                |                                                     |
 |                | * netperf_node                                      |
-|                |                                                     |
 |                | * ping                                              |
-|                |                                                     |
 |                | * ping6                                             |
-|                |                                                     |
 |                | * pktgen                                            |
-|                |                                                     |
 |                | * sfc                                               |
-|                |                                                     |
 |                | * sfc with tacker                                   |
-|                |                                                     |
-|                | * vtc instantion validation                         |
-|                |                                                     |
-|                | * vtc instantion validation with noisy neighbors    |
-|                |                                                     |
-|                | * vtc throughput                                    |
-|                |                                                     |
-|                | * vtc throughput in the presence of noisy neighbors |
-|                |                                                     |
 |                | * networkcapacity                                   |
-|                |                                                     |
 |                | * netutilization                                    |
-|                |                                                     |
 |                | * nstat                                             |
-|                |                                                     |
 |                | * pktgenDPDK                                        |
 |                |                                                     |
 +----------------+-----------------------------------------------------+
 | *Parser*       | Tosca2Heat                                          |
 |                |                                                     |
 +----------------+-----------------------------------------------------+
-| *Storage*      | fio                                                 |
-|                |                                                     |
-|                | bonnie++                                            |
-|                |                                                     |
-|                | storagecapacity                                     |
+| *Storage*      | * fio                                               |
+|                | * bonnie++                                          |
+|                | * storagecapacity                                   |
 |                |                                                     |
 +----------------+-----------------------------------------------------+
 | *StorPerf*     | storperf                                            |
 |                |                                                     |
 +----------------+-----------------------------------------------------+
-| *NSB*          | vPE thoughput test case                             |
+| *NSB*          | vFW throughput test case                            |
 |                |                                                     |
 +----------------+-----------------------------------------------------+
 
 
-
 New Test cases
-^^^^^^^^^^^^^^
+--------------
 
 * Generic NFVI test cases
 
- * OPNFV_YARDSTICK_TCO78 - SPEC CPU 2006
-
- * OPNFV_YARDSTICK_TCO79 - Bonnie++
-
-* Kubernetes Test cases
+ * OPNFV_YARDSTICK_TC084 - SPEC CPU 2006 for VM
 
- * OPNFV_YARDSTICK_TCO80 - NETWORK LATENCY BETWEEN CONTAINER
+* HA Test cases
 
- * OPNFV_YARDSTICK_TCO81 - NETWORK LATENCY BETWEEN CONTAINER AND VM
+ * OPNFV_YARDSTICK_TC087 - SDN Controller resilience in non-HA configuration
+ * OPNFV_YARDSTICK_TC090 - Control node Openstack service down - database instance
+ * OPNFV_YARDSTICK_TC091 - Control node Openstack service down - heat-api
 
 
 Version Change
---------------
+==============
 
 Module Version Changes
-^^^^^^^^^^^^^^^^^^^^^^
+----------------------
 
-This is the fifth tracked release of Yardstick. It is based on following
+This is the sixth tracked release of Yardstick. It is based on the following
 upstream versions:
 
-- OpenStack Ocata
-
-- OpenDayLight Nitrogen
-
-- ONOS Junco
+- OpenStack Pike
+- OpenDayLight Oxygen
 
 
 Document Version Changes
-^^^^^^^^^^^^^^^^^^^^^^^^
+------------------------
 
-This is the fifth tracked version of the Yardstick framework in OPNFV.
+This is the sixth tracked version of the Yardstick framework in OPNFV.
 It includes the following documentation updates:
 
 - Yardstick User Guide: add "network service benchmarking(NSB)" chapter;
   add "Yardstick - NSB Testing -Installation" chapter; add "Yardstick API" chapter;
   add "Yardstick user interface" chapter; Update Yardstick installation chapter;
-
 - Yardstick Developer Guide
-
 - Yardstick Release Notes for Yardstick: this document
 
 
 Feature additions
-^^^^^^^^^^^^^^^^^
-
-- Yardstick RESTful API support
-
-- Network service benchmarking
-
-- Stress testing with Bottlenecks team
-
-- Yardstick framework improvement:
-
-  - yardstick report CLI
-
-  - Node context support OpenStack configuration via Ansible
-
-  - Https support
+-----------------
 
-  - Kubernetes context type
-
-- Yardstick container local GUI
-
-- Python 3 support
+- Plugin-based test cases support Heat context
+- SR-IOV support for the Heat context
+- Support using existing network in Heat context
+- Support running test cases with existing VNFs/without destroying VNF in Heat context
+- Add vFW scale-up template
+- Improvements of unit tests and gating
+- GUI improvement about passing parameters
 
 
 Scenario Matrix
----------------
-
-For Euphrates 5.0.0, Yardstick was tested on the following scenarios:
-
-+--------------------------+------+---------+------+------+
-| Scenario                 | Apex | Compass | Fuel | Joid |
-+==========================+======+=========+======+======+
-| os-nosdn-nofeature-noha  |      |         | X    | X    |
-+--------------------------+------+---------+------+------+
-| os-nosdn-nofeature-ha    | X    | X       | X    | X    |
-+--------------------------+------+---------+------+------+
-| os-odl_l2-nofeature-ha   |      | X       | X    | X    |
-+--------------------------+------+---------+------+------+
-| os-odl_l2-nofeature-noha |      |         | X    |      |
-+--------------------------+------+---------+------+------+
-| os-odl_l3-nofeature-ha   | X    | X       | X    |      |
-+--------------------------+------+---------+------+------+
-| os-odl_l3-nofeature-noha |      |         | X    |      |
-+--------------------------+------+---------+------+------+
-| os-onos-sfc-ha           |      |         |      |      |
-+--------------------------+------+---------+------+------+
-| os-onos-nofeature-ha     |      | X       |      | X    |
-+--------------------------+------+---------+------+------+
-| os-onos-nofeature-noha   |      |         |      |      |
-+--------------------------+------+---------+------+------+
-| os-odl_l2-sfc-ha         |      |         | X    |      |
-+--------------------------+------+---------+------+------+
-| os-odl_l2-sfc-noha       |      |         | X    |      |
-+--------------------------+------+---------+------+------+
-| os-odl_l2-bgpvpn-ha      | X    |         | X    |      |
-+--------------------------+------+---------+------+------+
-| os-odl_l2-bgpvpn-noha    |      |         | X    |      |
-+--------------------------+------+---------+------+------+
-| os-nosdn-kvm-ha          | X    |         | X    |      |
-+--------------------------+------+---------+------+------+
-| os-nosdn-kvm-noha        |      |         | X    |      |
-+--------------------------+------+---------+------+------+
-| os-nosdn-ovs-ha          |      |         | X    |      |
-+--------------------------+------+---------+------+------+
-| os-nosdn-ovs-noha        |      |         | X    |      |
-+--------------------------+------+---------+------+------+
-| os-ocl-nofeature-ha      |      | X       |      |      |
-+--------------------------+------+---------+------+------+
-| os-nosdn-lxd-ha          |      |         |      | X    |
-+--------------------------+------+---------+------+------+
-| os-nosdn-lxd-noha        |      |         |      | X    |
-+--------------------------+------+---------+------+------+
-| os-nosdn-fdio-ha         | X    |         |      |      |
-+--------------------------+------+---------+------+------+
-| os-odl_l2-fdio-noha      | X    |         |      |      |
-+--------------------------+------+---------+------+------+
-| os-odl-gluon-noha        | X    |         |      |      |
-+--------------------------+------+---------+------+------+
-| os-nosdn-openo-ha        |      | X       |      |      |
-+--------------------------+------+---------+------+------+
-| os-nosdn-kvm_ovs_dpdk    |      |         | X    |      |
-| -noha                    |      |         |      |      |
-+--------------------------+------+---------+------+------+
-| os-nosdn-kvm_ovs_dpdk-ha |      |         | X    |      |
-+--------------------------+------+---------+------+------+
-| os-nosdn-kvm_ovs_dpdk    |      |         | X    |      |
-| _bar-ha                  |      |         |      |      |
-+--------------------------+------+---------+------+------+
-| os-nosdn-kvm_ovs_dpdk    |      |         | X    |      |
-| _bar-noha                |      |         |      |      |
-+--------------------------+------+---------+------+------+
-| opnfv_os-ovn-nofeature-  | X    |         |      |      |
-| noha_daily               |      |         |      |      |
-+--------------------------+------+---------+------+------+
+===============
+
+For Fraser 6.0.0, Yardstick was tested on the following scenarios:
+
++-------------------------+------+---------+----------+------+------+-------+
+|        Scenario         | Apex | Compass | Fuel-arm | Fuel | Joid | Daisy |
++=========================+======+=========+==========+======+======+=======+
+| os-nosdn-nofeature-noha |  X   |    X    |          |      |  X   |       |
++-------------------------+------+---------+----------+------+------+-------+
+| os-nosdn-nofeature-ha   |  X   |    X    |    X     |  X   |  X   |   X   |
++-------------------------+------+---------+----------+------+------+-------+
+| os-nosdn-bar-noha       |  X   |    X    |          |      |      |       |
++-------------------------+------+---------+----------+------+------+-------+
+| os-nosdn-bar-ha         |  X   |         |          |      |      |       |
++-------------------------+------+---------+----------+------+------+-------+
+| os-odl-bgpvpn-ha        |  X   |         |          |      |      |       |
++-------------------------+------+---------+----------+------+------+-------+
+| os-nosdn-calipso-noha   |  X   |         |          |      |      |       |
++-------------------------+------+---------+----------+------+------+-------+
+| os-nosdn-kvm-ha         |      |    X    |          |      |      |       |
++-------------------------+------+---------+----------+------+------+-------+
+| os-odl_l3-nofeature-ha  |      |    X    |          |      |      |       |
++-------------------------+------+---------+----------+------+------+-------+
+| os-odl-sfc-ha           |      |    X    |          |      |      |       |
++-------------------------+------+---------+----------+------+------+-------+
+| os-odl-nofeature-ha     |      |         |          |  X   |      |   X   |
++-------------------------+------+---------+----------+------+------+-------+
+| os-nosdn-ovs-ha         |      |         |          |  X   |      |       |
++-------------------------+------+---------+----------+------+------+-------+
+| k8-nosdn-nofeature-ha   |      |    X    |          |      |      |       |
++-------------------------+------+---------+----------+------+------+-------+
+| k8-nosdn-stor4nfv-noha  |      |    X    |          |      |      |       |
++-------------------------+------+---------+----------+------+------+-------+
+
 
 Test results
-------------
+============
 
 Test results are available in:
 
@@ -459,109 +365,107 @@ Test results are available in:
 
 The reporting pages can be found at:
 
-+---------------+-------------------------------------------------------------------------------------+
-| apex          | http://testresults.opnfv.org/reporting/euphrates/yardstick/status-apex.html         |
-+---------------+-------------------------------------------------------------------------------------+
-| compass       | http://testresults.opnfv.org/reporting/euphrates/yardstick/status-compass.html      |
-+---------------+-------------------------------------------------------------------------------------+
-| fuel\@x86     | http://testresults.opnfv.org/reporting/euphrates/yardstick/status-fuel@x86.html     |
-+---------------+-------------------------------------------------------------------------------------+
-| fuel\@aarch64 | http://testresults.opnfv.org/reporting/euphrates/yardstick/status-fuel@aarch64.html |
-+---------------+-------------------------------------------------------------------------------------+
-| joid          | http://testresults.opnfv.org/reporting/euphrates/yardstick/status-joid.html         |
-+---------------+-------------------------------------------------------------------------------------+
++---------------+----------------------------------------------------------------------------------+
+| apex          | http://testresults.opnfv.org/reporting/fraser/yardstick/status-apex.html         |
++---------------+----------------------------------------------------------------------------------+
+| compass       | http://testresults.opnfv.org/reporting/fraser/yardstick/status-compass.html      |
++---------------+----------------------------------------------------------------------------------+
+| fuel\@x86     | http://testresults.opnfv.org/reporting/fraser/yardstick/status-fuel@x86.html     |
++---------------+----------------------------------------------------------------------------------+
+| fuel\@aarch64 | http://testresults.opnfv.org/reporting/fraser/yardstick/status-fuel@aarch64.html |
++---------------+----------------------------------------------------------------------------------+
+| joid          | http://testresults.opnfv.org/reporting/fraser/yardstick/status-joid.html         |
++---------------+----------------------------------------------------------------------------------+
 
 Known Issues/Faults
-^^^^^^^^^^^^^^^^^^^
+-------------------
 
 
 Corrected Faults
-^^^^^^^^^^^^^^^^
+----------------
+
+Fraser 6.0.0:
+
++--------------------+--------------------------------------------------------------------------+
+| **JIRA REFERENCE** |                             **DESCRIPTION**                              |
++====================+==========================================================================+
+|   YARDSTICK-831    | tc053 kill haproxy wrong                                                 |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-842    | load image fails when there's cirros image exist                         |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-857    | tc006 failed due to volume attached to different location "/dev/vdc"     |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-874    | Specify supported architecture for Ubuntu backports repository           |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-875    | Check if multiverse repository is available in Ubuntu                    |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-893    | Fix proxy env handling and ansible multinode support                     |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-899    | Variable local_iface_name is read before it is set                       |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-900    | Section in "upload_yardstick_image.yml" invalid                          |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-911    | Remove 'inconsistent-return-statements' from Pylint checks               |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-989    | Yardstick real-time influxdb KPI reporting regressions                   |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-994    | NSB set-up build script for baremetal broken                             |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-996    | Error in address input format in "_ip_range_action_partial"              |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1003   | Prox vnf descriptor cleanup for tg and vnf                               |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1006   | Ansible destroy script will fail if vm has already been undefined        |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1012   | constants: fix pylint warnings for OSError                               |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1014   | Remove unused args in                                                    |
+|                    | network_services.traffic_profile.ixia_rfc2544.IXIARFC2544Profile         |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1016   | Allow vm to access outside world through default gateway                 |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1019   | For 'qemu-img version 2.10.1' unit 'MB' is not acceptable ansible script |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1021   | NSB: All Sample VNF test cases timeout after 1 hour of execution         |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1036   | Prox: Addition of storage of extra counters for Grafana                  |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1038   | Missing file which is described in the operation_conf.yaml               |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1047   | Error in string format in HeatTemplateError message                      |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1056   | yardstick report command print error when run test case                  |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1059   | Reduce the log level if TRex client is not connected                     |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1073   | Error when retrieving "options" section in "scenario"                    |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1080   | Running Test Case in Latest Yardstick Docker Image shows Error           |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1082   | tc043,tc055, tc063, tc075,  pass wrong node name in the ci scenario yaml |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1102   | Don't hide exception traceback from Task.start()                         |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1107   | bad exception traceback print due to atexit_handler                      |
++--------------------+--------------------------------------------------------------------------+
+|   YARDSTICK-1120   | HA test case tc050 should start monitor before attack                    |
++--------------------+--------------------------------------------------------------------------+
+
+Fraser 6.0.0 known restrictions/issues
+======================================
 
-Euphrates 5.1.0:
-
-+---------------------+-------------------------------------------------------------------------+
-| **JIRA REFERENCE**  | **DESCRIPTION**                                                         |
-|                     |                                                                         |
-+---------------------+-------------------------------------------------------------------------+
-| JIRA: YARDSTICK-841 | Fix various NSB license issues                                          |
-+---------------------+-------------------------------------------------------------------------+
-| JIRA: YARDSTICK-73  | How To Work with Test Cases                                             |
-+---------------------+-------------------------------------------------------------------------+
-| JIRA: YARDSTICK-500 | VNF testing documentation                                               |
-+---------------------+-------------------------------------------------------------------------+
-| JIRA: YARDSTICK-826 | Allow overriding Heat IP addresses to match traffic generator profile   |
-+---------------------+-------------------------------------------------------------------------+
-| JIRA: YARDSTICK-828 | Refactor doc/testing/user/userguide "Yardstick Installation"            |
-+---------------------+-------------------------------------------------------------------------+
-| JIRA: YARDSTICK-830 | build_yardstick_image Ansible mount module doesn't work on Ubuntu 14.04 |
-+---------------------+-------------------------------------------------------------------------+
-| JIRA: YARDSTICK-833 | ansible_common transform password into lower case                       |
-+---------------------+-------------------------------------------------------------------------+
-| JIRA: YARDSTICK-847 | tc006, tc079, tc082 miss grafana dashboard in local deployment          |
-+---------------------+-------------------------------------------------------------------------+
-| JIRA: YARDSTICK-849 | kill process do not accurately kill the process like "nova-api"         |
-+---------------------+-------------------------------------------------------------------------+
-| JIRA: YARDSTICK-850 | tc023 miss description and tc050-58 wrong description                   |
-+---------------------+-------------------------------------------------------------------------+
-| JIRA: YARDSTICK-852 | tc078 cpu2006 fails in some situation                                   |
-+---------------------+-------------------------------------------------------------------------+
-| JIRA: YARDSTICK-854 | yardstick docker lack of trex_client                                    |
-+---------------------+-------------------------------------------------------------------------+
-| JIRA: YARDSTICK-867 | testcase tc078 have no data stored or dashboard to show results         |
-+---------------------+-------------------------------------------------------------------------+
-| JIRA: YARDSTICK-871 | Remove img_modify_playbook assignation in build_yardstick_image.yml     |
-+---------------------+-------------------------------------------------------------------------+
-| JIRA: YARDSTICK-829 | "nsb_setup.sh" doesn't parse the controller IP correctly                |
-+---------------------+-------------------------------------------------------------------------+
-| JIRA: YARDSTICK-839 | NSB Prox BM test cases to be fixed for incorporating scale-up           |
-+---------------------+-------------------------------------------------------------------------+
-| JIRA: YARDSTICK-840 | NSB Prox test documentation of vPE and LW-AFTR test cases               |
-+---------------------+-------------------------------------------------------------------------+
-| JIRA: YARDSTICK-848 | NSB "Prox" : Cleanup duplicated traffic profile                         |
-+---------------------+-------------------------------------------------------------------------+
-
-
-
-
-Euphrates 5.0.0:
-
-+---------------------+--------------------------------------------+
-| **JIRA REFERENCE**  | **DESCRIPTION**                            |
-|                     |                                            |
-+---------------------+--------------------------------------------+
-| JIRA: YARDSTICK-599 | Could not load EntryPoint.parse when using |
-|                     | 'openstack -h'                             |
-+---------------------+--------------------------------------------+
-| JIRA: YARDSTICK-602 | Don't rely on staic ip addresses as they   |
-|                     | are dynamic                                |
-+---------------------+--------------------------------------------+
-
-
-Euphratess 5.0.0 known restrictions/issues
-------------------------------------------
 +-----------+-----------+----------------------------------------------+
 | Installer | Scenario  | Issue                                        |
 +===========+===========+==============================================+
-| any       | \*-bgpvpn | Floating ips not supported. Some Test cases  |
-|           |           | related to floating ips are excluded.        |
-+-----------+-----------+----------------------------------------------+
-| any       | odl_l3-\* | Some test cases related to using floating IP |
-|           |           | addresses fail because of a known ODL bug.   |
-|           |           |                                              |
-+-----------+-----------+----------------------------------------------+
-| compass   | odl_l2-\* | In some test cases, VM instance will failed  |
-|           |           | raising network interfaces.                  |
 |           |           |                                              |
 +-----------+-----------+----------------------------------------------+
 
-
 Useful links
-------------
+============
 
  - wiki project page: https://wiki.opnfv.org/display/yardstick/Yardstick
 
- - wiki Yardstick Euphrates release planing page: https://wiki.opnfv.org/display/yardstick/Yardstick+Euphrates+Release+Planning
+ - wiki Yardstick Fraser release planning page: https://wiki.opnfv.org/display/yardstick/Release+Fraser
 
  - Yardstick repo: https://git.opnfv.org/cgit/yardstick
 
index c1d5def..d846e75 100755 (executable)
@@ -42,43 +42,47 @@ This document consists of the following chapters:
 * Chapter :doc:`02-methodology` describes the methodology implemented by the
   *Yardstick* Project for :term:`NFVI` verification.
 
-* Chapter :doc:`03-architecture` provides information on the software architecture
-  of *Yardstick*.
+* Chapter :doc:`03-architecture` provides information on the software
+  architecture of *Yardstick*.
 
 * Chapter :doc:`04-installation` provides instructions to install *Yardstick*.
 
-* Chapter :doc:`05-yardstick_plugin` provides information on how to integrate
+* Chapter :doc:`05-operation` provides information on how to use *Yardstick*
+  to run and create testcases.
+
+* Chapter :doc:`06-yardstick-plugin` provides information on how to integrate
   other OPNFV testing projects into *Yardstick*.
 
-* Chapter :doc:`06-result-store-InfluxDB` provides inforamtion on how to run
+* Chapter :doc:`07-result-store-InfluxDB` provides information on how to run
   plug-in test cases and store test results into community's InfluxDB.
 
-* Chapter :doc:`07-grafana` provides inforamtion on *Yardstick* grafana dashboard
-  and how to add a dashboard into *Yardstick* grafana dashboard.
+* Chapter :doc:`08-grafana` provides information on *Yardstick* grafana
+  dashboard and how to add a dashboard into *Yardstick* grafana dashboard.
 
-* Chapter :doc:`08-api` provides inforamtion on *Yardstick* ReST API and how to
+* Chapter :doc:`09-api` provides information on *Yardstick* ReST API and how to
   use *Yardstick* API.
 
-* Chapter :doc:`09-yardstick_user_interface` provides inforamtion on how to use
+* Chapter :doc:`10-yardstick-user-interface` provides information on how to use
   yardstick report CLI to view the test result in table format and also values
   pinned on to a graph
 
-* Chapter :doc:`10-vtc-overview` provides information on the :term:`VTC`.
+* Chapter :doc:`11-vtc-overview` provides information on the :term:`VTC`.
 
-* Chapter :doc:`13-nsb-overview` describes the methodology implemented by the
+* Chapter :doc:`12-nsb-overview` describes the methodology implemented by the
   Yardstick - Network service benchmarking to test real world usecase for a
   given VNF.
 
-* Chapter :doc:`14-nsb_installation` provides instructions to install
-  *Yardstick - Network service benchmarking testing*.
+* Chapter :doc:`13-nsb_installation` provides instructions to install
+  *Yardstick - Network Service Benchmarking (NSB) testing*.
+
+* Chapter :doc:`14-nsb-operation` provides information on running *NSB*.
 
 * Chapter :doc:`15-list-of-tcs` includes a list of available *Yardstick* test
   cases.
 
-
 Contact Yardstick
 =================
 
 Feedback? `Contact us`_
 
-.. _Contact us: opnfv-users@lists.opnfv.org
+.. _Contact us: mailto:opnfv-users@lists.opnfv.org?subject=[yardstick]
index 8336b60..622002e 100755 (executable)
@@ -9,8 +9,9 @@ Architecture
 
 Abstract
 ========
-This chapter describes the yardstick framework software architecture. we will introduce it from Use-Case View,
-Logical View, Process View and Deployment View. More technical details will be introduced in this chapter.
+This chapter describes the yardstick framework software architecture. We will
+introduce it from Use-Case View, Logical View, Process View and Deployment
+View. More technical details will be introduced in this chapter.
 
 Overview
 ========
@@ -23,8 +24,8 @@ files. Yardstick is inspired by Rally. Yardstick is intended to run on a
 computer with access and credentials to a cloud. The test case is described
 in a configuration file given as an argument.
 
-How it works: the benchmark task configuration file is parsed and converted into
-an internal model. The context part of the model is converted into a Heat
+How it works: the benchmark task configuration file is parsed and converted
+into an internal model. The context part of the model is converted into a Heat
 template and deployed into a stack. Each scenario is run using a runner, either
 serially or in parallel. Each runner runs in its own subprocess executing
 commands in a VM using SSH. The output of each scenario is written as json
@@ -43,13 +44,15 @@ names, image names, affinity rules and network configurations. A context is
 converted into a simplified Heat template, which is used to deploy onto the
 Openstack environment.
 
-**Data** - Output produced by running a benchmark, written to a file in json format
+**Data** - Output produced by running a benchmark, written to a file in json
+format
 
 **Runner** - Logic that determines how a test scenario is run and reported, for
 example the number of test iterations, input value stepping and test duration.
 Predefined runner types exist for re-usage, see `Runner types`_.
 
-**Scenario** - Type/class of measurement for example Ping, Pktgen, (Iperf, LmBench, ...)
+**Scenario** - Type/class of measurement for example Ping, Pktgen, (Iperf,
+LmBench, ...)
 
 **SLA** - Relates to what result boundary a test case must meet to pass. For
 example a latency limit, amount or ratio of lost packets and so on. Action
@@ -128,8 +131,8 @@ Snippet of an Iteration runner configuration:
 Use-Case View
 =============
 Yardstick Use-Case View shows two kinds of users. One is the Tester who will
-do testing in cloud, the other is the User who is more concerned with test result
-and result analyses.
+do testing in cloud, the other is the User who is more concerned with test
+result and result analyses.
 
 For testers, they will run a single test case or test case suite to verify
 infrastructure compliance or bencnmark their own infrastructure performance.
@@ -254,7 +257,8 @@ Yardstick Directory structure
 
 *tools/* - Currently contains tools to build image for VMs which are deployed
            by Heat. Currently contains how to build the yardstick-trusty-server
-           image with the different tools that are needed from within the image.
+           image with the different tools that are needed from within the
+           image.
 
 *plugin/* - Plug-in configuration files are stored here.
 
index cac8146..a484623 100644 (file)
@@ -39,18 +39,18 @@ Several prerequisites are needed for Yardstick:
 4. Connectivity from the Jumphost to the SUT public/external network
 
 .. note:: *Jumphost* refers to any server which meets the previous
-requirements. Normally it is the same server from where the OPNFV
-deployment has been triggered.
+   requirements. Normally it is the same server from where the OPNFV
+   deployment has been triggered.
 
 .. warning:: Connectivity from Jumphost is essential and it is of paramount
-importance to make sure it is working before even considering to install
-and run Yardstick. Make also sure you understand how your networking is
-designed to work.
+   importance to make sure it is working before even considering to install
+   and run Yardstick. Make also sure you understand how your networking is
+   designed to work.
 
 .. note:: If your Jumphost is operating behind a company http proxy and/or
-Firewall, please first consult `Proxy Support`_ section which is towards the
-end of this document. That section details some tips/tricks which *may* be of
-help in a proxified environment.
+   Firewall, please first consult `Proxy Support`_ section which is towards
+   the end of this document. That section details some tips/tricks which *may*
+   be of help in a proxified environment.
 
 
 Install Yardstick using Docker (first option) (**recommended**)
@@ -85,27 +85,30 @@ Run the Docker image to get a Yardstick container::
    docker run -itd --privileged -v /var/run/docker.sock:/var/run/docker.sock \
       -p 8888:5000 --name yardstick opnfv/yardstick:stable
 
-.. table:: Description of the parameters used with ``docker run`` command
-
-   ======================= ====================================================
-   Parameters              Detail
-   ======================= ====================================================
-   -itd                    -i: interactive, Keep STDIN open even if not
-                           attached
-                           -t: allocate a pseudo-TTY detached mode, in the
-                           background
-   ======================= ====================================================
-   --privileged            If you want to build ``yardstick-image`` in
-                           Yardstick container, this parameter is needed
-   ======================= ====================================================
-   -p 8888:5000            Redirect the a host port (8888) to a container port
-                           (5000)
-   ======================= ====================================================
-   -v /var/run/docker.sock If you want to use yardstick env grafana/influxdb to
-   :/var/run/docker.sock   create a grafana/influxdb container out of Yardstick
-                           container
-   ======================= ====================================================
-   --name yardstick        The name for this container
+Description of the parameters used with ``docker run`` command
+
+  +------------------------+--------------------------------------------------+
+  | Parameters             | Detail                                           |
+  +========================+==================================================+
+  | -itd                   |  -i: interactive, Keep STDIN open even if not    |
+  |                        |  attached                                        |
+  |                        +--------------------------------------------------+
+  |                        |  -t: allocate a pseudo-TTY detached mode, in the |
+  |                        |  background                                      |
+  +------------------------+--------------------------------------------------+
+  | --privileged           | If you want to build ``yardstick-image`` in      |
+  |                        | Yardstick container, this parameter is needed    |
+  +------------------------+--------------------------------------------------+
+  | -p 8888:5000           | Redirect a host port (8888) to a container       |
+  |                        | port (5000)                                      |
+  +------------------------+--------------------------------------------------+
+  | -v /var/run/docker.sock| If you want to use yardstick env                 |
+  | :/var/run/docker.sock  | grafana/influxdb to create a grafana/influxdb    |
+  |                        | container out of Yardstick container             |
+  +------------------------+--------------------------------------------------+
+  | --name yardstick       | The name for this container                      |
+  +------------------------+--------------------------------------------------+
+
 
 If the host is restarted
 ^^^^^^^^^^^^^^^^^^^^^^^^
@@ -135,18 +138,18 @@ automatically::
    yardstick env prepare
 
 .. note:: Since Euphrates release, the above command will not be able to
-automatically configure the ``/etc/yardstick/openstack.creds`` file. So before
-running the above command, it is necessary to create the
-``/etc/yardstick/openstack.creds`` file and save OpenStack environment
-variables into it manually. If you have the openstack credential file saved
-outside the Yardstick Docker container, you can do this easily by mapping the
-credential file into Yardstick container using::
+  automatically configure the ``/etc/yardstick/openstack.creds`` file. So before
+  running the above command, it is necessary to create the
+  ``/etc/yardstick/openstack.creds`` file and save OpenStack environment
+  variables into it manually. If you have the openstack credential file saved
+  outside the Yardstick Docker container, you can do this easily by mapping the
+  credential file into Yardstick container using::
 
-   '-v /path/to/credential_file:/etc/yardstick/openstack.creds'
+     '-v /path/to/credential_file:/etc/yardstick/openstack.creds'
 
-when running the Yardstick container. For details of the required OpenStack
-environment variables please refer to section `Export OpenStack environment
-variables`_.
+  when running the Yardstick container. For details of the required OpenStack
+  environment variables please refer to section `Export OpenStack environment
+  variables`_.
 
 The ``env prepare`` command may take up to 6-8 minutes to finish building
 yardstick-image and other environment preparation. Meanwhile if you wish to
@@ -222,8 +225,8 @@ Yardstick is installed::
    sudo -EH tools/yardstick-img-modify tools/ubuntu-server-cloudimg-modify.sh
 
 .. warning:: Before building the guest image inside the Yardstick container,
-make sure the container is granted with privilege. The script will create files
-by default in ``/tmp/workspace/yardstick`` and the files will be owned by root.
+  make sure the container is granted with privilege. The script will create files
+  by default in ``/tmp/workspace/yardstick`` and the files will be owned by root.
 
 The created image can be added to OpenStack using the OpenStack client or via
 the OpenStack Dashboard::
@@ -270,7 +273,7 @@ For usage of Yardstick GUI, please watch our demo video at
 `Yardstick GUI demo`_.
 
 .. note:: The Yardstick GUI is still in development, the GUI layout and
-features may change.
+  features may change.
 
 Delete the Yardstick container
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -433,7 +436,7 @@ of Yardstick ``help`` command and ``ping.py`` test sample::
    yardstick task start samples/ping.yaml
 
 .. note:: The above commands could be run in both the Yardstick container and
-the Ubuntu directly.
+  the Ubuntu directly.
 
 Each testing tool supported by Yardstick has a sample configuration file.
 These configuration files can be found in the ``samples`` directory.
@@ -468,10 +471,10 @@ Then you can run a test case and visit http://host_ip:1948
 (``admin``/``admin``) to see the results.
 
 .. note:: Executing ``yardstick env`` command to deploy InfluxDB and Grafana
-requires Jumphost's docker API version => 1.24. Run the following command to
-check the docker API version on the Jumphost::
+  requires Jumphost's docker API version => 1.24. Run the following command to
+  check the docker API version on the Jumphost::
 
-   docker version
+    docker version
 
 
 Manual deployment of InfluxDB and Grafana containers
@@ -537,200 +540,6 @@ Deploy InfluxDB and Grafana directly in Ubuntu (**Todo**)
 ---------------------------------------------------------
 
 
-Yardstick common CLI
---------------------
-
-List test cases
-^^^^^^^^^^^^^^^
-
-``yardstick testcase list``: This command line would list all test cases in
-Yardstick. It would show like below::
-
-   +---------------------------------------------------------------------------------------
-   | Testcase Name         | Description
-   +---------------------------------------------------------------------------------------
-   | opnfv_yardstick_tc001 | Measure network throughput using pktgen
-   | opnfv_yardstick_tc002 | measure network latency using ping
-   | opnfv_yardstick_tc005 | Measure Storage IOPS, throughput and latency using fio.
-   ...
-   +---------------------------------------------------------------------------------------
-
-
-Show a test case config file
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Take opnfv_yardstick_tc002 for an example. This test case measure network
-latency. You just need to type in ``yardstick testcase show
-opnfv_yardstick_tc002``, and the console would show the config yaml of this
-test case::
-
-   ---
-
-   schema: "yardstick:task:0.1"
-   description: >
-      Yardstick TC002 config file;
-      measure network latency using ping;
-
-   {% set image = image or "cirros-0.3.5" %}
-
-   {% set provider = provider or none %}
-   {% set physical_network = physical_network or 'physnet1' %}
-   {% set segmentation_id = segmentation_id or none %}
-   {% set packetsize = packetsize or 100 %}
-
-   scenarios:
-   {% for i in range(2) %}
-   -
-    type: Ping
-    options:
-      packetsize: {{packetsize}}
-    host: athena.demo
-    target: ares.demo
-
-    runner:
-      type: Duration
-      duration: 60
-      interval: 10
-
-    sla:
-      max_rtt: 10
-      action: monitor
-   {% endfor %}
-
-   context:
-    name: demo
-    image: {{image}}
-    flavor: yardstick-flavor
-    user: cirros
-
-    placement_groups:
-      pgrp1:
-        policy: "availability"
-
-    servers:
-      athena:
-        floating_ip: true
-        placement: "pgrp1"
-      ares:
-        placement: "pgrp1"
-
-    networks:
-      test:
-        cidr: '10.0.1.0/24'
-        {% if provider == "vlan" %}
-        provider: {{provider}}
-        physical_network: {{physical_network}}å
-          {% if segmentation_id %}
-        segmentation_id: {{segmentation_id}}
-          {% endif %}
-        {% endif %}
-
-
-Start a task to run yardstick test case
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-If you want run a test case, then you need to use ``yardstick task start
-<test_case_path>`` this command support some parameters as below::
-
-   +---------------------+--------------------------------------------------+
-   | Parameters          | Detail                                           |
-   +=====================+==================================================+
-   | -d                  | show debug log of yardstick running              |
-   |                     |                                                  |
-   +---------------------+--------------------------------------------------+
-   | --task-args         | If you want to customize test case parameters,   |
-   |                     | use "--task-args" to pass the value. The format  |
-   |                     | is a json string with parameter key-value pair.  |
-   |                     |                                                  |
-   +---------------------+--------------------------------------------------+
-   | --task-args-file    | If you want to use yardstick                     |
-   |                     | env prepare command(or                           |
-   |                     | related API) to load the                         |
-   +---------------------+--------------------------------------------------+
-   | --parse-only        |                                                  |
-   |                     |                                                  |
-   |                     |                                                  |
-   +---------------------+--------------------------------------------------+
-   | --output-file \     | Specify where to output the log. if not pass,    |
-   | OUTPUT_FILE_PATH    | the default value is                             |
-   |                     | "/tmp/yardstick/yardstick.log"                   |
-   |                     |                                                  |
-   +---------------------+--------------------------------------------------+
-   | --suite \           | run a test suite, TEST_SUITE_PATH specify where  |
-   | TEST_SUITE_PATH     | the test suite locates                           |
-   |                     |                                                  |
-   +---------------------+--------------------------------------------------+
-
-
-Run Yardstick in a local environment
-------------------------------------
-
-We also have a guide about how to run Yardstick in a local environment.
-This work is contributed by Tapio Tallgren.
-You can find this guide at `How to run Yardstick in a local environment`_.
-
-
-Create a test suite for Yardstick
-------------------------------------
-
-A test suite in yardstick is a yaml file which include one or more test cases.
-Yardstick is able to support running test suite task, so you can customize your
-own test suite and run it in one task.
-
-``tests/opnfv/test_suites`` is the folder where Yardstick puts CI test suite.
-A typical test suite is like below (the ``fuel_test_suite.yaml`` example)::
-
-   ---
-   # Fuel integration test task suite
-
-   schema: "yardstick:suite:0.1"
-
-   name: "fuel_test_suite"
-   test_cases_dir: "samples/"
-   test_cases:
-   -
-    file_name: ping.yaml
-   -
-    file_name: iperf3.yaml
-
-As you can see, there are two test cases in the ``fuel_test_suite.yaml``. The
-``schema`` and the ``name`` must be specified. The test cases should be listed
-via the tag ``test_cases`` and their relative path is also marked via the tag
-``test_cases_dir``.
-
-Yardstick test suite also supports constraints and task args for each test
-case. Here is another sample (the ``os-nosdn-nofeature-ha.yaml`` example) to
-show this, which is digested from one big test suite::
-
-   ---
-
-   schema: "yardstick:suite:0.1"
-
-   name: "os-nosdn-nofeature-ha"
-   test_cases_dir: "tests/opnfv/test_cases/"
-   test_cases:
-   -
-     file_name: opnfv_yardstick_tc002.yaml
-   -
-     file_name: opnfv_yardstick_tc005.yaml
-   -
-     file_name: opnfv_yardstick_tc043.yaml
-        constraint:
-           installer: compass
-           pod: huawei-pod1
-        task_args:
-           huawei-pod1: '{"pod_info": "etc/yardstick/.../pod.yaml",
-           "host": "node4.LF","target": "node5.LF"}'
-
-As you can see in test case ``opnfv_yardstick_tc043.yaml``, there are two
-tags, ``constraint`` and ``task_args``. ``constraint`` is to specify which
-installer or pod it can be run in the CI environment. ``task_args`` is to
-specify the task arguments for each pod.
-
-All in all, to create a test suite in Yardstick, you just need to create a
-yaml file and add test cases, constraint or task arguments if necessary.
-
-
 Proxy Support
 -------------
 
@@ -790,7 +599,7 @@ stop and delete the container::
    sudo docker rm yardstick
 
 .. warning:: Be careful, the above ``rm`` command will delete the container
-completely. Everything on this container will be lost.
+  completely. Everything on this container will be lost.
 
 Then follow the previous instructions `Prepare the Yardstick container`_ to
 rebuild the Yardstick container.
@@ -804,4 +613,3 @@ References
 .. _`Cirros 0.3.5`: http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img
 .. _`Ubuntu 16.04`: https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
 .. _`Yardstick GUI demo`: https://www.youtube.com/watch?v=M3qbJDp6QBk
-.. _`How to run Yardstick in a local environment`: https://wiki.opnfv.org/display/yardstick/How+to+run+Yardstick+in+a+local+environment
diff --git a/docs/testing/user/userguide/05-operation.rst b/docs/testing/user/userguide/05-operation.rst
new file mode 100644 (file)
index 0000000..f390d16
--- /dev/null
@@ -0,0 +1,296 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International
+.. License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) OPNFV, Intel, Ericsson AB, Huawei Technologies Co. Ltd and others.
+
+..
+      Convention for heading levels in Yardstick:
+      =======  Heading 0 (reserved for the title in a document)
+      -------  Heading 1
+      ^^^^^^^  Heading 2
+      +++++++  Heading 3
+      '''''''  Heading 4
+      Avoid deeper levels because they do not render well.
+
+===============
+Yardstick Usage
+===============
+
+Once you have yardstick installed, you can start using it to run testcases
+immediately, through the CLI. You can also define and run new testcases and
+test suites. This chapter details basic usage (running testcases), as well as
+more advanced usage (creating your own testcases).
+
+Yardstick common CLI
+--------------------
+
+List test cases
+^^^^^^^^^^^^^^^
+
+``yardstick testcase list``: This command line would list all test cases in
+Yardstick. It would show like below::
+
+   +---------------------------------------------------------------------------------------
+   | Testcase Name         | Description
+   +---------------------------------------------------------------------------------------
+   | opnfv_yardstick_tc001 | Measure network throughput using pktgen
+   | opnfv_yardstick_tc002 | measure network latency using ping
+   | opnfv_yardstick_tc005 | Measure Storage IOPS, throughput and latency using fio.
+   ...
+   +---------------------------------------------------------------------------------------
+
+
+Show a test case config file
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Take opnfv_yardstick_tc002 for an example. This test case measures network
+latency. You just need to type in ``yardstick testcase show
+opnfv_yardstick_tc002``, and the console would show the config yaml of this
+test case:
+
+.. literalinclude::
+   ../../../../tests/opnfv/test_cases/opnfv_yardstick_tc002.yaml
+   :lines: 9-
+
+Run a Yardstick test case
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If you want to run a test case, then you need to use ``yardstick task start
+<test_case_path>`` this command support some parameters as below:
+
+   +---------------------+--------------------------------------------------+
+   | Parameters          | Detail                                           |
+   +=====================+==================================================+
+   | -d                  | show debug log of yardstick running              |
+   |                     |                                                  |
+   +---------------------+--------------------------------------------------+
+   | --task-args         | If you want to customize test case parameters,   |
+   |                     | use "--task-args" to pass the value. The format  |
+   |                     | is a json string with parameter key-value pair.  |
+   |                     |                                                  |
+   +---------------------+--------------------------------------------------+
+   | --task-args-file    | If you want to use yardstick                     |
+   |                     | env prepare command(or                           |
+   |                     | related API) to load the                         |
+   +---------------------+--------------------------------------------------+
+   | --parse-only        |                                                  |
+   |                     |                                                  |
+   |                     |                                                  |
+   +---------------------+--------------------------------------------------+
+   | --output-file \     | Specify where to output the log. if not pass,    |
+   | OUTPUT_FILE_PATH    | the default value is                             |
+   |                     | "/tmp/yardstick/yardstick.log"                   |
+   |                     |                                                  |
+   +---------------------+--------------------------------------------------+
+   | --suite \           | run a test suite, TEST_SUITE_PATH specify where  |
+   | TEST_SUITE_PATH     | the test suite locates                           |
+   |                     |                                                  |
+   +---------------------+--------------------------------------------------+
+
+
+Run Yardstick in a local environment
+------------------------------------
+
+We also have a guide about `How to run Yardstick in a local environment`_.
+This work is contributed by Tapio Tallgren.
+
+Create a new testcase for Yardstick
+-----------------------------------
+
+As a user, you may want to define a new testcase in addition to the ones
+already available in Yardstick. This section will show you how to do this.
+
+Each testcase consists of two sections:
+
+* ``scenarios`` describes what will be done by the test
+* ``context`` describes the environment in which the test will be run.
+
+Defining the testcase scenarios
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+TODO
+
+Defining the testcase context(s)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Each testcase consists of one or more contexts, which describe the environment
+in which the testcase will be run.
+Current available contexts are:
+
+* ``Dummy``: this is a no-op context, and is used when there is no environment
+  to set up e.g. when testing whether OpenStack services are available
+* ``Node``: this context is used to perform operations on baremetal servers
+* ``Heat``: uses OpenStack to provision the required hosts, networks, etc.
+* ``Kubernetes``: uses Kubernetes to provision the resources required for the
+  test.
+
+Regardless of the context type, the ``context`` section of the testcase will
+consist of the following::
+
+   context:
+     name: demo
+     type: Dummy|Node|Heat|Kubernetes
+
+The content of the ``context`` section will vary based on the context type.
+
+Dummy Context
++++++++++++++
+
+No additional information is required for the Dummy context::
+
+  context:
+    name: my_context
+    type: Dummy
+
+Node Context
+++++++++++++
+
+TODO
+
+Heat Context
+++++++++++++
+
+In addition to ``name`` and ``type``, a Heat context requires the following
+arguments:
+
+* ``image``: the image to be used to boot VMs
+* ``flavor``: the flavor to be used for VMs in the context
+* ``user``: the username for connecting into the VMs
+* ``networks``: The networks to be created, networks are identified by name
+
+  * ``name``: network name (required)
+  * (TODO) Any optional attributes
+
+* ``servers``: The servers to be created
+
+  * ``name``: server name
+  * (TODO) Any optional attributes
+
+In addition to the required arguments, the following optional arguments can be
+passed to the Heat context:
+
+* ``placement_groups``:
+
+  * ``name``: the name of the placement group to be created
+  * ``policy``: either ``affinity`` or ``availability``
+* ``server_groups``:
+
+  * ``name``: the name of the server group
+  * ``policy``: either ``affinity`` or ``anti-affinity``
+
+Combining these elements together, a sample Heat context config looks like:
+
+.. literalinclude::
+   ../../../../yardstick/tests/integration/dummy-scenario-heat-context.yaml
+   :start-after: ---
+   :emphasize-lines: 14-
+
+Using existing HOT Templates
+'''''''''''''''''''''''''''''
+
+TODO
+
+Kubernetes Context
+++++++++++++++++++
+
+TODO
+
+Using multiple contexts in a testcase
++++++++++++++++++++++++++++++++++++++
+
+When using multiple contexts in a testcase, the ``context`` section is replaced
+by a ``contexts`` section, and each context is separated with a ``-`` line::
+
+  contexts:
+  -
+    name: context1
+    type: Heat
+    ...
+  -
+    name: context2
+    type: Node
+    ...
+
+
+Reusing a context
++++++++++++++++++
+
+Typically, a context is torn down after a testcase is run, however, the user
+may wish to keep a context intact after a testcase is complete.
+
+.. note::
+  This feature has been implemented for the Heat context only
+
+To keep or reuse a context, the ``flags`` option must be specified:
+
+* ``no_setup``: skip the deploy stage, and fetch the details of a deployed
+   context/Heat stack.
+* ``no_teardown``: skip the undeploy stage, thus keeping the stack intact for
+   the next test
+
+If either of these ``flags`` are ``True``, the context information must still
+be given. By default, these flags are disabled::
+
+  context:
+    name: mycontext
+    type: Heat
+    flags:
+      no_setup: True
+      no_teardown: True
+    ...
+
+Create a test suite for Yardstick
+---------------------------------
+
+A test suite in Yardstick is a .yaml file which includes one or more test
+cases. Yardstick is able to support running test suite task, so you can
+customize your own test suite and run it in one task.
+
+``tests/opnfv/test_suites`` is the folder where Yardstick puts CI test suite.
+A typical test suite is like below (the ``fuel_test_suite.yaml`` example):
+
+.. literalinclude::
+   ../../../../tests/opnfv/test_suites/fuel_test_suite.yaml
+   :lines: 9-
+
+As you can see, there are two test cases in the ``fuel_test_suite.yaml``. The
+``schema`` and the ``name`` must be specified. The test cases should be listed
+via the tag ``test_cases`` and their relative path is also marked via the tag
+``test_cases_dir``.
+
+Yardstick test suite also supports constraints and task args for each test
+case. Here is another sample (the ``os-nosdn-nofeature-ha.yaml`` example) to
+show this, which is digested from one big test suite::
+
+   ---
+
+   schema: "yardstick:suite:0.1"
+
+   name: "os-nosdn-nofeature-ha"
+   test_cases_dir: "tests/opnfv/test_cases/"
+   test_cases:
+   -
+     file_name: opnfv_yardstick_tc002.yaml
+   -
+     file_name: opnfv_yardstick_tc005.yaml
+   -
+     file_name: opnfv_yardstick_tc043.yaml
+        constraint:
+           installer: compass
+           pod: huawei-pod1
+        task_args:
+           huawei-pod1: '{"pod_info": "etc/yardstick/.../pod.yaml",
+           "host": "node4.LF","target": "node5.LF"}'
+
+As you can see in test case ``opnfv_yardstick_tc043.yaml``, there are two
+tags, ``constraint`` and ``task_args``. ``constraint`` is to specify which
+installer or pod it can be run in the CI environment. ``task_args`` is to
+specify the task arguments for each pod.
+
+All in all, to create a test suite in Yardstick, you just need to create a
+yaml file and add test cases, constraint or task arguments if necessary.
+
+References
+----------
+
+.. _`How to run Yardstick in a local environment`: https://wiki.opnfv.org/display/yardstick/How+to+run+Yardstick+in+a+local+environment
@@ -31,7 +31,7 @@ In this introduction we will install Storperf on Jump Host.
 
 
 Step 0: Environment preparation
->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
+-------------------------------
 
 Running Storperf on Jump Host
 Requirements:
@@ -47,24 +47,26 @@ environment and other dependencies:
 1. Make sure docker is installed.
 2. Make sure Keystone, Nova, Neutron, Glance, Heat are installed correctly.
 3. Make sure Jump Host have access to the OpenStack Controller API.
-4. Make sure Jump Host must have internet connectivity for downloading docker image.
-5. You need to know where to get basic openstack Keystone authorization info, such as
-   OS_PASSWORD, OS_PROJECT_NAME, OS_AUTH_URL, OS_USERNAME.
-6. To run a Storperf container, you need to have OpenStack Controller environment
-   variables defined and passed to Storperf container. The best way to do this is to
-   put environment variables in a "storperf_admin-rc" file. The storperf_admin-rc
-   should include credential environment variables at least:
-
-* OS_AUTH_URL
-* OS_USERNAME
-* OS_PASSWORD
-* OS_PROJECT_NAME
-* OS_PROJECT_ID
-* OS_USER_DOMAIN_ID
-
-*Yardstick* has a "prepare_storperf_admin-rc.sh" script which can be used to
-generate the "storperf_admin-rc" file, this script is located at
-test/ci/prepare_storperf_admin-rc.sh
+4. Make sure the Jump Host has internet connectivity for downloading docker
+   image.
+5. You need to know where to get basic openstack Keystone authorization info,
+   such as OS_PASSWORD, OS_PROJECT_NAME, OS_AUTH_URL, OS_USERNAME.
+6. To run a Storperf container, you need to have OpenStack Controller
+   environment variables defined and passed to Storperf container. The best way
+   to do this is to put environment variables in a "storperf_admin-rc" file.
+   The storperf_admin-rc should include credential environment variables at
+   least:
+
+   * OS_AUTH_URL
+   * OS_USERNAME
+   * OS_PASSWORD
+   * OS_PROJECT_NAME
+   * OS_PROJECT_ID
+   * OS_USER_DOMAIN_ID
+
+*Yardstick* has a ``prepare_storperf_admin-rc.sh`` script which can be used to
+generate the ``storperf_admin-rc`` file, this script is located at
+``test/ci/prepare_storperf_admin-rc.sh``
 
 ::
 
@@ -92,18 +94,18 @@ test/ci/prepare_storperf_admin-rc.sh
   echo "OS_USER_DOMAIN_ID="$USER_DOMAIN_ID >> ~/storperf_admin-rc
 
 
-The generated "storperf_admin-rc" file will be stored in the root directory. If
-you installed *Yardstick* using Docker, this file will be located in the
+The generated ``storperf_admin-rc`` file will be stored in the root directory.
+If you installed *Yardstick* using Docker, this file will be located in the
 container. You may need to copy it to the root directory of the Storperf
 deployed host.
 
 Step 1: Plug-in configuration file preparation
->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
+----------------------------------------------
 
 To install a plug-in, first you need to prepare a plug-in configuration file in
-YAML format and store it in the "plugin" directory. The plugin configration file
-work as the input of yardstick "plugin" command. Below is the Storperf plug-in
-configuration file sample:
+YAML format and store it in the "plugin" directory. The plugin configuration
+file works as the input of yardstick "plugin" command. Below is the Storperf
+plug-in configuration file sample:
 ::
 
   ---
@@ -123,28 +125,28 @@ Here the Storperf will be installed on IP 192.168.23.2 which is the Jump Host
 in my local environment.
 
 Step 2: Plug-in install/remove scripts preparation
->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
+--------------------------------------------------
 
-In "yardstick/resource/scripts" directory, there are two folders: a "install"
-folder and a "remove" folder. You need to store the plug-in install/remove
-scripts in these two folders respectively.
+In ``yardstick/resource/scripts`` directory, there are two folders: an
+``install`` folder and a ``remove`` folder. You need to store the plug-in
+install/remove scripts in these two folders respectively.
 
 The detailed installation or remove operation should de defined in these two
 scripts. The name of both install and remove scripts should match the plugin-in
 name that you specified in the plug-in configuration file.
 
-For example, the install and remove scripts for Storperf are both named to
-"storperf.bash".
+For example, the install and remove scripts for Storperf are both named
+``storperf.bash``.
 
 Step 3: Install and remove Storperf
->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
+-----------------------------------
 
 To install Storperf, simply execute the following command::
 
   # Install Storperf
   yardstick plugin install plugin/storperf.yaml
 
-removing Storperf from yardstick
+Removing Storperf from yardstick
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 To remove Storperf, simply execute the following command::
@@ -24,7 +24,7 @@ Store Storperf Test Results into Community's InfluxDB
 =====================================================
 
 .. _Influxdb: https://git.opnfv.org/cgit/yardstick/tree/yardstick/dispatcher/influxdb.py
-.. _Mingjiang: limingjiang@huawei.com
+.. _Mingjiang: mailto:limingjiang@huawei.com
 .. _Visual: https://wiki.opnfv.org/download/attachments/6827660/tc074.PNG?version=1&modificationDate=1470298075000&api=v2
 .. _Login: http://testresults.opnfv.org/grafana/login
 
@@ -40,12 +40,13 @@ into community's InfluxDB:
    will be supported in the future.
 
 Our plan is to support rest-api in D release so that other testing projects can
-call the rest-api to use yardstick dispatcher service to push data to yardstick's
-influxdb database.
+call the rest-api to use yardstick dispatcher service to push data to
+Yardstick's InfluxDB database.
 
-For now, influxdb only support line protocol, and the json protocol is deprecated.
+For now, InfluxDB only supports line protocol, and the json protocol is
+deprecated.
 
-Take ping test case for example, the raw_result is json format like this:
+Take ping test case for example, the ``raw_result`` is json format like this:
 ::
 
     "benchmark": {
@@ -61,23 +62,24 @@ Take ping test case for example, the raw_result is json format like this:
     "runner_id": 2625
   }
 
-With the help of "influxdb_line_protocol", the json is transform to like below as a line string:
-::
+With the help of "influxdb_line_protocol", the json is transform to like below
+as a line string::
 
   'ping,deploy_scenario=unknown,host=athena.demo,installer=unknown,pod_name=unknown,
     runner_id=2625,scenarios=Ping,target=ares.demo,task_id=77755f38-1f6a-4667-a7f3-
       301c99963656,version=unknown rtt.ares=1.125 1470315409868094976'
 
-So, for data output of json format, you just need to transform json into line format and call
-influxdb api to post the data into the database. All this function has been implemented in Influxdb_.
-If you need support on this, please contact Mingjiang_.
+So, for data output of json format, you just need to transform json into line
+format and call influxdb api to post the data into the database. All this
+function has been implemented in Influxdb_. If you need support on this, please
+contact Mingjiang_.
 ::
 
   curl -i -XPOST 'http://104.197.68.199:8086/write?db=yardstick' --
     data-binary 'ping,deploy_scenario=unknown,host=athena.demo,installer=unknown, ...'
 
-Grafana will be used for visualizing the collected test data, which is shown in Visual_. Grafana
-can be accessed by Login_.
+Grafana will be used for visualizing the collected test data, which is shown in
+Visual_. Grafana can be accessed by Login_.
 
 
 .. image:: images/results_visualization.png
similarity index 95%
rename from docs/testing/user/userguide/07-grafana.rst
rename to docs/testing/user/userguide/08-grafana.rst
index 416857b..29bc23a 100644 (file)
@@ -108,8 +108,10 @@ There are 6 steps to go.
 
 5. When finished with all Grafana configuration changes in this temporary
    dashboard then chose "export" of the updated dashboard copy into a JSON file
-   and put it up for review in Gerrit, in file /yardstick/dashboard/Yardstick-TCxxx-yyyyyyyyyyyyy.
-   For instance a typical default name of the file would be "Yardstick-TC001 Copy-1234567891234".
+   and put it up for review in Gerrit, in file
+   ``/yardstick/dashboard/Yardstick-TCxxx-yyyyyyyyyyyyy``.
+   For instance a typical default name of the file would be
+   ``Yardstick-TC001 Copy-1234567891234``.
 
 6. Once you finish your dashboard, the next step is exporting the configuration
    file and propose a patch into Yardstick. Yardstick team will review and
similarity index 84%
rename from docs/testing/user/userguide/08-api.rst
rename to docs/testing/user/userguide/09-api.rst
index 2206c2a..f0ae398 100644 (file)
@@ -3,25 +3,29 @@
 .. http://creativecommons.org/licenses/by/4.0
 .. (c) OPNFV, Huawei Technologies Co.,Ltd and others.
 
+=====================
 Yardstick Restful API
-======================
+=====================
 
 
 Abstract
---------
+========
 
 Yardstick support restful API since Danube.
 
 
 Available API
--------------
+=============
 
 /yardstick/env/action
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+---------------------
 
-Description: This API is used to prepare Yardstick test environment. For Euphrates, it supports:
+Description: This API is used to prepare Yardstick test environment.
+For Euphrates, it supports:
 
-1. Prepare yardstick test environment, including set external network environment variable, load Yardstick VM images and create flavors;
+1. Prepare yardstick test environment, including setting the
+   ``EXTERNAL_NETWORK`` environment variable, loading Yardstick VM images
+   and creating flavors;
 2. Start an InfluxDB Docker container and config Yardstick output to InfluxDB;
 3. Start a Grafana Docker container and config it with the InfluxDB.
 
@@ -38,7 +42,8 @@ Example::
         'action': 'prepare_env'
     }
 
-This is an asynchronous API. You need to call /yardstick/asynctask API to get the task result.
+This is an asynchronous API. You need to call ``/yardstick/asynctask`` API to
+get the task result.
 
 
 Start and config an InfluxDB docker container
@@ -48,7 +53,8 @@ Example::
         'action': 'create_influxdb'
     }
 
-This is an asynchronous API. You need to call /yardstick/asynctask API to get the task result.
+This is an asynchronous API. You need to call ``/yardstick/asynctask`` API to
+get the task result.
 
 
 Start and config a Grafana docker container
@@ -58,11 +64,12 @@ Example::
         'action': 'create_grafana'
     }
 
-This is an asynchronous API. You need to call /yardstick/asynctask API to get the task result.
+This is an asynchronous API. You need to call ``/yardstick/asynctask`` API to
+get the task result.
 
 
 /yardstick/asynctask
-^^^^^^^^^^^^^^^^^^^^
+--------------------
 
 Description: This API is used to get the status of asynchronous tasks
 
@@ -84,7 +91,7 @@ NOTE::
 
 
 /yardstick/testcases
-^^^^^^^^^^^^^^^^^^^^
+--------------------
 
 Description: This API is used to list all released Yardstick test cases.
 
@@ -99,7 +106,7 @@ Example::
 
 
 /yardstick/testcases/release/action
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+-----------------------------------
 
 Description: This API is used to run a Yardstick released test case.
 
@@ -118,11 +125,12 @@ Example::
         }
     }
 
-This is an asynchronous API. You need to call /yardstick/results to get the result.
+This is an asynchronous API. You need to call ``/yardstick/results`` to get the
+result.
 
 
 /yardstick/testcases/samples/action
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+-----------------------------------
 
 Description: This API is used to run a Yardstick sample test case.
 
@@ -141,13 +149,15 @@ Example::
         }
     }
 
-This is an asynchronous API. You need to call /yardstick/results to get the result.
+This is an asynchronous API. You need to call ``/yardstick/results`` to get
+the result.
 
 
 /yardstick/testcases/<testcase_name>/docs
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+-----------------------------------------
 
-Description: This API is used to the documentation of a certain released test case.
+Description: This API is used to get the documentation of a certain released
+test case.
 
 
 Method: GET
@@ -160,7 +170,7 @@ Example::
 
 
 /yardstick/testsuites/action
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+----------------------------
 
 Description: This API is used to run a Yardstick test suite.
 
@@ -179,11 +189,12 @@ Example::
         }
     }
 
-This is an asynchronous API. You need to call /yardstick/results to get the result.
+This is an asynchronous API. You need to call ``/yardstick/results`` to get the
+result.
 
 
 /yardstick/tasks/<task_id>/log
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+------------------------------
 
 Description: This API is used to get the real time log of test case execution.
 
@@ -198,9 +209,11 @@ Example::
 
 
 /yardstick/results
-^^^^^^^^^^^^^^^^^^
+------------------
 
-Description: This API is used to get the test results of tasks. If you call /yardstick/testcases/samples/action API, it will return a task id. You can use the returned task id to get the results by using this API.
+Description: This API is used to get the test results of tasks. If you call
+``/yardstick/testcases/samples/action`` API, it will return a task id. You can
+use the returned task id to get the results by using this API.
 
 
 Method: GET
@@ -215,9 +228,10 @@ This API will return a list of test case result
 
 
 /api/v2/yardstick/openrcs
-^^^^^^^^^^^^^^^^^^^^^^^^^
+-------------------------
 
-Description: This API provides functionality of handling OpenStack credential file (openrc). For Euphrates, it supports:
+Description: This API provides functionality of handling OpenStack credential
+file (openrc). For Euphrates, it supports:
 
 1. Upload an openrc file for an OpenStack environment;
 2. Update an openrc;
@@ -268,7 +282,7 @@ Example::
 
 
 /api/v2/yardstick/openrcs/<openrc_id>
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+-------------------------------------
 
 Description: This API provides functionality of handling OpenStack credential file (openrc). For Euphrates, it supports:
 
@@ -294,9 +308,10 @@ Example::
 
 
 /api/v2/yardstick/pods
-^^^^^^^^^^^^^^^^^^^^^^
+----------------------
 
-Description: This API provides functionality of handling Yardstick pod file (pod.yaml). For Euphrates, it supports:
+Description: This API provides functionality of handling Yardstick pod file
+(pod.yaml). For Euphrates, it supports:
 
 1. Upload a pod file;
 
@@ -319,7 +334,7 @@ Example::
 
 
 /api/v2/yardstick/pods/<pod_id>
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+-------------------------------
 
 Description: This API provides functionality of handling Yardstick pod file (pod.yaml). For Euphrates, it supports:
 
@@ -343,9 +358,10 @@ Example::
 
 
 /api/v2/yardstick/images
-^^^^^^^^^^^^^^^^^^^^^^^^
+------------------------
 
-Description: This API is used to do some work related to Yardstick VM images. For Euphrates, it supports:
+Description: This API is used to do some work related to Yardstick VM images.
+For Euphrates, it supports:
 
 1. Load Yardstick VM images;
 
@@ -367,7 +383,7 @@ Example::
 
 
 /api/v2/yardstick/images/<image_id>
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+-----------------------------------
 
 Description: This API is used to do some work related to Yardstick VM images. For Euphrates, it supports:
 
@@ -391,9 +407,10 @@ Example::
 
 
 /api/v2/yardstick/tasks
-^^^^^^^^^^^^^^^^^^^^^^^
+-----------------------
 
-Description: This API is used to do some work related to yardstick tasks. For Euphrates, it supports:
+Description: This API is used to do some work related to yardstick tasks. For
+Euphrates, it supports:
 
 1. Create a Yardstick task;
 
@@ -416,7 +433,7 @@ Example::
 
 
 /api/v2/yardstick/tasks/<task_id>
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+---------------------------------
 
 Description: This API is used to do some work related to yardstick tasks. For Euphrates, it supports:
 
@@ -496,13 +513,15 @@ METHOD: DELETE
 Delete a task
 
 Example::
+
     http://<SERVER IP>:<PORT>/api/v2/yardstick/tasks/5g6g3e02-155a-4847-a5f8-154f1b31db8c
 
 
 /api/v2/yardstick/testcases
-^^^^^^^^^^^^^^^^^^^^^^^^^^^
+---------------------------
 
-Description: This API is used to do some work related to yardstick testcases. For Euphrates, it supports:
+Description: This API is used to do some work related to Yardstick testcases.
+For Euphrates, it supports:
 
 1. Upload a test case;
 2. Get all released test cases' information;
@@ -534,7 +553,7 @@ Example::
 
 
 /api/v2/yardstick/testcases/<case_name>
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+---------------------------------------
 
 Description: This API is used to do some work related to yardstick testcases. For Euphrates, it supports:
 
@@ -555,13 +574,15 @@ METHOD: DELETE
 
 Delete a certain test case
 Example::
+
     http://<SERVER IP>:<PORT>/api/v2/yardstick/testcases/opnfv_yardstick_tc002
 
 
 /api/v2/yardstick/testsuites
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+----------------------------
 
-Description: This API is used to do some work related to yardstick test suites. For Euphrates, it supports:
+Description: This API is used to do some work related to yardstick test suites.
+For Euphrates, it supports:
 
 1. Create a test suite;
 2. Get all test suites;
@@ -596,7 +617,7 @@ Example::
 
 
 /api/v2/yardstick/testsuites
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+----------------------------
 
 Description: This API is used to do some work related to yardstick test suites. For Euphrates, it supports:
 
@@ -622,9 +643,10 @@ Example::
 
 
 /api/v2/yardstick/projects
-^^^^^^^^^^^^^^^^^^^^^^^^^^
+--------------------------
 
-Description: This API is used to do some work related to yardstick test projects. For Euphrates, it supports:
+Description: This API is used to do some work related to Yardstick test
+projects. For Euphrates, it supports:
 
 1. Create a Yardstick project;
 2. Get all projects;
@@ -656,7 +678,7 @@ Example::
 
 
 /api/v2/yardstick/projects
-^^^^^^^^^^^^^^^^^^^^^^^^^^
+--------------------------
 
 Description: This API is used to do some work related to yardstick test projects. For Euphrates, it supports:
 
@@ -682,9 +704,10 @@ Example::
 
 
 /api/v2/yardstick/containers
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+----------------------------
 
-Description: This API is used to do some work related to Docker containers. For Euphrates, it supports:
+Description: This API is used to do some work related to Docker containers.
+For Euphrates, it supports:
 
 1. Create a Grafana Docker container;
 2. Create an InfluxDB Docker container;
@@ -721,7 +744,7 @@ Example::
 
 
 /api/v2/yardstick/containers/<container_id>
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+-------------------------------------------
 
 Description: This API is used to do some work related to Docker containers. For Euphrates, it supports:
 
@@ -1,3 +1,4 @@
+========================
 Yardstick User Interface
 ========================
 
@@ -6,14 +7,14 @@ in table format and also values pinned on to a graph.
 
 
 Command
--------
+=======
 ::
 
     yardstick report generate <task-ID> <testcase-filename>
 
 
 Description
------------
+===========
 
 1. When the command is triggered using the task-id and the testcase
 name provided the respective values are retrieved from the
@@ -29,10 +29,10 @@ to run the :term:`VNF`. The exploitation of Deep Packet Inspection
 assumptions:
 
 * third parties unaffiliated with either source or recipient are able to
-inspect each IP packet’s payload
+  inspect each IP packet's payload
 
-* the classifier knows the relevant syntax of each applications packet
-payloads (protocol signatures, data patterns, etc.).
+* the classifier knows the relevant syntax of each application's packet
+  payloads (protocol signatures, data patterns, etc.).
 
 The proposed :term:`DPI` based approach will only use an indicative, small
 number of the initial packets from each flow in order to identify the content
@@ -47,14 +47,14 @@ Concepts
 ========
 
 * *Traffic Inspection*: The process of packet analysis and application
-identification of network traffic that passes through the :term:`VTC`.
+  identification of network traffic that passes through the :term:`VTC`.
 
 * *Traffic Forwarding*: The process of packet forwarding from an incoming
-network interface to a pre-defined outgoing network interface.
+  network interface to a pre-defined outgoing network interface.
 
 * *Traffic Rule Application*: The process of packet tagging, based on a
-predefined set of rules. Packet tagging may include e.g. Type of Service
-(:term:`ToS`) field modification.
+  predefined set of rules. Packet tagging may include e.g. Type of Service
+  (:term:`ToS`) field modification.
 
 Architecture
 ============
@@ -3,11 +3,12 @@
 .. http://creativecommons.org/licenses/by/4.0
 .. (c) OPNFV, 2016-2017 Intel Corporation.
 
+===================================
 Network Services Benchmarking (NSB)
 ===================================
 
 Abstract
---------
+========
 
 .. _Yardstick: https://wiki.opnfv.org/yardstick
 
@@ -15,10 +16,10 @@ This chapter provides an overview of the NSB, a contribution to OPNFV
 Yardstick_ from Intel.
 
 Overview
---------
+========
 
-The goal of NSB is to Extend Yardstick to perform real world VNFs and NFVi Characterization and
-benchmarking with repeatable and deterministic methods.
+The goal of NSB is to Extend Yardstick to perform real world VNFs and NFVi
+Characterization and benchmarking with repeatable and deterministic methods.
 
 The Network Service Benchmarking (NSB) extends the yardstick framework to do
 VNF characterization and benchmarking in three different execution
@@ -70,17 +71,17 @@ NSB extension includes:
         - VNF KPIs, e.g., packet_in, packet_drop, packet_fwd etc
 
 Architecture
-------------
+============
 
 The Network Service (NS) defines a set of Virtual Network Functions (VNF)
 connected together using NFV infrastructure.
 
 The Yardstick NSB extension can support multiple VNFs created by different
 vendors including traffic generators. Every VNF being tested has its
-own data model. The Network service defines a VNF modelling on base of performed
-network functionality. The part of the data model is a set of the configuration
-parameters, number of connection points used and flavor including core and
-memory amount.
+own data model. The Network service defines a VNF modelling on base of
+performed network functionality. The part of the data model is a set of the
+configuration parameters, number of connection points used and flavor including
+core and memory amount.
 
 The ETSI defines a Network Service as a set of configurable VNFs working in
 some NFV Infrastructure connecting each other using Virtual Links available
@@ -112,7 +113,7 @@ Network Service framework performs the necessary test steps. It may involve
     - Read the KPI's provided by particular VNF
 
 Components of Network Service
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+-----------------------------
 
   * *Models for Network Service benchmarking*: The Network Service benchmarking
     requires the proper modelling approach. The NSB provides models using Python
@@ -132,9 +133,9 @@ Components of Network Service
   same way as other VNFs being a part of benchmarked network service.
   Same as other VNFs the traffic generator are instantiated and terminated.
 
-  Every traffic generator has own configuration defined as a traffic profile and
-  a set of KPIs supported. The python models for TG is extended by specific calls
-  to listen and generate traffic.
+  Every traffic generator has its own configuration defined as a traffic
+  profile and a set of KPIs supported. The python models for TG are extended
+  by specific calls to listen and generate traffic.
 
   * *The stateless TREX traffic generator*: The main traffic generator used as
     Network Service stimulus is open source TREX tool.
@@ -165,7 +166,7 @@ Components of Network Service
       - RFC2544 throughput for various loss rate defined (1% is a default)
 
 Graphical Overview
-------------------
+==================
 
 NSB Testing with yardstick framework  facilitate performance testing of various
 VNFs provided.
@@ -192,7 +193,7 @@ VNFs provided.
               Figure 1: Network Service - 2 server configuration
 
 VNFs supported for chracterization:
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+-----------------------------------
 
 1. CGNAPT - Carrier Grade Network Address and port Translation
 2. vFW - Virtual Firewall
@@ -3,11 +3,12 @@
 .. http://creativecommons.org/licenses/by/4.0
 .. (c) OPNFV, 2016-2017 Intel Corporation.
 
+=====================================
 Yardstick - NSB Testing -Installation
 =====================================
 
 Abstract
---------
+========
 
 The Network Service Benchmarking (NSB) extends the yardstick framework to do
 VNF characterization and benchmarking in three different execution
@@ -26,79 +27,65 @@ The steps needed to run Yardstick with NSB testing are:
 
 
 Prerequisites
--------------
+=============
 
 Refer chapter Yardstick Installation for more information on yardstick
 prerequisites
 
-Several prerequisites are needed for Yardstick(VNF testing):
-
-  - Python Modules: pyzmq, pika.
-
-  - flex
-
-  - bison
-
-  - build-essential
-
-  - automake
-
-  - libtool
+Several prerequisites are needed for Yardstick (VNF testing):
 
-  - librabbitmq-dev
-
-  - rabbitmq-server
-
-  - collectd
-
-  - intel-cmt-cat
+  * Python Modules: pyzmq, pika.
+  * flex
+  * bison
+  * build-essential
+  * automake
+  * libtool
+  * librabbitmq-dev
+  * rabbitmq-server
+  * collectd
+  * intel-cmt-cat
 
 Hardware & Software Ingredients
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+-------------------------------
 
 SUT requirements:
 
 
-   +-----------+--------------------+
-   | Item      | Description        |
-   +-----------+--------------------+
-   | Memory    | Min 20GB           |
-   +-----------+--------------------+
-   | NICs      | 2 x 10G            |
-   +-----------+--------------------+
-   | OS        | Ubuntu 16.04.3 LTS |
-   +-----------+--------------------+
-   | kernel    | 4.4.0-34-generic   |
-   +-----------+--------------------+
-   | DPDK      | 17.02              |
-   +-----------+--------------------+
+   ======= ===================
+   Item    Description
+   ======= ===================
+   Memory  Min 20GB
+   NICs    2 x 10G
+   OS      Ubuntu 16.04.3 LTS
+   kernel  4.4.0-34-generic
+   DPDK    17.02
+   ======= ===================
 
 Boot and BIOS settings:
 
 
-   +------------------+---------------------------------------------------+
-   | Boot settings    | default_hugepagesz=1G hugepagesz=1G hugepages=16  |
-   |                  | hugepagesz=2M hugepages=2048 isolcpus=1-11,22-33  |
-   |                  | nohz_full=1-11,22-33 rcu_nocbs=1-11,22-33         |
-   |                  | iommu=on iommu=pt intel_iommu=on                  |
-   |                  | Note: nohz_full and rcu_nocbs is to disable Linux |
-   |                  | kernel interrupts                                 |
-   +------------------+---------------------------------------------------+
-   |BIOS              | CPU Power and Performance Policy <Performance>    |
-   |                  | CPU C-state Disabled                              |
-   |                  | CPU P-state Disabled                              |
-   |                  | Enhanced Intel® Speedstep® Tech Disabled          |
-   |                  | Hyper-Threading Technology (If supported) Enabled |
-   |                  | Virtualization Techology Enabled                  |
-   |                  | Intel(R) VT for Direct I/O Enabled                |
-   |                  | Coherency Enabled                                 |
-   |                  | Turbo Boost Disabled                              |
-   +------------------+---------------------------------------------------+
+   ============= =================================================
+   Boot settings default_hugepagesz=1G hugepagesz=1G hugepages=16
+                 hugepagesz=2M hugepages=2048 isolcpus=1-11,22-33
+                 nohz_full=1-11,22-33 rcu_nocbs=1-11,22-33
+                 iommu=on iommu=pt intel_iommu=on
+                 Note: nohz_full and rcu_nocbs is to disable Linux
+                 kernel interrupts
+   BIOS          CPU Power and Performance Policy <Performance>
+                 CPU C-state Disabled
+                 CPU P-state Disabled
+                 Enhanced Intel® Speedstep® Tech Disabled
+                 Hyper-Threading Technology (If supported) Enabled
+                 Virtualization Technology Enabled
+                 Intel(R) VT for Direct I/O Enabled
+                 Coherency Enabled
+                 Turbo Boost Disabled
+   ============= =================================================
 
 
 
 Install Yardstick (NSB Testing)
--------------------------------
+===============================
 
 Download the source code and install Yardstick from it
 
@@ -148,6 +135,15 @@ Ansible:
   ansible_user=root
   ansible_pass=root
 
+.. note::
+
+   SSH access without password needs to be configured for all your nodes defined in
+   ``yardstick-install-inventory.ini`` file.
+   If you want to use password authentication you need to install sshpass
+
+   .. code-block:: console
+
+     sudo -EH apt-get install sshpass
 
 To execute an installation for a Bare-Metal or a Standalone context:
 
@@ -168,11 +164,12 @@ Above command setup docker with latest yardstick code. To execute
 
   docker exec -it yardstick bash
 
-It will also automatically download all the packages needed for NSB Testing setup.
-Refer chapter :doc:`04-installation` for more on docker **Install Yardstick using Docker (recommended)**
+It will also automatically download all the packages needed for NSB Testing
+setup. Refer chapter :doc:`04-installation` for more on docker
+**Install Yardstick using Docker (recommended)**
 
 System Topology:
-----------------
+================
 
 .. code-block:: console
 
@@ -187,13 +184,15 @@ System Topology:
 
 
 Environment parameters and credentials
---------------------------------------
+======================================
 
 Config yardstick conf
-^^^^^^^^^^^^^^^^^^^^^
+---------------------
 
-If user did not run 'yardstick env influxdb' inside the container, which will generate
-correct yardstick.conf, then create the config file manually (run inside the container):
+If user did not run 'yardstick env influxdb' inside the container, which will
+generate correct ``yardstick.conf``, then create the config file manually (run
+inside the container):
+::
 
     cp ./etc/yardstick/yardstick.conf.sample /etc/yardstick/yardstick.conf
     vi /etc/yardstick/yardstick.conf
@@ -219,11 +218,11 @@ Add trex_path, trex_client_lib and bin_path in 'nsb' section.
   trex_client_lib=/opt/nsb_bin/trex_client/stl
 
 Run Yardstick - Network Service Testcases
------------------------------------------
+=========================================
 
 
 NS testing - using yardstick CLI
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+--------------------------------
 
   See :doc:`04-installation`
 
@@ -236,13 +235,13 @@ NS testing - using yardstick CLI
   yardstick --debug task start yardstick/samples/vnf_samples/nsut/<vnf>/<test case>
 
 Network Service Benchmarking - Bare-Metal
------------------------------------------
+=========================================
 
 Bare-Metal Config pod.yaml describing Topology
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+----------------------------------------------
 
-Bare-Metal 2-Node setup:
-########################
+Bare-Metal 2-Node setup
+^^^^^^^^^^^^^^^^^^^^^^^
 .. code-block:: console
 
   +----------+              +----------+
@@ -254,8 +253,8 @@ Bare-Metal 2-Node setup:
   +----------+              +----------+
   trafficgen_1                   vnf
 
-Bare-Metal 3-Node setup - Correlated Traffic:
-#############################################
+Bare-Metal 3-Node setup - Correlated Traffic
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 .. code-block:: console
 
   +----------+              +----------+            +------------+
@@ -270,7 +269,7 @@ Bare-Metal 3-Node setup - Correlated Traffic:
 
 
 Bare-Metal Config pod.yaml
-^^^^^^^^^^^^^^^^^^^^^^^^^^
+--------------------------
 Before executing Yardstick test cases, make sure that pod.yaml reflects the
 topology and update all the required fields.::
 
@@ -345,13 +344,13 @@ topology and update all the required fields.::
 
 
 Network Service Benchmarking - Standalone Virtualization
---------------------------------------------------------
+========================================================
 
-SR-IOV:
-^^^^^^^
+SR-IOV
+------
 
 SR-IOV Pre-requisites
-#####################
+^^^^^^^^^^^^^^^^^^^^^
 
 On Host:
  a) Create a bridge for VM to connect to external network
@@ -387,10 +386,10 @@ On Host:
 
 
 SR-IOV Config pod.yaml describing Topology
-##########################################
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 SR-IOV 2-Node setup:
-####################
+^^^^^^^^^^^^^^^^^^^^
 .. code-block:: console
 
                                +--------------------+
@@ -418,7 +417,7 @@ SR-IOV 2-Node setup:
 
 
 SR-IOV 3-Node setup - Correlated Traffic
-########################################
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 .. code-block:: console
 
                                +--------------------+
@@ -454,7 +453,7 @@ topology and update all the required fields.
 .. note:: Update all the required fields like ip, user, password, pcis, etc...
 
 SR-IOV Config pod_trex.yaml
-###########################
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 .. code-block:: YAML
 
@@ -483,7 +482,7 @@ SR-IOV Config pod_trex.yaml
                 local_mac: "00:00.00:00:00:02"
 
 SR-IOV Config host_sriov.yaml
-#############################
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 .. code-block:: YAML
 
@@ -495,7 +494,8 @@ SR-IOV Config host_sriov.yaml
        user: ""
        password: ""
 
-SR-IOV testcase update: ``<yardstick>/samples/vnf_samples/nsut/vfw/tc_sriov_rfc2544_ipv4_1rule_1flow_64B_trex.yaml``
+SR-IOV testcase update:
+``<yardstick>/samples/vnf_samples/nsut/vfw/tc_sriov_rfc2544_ipv4_1rule_1flow_64B_trex.yaml``
 
 Update "contexts" section
 """""""""""""""""""""""""
@@ -542,11 +542,11 @@ Update "contexts" section
 
 
 
-OVS-DPDK:
-^^^^^^^^^
+OVS-DPDK
+--------
 
 OVS-DPDK Pre-requisites
-#######################
+^^^^^^^^^^^^^^^^^^^^^^^
 
 On Host:
  a) Create a bridge for VM to connect to external network
@@ -585,10 +585,10 @@ On Host:
 
 
 OVS-DPDK Config pod.yaml describing Topology
-############################################
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-OVS-DPDK 2-Node setup:
-######################
+OVS-DPDK 2-Node setup
+^^^^^^^^^^^^^^^^^^^^^
 
 
 .. code-block:: console
@@ -619,7 +619,7 @@ OVS-DPDK 2-Node setup:
 
 
 OVS-DPDK 3-Node setup - Correlated Traffic
-##########################################
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 .. code-block:: console
 
@@ -659,7 +659,7 @@ topology and update all the required fields.
 .. note:: Update all the required fields like ip, user, password, pcis, etc...
 
 OVS-DPDK Config pod_trex.yaml
-#############################
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 .. code-block:: YAML
 
@@ -687,7 +687,7 @@ OVS-DPDK Config pod_trex.yaml
               local_mac: "00:00.00:00:00:02"
 
 OVS-DPDK Config host_ovs.yaml
-#############################
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 .. code-block:: YAML
 
@@ -699,7 +699,8 @@ OVS-DPDK Config host_ovs.yaml
        user: ""
        password: ""
 
-ovs_dpdk testcase update: ``<yardstick>/samples/vnf_samples/nsut/vfw/tc_ovs_rfc2544_ipv4_1rule_1flow_64B_trex.yaml``
+ovs_dpdk testcase update:
+``<yardstick>/samples/vnf_samples/nsut/vfw/tc_ovs_rfc2544_ipv4_1rule_1flow_64B_trex.yaml``
 
 Update "contexts" section
 """""""""""""""""""""""""
@@ -757,7 +758,7 @@ Update "contexts" section
 
 
 Network Service Benchmarking - OpenStack with SR-IOV support
-------------------------------------------------------------
+============================================================
 
 This section describes how to run a Sample VNF test case, using Heat context,
 with SR-IOV. It also covers how to install OpenStack in Ubuntu 16.04, using
@@ -765,7 +766,7 @@ DevStack, with SR-IOV support.
 
 
 Single node OpenStack setup with external TG
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+--------------------------------------------
 
 .. code-block:: console
 
@@ -796,7 +797,7 @@ Single node OpenStack setup with external TG
 
 
 Host pre-configuration
-######################
+^^^^^^^^^^^^^^^^^^^^^^
 
 .. warning:: The following configuration requires sudo access to the system. Make
   sure that your user have the access.
@@ -896,7 +897,7 @@ Setup SR-IOV ports on the host:
 
 
 DevStack installation
-#####################
+^^^^^^^^^^^^^^^^^^^^^
 
 Use official `Devstack <https://docs.openstack.org/devstack/pike/>`_
 documentation to install OpenStack on a host. Please note, that stable
@@ -918,7 +919,7 @@ Start the devstack installation on a host.
 
 
 TG host configuration
-#####################
+^^^^^^^^^^^^^^^^^^^^^
 
 Yardstick automatically install and configure Trex traffic generator on TG
 host based on provided POD file (see below). Anyway, it's recommended to check
@@ -927,7 +928,7 @@ the manual at https://trex-tgn.cisco.com/trex/doc/trex_manual.html.
 
 
 Run the Sample VNF test case
-############################
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 There is an example of Sample VNF test case ready to be executed in an
 OpenStack environment with SR-IOV support: ``samples/vnf_samples/nsut/vfw/
@@ -952,7 +953,7 @@ context using steps described in `NS testing - using yardstick CLI`_ section.
 
 
 Multi node OpenStack TG and VNF setup (two nodes)
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+-------------------------------------------------
 
 .. code-block:: console
 
@@ -983,14 +984,14 @@ Multi node OpenStack TG and VNF setup (two nodes)
 
 
 Controller/Compute pre-configuration
-####################################
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 Pre-configuration of the controller and compute hosts are the same as
 described in `Host pre-configuration`_ section. Follow the steps in the section.
 
 
 DevStack configuration
-######################
+^^^^^^^^^^^^^^^^^^^^^^
 
 Use official `Devstack <https://docs.openstack.org/devstack/pike/>`_
 documentation to install OpenStack on a host. Please note, that stable
@@ -1017,7 +1018,7 @@ Start the devstack installation on the controller and compute hosts.
 
 
 Run the sample vFW TC
-#####################
+^^^^^^^^^^^^^^^^^^^^^
 
 Install yardstick using `Install Yardstick (NSB Testing)`_ steps for OpenStack
 context.
@@ -1034,62 +1035,31 @@ and the following yardtick command line arguments:
 
 
 Enabling other Traffic generator
---------------------------------
+================================
 
-IxLoad:
-^^^^^^^
+IxLoad
+------
 
-1. Software needed: IxLoadAPI ``<IxLoadTclApi verson>Linux64.bin.tgz and <IxOS
-   version>Linux64.bin.tar.gz`` (Download from ixia support site)
-   Install - ``<IxLoadTclApi verson>Linux64.bin.tgz & <IxOS version>Linux64.bin.tar.gz``
-   If the installation was not done inside the container, after installing the IXIA client,
-   check /opt/ixia/ixload/<ver>/bin/ixloadpython and make sure you can run this cmd
-   inside the yardstick container. Usually user is required to copy or link /opt/ixia/python/<ver>/bin/ixiapython
-   to /usr/bin/ixiapython<ver> inside the container.
+1. Software needed: IxLoadAPI ``<IxLoadTclApi version>Linux64.bin.tgz`` and
+   ``<IxOS version>Linux64.bin.tar.gz`` (Download from ixia support site)
+   Install - ``<IxLoadTclApi version>Linux64.bin.tgz`` and
+   ``<IxOS version>Linux64.bin.tar.gz``
+   If the installation was not done inside the container, after installing
+   the IXIA client, check ``/opt/ixia/ixload/<ver>/bin/ixloadpython`` and make
+   sure you can run this cmd inside the yardstick container. Usually user is
+   required to copy or link ``/opt/ixia/python/<ver>/bin/ixiapython`` to
+   ``/usr/bin/ixiapython<ver>`` inside the container.
 
-2. Update pod_ixia.yaml file with ixia details.
+2. Update ``pod_ixia.yaml`` file with ixia details.
 
   .. code-block:: console
 
     cp <repo>/etc/yardstick/nodes/pod.yaml.nsb.sample.ixia etc/yardstick/nodes/pod_ixia.yaml
 
-  Config pod_ixia.yaml
+  Config ``pod_ixia.yaml``
 
-  .. code-block:: yaml
-
-
-      nodes:
-          -
-            name: trafficgen_1
-            role: IxNet
-            ip: 1.2.1.1 #ixia machine ip
-            user: user
-            password: r00t
-            key_filename: /root/.ssh/id_rsa
-            tg_config:
-                ixchassis: "1.2.1.7" #ixia chassis ip
-                tcl_port: "8009" # tcl server port
-                lib_path: "/opt/ixia/ixos-api/8.01.0.2/lib/ixTcl1.0"
-                root_dir: "/opt/ixia/ixos-api/8.01.0.2/"
-                py_bin_path: "/opt/ixia/ixload/8.01.106.3/bin/"
-                py_lib_path: "/opt/ixia/ixnetwork/8.01.1029.14/lib/PythonApi"
-                dut_result_dir: "/mnt/ixia"
-                version: 8.1
-            interfaces:
-                xe0:  # logical name from topology.yaml and vnfd.yaml
-                    vpci: "2:5" # Card:port
-                    driver:    "none"
-                    dpdk_port_num: 0
-                    local_ip: "152.16.100.20"
-                    netmask:   "255.255.0.0"
-                    local_mac: "00:98:10:64:14:00"
-                xe1:  # logical name from topology.yaml and vnfd.yaml
-                    vpci: "2:6" # [(Card, port)]
-                    driver:    "none"
-                    dpdk_port_num: 1
-                    local_ip: "152.40.40.20"
-                    netmask:   "255.255.0.0"
-                    local_mac: "00:98:28:28:14:00"
+  .. literalinclude:: code/pod_ixia.yaml
+     :language: console
 
  For SR-IOV and OVS-DPDK pod files, please refer to the Standalone Virtualization section above for OVS-DPDK/SR-IOV configuration
 
@@ -1097,23 +1067,24 @@ IxLoad:
    You will also need to configure the IxLoad machine to start the IXIA
    IxosTclServer. This can be started like so:
 
-   - Connect to the IxLoad machine using RDP
-   - Go to:
+   * Connect to the IxLoad machine using RDP
+   * Go to:
      ``Start->Programs->Ixia->IxOS->IxOS 8.01-GA-Patch1->Ixia Tcl Server IxOS 8.01-GA-Patch1``
      or
      ``"C:\Program Files (x86)\Ixia\IxOS\8.01-GA-Patch1\ixTclServer.exe"``
 
-4. Create a folder "Results" in c:\ and share the folder on the network.
+4. Create a folder ``Results`` in c:\ and share the folder on the network.
 
-5. execute testcase in samplevnf folder.
-   eg ``<repo>/samples/vnf_samples/nsut/vfw/tc_baremetal_http_ixload_1b_Requests-65000_Concurrency.yaml``
+5. Execute testcase in samplevnf folder e.g.
+   ``<repo>/samples/vnf_samples/nsut/vfw/tc_baremetal_http_ixload_1b_Requests-65000_Concurrency.yaml``
 
-IxNetwork:
-^^^^^^^^^^
+IxNetwork
+---------
 
-1. Software needed: ``IxNetworkAPI<ixnetwork verson>Linux64.bin.tgz`` (Download from ixia support site)
-                     Install - ``IxNetworkAPI<ixnetwork verson>Linux64.bin.tgz``
-2. Update pod_ixia.yaml file with ixia details.
+IxNetwork testcases use IxNetwork API Python Bindings module, which is
+installed as part of the requirements of the project.
+
+1. Update ``pod_ixia.yaml`` file with ixia details.
 
   .. code-block:: console
 
@@ -1121,50 +1092,19 @@ IxNetwork:
 
   Config pod_ixia.yaml
 
-  .. code-block:: yaml
-
-      nodes:
-          -
-            name: trafficgen_1
-            role: IxNet
-            ip: 1.2.1.1 #ixia machine ip
-            user: user
-            password: r00t
-            key_filename: /root/.ssh/id_rsa
-            tg_config:
-                ixchassis: "1.2.1.7" #ixia chassis ip
-                tcl_port: "8009" # tcl server port
-                lib_path: "/opt/ixia/ixos-api/8.01.0.2/lib/ixTcl1.0"
-                root_dir: "/opt/ixia/ixos-api/8.01.0.2/"
-                py_bin_path: "/opt/ixia/ixload/8.01.106.3/bin/"
-                py_lib_path: "/opt/ixia/ixnetwork/8.01.1029.14/lib/PythonApi"
-                dut_result_dir: "/mnt/ixia"
-                version: 8.1
-            interfaces:
-                xe0:  # logical name from topology.yaml and vnfd.yaml
-                    vpci: "2:5" # Card:port
-                    driver:    "none"
-                    dpdk_port_num: 0
-                    local_ip: "152.16.100.20"
-                    netmask:   "255.255.0.0"
-                    local_mac: "00:98:10:64:14:00"
-                xe1:  # logical name from topology.yaml and vnfd.yaml
-                    vpci: "2:6" # [(Card, port)]
-                    driver:    "none"
-                    dpdk_port_num: 1
-                    local_ip: "152.40.40.20"
-                    netmask:   "255.255.0.0"
-                    local_mac: "00:98:28:28:14:00"
+  .. literalinclude:: code/pod_ixia.yaml
+     :language: console
 
  For SR-IOV and OVS-DPDK pod files, please refer to the Standalone Virtualization section above for OVS-DPDK/SR-IOV configuration
 
-3. Start IxNetwork TCL Server
+2. Start IxNetwork TCL Server
    You will also need to configure the IxNetwork machine to start the IXIA
    IxNetworkTclServer. This can be started like so:
 
-    - Connect to the IxNetwork machine using RDP
-    - Go to:     ``Start->Programs->Ixia->IxNetwork->IxNetwork 7.21.893.14 GA->IxNetworkTclServer`` (or ``IxNetworkApiServer``)
-
-4. execute testcase in samplevnf folder.
-   eg ``<repo>/samples/vnf_samples/nsut/vfw/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_ixia.yaml``
+    * Connect to the IxNetwork machine using RDP
+    * Go to:
+      ``Start->Programs->Ixia->IxNetwork->IxNetwork 7.21.893.14 GA->IxNetworkTclServer``
+      (or ``IxNetworkApiServer``)
 
+3. Execute testcase in samplevnf folder e.g.
+   ``<repo>/samples/vnf_samples/nsut/vfw/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_ixia.yaml``
@@ -23,13 +23,14 @@ provider/external networks.
 Provider networks
 ^^^^^^^^^^^^^^^^^
 
-The VNFs require a clear L2 connect to the external network in order to generate
-realistic traffic from multiple address ranges and port
+The VNFs require a clear L2 connection to the external network in order to
+generate realistic traffic from multiple address ranges and ports.
 
-In order to prevent Neutron from filtering traffic we have to disable Neutron Port Security.
-We also disable DHCP on the data ports because we are binding the ports to DPDK and do not need
-DHCP addresses.  We also disable gateways because multiple default gateways can prevent SSH access
-to the VNF from the floating IP.  We only want a gateway on the mgmt network
+In order to prevent Neutron from filtering traffic we have to disable Neutron
+Port Security. We also disable DHCP on the data ports because we are binding
+the ports to DPDK and do not need DHCP addresses.  We also disable gateways
+because multiple default gateways can prevent SSH access to the VNF from the
+floating IP.  We only want a gateway on the mgmt network.
 
 .. code-block:: yaml
 
@@ -42,8 +43,9 @@ to the VNF from the floating IP.  We only want a gateway on the mgmt network
 Heat Topologies
 ^^^^^^^^^^^^^^^
 
-By default Heat will attach every node to every Neutron network that is created.
-For scale-out tests we do not want to attach every node to every network.
+By default Heat will attach every node to every Neutron network that is
+created. For scale-out tests we do not want to attach every node to every
+network.
 
 For each node you can specify which ports are on which network using the
 network_ports dictionary.
@@ -85,11 +87,11 @@ In this example we have ``TRex xe0 <-> xe0 VNF xe1 <-> xe0 UDP_Replay``
 Collectd KPIs
 -------------
 
-NSB can collect KPIs from collected.  We have support for various plugins enabled by the
-Barometer project.
+NSB can collect KPIs from collectd.  We have support for various plugins
+enabled by the Barometer project.
 
-The default yardstick-samplevnf has collectd installed.   This allows for collecting KPIs
-from the VNF.
+The default yardstick-samplevnf has collectd installed. This allows for
+collecting KPIs from the VNF.
 
 Collecting KPIs from the NFVi is more complicated and requires manual setup.
 We assume that collectd is not installed on the compute nodes.
@@ -126,40 +128,80 @@ To collectd KPIs from the NFVi compute nodes:
 
 
 Scale-Up
-------------------
+--------
 
 VNFs performance data with scale-up
 
-  * Helps to figure out optimal number of cores specification in the Virtual Machine template creation or VNF
+  * Helps to figure out optimal number of cores specification in the Virtual
+    Machine template creation or VNF
   * Helps in comparison between different VNF vendor offerings
-  * Better the scale-up index, indicates the performance scalability of a particular solution
+  * Better the scale-up index, indicates the performance scalability of a
+    particular solution
 
 Heat
 ^^^^
-
-For VNF scale-up tests we increase the number for VNF worker threads.  In the case of VNFs
-we also need to increase the number of VCPUs and memory allocated to the VNF.
+For VNF scale-up tests we increase the number for VNF worker threads.  In the
+case of VNFs we also need to increase the number of VCPUs and memory allocated
+to the VNF.
 
 An example scale-up Heat testcase is:
 
+.. literalinclude:: /submodules/yardstick/samples/vnf_samples/nsut/vfw/tc_heat_rfc2544_ipv4_1rule_1flow_64B_trex_scale-up.yaml
+   :language: yaml
+
+This testcase template requires specifying the number of VCPUs, Memory and Ports.
+We set the VCPUs and memory using the ``--task-args`` options
+
 .. code-block:: console
 
-  <repo>/samples/vnf_samples/nsut/acl/tc_heat_rfc2544_ipv4_1rule_1flow_64B_trex_scale_up.yaml
+  yardstick task start --task-args='{"mem": 10480, "vcpus": 4, "ports": 2}' \
+  samples/vnf_samples/nsut/vfw/tc_heat_rfc2544_ipv4_1rule_1flow_64B_trex_scale-up.yaml
 
-This testcase template requires specifying the number of VCPUs and Memory.
-We set the VCPUs and memory using the --task-args options
+In order to support ports scale-up, traffic and topology templates need to be used in the test case.
 
-.. code-block:: console
+An example topology template is:
+
+.. literalinclude:: /submodules/yardstick/samples/vnf_samples/nsut/vfw/vfw-tg-topology-scale-up.yaml
+   :language: yaml
+
+This template has ``vports`` as an argument. To pass this argument it needs to
+be configured in ``extra_args`` scenario definition. Please note that more
+arguments can be defined in that section. All of them will be passed to topology
+and traffic profile templates.
+
+For example:
+
+.. code-block:: yaml
 
-  yardstick --debug task start --task-args='{"mem": 20480, "vcpus": 10}'   samples/vnf_samples/nsut/acl/tc_heat_rfc2544_ipv4_1rule_1flow_64B_trex_scale_up.yaml
+   schema: yardstick:task:0.1
+   scenarios:
+   - type: NSPerf
+     traffic_profile: ../../traffic_profiles/ipv4_throughput-scale-up.yaml
+     extra_args:
+       vports: {{ vports }}
+     topology: vfw-tg-topology-scale-up.yaml
+
+An example traffic profile template is:
+
+.. literalinclude:: /submodules/yardstick/samples/vnf_samples/traffic_profiles/ipv4_throughput-scale-up.yaml
+   :language: yaml
+
+There is an option to provide predefined config for SampleVNFs. Path to config
+file may be specified in ``vnf_config`` scenario section.
+
+.. code-block:: yaml
+
+   vnf__0:
+      rules: acl_1rule.yaml
+      vnf_config: {lb_config: 'SW', file: vfw_vnf_pipeline_cores_4_ports_2_lb_1_sw.conf }
 
 
 Baremetal
 ^^^^^^^^^
   1. Follow above traffic generator section to setup.
-  2. edit num of threads in ``<repo>/samples/vnf_samples/nsut/vfw/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_trex_scale_up.yaml``
-
-  e.g, 6 Threads  for given VNF
+  2. Edit num of threads in
+     ``<repo>/samples/vnf_samples/nsut/vfw/tc_baremetal_rfc2544_ipv4_1rule_1flow_64B_trex_scale_up.yaml``
+     e.g., 6 threads for a given VNF
 
 .. code-block:: yaml
 
@@ -202,11 +244,12 @@ Baremetal
 Scale-Out
 --------------------
 
-VNFs performance data with scale-out
+VNFs performance data with scale-out helps
 
-  * Helps in capacity planning to meet the given network node requirements
-  * Helps in comparison between different VNF vendor offerings
-  * Better the scale-out index, provides the flexibility in meeting future capacity requirements
+  * in capacity planning to meet the given network node requirements
+  * in comparison between different VNF vendor offerings
+  * better the scale-out index, provides the flexibility in meeting future
+    capacity requirements
 
 
 Standalone
@@ -236,7 +279,8 @@ Scale-out not supported on Baremetal.
 Heat
 ^^^^
 
-There are sample scale-out all-VM Heat tests.  These tests only use VMs and don't use external traffic.
+There are sample scale-out all-VM Heat tests. These tests only use VMs and
+don't use external traffic.
 
 The tests use UDP_Replay and correlated traffic.
 
@@ -250,11 +294,14 @@ To run the test you need to increase OpenStack CPU, Memory and Port quotas.
 Traffic Generator tuning
 ------------------------
 
-The TRex traffic generator can be setup to use multiple threads per core, this is for multiqueue testing.
+The TRex traffic generator can be setup to use multiple threads per core, this
+is for multiqueue testing.
 
-TRex does not automatically enable multiple threads because we currently cannot detect the number of queues on a device.
+TRex does not automatically enable multiple threads because we currently cannot
+detect the number of queues on a device.
 
-To enable multiple queue set the queues_per_port value in the TG VNF options section.
+To enable multiple queue set the ``queues_per_port`` value in the TG VNF
+options section.
 
 .. code-block:: yaml
 
@@ -266,5 +313,3 @@ To enable multiple queue set the queues_per_port value in the TG VNF options sec
       options:
         tg_0:
           queues_per_port: 2
-
-
index 47526cd..678f0f9 100644 (file)
@@ -14,11 +14,11 @@ This chapter lists available Yardstick test cases.
 Yardstick test cases are divided in two main categories:
 
 * *Generic NFVI Test Cases* - Test Cases developed to realize the methodology
-described in :doc:`02-methodology`
+  described in :doc:`02-methodology`
 
 * *OPNFV Feature Test Cases* - Test Cases developed to verify one or more
-aspect of a feature delivered by an OPNFV Project, including the test cases
-developed for the :term:`VTC`.
+  aspect of a feature delivered by an OPNFV Project, including the test cases
+  developed for the :term:`VTC`.
 
 Generic NFVI Test Case Descriptions
 ===================================
@@ -83,6 +83,7 @@ H A
    opnfv_yardstick_tc056.rst
    opnfv_yardstick_tc057.rst
    opnfv_yardstick_tc058.rst
+   opnfv_yardstick_tc087.rst
 
 IPv6
 ----
@@ -108,8 +109,8 @@ Parser
 
    opnfv_yardstick_tc040.rst
 
-   StorPerf
------------
+StorPerf
+--------
 
 .. toctree::
    :maxdepth: 1
diff --git a/docs/testing/user/userguide/code/pod_ixia.yaml b/docs/testing/user/userguide/code/pod_ixia.yaml
new file mode 100644 (file)
index 0000000..4ab56fe
--- /dev/null
@@ -0,0 +1,31 @@
+nodes:
+-
+    name: trafficgen_1
+    role: IxNet
+    ip: 1.2.1.1 #ixia machine ip
+    user: user
+    password: r00t
+    key_filename: /root/.ssh/id_rsa
+    tg_config:
+        ixchassis: "1.2.1.7" #ixia chassis ip
+        tcl_port: "8009" # tcl server port
+        lib_path: "/opt/ixia/ixos-api/8.01.0.2/lib/ixTcl1.0"
+        root_dir: "/opt/ixia/ixos-api/8.01.0.2/"
+        py_bin_path: "/opt/ixia/ixload/8.01.106.3/bin/"
+        dut_result_dir: "/mnt/ixia"
+        version: 8.1
+    interfaces:
+        xe0:  # logical name from topology.yaml and vnfd.yaml
+            vpci: "2:5" # Card:port
+            driver:    "none"
+            dpdk_port_num: 0
+            local_ip: "152.16.100.20"
+            netmask:   "255.255.0.0"
+            local_mac: "00:98:10:64:14:00"
+        xe1:  # logical name from topology.yaml and vnfd.yaml
+            vpci: "2:6" # [(Card, port)]
+            driver:    "none"
+            dpdk_port_num: 1
+            local_ip: "152.40.40.20"
+            netmask:   "255.255.0.0"
+            local_mac: "00:98:28:28:14:00"
index 61e157e..b936e72 100644 (file)
@@ -17,15 +17,16 @@ Yardstick User Guide
    02-methodology
    03-architecture
    04-installation
-   05-yardstick_plugin
-   06-result-store-InfluxDB
-   07-grafana
-   08-api
-   09-yardstick_user_interface
-   10-vtc-overview
-   11-nsb-overview
-   12-nsb_installation
-   13-nsb_operation
+   05-operation
+   06-yardstick-plugin
+   07-result-store-InfluxDB
+   08-grafana
+   09-api
+   10-yardstick-user-interface
+   11-vtc-overview
+   12-nsb-overview
+   13-nsb-installation
+   14-nsb-operation
    15-list-of-tcs
    nsb/nsb-list-of-tcs
    glossary
index 8890c9d..82a491b 100644 (file)
@@ -34,23 +34,20 @@ Yardstick Test Case Description TC050
 |              | 2) host: which is the name of a control node being attacked. |
 |              | 3) interface: the network interface to be turned off.        |
 |              |                                                              |
-|              | There are four instance of the "close-interface" monitor:    |
-|              | attacker1(for public netork):                                |
-|              | -fault_type: "close-interface"                               |
-|              | -host: node1                                                 |
-|              | -interface: "br-ex"                                          |
-|              | attacker2(for management netork):                            |
-|              | -fault_type: "close-interface"                               |
-|              | -host: node1                                                 |
-|              | -interface: "br-mgmt"                                        |
-|              | attacker3(for storage netork):                               |
-|              | -fault_type: "close-interface"                               |
-|              | -host: node1                                                 |
-|              | -interface: "br-storage"                                     |
-|              | attacker4(for private netork):                               |
-|              | -fault_type: "close-interface"                               |
-|              | -host: node1                                                 |
-|              | -interface: "br-mesh"                                        |
+|              | The interface to be closed by the attacker can be set by the |
+|              | variable of "{{ interface_name }}"                           |
+|              |                                                              |
+|              | attackers:                                                   |
+|              |   -                                                          |
+|              |     fault_type: "general-attacker"                           |
+|              |     host: {{ attack_host }}                                  |
+|              |     key: "close-br-public"                                   |
+|              |     attack_key: "close-interface"                            |
+|              |     action_parameter:                                        |
+|              |       interface: {{ interface_name }}                        |
+|              |     rollback_parameter:                                      |
+|              |       interface: {{ interface_name }}                        |
+|              |                                                              |
 +--------------+--------------------------------------------------------------+
 |monitors      | In this test case, the monitor named "openstack-cmd" is      |
 |              | needed. The monitor needs needs two parameters:              |
@@ -61,17 +58,17 @@ Yardstick Test Case Description TC050
 |              |                                                              |
 |              | There are four instance of the "openstack-cmd" monitor:      |
 |              | monitor1:                                                    |
-|              | -monitor_type: "openstack-cmd"                               |
-|              | -command_name: "nova image-list"                             |
+|              |     - monitor_type: "openstack-cmd"                          |
+|              |     - command_name: "nova image-list"                        |
 |              | monitor2:                                                    |
-|              | -monitor_type: "openstack-cmd"                               |
-|              | -command_name: "neutron router-list"                         |
+|              |     - monitor_type: "openstack-cmd"                          |
+|              |     - command_name: "neutron router-list"                    |
 |              | monitor3:                                                    |
-|              | -monitor_type: "openstack-cmd"                               |
-|              | -command_name: "heat stack-list"                             |
+|              |     - monitor_type: "openstack-cmd"                          |
+|              |     - command_name: "heat stack-list"                        |
 |              | monitor4:                                                    |
-|              | -monitor_type: "openstack-cmd"                               |
-|              | -command_name: "cinder list"                                 |
+|              |     - monitor_type: "openstack-cmd"                          |
+|              |     - command_name: "cinder list"                            |
 +--------------+--------------------------------------------------------------+
 |metrics       | In this test case, there is one metric:                      |
 |              | 1)service_outage_time: which indicates the maximum outage    |
@@ -109,9 +106,9 @@ Yardstick Test Case Description TC050
 +--------------+--------------------------------------------------------------+
 |step 2        | do attacker: connect the host through SSH, and then execute  |
 |              | the turnoff network interface script with param value        |
-|              | specified by  "interface".                                   |
+|              | specified by "{{ interface_name }}".                         |
 |              |                                                              |
-|              | Result: Network interfaces will be turned down.              |
+|              | Result: The specified network interface will be down.        |
 |              |                                                              |
 +--------------+--------------------------------------------------------------+
 |step 3        | stop monitors after a period of time specified by            |
@@ -133,3 +130,4 @@ Yardstick Test Case Description TC050
 |              | execution problem.                                           |
 |              |                                                              |
 +--------------+--------------------------------------------------------------+
+
diff --git a/docs/testing/user/userguide/opnfv_yardstick_tc087.rst b/docs/testing/user/userguide/opnfv_yardstick_tc087.rst
new file mode 100644 (file)
index 0000000..99bfeeb
--- /dev/null
@@ -0,0 +1,182 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International
+.. License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) OPNFV, Ericsson and others.
+
+*************************************
+Yardstick Test Case Description TC087
+*************************************
+
++-----------------------------------------------------------------------------+
+|SDN Controller resilience in non-HA configuration                            |
+|                                                                             |
++--------------+--------------------------------------------------------------+
+|test case id  | OPNFV_YARDSTICK_TC087: SDN controller resilience in          |
+|              | non-HA configuration                                         |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|test purpose  | This test validates that network data plane services are     |
+|              | highly available in the event of an SDN Controller failure,  |
+|              | even if the SDN controller is deployed in a non-HA           |
+|              | configuration. Specifically, the test verifies that          |
+|              | existing data plane connectivity is not impacted, i.e. all   |
+|              | configured network services such as DHCP, ARP, L2,           |
+|              | L3 Security Groups should continue to operate                |
+|              | between the existing VMs while the SDN controller is         |
+|              | offline or rebooting.                                        |
+|              |                                                              |
+|              | The test also validates that new network service operations  |
+|              | (creating a new VM in the existing L2/L3 network or in a new |
+|              | network, etc.) are operational after the SDN controller      |
+|              | has recovered from a failure.                                |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|test method   | This test case fails the SDN controller service running      |
+|              | on the OpenStack controller node, then checks if already     |
+|              | configured DHCP/ARP/L2/L3/SNAT connectivity is not           |
+|              | impacted between VMs and the system is able to execute       |
+|              | new virtual network operations once the SDN controller       |
+|              | is restarted and has fully recovered                         |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|attackers     | In this test case, an attacker called “kill-process” is      |
+|              | needed. This attacker includes three parameters:             |
+|              |  1. fault_type: which is used for finding the attacker's     |
+|              |     scripts. It should be set to 'kill-process' in this test |
+|              |                                                              |
+|              |  2. process_name: should be set to the name of the SDN       |
+|              |     controller process                                       |
+|              |                                                              |
+|              |  3. host: which is the name of a control node where the      |
+|              |     SDN controller process is running                        |
+|              |                                                              |
+|              | e.g. -fault_type: "kill-process"                             |
+|              |      -process_name: "opendaylight"                           |
+|              |      -host: node1                                            |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|monitors      | This test case utilizes two monitors of type "ip-status"     |
+|              | and one monitor of type "process" to track the following     |
+|              | conditions:                                                  |
+|              |  1. "ping_same_network_l2": monitor ICMP traffic between     |
+|              |     VMs in the same Neutron network                          |
+|              |                                                              |
+|              |  2. "ping_external_snat": monitor ICMP traffic from VMs to   |
+|              |     an external host on the Internet to verify SNAT          |
+|              |     functionality.                                           |
+|              |                                                              |
+|              |  3. "SDN controller process monitor": a monitor checking the |
+|              |     state of a specified SDN controller process. It measures |
+|              |     the recovery time of the given process.                  |
+|              |                                                              |
+|              | Monitors of type "ip-status" use the "ping" utility to       |
+|              | verify reachability of a given target IP.                    |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|operations    | In this test case, the following operations are needed:      |
+|              |  1. "nova-create-instance-in_network": create a VM instance  |
+|              |     in one of the existing Neutron network.                  |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|metrics       | In this test case, there are two metrics:                    |
+|              |  1. process_recover_time: which indicates the maximum        |
+|              |     time (seconds) from the process being killed to          |
+|              |     recovered                                                |
+|              |                                                              |
+|              |  2. packet_drop: measure the packets that have been dropped  |
+|              |     by the monitors using pktgen.                            |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|test tool     | Developed by the project. Please see folder:                 |
+|              | "yardstick/benchmark/scenarios/availability/ha_tools"        |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|references    | none                                                         |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|configuration | This test case needs two configuration files:                |
+|              |  1. test case file: opnfv_yardstick_tc087.yaml               |
+|              |     - Attackers: see above “attackers” description           |
+|              |     - waiting_time: which is the time (seconds) from the     |
+|              |       process being killed to stopping the monitors          |
+|              |     - Monitors: see above “monitors” description             |
+|              |     - SLA: see above “metrics” description                   |
+|              |                                                              |
+|              |  2. POD file: pod.yaml The POD configuration should be      |
+|              |     recorded in pod.yaml first. The “host” item in this      |
+|              |     test case will use the node name in the pod.yaml.        |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|test sequence | Description and expected result                              |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|pre-action    |  1. The OpenStack cluster is set up with a single SDN        |
+|              |     controller in a non-HA configuration.                    |
+|              |                                                              |
+|              |  2. One or more Neutron networks are created with two or     |
+|              |     more VMs attached to each of the Neutron networks.       |
+|              |                                                              |
+|              |  3. The Neutron networks are attached to a Neutron router    |
+|              |     which is attached to an external network towards the     |
+|              |     DCGW.                                                    |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 1        | Start IP connectivity monitors:                              |
+|              |  1. Check the L2 connectivity between the VMs in the same    |
+|              |     Neutron network.                                         |
+|              |                                                              |
+|              |  2. Check connectivity from one VM to an external host on    |
+|              |     the Internet to verify SNAT functionality.               |
+|              |                                                              |
+|              | Result: The monitor info will be collected.                  |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 2        | Start attacker:                                              |
+|              | SSH connect to the VIM node and kill the SDN controller      |
+|              | process                                                      |
+|              |                                                              |
+|              | Result: the SDN controller service will be shutdown          |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 3        | Verify the results of the IP connectivity monitors.          |
+|              |                                                              |
+|              | Result: The outage_time metric reported by the monitors      |
+|              | is zero.                                                     |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 4        | Restart the SDN controller.                                  |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 5        | Create a new VM in the existing Neutron network              |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 6        | Verify connectivity between VMs as follows:                  |
+|              |  1. Check the L2 connectivity between the previously         |
+|              |     existing VM and the newly created VM on the same         |
+|              |     Neutron network by sending ICMP messages                 |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 7        | Stop IP connectivity monitors after a period of time         |
+|              | specified by “waiting_time”                                  |
+|              |                                                              |
+|              | Result: The monitor info will be aggregated                  |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 8        | Verify the IP connectivity monitor results                   |
+|              |                                                              |
+|              | Result: IP connectivity monitor should not have any packet   |
+|              | drop failures reported                                       |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|test verdict  | This test fails if the SLAs are not met or if there is a     |
+|              | test case execution problem. The SLAs are defined as follows |
+|              | for this test:                                               |
+|              |  * SDN Controller recovery                                   |
+|              |    * process_recover_time <= 30 sec                          |
+|              |                                                              |
+|              |  * no impact on data plane connectivity during SDN           |
+|              |    controller failure and recovery.                          |
+|              |    * packet_drop == 0                                        |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+
diff --git a/docs/testing/user/userguide/opnfv_yardstick_tc090.rst b/docs/testing/user/userguide/opnfv_yardstick_tc090.rst
new file mode 100644 (file)
index 0000000..1f8747b
--- /dev/null
@@ -0,0 +1,151 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International
+.. License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) OPNFV, Yin Kanglin and others.
+.. 14_ykl@tongji.edu.cn
+
+*************************************
+Yardstick Test Case Description TC090
+*************************************
+
++-----------------------------------------------------------------------------+
+|Control Node OpenStack Service High Availability - Database Instances        |
+|                                                                             |
++--------------+--------------------------------------------------------------+
+|test case id  | OPNFV_YARDSTICK_TC090: Control node OpenStack service down - |
+|              | database instances                                           |
++--------------+--------------------------------------------------------------+
+|test purpose  | This test case will verify the high availability of the      |
+|              | data base instances used by OpenStack (mysql) on control     |
+|              | node.                                                        |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|test method   | This test case kills the processes of database service on a  |
+|              | selected control node, then checks whether the request of    |
+|              | the related OpenStack command is OK and the killed processes |
+|              | are recovered.                                               |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|attackers     | In this test case, an attacker called "kill-process" is      |
+|              | needed. This attacker includes three parameters:             |
+|              | 1) fault_type: which is used for finding the attacker's      |
+|              | scripts. It should be always set to "kill-process" in this   |
+|              | test case.                                                   |
+|              | 2) process_name: which is the process name of the specified  |
+|              | OpenStack service. If there are multiple processes use the   |
+|              | same name on the host, all of them are killed by this        |
+|              | attacker.                                                    |
+|              | In this case. This parameter should always set to the name   |
+|              | of the database service of OpenStack.                        |
+|              | 3) host: which is the name of a control node being attacked. |
+|              |                                                              |
+|              | e.g.                                                         |
+|              | -fault_type: "kill-process"                                  |
+|              | -process_name: "mysql"                                       |
+|              | -host: node1                                                 |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|monitors      | In this test case, two kinds of monitor are needed:          |
+|              | 1. the "openstack-cmd" monitor constantly requests a specific|
+|              | Openstack command, which needs two parameters:               |
+|              | 1) monitor_type: which is used for finding the monitor class |
+|              | and related scripts. It should be always set to              |
+|              | "openstack-cmd" for this monitor.                            |
+|              | 2) command_name: which is the command name used for request. |
+|              | In this case, the command name should be neutron related     |
+|              | commands.                                                    |
+|              |                                                              |
+|              | 2. the "process" monitor checks whether a process is running |
+|              | on a specific node, which needs three parameters:            |
+|              | 1) monitor_type: which used for finding the monitor class and|
+|              | related scripts. It should be always set to "process"        |
+|              | for this monitor.                                            |
+|              | 2) process_name: which is the process name for monitor       |
+|              | 3) host: which is the name of the node running the process   |
+|              |                                                              |
+|              | The examples of monitors show as follows, there are four     |
+|              | instance of the "openstack-cmd" monitor, in order to check   |
+|              | the database connection of different OpenStack components.   |
+|              |                                                              |
+|              | monitor1:                                                    |
+|              | -monitor_type: "openstack-cmd"                               |
+|              | -api_name: "openstack image list"                            |
+|              | monitor2:                                                    |
+|              | -monitor_type: "openstack-cmd"                               |
+|              | -api_name: "openstack router list"                           |
+|              | monitor3:                                                    |
+|              | -monitor_type: "openstack-cmd"                               |
+|              | -api_name: "openstack stack list"                            |
+|              | monitor4:                                                    |
+|              | -monitor_type: "openstack-cmd"                               |
+|              | -api_name: "openstack volume list"                           |
+|              | monitor5:                                                    |
+|              | -monitor_type: "process"                                     |
+|              | -process_name: "mysql"                                       |
+|              | -host: node1                                                 |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|metrics       | In this test case, there are two metrics:                    |
+|              | 1)service_outage_time: which indicates the maximum outage    |
+|              | time (seconds) of the specified OpenStack command request.   |
+|              | 2)process_recover_time: which indicates the maximum time     |
+|              | (seconds) from the process being killed to recovered         |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|test tool     | Developed by the project. Please see folder:                 |
+|              | "yardstick/benchmark/scenarios/availability/ha_tools"        |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|references    | ETSI NFV REL001                                              |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|configuration | This test case needs two configuration files:                |
+|              | 1) test case file: opnfv_yardstick_tc090.yaml                |
+|              | -Attackers: see above "attackers" description                |
+|              | -waiting_time: which is the time (seconds) from the process  |
+|              | being killed to stopping the monitors                        |
+|              | -Monitors: see above "monitors" description                  |
+|              | -SLA: see above "metrics" description                        |
+|              |                                                              |
+|              | 2)POD file: pod.yaml                                         |
+|              | The POD configuration should be recorded in pod.yaml first.  |
+|              | The "host" item in this test case will use the node name in  |
+|              | the pod.yaml.                                                |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|test sequence | description and expected result                              |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 1        | start monitors:                                              |
+|              | each monitor will run in an independent process              |
+|              |                                                              |
+|              | Result: The monitor info will be collected.                  |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 2        | do attacker: connect the host through SSH, and then execute  |
+|              | the kill process script with param value specified by        |
+|              | "process_name"                                               |
+|              |                                                              |
+|              | Result: Process will be killed.                              |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 3        | stop monitors after a period of time specified by            |
+|              | "waiting_time"                                               |
+|              |                                                              |
+|              | Result: The monitor info will be aggregated.                 |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 4        | verify the SLA                                               |
+|              |                                                              |
+|              | Result: The test case is passed or not.                      |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|post-action   | It is the action when the test cases exit. It will check the |
+|              | status of the specified process on the host, and restart the |
+|              | process if it is not running for next test cases             |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|test verdict  | Fails only if SLA is not passed, or if there is a test case  |
+|              | execution problem.                                           |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
diff --git a/docs/testing/user/userguide/opnfv_yardstick_tc091.rst b/docs/testing/user/userguide/opnfv_yardstick_tc091.rst
new file mode 100644 (file)
index 0000000..8e89b64
--- /dev/null
@@ -0,0 +1,138 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International
+.. License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) OPNFV, Yin Kanglin and others.
+.. 14_ykl@tongji.edu.cn
+
+*************************************
+Yardstick Test Case Description TC091
+*************************************
+
++-----------------------------------------------------------------------------+
+|Control Node Openstack Service High Availability - Heat Api                  |
+|                                                                             |
++--------------+--------------------------------------------------------------+
+|test case id  | OPNFV_YARDSTICK_TC091: Control node OpenStack service down - |
+|              | heat api                                                     |
++--------------+--------------------------------------------------------------+
+|test purpose  | This test case will verify the high availability of the      |
+|              | orchestration service provided by OpenStack (heat-api) on    |
+|              | control node.                                                |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|test method   | This test case kills the processes of heat-api service on a  |
+|              | selected control node, then checks whether the request of    |
+|              | the related OpenStack command is OK and the killed processes |
+|              | are recovered.                                               |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|attackers     | In this test case, an attacker called "kill-process" is      |
+|              | needed. This attacker includes three parameters:             |
+|              | 1) fault_type: which is used for finding the attacker's      |
+|              | scripts. It should be always set to "kill-process" in this   |
+|              | test case.                                                   |
+|              | 2) process_name: which is the process name of the specified  |
+|              | OpenStack service. If there are multiple processes use the   |
+|              | same name on the host, all of them are killed by this        |
+|              | attacker.                                                    |
+|              | In this case. This parameter should always set to "heat-api".|
+|              | 3) host: which is the name of a control node being attacked. |
+|              |                                                              |
+|              | e.g.                                                         |
+|              | -fault_type: "kill-process"                                  |
+|              | -process_name: "heat-api"                                    |
+|              | -host: node1                                                 |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|monitors      | In this test case, two kinds of monitor are needed:          |
+|              | 1. the "openstack-cmd" monitor constantly requests a specific|
+|              | OpenStack command, which needs two parameters:               |
+|              | 1) monitor_type: which is used for finding the monitor class |
+|              | and related scripts. It should be always set to              |
+|              | "openstack-cmd" for this monitor.                            |
+|              | 2) command_name: which is the command name used for request. |
+|              | In this case, the command name should be neutron related     |
+|              | commands.                                                    |
+|              |                                                              |
+|              | 2. the "process" monitor checks whether a process is running |
+|              | on a specific node, which needs three parameters:            |
+|              | 1) monitor_type: which used for finding the monitor class and|
+|              | related scripts. It should be always set to "process"        |
+|              | for this monitor.                                            |
+|              | 2) process_name: which is the process name for monitor       |
+|              | 3) host: which is the name of the node running the process   |
+|              |                                                              |
+|              | e.g.                                                         |
+|              | monitor1:                                                    |
+|              | -monitor_type: "openstack-cmd"                               |
+|              | -command_name: "heat stack list"                             |
+|              | monitor2:                                                    |
+|              | -monitor_type: "process"                                     |
+|              | -process_name: "heat-api"                                    |
+|              | -host: node1                                                 |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|metrics       | In this test case, there are two metrics:                    |
+|              | 1)service_outage_time: which indicates the maximum outage    |
+|              | time (seconds) of the specified OpenStack command request.   |
+|              | 2)process_recover_time: which indicates the maximum time     |
+|              | (seconds) from the process being killed to recovered         |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|test tool     | Developed by the project. Please see folder:                 |
+|              | "yardstick/benchmark/scenarios/availability/ha_tools"        |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|references    | ETSI NFV REL001                                              |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|configuration | This test case needs two configuration files:                |
+|              | 1) test case file: opnfv_yardstick_tc091.yaml                |
+|              | -Attackers: see above "attackers" description                |
+|              | -waiting_time: which is the time (seconds) from the process  |
+|              | being killed to the monitor stopped                          |
+|              | -Monitors: see above "monitors" description                  |
+|              | -SLA: see above "metrics" description                        |
+|              |                                                              |
+|              | 2)POD file: pod.yaml                                         |
+|              | The POD configuration should be recorded in pod.yaml first.  |
+|              | the "host" item in this test case will use the node name in  |
+|              | the pod.yaml.                                                |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|test sequence | description and expected result                              |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 1        | start monitors:                                              |
+|              | each monitor will run in an independent process              |
+|              |                                                              |
+|              | Result: The monitor info will be collected.                  |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 2        | do attacker: connect the host through SSH, and then execute  |
+|              | the kill process script with param value specified by        |
+|              | "process_name"                                               |
+|              |                                                              |
+|              | Result: Process will be killed.                              |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 3        | stop monitors after a period of time specified by            |
+|              | "waiting_time"                                               |
+|              |                                                              |
+|              | Result: The monitor info will be aggregated.                 |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 4        | verify the SLA                                               |
+|              |                                                              |
+|              | Result: The test case is passed or not.                      |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|post-action   | It is the action when the test cases exit. It will check the |
+|              | status of the specified process on the host, and restart the |
+|              | process if it is not running for next test cases             |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|test verdict  | Fails only if SLA is not passed, or if there is a test case  |
+|              | execution problem.                                           |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
diff --git a/etc/infra/infra_deploy_multi.yaml.sample b/etc/infra/infra_deploy_multi.yaml.sample
new file mode 100644 (file)
index 0000000..aa27b73
--- /dev/null
@@ -0,0 +1,97 @@
+nodes:
+  - name: Deployment and Controller node number 1 VM
+    openstack_node: controller
+    hostname: control-01
+    interfaces:
+      - network: management
+        ip: 192.168.1.10
+        netmask: 255.255.255.0
+      - network: traffic
+        ip: 192.20.1.10
+        netmask: 255.255.255.0
+    user: ubuntu
+    password: password
+    image: /tmp/image_cntrl_1.img
+    disk: 13000
+    ram: 9000
+    vcpus: 4
+
+  - name: Controller node number 2 VM
+    openstack_node: controller
+    hostname: control-02
+    interfaces:
+      - network: management
+        ip: 192.168.1.11
+        netmask: 255.255.255.0
+      - network: traffic
+        ip: 192.20.1.11
+        netmask: 255.255.255.0
+    user: ubuntu
+    password: password
+    image: /tmp/image_cntrl_2.img
+    disk: 11000
+    ram: 6000
+    vcpus: 2
+
+  - name: Compute node number 1 VM
+    openstack_node: compute
+    hostname: compute-01
+    interfaces:
+      - network: management
+        ip: 192.168.1.12
+        netmask: 255.255.255.0
+      - network: traffic
+        ip: 192.20.1.12
+        netmask: 255.255.255.0
+    user: ubuntu
+    password: password
+    image: /tmp/image_comp_1.img
+    disk: 30000
+    ram: 16000
+    vcpus: 12
+
+  - name: Compute node number 2 VM
+    openstack_node: compute
+    hostname: compute-02
+    interfaces:
+      - network: management
+        ip: 192.168.1.13
+        netmask: 255.255.255.0
+      - network: traffic
+        ip: 192.20.1.13
+        netmask: 255.255.255.0
+    user: ubuntu
+    password: password
+    image: /tmp/image_comp_2.img
+    disk: 12000
+    ram: 6000
+    vcpus: 4
+
+  - name: Jump host
+    hostname: yardstickvm
+    interfaces:
+      - network: management
+        ip: 192.168.1.14
+        netmask: 255.255.255.0
+      - network: traffic
+        ip: 192.20.1.14
+        netmask: 255.255.255.0
+    user: ubuntu
+    password: password
+    image: /tmp/image_yardstick.img
+    disk: 28000
+    ram: 12000
+    vcpus: 4
+
+networks:
+  - name: management
+    default_gateway: True
+    host_ip: 192.168.1.1
+    netmask: 255.255.255.0
+
+  - name: traffic
+    default_gateway: False  # This parameter is not mandatory, default value: False
+    host_ip: 192.20.1.1
+    netmask: 255.255.255.0
+    dhcp_ip_start: 192.20.1.200
+    dhcp_ip_stop: 192.20.1.250
similarity index 51%
rename from etc/infra/infra_deploy.yaml.sample
rename to etc/infra/infra_deploy_one.yaml.sample
index 8ed7936..f8759d4 100644 (file)
@@ -1,32 +1,35 @@
 nodes:
-  - name: Yardstick VM
-    hostname: yardstickvm
+  - name: Deployment, Controller and Compute single VM
+    openstack_node: controller  # if no compute nodes are defined means a standalone deployment
+    hostname: allinone
     interfaces:
       - network: management
-        ip: 192.168.1.10
+        ip: 192.168.1.21
+        netmask: 255.255.255.0
+      - network: traffic
+        ip: 192.20.1.21
         netmask: 255.255.255.0
     user: ubuntu
     password: password
-    image: /tmp/image1.qcow
-    disk: 50000
-    ram: 8192
-    vcpus: 4
+    image: /tmp/image_one.img
+    disk: 22000
+    ram: 14000
+    vcpus: 12
 
-  - name: Controller_Compute VM
-    openstack_node: controller_compute
-    hostname: controller_compute
+  - name: Jump host
+    hostname: yardstickvm
     interfaces:
       - network: management
-        ip: 192.168.1.20
+        ip: 192.168.1.22
         netmask: 255.255.255.0
       - network: traffic
-        ip: 192.20.1.20
+        ip: 192.20.1.22
         netmask: 255.255.255.0
     user: ubuntu
     password: password
-    image: /tmp/image_2.qcow
-    disk: 40000
-    ram: 32768
+    image: /tmp/image_yardstick.img
+    disk: 22000
+    ram: 10000
     vcpus: 4
 
 networks:
@@ -39,3 +42,5 @@ networks:
     default_gateway: False  # This parameter is not mandatory, default value: False
     host_ip: 192.20.1.1
     netmask: 255.255.255.0
+    dhcp_ip_start: 192.20.1.200
+    dhcp_ip_stop: 192.20.1.250
diff --git a/etc/infra/infra_deploy_two.yaml.sample b/etc/infra/infra_deploy_two.yaml.sample
new file mode 100644 (file)
index 0000000..a29f754
--- /dev/null
@@ -0,0 +1,63 @@
+nodes:
+  - name: Deployment and Controller node number 1 VM
+    openstack_node: controller
+    hostname: control-01
+    interfaces:
+      - network: management
+        ip: 192.168.1.118
+        netmask: 255.255.255.0
+      - network: traffic
+        ip: 192.20.1.118
+        netmask: 255.255.255.0
+    user: ubuntu
+    password: password
+    image: /tmp/image_cntrl_1.img
+    disk: 12000
+    ram: 10000
+    vcpus: 6
+
+  - name: Compute node number 1 VM
+    openstack_node: compute
+    hostname: compute-01
+    interfaces:
+      - network: management
+        ip: 192.168.1.119
+        netmask: 255.255.255.0
+      - network: traffic
+        ip: 192.20.1.119
+        netmask: 255.255.255.0
+    user: ubuntu
+    password: password
+    image: /tmp/image_comp_1.img
+    disk: 44000
+    ram: 30000
+    vcpus: 14
+
+  - name: Jump host
+    hostname: yardstickvm
+    interfaces:
+      - network: management
+        ip: 192.168.1.120
+        netmask: 255.255.255.0
+      - network: traffic
+        ip: 192.20.1.120
+        netmask: 255.255.255.0
+    user: ubuntu
+    password: password
+    image: /tmp/image_yardstick.img
+    disk: 22000
+    ram: 10000
+    vcpus: 4
+
+networks:
+  - name: management
+    default_gateway: True
+    host_ip: 192.168.1.1
+    netmask: 255.255.255.0
+
+  - name: traffic
+    default_gateway: False  # This parameter is not mandatory, default value: False
+    host_ip: 192.20.1.1
+    netmask: 255.255.255.0
+    dhcp_ip_start: 192.20.1.200
+    dhcp_ip_stop: 192.20.1.250
index 57a8305..1f755dc 100644 (file)
@@ -26,7 +26,6 @@ nodes:
         lib_path: "/opt/ixia/ixos-api/8.01.0.2/lib/ixTcl1.0"
         root_dir: "/opt/ixia/ixos-api/8.01.0.2/"
         py_bin_path: "/opt/ixia/ixload/8.01.106.3/bin/"
-        py_lib_path: "/opt/ixia/ixnetwork/8.01.1029.14/lib/PythonApi"
         dut_result_dir: "/mnt/ixia"
         version: 8.1
     interfaces:
index 7250c4c..ef63ea0 100644 (file)
@@ -26,13 +26,12 @@ nodes:
     user: {{gen.user}}
     password: {{gen.password}}
     key_filename: {{gen.key_filename}}
-    tg_config: 
+    tg_config:
         ixchassis: "{{gen.tg_config.ixchassis}}" #ixia chassis ip
         tcl_port: "{{gen.tg_config.tcl_port}}" # tcl server port
         lib_path: "{{gen.tg_config.lib_path}}"
         root_dir: "{{gen.tg_config.root_dir}}"
         py_bin_path: "{{gen.tg_config.py_bin_path}}"
-        py_lib_path: "{{gen.tg_config.py_lib_path}}"
         dut_result_dir: "{{gen.tg_config.dut_result_dir}}"
         version: "{{gen.tg_config.version}}"
     interfaces:
index 617a651..98ed8c5 100644 (file)
@@ -32,7 +32,6 @@ nodes:
       lib_path: "{{gen.tg_config.lib_path}}"
       root_dir: "{{gen.tg_config.root_dir}}"
       py_bin_path: "{{gen.tg_config.py_bin_path}}"
-      py_lib_path: "{{gen.tg_config.py_lib_path}}"
       dut_result_dir: "{{gen.tg_config.dut_result_dir}}"
       version: "{{gen.tg_config.version}}"
     interfaces:
index 1dbf64d..7492934 100755 (executable)
@@ -84,7 +84,8 @@ apt-get update && apt-get install -y \
     libxft-dev \
     libxss-dev \
     sudo \
-    iputils-ping
+    iputils-ping \
+    rabbitmq-server
 
 if [[ "${DOCKER_ARCH}" != "aarch64" ]]; then
     apt-get install -y libc6:arm64
@@ -94,6 +95,11 @@ apt-get -y autoremove && apt-get clean
 
 git config --global http.sslVerify false
 
+# Start and configure RabbitMQ
+service rabbitmq-server restart
+rabbitmqctl start_app
+rabbitmqctl add_user yardstick yardstick
+rabbitmqctl set_permissions yardstick ".*" ".*" ".*"
 
 # install yardstick + dependencies
 easy_install -U pip==9.0.1
@@ -113,4 +119,4 @@ tar xvf ${NSB_DIR}/trex_client.tar.gz -C ${NSB_DIR}
 rm -f ${NSB_DIR}/trex_client.tar.gz
 
 service nginx restart
-uwsgi -i /etc/yardstick/yardstick.ini
+uwsgi -i /etc/yardstick/yardstick.ini
\ No newline at end of file
index 409bcd8..4679bc8 100644 (file)
@@ -26,6 +26,7 @@ flask==0.11.1           # BSD; OSI Approved  BSD License
 functools32==3.2.3.post2; python_version <= "2.7"    # PSF license
 futures==3.1.1;python_version=='2.7'    # BSD; OSI Approved  BSD License
 influxdb==4.1.1         # MIT License; OSI Approved  MIT License
+IxNetwork==8.40.1124.9  # MIT License; OSI Approved  MIT License
 jinja2schema==0.1.4     # OSI Approved  BSD License
 keystoneauth1==3.1.0    # OSI Approved  Apache Software License
 kubernetes==3.0.0a1     # OSI Approved  Apache Software License
@@ -37,6 +38,7 @@ os-client-config==1.28.0    # OSI Approved  Apache Software License
 osc-lib==1.7.0          # OSI Approved  Apache Software License
 oslo.config==4.11.1     # OSI Approved  Apache Software License
 oslo.i18n==3.17.0       # OSI Approved  Apache Software License
+oslo.messaging===5.30.2 # OSI Approved  Apache Software License
 oslo.privsep===1.22.1   # OSI Approved  Apache Software License
 oslo.serialization==2.20.1  # OSI Approved  Apache Software License
 oslo.utils==3.28.0      # OSI Approved  Apache Software License
@@ -55,7 +57,7 @@ python-keystoneclient==3.13.0   # OSI Approved  Apache Software License
 python-neutronclient==6.5.0     # OSI Approved  Apache Software License
 python-novaclient==9.1.1        # OSI Approved  Apache Software License
 pyzmq==16.0.2           # LGPL+BSD; OSI Approved  GNU Library or Lesser General Public License (LGPL); OSI Approved  BSD License
-requests==2.18.2        # Apache 2.0; OSI Approved  Apache Software License
+requests==2.11.1        # Apache 2.0; OSI Approved  Apache Software License
 requestsexceptions==1.3.0   # OSI Approved  Apache Software License
 scp==0.10.2             # LGPL
 shade==1.22.2           # OSI Approved  Apache Software License
index 7667e5a..e4ace44 100644 (file)
@@ -14,7 +14,7 @@ schema: "yardstick:task:0.1"
 scenarios:
 -
   type: Dummy
-
+  name: Dummy
   runner:
     type: Duration
     duration: 5
index e2e4b66..682c113 100644 (file)
@@ -27,3 +27,4 @@ scenarios:
 
 context:
   type: Dummy
+  name: Dummy
index 2ea0221..00f74c1 100644 (file)
@@ -38,3 +38,4 @@ scenarios:
 
 context:
   type: Dummy
+  name: Dummy
index 9b2a152..aca7c21 100644 (file)
@@ -29,7 +29,6 @@ vnfd:vnfd-catalog:
                 tcl_port: '{{tg_config.tcl_port}}' # tcl server port
                 lib_path: '{{tg_config.lib_path}}'
                 root_dir: '{{tg_config.root_dir}}'
-                py_lib_path: '{{tg_config.py_lib_path}}'
                 py_bin_path: '{{tg_config.py_bin_path}}'
                 dut_result_dir: '{{tg_config.dut_result_dir}}'
                 version: '{{tg_config.version}}'
index ad4953f..0324bb8 100644 (file)
@@ -29,7 +29,6 @@ vnfd:vnfd-catalog:
                 tcl_port: '{{tg_config.tcl_port}}' # tcl server port
                 lib_path: '{{tg_config.lib_path}}'
                 root_dir: '{{tg_config.root_dir}}'
-                py_lib_path: '{{tg_config.py_lib_path}}'
                 py_bin_path: '{{tg_config.py_bin_path}}'
                 dut_result_dir: '{{tg_config.dut_result_dir}}'
                 version: '{{tg_config.version}}'
index ffbfbde..def8cdc 100644 (file)
@@ -28,7 +28,6 @@ vnfd:vnfd-catalog:
                 tcl_port: '{{tg_config.tcl_port}}' # tcl server port
                 lib_path: '{{tg_config.lib_path}}'
                 root_dir: '{{tg_config.root_dir}}'
-                py_lib_path: '{{tg_config.py_lib_path}}'
                 dut_result_dir: '{{tg_config.dut_result_dir}}'
                 version: '{{tg_config.version}}'
         vdu:
index 5df769c..1e1591c 100755 (executable)
@@ -43,6 +43,12 @@ if [ "${YARD_IMG_ARCH}" == "arm64" ]; then
     fi
 fi
 
+cleanup_loopbacks() {
+    # try again to cleanup loopbacks in case of error
+    losetup -a
+    losetup -O NAME,BACK-FILE | awk '/yardstick/ { print $1 }' | xargs -l1 losetup -v -d || true
+}
+
 build_yardstick_image()
 {
     echo
@@ -56,6 +62,7 @@ build_yardstick_image()
             # Build the image. Retry once if the build fails
             $cmd || $cmd
 
+            cleanup_loopbacks
             if [ ! -f "${RAW_IMAGE}" ]; then
                 echo "Failed building RAW image"
                 exit 1
@@ -70,11 +77,25 @@ build_yardstick_image()
                      -e YARD_IMG_ARCH=${YARD_IMG_ARCH} \
                      -vvv -i inventory.ini build_yardstick_image.yml
 
+            cleanup_loopbacks
             if [ ! -f "${QCOW_IMAGE}" ]; then
                 echo "Failed building QCOW image"
                 exit 1
             fi
         fi
+        # DPDK compile is not enabled for arm64 yet so disable for now
+        # JIRA: YARSTICK-1124
+        if [[ ! -f "${QCOW_NSB_IMAGE}"  && ${DEPLOY_SCENARIO} == *[_-]ovs_dpdk[_-]* && "${YARD_IMG_ARCH}" != "arm64" ]]; then
+            ansible-playbook \
+                     -e img_property="nsb" \
+                     -e YARD_IMG_ARCH=${YARD_IMG_ARCH} \
+                     -vvv -i inventory.ini build_yardstick_image.yml
+            cleanup_loopbacks
+            if [ ! -f "${QCOW_NSB_IMAGE}" ]; then
+                echo "Failed building QCOW NSB image"
+                exit 1
+            fi
+        fi
     fi
 }
 
@@ -112,6 +133,18 @@ load_yardstick_image()
             ${EXTRA_PARAMS} \
             --file ${QCOW_IMAGE} \
             yardstick-image)
+        # DPDK compile is not enabled for arm64 yet so disable NSB images for now
+        # JIRA: YARSTICK-1124
+        if [[ $DEPLOY_SCENARIO == *[_-]ovs_dpdk[_-]* && "${YARD_IMG_ARCH}" != "arm64" ]]; then
+            nsb_output=$(eval openstack ${SECURE} image create \
+                --public \
+                --disk-format qcow2 \
+                --container-format bare \
+                ${EXTRA_PARAMS} \
+                --file ${QCOW_NSB_IMAGE} \
+                yardstick-samplevnfs)
+            echo "$nsb_output"
+        fi
     fi
 
     echo "$output"
@@ -232,6 +265,7 @@ create_nova_flavor()
 main()
 {
     QCOW_IMAGE="/tmp/workspace/yardstick/yardstick-image.img"
+    QCOW_NSB_IMAGE="/tmp/workspace/yardstick/yardstick-nsb-image.img"
     RAW_IMAGE="/tmp/workspace/yardstick/yardstick-image.tar.gz"
 
     if [ -f /home/opnfv/images/yardstick-image.img ];then
index f46eb84..ab41912 100644 (file)
@@ -19,6 +19,8 @@ scenarios:
   options:
     packetsize: 64
     rate: 100
+    eth1: ens4
+    eth2: ens5
 
   host: demeter.yardstick-TC042
   target: poseidon.yardstick-TC042
@@ -34,8 +36,13 @@ scenarios:
 
 context:
   name: yardstick-TC042
-  image: yardstick-image-pktgen-ready
-  flavor: yardstick-pktgen-dpdk.flavor
+  image: yardstick-samplevnfs
+  flavor:
+    vcpus: 4
+    ram: 4096
+    disk: 7
+    extra_specs:
+      hw:mem_page_size: "large"
   user: ubuntu
 
   placement_groups:
index dde3a10..faddfab 100644 (file)
@@ -13,12 +13,9 @@ description: >
     Yardstick TC050 config file;
     HA test case: OpenStack Controller Node Network High Availability.
 
-{% set file = file or '/etc/yardstick/pod.yaml' %}
 {% set attack_host = attack_host or "node1" %}
-{% set external_net = external_net or 'br-ex' %}
-{% set management_net = management_net or 'br-mgmt' %}
-{% set storage_net = storage_net or 'br-storage' %}
-{% set internal_net = internal_net or 'br-mesh' %}
+{% set interface_name = interface_name or 'br-mgmt' %}
+{% set file = file or '/etc/yardstick/pod.yaml' %}
 
 scenarios:
   -
@@ -27,43 +24,13 @@ scenarios:
       attackers:
         -
           fault_type: "general-attacker"
-          host: {{attack_host}}
+          host: {{ attack_host }}
           key: "close-br-public"
           attack_key: "close-interface"
           action_parameter:
-            interface: {{external_net}}
-          rollback_parameter:
-            interface: {{external_net}}
-
-        -
-          fault_type: "general-attacker"
-          host: {{attack_host}}
-          key: "close-br-mgmt"
-          attack_key: "close-interface"
-          action_parameter:
-            interface: {{management_net}}
-          rollback_parameter:
-            interface: {{management_net}}
-
-        -
-          fault_type: "general-attacker"
-          host: {{attack_host}}
-          key: "close-br-storage"
-          attack_key: "close-interface"
-          action_parameter:
-            interface: {{storage_net}}
-          rollback_parameter:
-            interface: {{storage_net}}
-
-        -
-          fault_type: "general-attacker"
-          host: {{attack_host}}
-          key: "close-br-private"
-          attack_key: "close-interface"
-          action_parameter:
-            interface: {{internal_net}}
+            interface: {{ interface_name }}
           rollback_parameter:
-            interface: {{internal_net}}
+            interface: {{ interface_name }}
 
       monitors:
         -
@@ -104,49 +71,35 @@ scenarios:
 
 
       steps:
-        -
-          actionKey: "close-br-public"
-          actionType: "attacker"
-          index: 1
-
-        -
-          actionKey: "close-br-mgmt"
-          actionType: "attacker"
-          index: 2
-
-        -
-          actionKey: "close-br-storage"
-          actionType: "attacker"
-          index: 3
-
-        -
-          actionKey: "close-br-private"
-          actionType: "attacker"
-          index: 4
-
         -
           actionKey: "nova-image-list"
           actionType: "monitor"
-          index: 5
+          index: 1
 
         -
           actionKey: "neutron-router-list"
           actionType: "monitor"
-          index: 6
+          index: 2
 
         -
           actionKey: "heat-stack-list"
           actionType: "monitor"
-          index: 7
+          index: 3
 
         -
           actionKey: "cinder-list"
           actionType: "monitor"
-          index: 8
+          index: 4
+
+        -
+          actionKey: "close-br-public"
+          actionType: "attacker"
+          index: 5
+
 
 
     nodes:
-      {{attack_host}}: {{attack_host}}.LF
+      {{ attack_host }}: {{ attack_host }}.LF
     runner:
       type: Duration
       duration: 1
@@ -157,4 +110,4 @@ scenarios:
 context:
   type: Node
   name: LF
-  file: {{file}}
+  file: {{ file }}
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc090.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc090.yaml
new file mode 100644 (file)
index 0000000..4137204
--- /dev/null
@@ -0,0 +1,78 @@
+##############################################################################
+# Copyright (c) 2017 14_ykl@tongji.edu.cn and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+
+schema: "yardstick:task:0.1"
+description: >
+    Yardstick TC090 config file;
+    HA test case: Control node Openstack service down - database instance.
+
+{% set file = file or '/etc/yardstick/pod.yaml' %}
+{% set attack_host = attack_host or "node1" %}
+{% set attack_process = attack_process or "mysql" %}
+{% set monitor_time = monitor_time or 30 %}
+
+scenarios:
+-
+  type: ServiceHA
+  options:
+    attackers:
+    - fault_type: "kill-process"
+      process_name: "{{ attack_process }}"
+      host: {{attack_host}}
+
+    monitors:
+    - monitor_type: "openstack-cmd"
+      command_name: "openstack image list"
+      monitor_time: {{monitor_time}}
+      monitor_number: 3
+      sla:
+        max_outage_time: 5
+    - monitor_type: "openstack-cmd"
+      command_name: "openstack router list"
+      monitor_time: {{monitor_time}}
+      monitor_number: 3
+      sla:
+        max_outage_time: 5
+    - monitor_type: "openstack-cmd"
+      command_name: "openstack stack list"
+      monitor_time: {{monitor_time}}
+      monitor_number: 3
+      sla:
+        max_outage_time: 5
+    - monitor_type: "openstack-cmd"
+      command_name: "openstack volume list"
+      monitor_time: {{monitor_time}}
+      monitor_number: 3
+      sla:
+        max_outage_time: 5
+    - monitor_type: "process"
+      process_name: "{{ attack_process }}"
+      host: {{attack_host}}
+      monitor_time: {{monitor_time}}
+      monitor_number: 3
+      sla:
+        max_recover_time: 30
+
+  nodes:
+    {{attack_host}}: {{attack_host}}.LF
+
+  runner:
+    type: Duration
+    duration: 1
+  sla:
+    outage_time: 5
+    action: monitor
+
+
+context:
+  type: Node
+  name: LF
+  file: {{file}}
+
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc091.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc091.yaml
new file mode 100644 (file)
index 0000000..d952464
--- /dev/null
@@ -0,0 +1,59 @@
+##############################################################################
+# Copyright (c) 2017 14_ykl@tongji.edu.cn and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+
+schema: "yardstick:task:0.1"
+description: >
+    Yardstick TC091 config file;
+    HA test case: Control node Openstack service down - heat-api.
+
+{% set file = file or '/etc/yardstick/pod.yaml' %}
+{% set attack_host = attack_host or "node1" %}
+{% set attack_process = attack_process or "heat-api" %}
+
+scenarios:
+-
+  type: ServiceHA
+  options:
+    attackers:
+    - fault_type: "kill-process"
+      process_name: "{{ attack_process }}"
+      host: {{attack_host}}
+
+    monitors:
+    - monitor_type: "openstack-cmd"
+      command_name: "openstack stack list"
+      monitor_time: 10
+      monitor_number: 3
+      sla:
+        max_outage_time: 5
+    - monitor_type: "process"
+      process_name: "{{ attack_process }}"
+      host: {{attack_host}}
+      monitor_time: 30
+      monitor_number: 3
+      sla:
+        max_recover_time: 30
+
+  nodes:
+    {{attack_host}}: {{attack_host}}.LF
+
+  runner:
+    type: Duration
+    duration: 1
+  sla:
+    outage_time: 5
+    action: monitor
+
+
+context:
+  type: Node
+  name: LF
+  file: {{file}}
+
@@ -1,5 +1,5 @@
 ##############################################################################
-# Copyright (c) 2017 Ericsson AB and others.
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
 #
 # All rights reserved. This program and the accompanying materials
 # are made available under the terms of the Apache License, Version 2.0
@@ -7,18 +7,12 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 ---
-# ERICSSON POD1 VTC daily task suite
+# k8 canal lb noha daily task suite
 
 schema: "yardstick:suite:0.1"
 
-name: "opnfv_vTC_daily"
+name: "k8-canal-lb-noha"
 test_cases_dir: "tests/opnfv/test_cases/"
 test_cases:
 -
-  file_name: opnfv_yardstick_tc006.yaml
--
-  file_name: opnfv_yardstick_tc007.yaml
--
-  file_name: opnfv_yardstick_tc020.yaml
--
-  file_name: opnfv_yardstick_tc021.yaml
+  file_name: opnfv_yardstick_tc080.yaml
@@ -1,5 +1,5 @@
 ##############################################################################
-# Copyright (c) 2017 Ericsson AB and others.
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
 #
 # All rights reserved. This program and the accompanying materials
 # are made available under the terms of the Apache License, Version 2.0
@@ -7,18 +7,12 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 ---
-# ERICSSON POD1 VTC weekly task suite
+# k8 multus lb noha daily task suite
 
 schema: "yardstick:suite:0.1"
 
-name: "opnfv_vTC_weekly"
+name: "k8-multus-lb-noha"
 test_cases_dir: "tests/opnfv/test_cases/"
 test_cases:
 -
-  file_name: opnfv_yardstick_tc006.yaml
--
-  file_name: opnfv_yardstick_tc007.yaml
--
-  file_name: opnfv_yardstick_tc020.yaml
--
-  file_name: opnfv_yardstick_tc021.yaml
+  file_name: opnfv_yardstick_tc080.yaml
diff --git a/tests/opnfv/test_suites/opnfv_k8-multus-nofeature-noha_daily.yaml b/tests/opnfv/test_suites/opnfv_k8-multus-nofeature-noha_daily.yaml
new file mode 100644 (file)
index 0000000..8b1ffeb
--- /dev/null
@@ -0,0 +1,18 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+# k8 multus nofeature noha daily task suite
+
+schema: "yardstick:suite:0.1"
+
+name: "k8-multus-nofeature-noha"
+test_cases_dir: "tests/opnfv/test_cases/"
+test_cases:
+-
+  file_name: opnfv_yardstick_tc080.yaml
diff --git a/tests/opnfv/test_suites/opnfv_k8-nosdn-nofeature-ha_daily.yaml b/tests/opnfv/test_suites/opnfv_k8-nosdn-nofeature-ha_daily.yaml
new file mode 100644 (file)
index 0000000..d5386fd
--- /dev/null
@@ -0,0 +1,18 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+# k8 nosdn nofeature ha daily task suite
+
+schema: "yardstick:suite:0.1"
+
+name: "k8-nosdn-nofeature-ha"
+test_cases_dir: "tests/opnfv/test_cases/"
+test_cases:
+-
+  file_name: opnfv_yardstick_tc080.yaml
index 4fd7659..0ce9455 100644 (file)
@@ -15,48 +15,4 @@ name: "k8-nosdn-nofeature-noha"
 test_cases_dir: "tests/opnfv/test_cases/"
 test_cases:
 -
-  file_name: opnfv_yardstick_tc002.yaml
--
-  file_name: opnfv_yardstick_tc005.yaml
--
-  file_name: opnfv_yardstick_tc010.yaml
--
-  file_name: opnfv_yardstick_tc011.yaml
--
-  file_name: opnfv_yardstick_tc012.yaml
--
-  file_name: opnfv_yardstick_tc014.yaml
--
-  file_name: opnfv_yardstick_tc037.yaml
--
-  file_name: opnfv_yardstick_tc055.yaml
-  constraint:
-      installer: compass
-      pod: huawei-pod1
-  task_args:
-      huawei-pod1: '{"file": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml",
-      "host": "node5"}'
--
-  file_name: opnfv_yardstick_tc063.yaml
-  constraint:
-      installer: compass
-      pod: huawei-pod1
-  task_args:
-      huawei-pod1: '{"file": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml",
-      "host": "node5"}'
--
-  file_name: opnfv_yardstick_tc069.yaml
--
-  file_name: opnfv_yardstick_tc070.yaml
--
-  file_name: opnfv_yardstick_tc071.yaml
--
-  file_name: opnfv_yardstick_tc072.yaml
--
-  file_name: opnfv_yardstick_tc075.yaml
-  constraint:
-      installer: compass
-      pod: huawei-pod1
-  task_args:
-      huawei-pod1: '{"file": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml",
-      "host": "node1"}'
+  file_name: opnfv_yardstick_tc080.yaml
diff --git a/tests/opnfv/test_suites/opnfv_k8-nosdn-stor4nfv-ha_daily.yaml b/tests/opnfv/test_suites/opnfv_k8-nosdn-stor4nfv-ha_daily.yaml
new file mode 100644 (file)
index 0000000..cb2b131
--- /dev/null
@@ -0,0 +1,18 @@
+##############################################################################
+# Copyright (c) 2018 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+# k8 nosdn stor4nfv ha daily task suite
+
+schema: "yardstick:suite:0.1"
+
+name: "k8-nosdn-stor4nfv-ha"
+test_cases_dir: "tests/opnfv/test_cases/"
+test_cases:
+-
+  file_name: opnfv_yardstick_tc080.yaml
diff --git a/tests/opnfv/test_suites/opnfv_k8-nosdn-stor4nfv-noha_daily.yaml b/tests/opnfv/test_suites/opnfv_k8-nosdn-stor4nfv-noha_daily.yaml
new file mode 100644 (file)
index 0000000..961b8da
--- /dev/null
@@ -0,0 +1,18 @@
+##############################################################################
+# Copyright (c) 2018 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+# k8 nosdn stor4nfv noha daily task suite
+
+schema: "yardstick:suite:0.1"
+
+name: "k8-nosdn-stor4nfv-noha"
+test_cases_dir: "tests/opnfv/test_cases/"
+test_cases:
+-
+  file_name: opnfv_yardstick_tc080.yaml
diff --git a/tests/opnfv/test_suites/opnfv_k8-ocl-lb-noha_daily.yaml b/tests/opnfv/test_suites/opnfv_k8-ocl-lb-noha_daily.yaml
new file mode 100644 (file)
index 0000000..a6ef9e1
--- /dev/null
@@ -0,0 +1,18 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+# k8 ocl lb noha daily task suite
+
+schema: "yardstick:suite:0.1"
+
+name: "k8-ocl-lb-noha"
+test_cases_dir: "tests/opnfv/test_cases/"
+test_cases:
+-
+  file_name: opnfv_yardstick_tc080.yaml
diff --git a/tests/opnfv/test_suites/opnfv_k8-sriov-cni-nofeature-noha_daily.yaml b/tests/opnfv/test_suites/opnfv_k8-sriov-cni-nofeature-noha_daily.yaml
new file mode 100644 (file)
index 0000000..e0114f4
--- /dev/null
@@ -0,0 +1,18 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+# k8 sriov-cni nofeature noha daily task suite
+
+schema: "yardstick:suite:0.1"
+
+name: "k8-sriov-cni-nofeature-noha"
+test_cases_dir: "tests/opnfv/test_cases/"
+test_cases:
+-
+  file_name: opnfv_yardstick_tc080.yaml
diff --git a/tests/opnfv/test_suites/opnfv_os-nosdn-calipso-noha_daily.yaml b/tests/opnfv/test_suites/opnfv_os-nosdn-calipso-noha_daily.yaml
new file mode 100644 (file)
index 0000000..3f0c4a9
--- /dev/null
@@ -0,0 +1,62 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+# os-nosdn-calipso-noha daily task suite
+
+schema: "yardstick:suite:0.1"
+
+name: "os-nosdn-calipso-noha"
+test_cases_dir: "tests/opnfv/test_cases/"
+test_cases:
+-
+  file_name: opnfv_yardstick_tc002.yaml
+-
+  file_name: opnfv_yardstick_tc005.yaml
+-
+  file_name: opnfv_yardstick_tc010.yaml
+-
+  file_name: opnfv_yardstick_tc011.yaml
+-
+  file_name: opnfv_yardstick_tc012.yaml
+-
+  file_name: opnfv_yardstick_tc014.yaml
+-
+  file_name: opnfv_yardstick_tc037.yaml
+-
+  file_name: opnfv_yardstick_tc055.yaml
+  constraint:
+      installer: compass
+      pod: huawei-pod1
+  task_args:
+      huawei-pod1: '{"file": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml",
+      "host": "node5"}'
+-
+  file_name: opnfv_yardstick_tc063.yaml
+  constraint:
+      installer: compass
+      pod: huawei-pod1
+  task_args:
+      huawei-pod1: '{"file": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml",
+      "host": "node5"}'
+-
+  file_name: opnfv_yardstick_tc069.yaml
+-
+  file_name: opnfv_yardstick_tc070.yaml
+-
+  file_name: opnfv_yardstick_tc071.yaml
+-
+  file_name: opnfv_yardstick_tc072.yaml
+-
+  file_name: opnfv_yardstick_tc075.yaml
+  constraint:
+      installer: compass
+      pod: huawei-pod1
+  task_args:
+      huawei-pod1: '{"file": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml",
+      "host": "node1"}'
index ec0fd22..bd91a75 100644 (file)
@@ -20,18 +20,12 @@ test_cases:
   file_name: opnfv_yardstick_tc002.yaml
 -
   file_name: opnfv_yardstick_tc006.yaml
--
-  file_name: opnfv_yardstick_tc007.yaml
 -
   file_name: opnfv_yardstick_tc008.yaml
 -
   file_name: opnfv_yardstick_tc009.yaml
 -
   file_name: opnfv_yardstick_tc011.yaml
--
-  file_name: opnfv_yardstick_tc020.yaml
--
-  file_name: opnfv_yardstick_tc021.yaml
 -
   file_name: opnfv_yardstick_tc037.yaml
 -
index ef47b9f..201a271 100644 (file)
@@ -33,6 +33,11 @@ test_cases:
   file_name: opnfv_yardstick_tc014.yaml
 -
   file_name: opnfv_yardstick_tc037.yaml
+-
+  file_name: opnfv_yardstick_tc042.yaml
+  constraint:
+      installer: compass
+      pod: huawei-pod1
 -
   file_name: opnfv_yardstick_tc055.yaml
   constraint:
index 2def5c2..e8db0de 100644 (file)
@@ -33,6 +33,11 @@ test_cases:
   file_name: opnfv_yardstick_tc014.yaml
 -
   file_name: opnfv_yardstick_tc037.yaml
+-
+  file_name: opnfv_yardstick_tc042.yaml
+  constraint:
+      installer: compass
+      pod: huawei-pod1
 -
   file_name: opnfv_yardstick_tc055.yaml
   constraint:
index fa3c789..7c213e2 100644 (file)
@@ -93,12 +93,10 @@ test_cases:
     file_name: opnfv_yardstick_tc027.yaml
     constraint:
         installer: compass,fuel
-        pod: huawei-pod1,lf-pod2,ericsson-pod3,ericsson-pod4
+        pod: huawei-pod1,lf-pod2
     task_args:
         huawei-pod1: '{"file": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml"}'
         lf-pod2: '{"file": "etc/yardstick/nodes/fuel_baremetal/pod.yaml", "openrc":"/root/openrc", "external_network":"admin_floating_net"}'
-        ericsson-pod3: '{"file": "etc/yardstick/nodes/fuel_baremetal/pod.yaml", "openrc":"/root/openrc", "external_network":"admin_floating_net"}'
-        ericsson-pod4: '{"file": "etc/yardstick/nodes/fuel_baremetal/pod.yaml", "openrc":"/root/openrc", "external_network":"admin_floating_net"}'
 -
     file_name: opnfv_yardstick_tc074.yaml
     constraint:
@@ -115,72 +113,82 @@ test_cases:
     file_name: opnfv_yardstick_tc045.yaml
     constraint:
         installer: compass,fuel
-        pod: huawei-pod2,ericsson-pod1
+        pod: huawei-pod1,lf-pod2
     task_args:
-        huawei-pod2: '{"file": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml"}'
-        ericsson-pod1: '{"file": "etc/yardstick/nodes/fuel_baremetal/pod.yaml"}'
+        huawei-pod1: '{"file": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml"}'
+        lf-pod2: '{"file": "etc/yardstick/nodes/fuel_baremetal/pod.yaml"}'
 -
     file_name: opnfv_yardstick_tc046.yaml
     constraint:
-        installer: fuel
+        installer: compass,fuel
+        pod: huawei-pod1,lf-pod2
     task_args:
-        default: '{"file": "etc/yardstick/nodes/fuel_baremetal/pod.yaml"}'
+        huawei-pod1: '{"file": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml"}'
+        lf-pod2: '{"file": "etc/yardstick/nodes/fuel_baremetal/pod.yaml"}'
 -
     file_name: opnfv_yardstick_tc047.yaml
     constraint:
         installer: compass,fuel
-        pod: huawei-pod2,ericsson-pod1
+        pod: huawei-pod1,lf-pod2
     task_args:
-        huawei-pod2: '{"file": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml"}'
-        ericsson-pod1: '{"file": "etc/yardstick/nodes/fuel_baremetal/pod.yaml"}'
+        huawei-pod1: '{"file": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml"}'
+        lf-pod2: '{"file": "etc/yardstick/nodes/fuel_baremetal/pod.yaml"}'
 -
     file_name: opnfv_yardstick_tc048.yaml
     constraint:
         installer: compass,fuel
-        pod: huawei-pod2,ericsson-pod1
+        pod: huawei-pod1,lf-pod2
     task_args:
-        huawei-pod2: '{"file": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml"}'
-        ericsson-pod1: '{"file": "etc/yardstick/nodes/fuel_baremetal/pod.yaml"}'
+        huawei-pod1: '{"file": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml"}'
+        lf-pod2: '{"file": "etc/yardstick/nodes/fuel_baremetal/pod.yaml"}'
 -
     file_name: opnfv_yardstick_tc049.yaml
     constraint:
-        installer: fuel
+        installer: compass,fuel
+        pod: huawei-pod1,lf-pod2
     task_args:
-        default: '{"file": "etc/yardstick/nodes/fuel_baremetal/pod.yaml"}'
+        huawei-pod1: '{"file": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml"}'
+        lf-pod2: '{"file": "etc/yardstick/nodes/fuel_baremetal/pod.yaml"}'
 -
     file_name: opnfv_yardstick_tc050.yaml
     constraint:
-        installer: fuel
+        installer: compass,fuel
+        pod: huawei-pod1,lf-pod2
     task_args:
-        default: '{"file": "etc/yardstick/nodes/fuel_baremetal/pod.yaml"}'
+        huawei-pod1: '{"file": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml"}'
+        lf-pod2: '{"file": "etc/yardstick/nodes/fuel_baremetal/pod.yaml"}'
 -
     file_name: opnfv_yardstick_tc051.yaml
     constraint:
         installer: compass,fuel
-        pod: huawei-pod2,ericsson-pod1
+        pod: huawei-pod1,lf-pod2
     task_args:
-        huawei-pod2: '{"file": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml"}'
-        ericsson-pod1: '{"file": "etc/yardstick/nodes/fuel_baremetal/pod.yaml"}'
+        huawei-pod1: '{"file": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml"}'
+        lf-pod2: '{"file": "etc/yardstick/nodes/fuel_baremetal/pod.yaml"}'
 -
     file_name: opnfv_yardstick_tc052.yaml
     constraint:
-        installer: fuel
+        installer: compass,fuel
+        pod: huawei-pod1,lf-pod2
     task_args:
-        default: '{"file": "etc/yardstick/nodes/fuel_baremetal/pod.yaml"}'
+        huawei-pod1: '{"file": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml"}'
+        lf-pod2: '{"file": "etc/yardstick/nodes/fuel_baremetal/pod.yaml"}'
 -
     file_name: opnfv_yardstick_tc053.yaml
     constraint:
-        installer: fuel
+        installer: compass,fuel
+        pod: huawei-pod1,lf-pod2
     task_args:
-        default: '{"file": "etc/yardstick/nodes/fuel_baremetal/pod.yaml"}'
+        huawei-pod1: '{"file": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml"}'
+        lf-pod2: '{"file": "etc/yardstick/nodes/fuel_baremetal/pod.yaml"}'
 -
     file_name: opnfv_yardstick_tc019.yaml
     constraint:
         installer: compass,fuel
-        pod: huawei-pod2,ericsson-pod1
+        pod: huawei-pod1,lf-pod2
     task_args:
-        huawei-pod2: '{"file": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml"}'
-        ericsson-pod1: '{"file": "etc/yardstick/nodes/fuel_baremetal/pod.yaml"}'
+        huawei-pod1: '{"file": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml"}'
+        lf-pod2: '{"file": "etc/yardstick/nodes/fuel_baremetal/pod.yaml"}'
 -
     file_name: opnfv_yardstick_tc025.yaml
     constraint:
diff --git a/tests/opnfv/test_suites/opnfv_os-odl-ovs_dpdk-ha_daily.yaml b/tests/opnfv/test_suites/opnfv_os-odl-ovs_dpdk-ha_daily.yaml
new file mode 100644 (file)
index 0000000..734d31d
--- /dev/null
@@ -0,0 +1,38 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+# os-odl-ovs_dpdk-ha daily task suite
+
+schema: "yardstick:suite:0.1"
+
+name: "os-odl-ovs_dpdk-ha"
+test_cases_dir: "tests/opnfv/test_cases/"
+test_cases:
+-
+  file_name: opnfv_yardstick_tc002.yaml
+-
+  file_name: opnfv_yardstick_tc005.yaml
+-
+  file_name: opnfv_yardstick_tc010.yaml
+-
+  file_name: opnfv_yardstick_tc011.yaml
+-
+  file_name: opnfv_yardstick_tc012.yaml
+-
+  file_name: opnfv_yardstick_tc014.yaml
+-
+  file_name: opnfv_yardstick_tc037.yaml
+-
+  file_name: opnfv_yardstick_tc069.yaml
+-
+  file_name: opnfv_yardstick_tc070.yaml
+-
+  file_name: opnfv_yardstick_tc071.yaml
+-
+  file_name: opnfv_yardstick_tc072.yaml
diff --git a/tests/opnfv/test_suites/opnfv_os-odl-ovs_dpdk-noha_daily.yaml b/tests/opnfv/test_suites/opnfv_os-odl-ovs_dpdk-noha_daily.yaml
new file mode 100644 (file)
index 0000000..e87dba7
--- /dev/null
@@ -0,0 +1,38 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+# os-odl-ovs_dpdk-noha daily task suite
+
+schema: "yardstick:suite:0.1"
+
+name: "os-odl-ovs_dpdk-noha"
+test_cases_dir: "tests/opnfv/test_cases/"
+test_cases:
+-
+  file_name: opnfv_yardstick_tc002.yaml
+-
+  file_name: opnfv_yardstick_tc005.yaml
+-
+  file_name: opnfv_yardstick_tc010.yaml
+-
+  file_name: opnfv_yardstick_tc011.yaml
+-
+  file_name: opnfv_yardstick_tc012.yaml
+-
+  file_name: opnfv_yardstick_tc014.yaml
+-
+  file_name: opnfv_yardstick_tc037.yaml
+-
+  file_name: opnfv_yardstick_tc069.yaml
+-
+  file_name: opnfv_yardstick_tc070.yaml
+-
+  file_name: opnfv_yardstick_tc071.yaml
+-
+  file_name: opnfv_yardstick_tc072.yaml
diff --git a/tests/opnfv/test_suites/opnfv_os-odl-ovs_offload-noha_daily.yaml b/tests/opnfv/test_suites/opnfv_os-odl-ovs_offload-noha_daily.yaml
new file mode 100644 (file)
index 0000000..27d100a
--- /dev/null
@@ -0,0 +1,38 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+# os-odl-ovs_offload-noha daily task suite
+
+schema: "yardstick:suite:0.1"
+
+name: "os-odl-ovs_offload-noha"
+test_cases_dir: "tests/opnfv/test_cases/"
+test_cases:
+-
+  file_name: opnfv_yardstick_tc002.yaml
+-
+  file_name: opnfv_yardstick_tc005.yaml
+-
+  file_name: opnfv_yardstick_tc010.yaml
+-
+  file_name: opnfv_yardstick_tc011.yaml
+-
+  file_name: opnfv_yardstick_tc012.yaml
+-
+  file_name: opnfv_yardstick_tc014.yaml
+-
+  file_name: opnfv_yardstick_tc037.yaml
+-
+  file_name: opnfv_yardstick_tc069.yaml
+-
+  file_name: opnfv_yardstick_tc070.yaml
+-
+  file_name: opnfv_yardstick_tc071.yaml
+-
+  file_name: opnfv_yardstick_tc072.yaml
diff --git a/tests/opnfv/test_suites/opnfv_os-odl-sriov-noha_daily.yaml b/tests/opnfv/test_suites/opnfv_os-odl-sriov-noha_daily.yaml
new file mode 100644 (file)
index 0000000..b6d0dd6
--- /dev/null
@@ -0,0 +1,38 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+# os-odl-sriov-noha daily task suite
+
+schema: "yardstick:suite:0.1"
+
+name: "os-odl-sriov-noha"
+test_cases_dir: "tests/opnfv/test_cases/"
+test_cases:
+-
+  file_name: opnfv_yardstick_tc002.yaml
+-
+  file_name: opnfv_yardstick_tc005.yaml
+-
+  file_name: opnfv_yardstick_tc010.yaml
+-
+  file_name: opnfv_yardstick_tc011.yaml
+-
+  file_name: opnfv_yardstick_tc012.yaml
+-
+  file_name: opnfv_yardstick_tc014.yaml
+-
+  file_name: opnfv_yardstick_tc037.yaml
+-
+  file_name: opnfv_yardstick_tc069.yaml
+-
+  file_name: opnfv_yardstick_tc070.yaml
+-
+  file_name: opnfv_yardstick_tc071.yaml
+-
+  file_name: opnfv_yardstick_tc072.yaml
index 7172979..722d885 100644 (file)
@@ -20,18 +20,12 @@ test_cases:
   file_name: opnfv_yardstick_tc002.yaml
 -
   file_name: opnfv_yardstick_tc006.yaml
--
-  file_name: opnfv_yardstick_tc007.yaml
 -
   file_name: opnfv_yardstick_tc008.yaml
 -
   file_name: opnfv_yardstick_tc009.yaml
 -
   file_name: opnfv_yardstick_tc011.yaml
--
-  file_name: opnfv_yardstick_tc020.yaml
--
-  file_name: opnfv_yardstick_tc021.yaml
 -
   file_name: opnfv_yardstick_tc037.yaml
 -
index 3b6c89d..dc74b18 100644 (file)
@@ -143,8 +143,6 @@ class TestMultiPortConfig(unittest.TestCase):
     def setUp(self):
         self._mock_open = mock.patch.object(six.moves.builtins, 'open')
         self.mock_open = self._mock_open.start()
-        self._mock_os = mock.patch.object(os, 'path')
-        self.mock_os = self._mock_os.start()
         self._mock_config_parser = mock.patch.object(
             samplevnf_helper, 'ConfigParser')
         self.mock_config_parser = self._mock_config_parser.start()
@@ -153,7 +151,6 @@ class TestMultiPortConfig(unittest.TestCase):
 
     def _cleanup(self):
         self._mock_open.stop()
-        self._mock_os.stop()
         self._mock_config_parser.stop()
 
     def test_validate_ip_and_prefixlen(self):
@@ -185,7 +182,8 @@ class TestMultiPortConfig(unittest.TestCase):
             samplevnf_helper.MultiPortConfig.validate_ip_and_prefixlen(
                 '::1', '129')
 
-    def test___init__(self):
+    @mock.patch.object(os.path, 'isfile', return_value=False)
+    def test___init__(self, *args):
         topology_file = mock.Mock()
         config_tpl = mock.Mock()
         tmp_file = mock.Mock()
@@ -193,8 +191,6 @@ class TestMultiPortConfig(unittest.TestCase):
         opnfv_vnf = samplevnf_helper.MultiPortConfig(
             topology_file, config_tpl, tmp_file, vnfd_mock)
         self.assertEqual(0, opnfv_vnf.swq)
-        self.mock_os.path = mock.MagicMock()
-        self.mock_os.path.isfile = mock.Mock(return_value=False)
         opnfv_vnf = samplevnf_helper.MultiPortConfig(
             topology_file, config_tpl, tmp_file, vnfd_mock)
         self.assertEqual(0, opnfv_vnf.swq)
index f5f7f0f..9f337c6 100644 (file)
@@ -19,7 +19,8 @@ import unittest
 
 from yardstick.network_services.nfvi.resource import ResourceProfile
 from yardstick.network_services.nfvi import resource, collectd
-
+from yardstick.common.exceptions import ResourceCommandError
+from yardstick import ssh
 
 class TestResourceProfile(unittest.TestCase):
     VNFD = {'vnfd:vnfd-catalog':
@@ -128,8 +129,31 @@ class TestResourceProfile(unittest.TestCase):
         self.assertEqual(val, ('error', 'Invalid', '', ''))
 
     def test__start_collectd(self):
-            self.assertIsNone(
-                self.resource_profile._start_collectd(self.ssh_mock, "/opt/nsb_bin"))
+        ssh_mock = mock.Mock()
+        ssh_mock.execute = mock.Mock(return_value=(0, "", ""))
+        self.assertIsNone(self.resource_profile._start_collectd(ssh_mock,
+                                                                "/opt/nsb_bin"))
+
+        ssh_mock.execute = mock.Mock(side_effect=ssh.SSHError)
+        with self.assertRaises(ssh.SSHError):
+            self.resource_profile._start_collectd(ssh_mock, "/opt/nsb_bin")
+
+        ssh_mock.execute = mock.Mock(return_value=(1, "", ""))
+        self.assertIsNone(self.resource_profile._start_collectd(ssh_mock,
+                                                                "/opt/nsb_bin"))
+
+    def test__start_rabbitmq(self):
+        ssh_mock = mock.Mock()
+        ssh_mock.execute = mock.Mock(return_value=(0, "RabbitMQ", ""))
+        self.assertIsNone(self.resource_profile._start_rabbitmq(ssh_mock))
+
+        ssh_mock.execute = mock.Mock(return_value=(0, "", ""))
+        with self.assertRaises(ResourceCommandError):
+            self.resource_profile._start_rabbitmq(ssh_mock)
+
+        ssh_mock.execute = mock.Mock(return_value=(1, "", ""))
+        with self.assertRaises(ResourceCommandError):
+            self.resource_profile._start_rabbitmq(ssh_mock)
 
     def test__prepare_collectd_conf(self):
             self.assertIsNone(
@@ -154,11 +178,12 @@ class TestResourceProfile(unittest.TestCase):
 
     def test_initiate_systemagent(self):
         self.resource_profile._start_collectd = mock.Mock()
+        self.resource_profile._start_rabbitmq = mock.Mock()
         self.assertIsNone(
             self.resource_profile.initiate_systemagent("/opt/nsb_bin"))
 
     def test_initiate_systemagent_raise(self):
-        self.resource_profile._start_collectd = mock.Mock(side_effect=RuntimeError)
+        self.resource_profile._start_rabbitmq = mock.Mock(side_effect=RuntimeError)
         with self.assertRaises(RuntimeError):
             self.resource_profile.initiate_systemagent("/opt/nsb_bin")
 
index c7d2abc..eb59c28 100644 (file)
@@ -533,10 +533,12 @@ class TestDpdkVnfSetupEnvHelper(unittest.TestCase):
     @mock.patch.object(six, 'BytesIO', return_value=six.BytesIO(b'100\n'))
     @mock.patch.object(utils, 'read_meminfo',
                        return_value={'Hugepagesize': '2048'})
-    def test__setup_hugepages(self, mock_meminfo, *args):
+    def test__setup_hugepages_no_hugepages_defined(self, mock_meminfo, *args):
         ssh_helper = mock.Mock()
+        scenario_helper = mock.Mock()
+        scenario_helper.all_options = {}
         dpdk_setup_helper = DpdkVnfSetupEnvHelper(
-            mock.ANY, ssh_helper, mock.ANY)
+            mock.ANY, ssh_helper, scenario_helper)
         with mock.patch.object(sample_vnf.LOG, 'info') as mock_info:
             dpdk_setup_helper._setup_hugepages()
             mock_info.assert_called_once_with(
@@ -544,6 +546,22 @@ class TestDpdkVnfSetupEnvHelper(unittest.TestCase):
                 '%s', 2048, 8192, 100)
         mock_meminfo.assert_called_once_with(ssh_helper)
 
+    @mock.patch.object(six, 'BytesIO', return_value=six.BytesIO(b'100\n'))
+    @mock.patch.object(utils, 'read_meminfo',
+                       return_value={'Hugepagesize': '1048576'})
+    def test__setup_hugepages_8gb_hugepages_defined(self, mock_meminfo, *args):
+        ssh_helper = mock.Mock()
+        scenario_helper = mock.Mock()
+        scenario_helper.all_options = {'hugepages_gb': 8}
+        dpdk_setup_helper = DpdkVnfSetupEnvHelper(
+            mock.ANY, ssh_helper, scenario_helper)
+        with mock.patch.object(sample_vnf.LOG, 'info') as mock_info:
+            dpdk_setup_helper._setup_hugepages()
+            mock_info.assert_called_once_with(
+                'Hugepages size (kB): %s, number claimed: %s, number set: '
+                '%s', 1048576, 8, 100)
+        mock_meminfo.assert_called_once_with(ssh_helper)
+
     @mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.open')
     @mock.patch.object(utils, 'find_relative_file')
     @mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.MultiPortConfig')
@@ -1643,42 +1661,6 @@ class TestSampleVnf(unittest.TestCase):
         # test the default resource helper is MyResourceHelper, not subclass
         self.assertEqual(type(sample_vnf.resource_helper), MyResourceHelper)
 
-    def test__get_port0localip6(self):
-        sample_vnf = SampleVNF('vnf1', self.VNFD_0)
-        expected = '0064:ff9b:0:0:0:0:9810:6414'
-        result = sample_vnf._get_port0localip6()
-        self.assertEqual(result, expected)
-
-    def test__get_port1localip6(self):
-        sample_vnf = SampleVNF('vnf1', self.VNFD_0)
-        expected = '0064:ff9b:0:0:0:0:9810:2814'
-        result = sample_vnf._get_port1localip6()
-        self.assertEqual(result, expected)
-
-    def test__get_port0prefixip6(self):
-        sample_vnf = SampleVNF('vnf1', self.VNFD_0)
-        expected = '112'
-        result = sample_vnf._get_port0prefixlen6()
-        self.assertEqual(result, expected)
-
-    def test__get_port1prefixip6(self):
-        sample_vnf = SampleVNF('vnf1', self.VNFD_0)
-        expected = '112'
-        result = sample_vnf._get_port1prefixlen6()
-        self.assertEqual(result, expected)
-
-    def test__get_port0gateway6(self):
-        sample_vnf = SampleVNF('vnf1', self.VNFD_0)
-        expected = '0064:ff9b:0:0:0:0:9810:6414'
-        result = sample_vnf._get_port0gateway6()
-        self.assertEqual(result, expected)
-
-    def test__get_port1gateway6(self):
-        sample_vnf = SampleVNF('vnf1', self.VNFD_0)
-        expected = '0064:ff9b:0:0:0:0:9810:2814'
-        result = sample_vnf._get_port1gateway6()
-        self.assertEqual(result, expected)
-
     @mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.Process')
     def test__start_vnf(self, *args):
         vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
@@ -1767,16 +1749,6 @@ class TestSampleVnf(unittest.TestCase):
 
         self.assertEqual(sample_vnf.wait_for_instantiate(), 0)
 
-    def test__build_ports(self):
-        vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
-        sample_vnf = SampleVNF('vnf1', vnfd)
-
-        self.assertIsNone(sample_vnf._build_ports())
-        self.assertIsNotNone(sample_vnf.networks)
-        self.assertIsNotNone(sample_vnf.uplink_ports)
-        self.assertIsNotNone(sample_vnf.downlink_ports)
-        self.assertIsNotNone(sample_vnf.my_ports)
-
     @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.time")
     def test_vnf_execute_with_queue_data(self, *args):
         queue_size_list = [
index 6a9f2e7..aaf162c 100755 (executable)
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+env_http_proxy=$(sed -ne "s/^http_proxy=[\"\']\(.*\)[\"\']/\1/p" /etc/environment)
+if [[ -z ${http_proxy} ]] && [[ ! -z ${env_http_proxy} ]]; then
+    export http_proxy=${env_http_proxy}
+fi
+env_https_proxy=$(sed -ne "s/^https_proxy=[\"\']\(.*\)[\"\']/\1/p" /etc/environment)
+if [[ -z ${https_proxy} ]] && [[ ! -z ${env_https_proxy} ]]; then
+    export https_proxy=${env_https_proxy}
+fi
+env_ftp_proxy=$(sed -ne "s/^ftp_proxy=[\"\']\(.*\)[\"\']/\1/p" /etc/environment)
+if [[ -z ${ftp_proxy} ]] && [[ ! -z ${env_ftp_proxy} ]]; then
+    export ftp_proxy=${env_ftp_proxy}
+fi
+if [[ ! -z ${http_proxy} ]] || [[ ! -z ${https_proxy} ]]; then
+    export no_proxy="${no_proxy}"
+    extra_args="${extra_args} -e @/tmp/proxy.yml "
+    cat <<EOF > /tmp/proxy.yml
+---
+proxy_env:
+  http_proxy: ${http_proxy}
+  https_proxy: ${https_proxy}
+  ftp_proxy: ${ftp_proxy}
+  no_proxy: ${no_proxy}
+EOF
+fi
 ANSIBLE_SCRIPTS="${0%/*}/../ansible"
 
-cd ${ANSIBLE_SCRIPTS} &&\
+cd ${ANSIBLE_SCRIPTS} && \
 sudo -EH ansible-playbook \
-         -e rs_file='../etc/infra/infra_deploy.yaml' \
+         -e RS_FILE='../etc/infra/infra_deploy_two.yaml' -e CLEAN_UP=False ${extra_args} \
          -i inventory.ini infra_deploy.yml
index ae8319e..692c168 100644 (file)
@@ -6,17 +6,20 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
+
 import abc
 import six
 
-import yardstick.common.utils as utils
+from yardstick.common import constants
+from yardstick.common import utils
 
 
 class Flags(object):
     """Class to represent the status of the flags in a context"""
 
     _FLAGS = {'no_setup': False,
-              'no_teardown': False}
+              'no_teardown': False,
+              'os_cloud_config': constants.OS_CLOUD_DEFAULT_CONFIG}
 
     def __init__(self, **kwargs):
         for name, value in self._FLAGS.items():
index 0d1dfb8..8286182 100644 (file)
@@ -7,9 +7,6 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 
-from __future__ import absolute_import
-from __future__ import print_function
-
 import collections
 import logging
 import os
@@ -328,8 +325,10 @@ class HeatContext(Context):
         if not os.path.exists(self.key_filename):
             SSH.gen_keys(self.key_filename)
 
-        heat_template = HeatTemplate(self.name, self.template_file,
-                                     self.heat_parameters)
+        heat_template = HeatTemplate(
+            self.name, template_file=self.template_file,
+            heat_parameters=self.heat_parameters,
+            os_cloud_config=self._flags.os_cloud_config)
 
         if self.template_file is None:
             self._add_resources_to_template(heat_template)
index 14738da..4d43f26 100644 (file)
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from __future__ import absolute_import
 import os
 import re
 import time
@@ -25,11 +24,12 @@ from netaddr import IPNetwork
 import xml.etree.ElementTree as ET
 
 from yardstick import ssh
-from yardstick.common.constants import YARDSTICK_ROOT_PATH
+from yardstick.common import constants
+from yardstick.common import exceptions
 from yardstick.common.yaml_loader import yaml_load
 from yardstick.network_services.utils import PciAddress
 from yardstick.network_services.helpers.cpu import CpuSysCores
-from yardstick.common.utils import write_file
+
 
 LOG = logging.getLogger(__name__)
 
@@ -106,12 +106,17 @@ class Libvirt(object):
 
     @staticmethod
     def virsh_create_vm(connection, cfg):
-        err = connection.execute("virsh create %s" % cfg)[0]
-        LOG.info("VM create status: %s", err)
+        LOG.info('VM create, XML config: %s', cfg)
+        status, _, error = connection.execute('virsh create %s' % cfg)
+        if status:
+            raise exceptions.LibvirtCreateError(error=error)
 
     @staticmethod
     def virsh_destroy_vm(vm_name, connection):
-        connection.execute("virsh destroy %s" % vm_name)
+        LOG.info('VM destroy, VM name: %s', vm_name)
+        status, _, error = connection.execute('virsh destroy %s' % vm_name)
+        if status:
+            LOG.warning('Error destroying VM %s. Error: %s', vm_name, error)
 
     @staticmethod
     def _add_interface_address(interface, pci_address):
@@ -132,7 +137,7 @@ class Libvirt(object):
         return vm_pci
 
     @classmethod
-    def add_ovs_interface(cls, vpath, port_num, vpci, vports_mac, xml):
+    def add_ovs_interface(cls, vpath, port_num, vpci, vports_mac, xml_str):
         """Add a DPDK OVS 'interface' XML node in 'devices' node
 
         <devices>
@@ -156,7 +161,7 @@ class Libvirt(object):
 
         vhost_path = ('{0}/var/run/openvswitch/dpdkvhostuser{1}'.
                       format(vpath, port_num))
-        root = ET.parse(xml)
+        root = ET.fromstring(xml_str)
         pci_address = PciAddress(vpci.strip())
         device = root.find('devices')
 
@@ -181,10 +186,10 @@ class Libvirt(object):
 
         cls._add_interface_address(interface, pci_address)
 
-        root.write(xml)
+        return ET.tostring(root)
 
     @classmethod
-    def add_sriov_interfaces(cls, vm_pci, vf_pci, vf_mac, xml):
+    def add_sriov_interfaces(cls, vm_pci, vf_pci, vf_mac, xml_str):
         """Add a SR-IOV 'interface' XML node in 'devices' node
 
         <devices>
@@ -207,7 +212,7 @@ class Libvirt(object):
             -sr_iov-how_sr_iov_libvirt_works
         """
 
-        root = ET.parse(xml)
+        root = ET.fromstring(xml_str)
         device = root.find('devices')
 
         interface = ET.SubElement(device, 'interface')
@@ -224,20 +229,47 @@ class Libvirt(object):
         pci_vm_address = PciAddress(vm_pci.strip())
         cls._add_interface_address(interface, pci_vm_address)
 
-        root.write(xml)
+        return ET.tostring(root)
 
     @staticmethod
-    def create_snapshot_qemu(connection, index, vm_image):
-        # build snapshot image
-        image = "/var/lib/libvirt/images/%s.qcow2" % index
-        connection.execute("rm %s" % image)
-        qemu_template = "qemu-img create -f qcow2 -o backing_file=%s %s"
-        connection.execute(qemu_template % (vm_image, image))
+    def create_snapshot_qemu(connection, index, base_image):
+        """Create the snapshot image for a VM using a base image
 
-        return image
+        :param connection: SSH connection to the remote host
+        :param index: index of the VM to be spawn
+        :param base_image: path of the VM base image in the remote host
+        :return: snapshot image path
+        """
+        vm_image = '/var/lib/libvirt/images/%s.qcow2' % index
+        connection.execute('rm -- "%s"' % vm_image)
+        status, _, _ = connection.execute('test -r %s' % base_image)
+        if status:
+            if not os.access(base_image, os.R_OK):
+                raise exceptions.LibvirtQemuImageBaseImageNotPresent(
+                    vm_image=vm_image, base_image=base_image)
+            # NOTE(ralonsoh): done in two steps to avoid root permission
+            # issues.
+            LOG.info('Copy %s from execution host to remote host', base_image)
+            file_name = os.path.basename(os.path.normpath(base_image))
+            connection.put_file(base_image, '/tmp/%s' % file_name)
+            status, _, error = connection.execute(
+                'mv -- "/tmp/%s" "%s"' % (file_name, base_image))
+            if status:
+                raise exceptions.LibvirtQemuImageCreateError(
+                    vm_image=vm_image, base_image=base_image, error=error)
+
+        LOG.info('Convert image %s to %s', base_image, vm_image)
+        qemu_cmd = ('qemu-img create -f qcow2 -o backing_file=%s %s' %
+                    (base_image, vm_image))
+        status, _, error = connection.execute(qemu_cmd)
+        if status:
+            raise exceptions.LibvirtQemuImageCreateError(
+                vm_image=vm_image, base_image=base_image, error=error)
+        return vm_image
 
     @classmethod
-    def build_vm_xml(cls, connection, flavor, cfg, vm_name, index):
+    def build_vm_xml(cls, connection, flavor, vm_name, index):
+        """Build the XML from the configuration parameters"""
         memory = flavor.get('ram', '4096')
         extra_spec = flavor.get('extra_specs', {})
         cpu = extra_spec.get('hw:cpu_cores', '2')
@@ -261,9 +293,7 @@ class Libvirt(object):
             socket=socket, threads=threads,
             vm_image=image, cpuset=cpuset, cputune=cputune)
 
-        write_file(cfg, vm_xml)
-
-        return [vcpu, mac]
+        return vm_xml, mac
 
     @staticmethod
     def update_interrupts_hugepages_perf(connection):
@@ -283,6 +313,13 @@ class Libvirt(object):
         cpuset = "%s,%s" % (cores, threads)
         return cpuset
 
+    @classmethod
+    def write_file(cls, file_name, xml_str):
+        """Dump a XML string to a file"""
+        root = ET.fromstring(xml_str)
+        et = ET.ElementTree(element=root)
+        et.write(file_name, encoding='utf-8', method='xml')
+
 
 class StandaloneContextHelper(object):
     """ This class handles all the common code for standalone
@@ -374,7 +411,8 @@ class StandaloneContextHelper(object):
         except IOError as io_error:
             if io_error.errno != errno.ENOENT:
                 raise
-            self.file_path = os.path.join(YARDSTICK_ROOT_PATH, file_path)
+            self.file_path = os.path.join(constants.YARDSTICK_ROOT_PATH,
+                                          file_path)
             cfg = self.read_config_file()
 
         nodes.extend([node for node in cfg["nodes"] if str(node["role"]) != nfvi_role])
@@ -506,7 +544,7 @@ class OvsDeploy(object):
         StandaloneContextHelper.install_req_libs(self.connection, pkgs)
 
     def ovs_deploy(self):
-        ovs_deploy = os.path.join(YARDSTICK_ROOT_PATH,
+        ovs_deploy = os.path.join(constants.YARDSTICK_ROOT_PATH,
                                   "yardstick/resources/scripts/install/",
                                   self.OVS_DEPLOY_SCRIPT)
         if os.path.isfile(ovs_deploy):
@@ -522,4 +560,6 @@ class OvsDeploy(object):
 
             cmd = "sudo -E %s --ovs='%s' --dpdk='%s' -p='%s'" % (remote_ovs_deploy,
                                                                  ovs, dpdk, http_proxy)
-            self.connection.execute(cmd)
+            exit_status, _, stderr = self.connection.execute(cmd)
+            if exit_status:
+                raise exceptions.OVSDeployError(stderr=stderr)
index a18b42e..b9e66a4 100644 (file)
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from __future__ import absolute_import
-import os
-import logging
+import io
 import collections
+import logging
+import os
+import re
 import time
 
-from collections import OrderedDict
-
 from yardstick import ssh
 from yardstick.network_services.utils import get_nsb_option
-from yardstick.network_services.utils import provision_tool
 from yardstick.benchmark.contexts.base import Context
-from yardstick.benchmark.contexts.standalone.model import Libvirt
-from yardstick.benchmark.contexts.standalone.model import StandaloneContextHelper
-from yardstick.benchmark.contexts.standalone.model import Server
-from yardstick.benchmark.contexts.standalone.model import OvsDeploy
-from yardstick.network_services.utils import PciAddress
+from yardstick.benchmark.contexts.standalone import model
+from yardstick.common import exceptions
+from yardstick.network_services import utils
+
 
 LOG = logging.getLogger(__name__)
 
+MAIN_BRIDGE = 'br0'
+
 
 class OvsDpdkContext(Context):
     """ This class handles OVS standalone nodes - VM running on Non-Managed NFVi
@@ -50,14 +49,15 @@ class OvsDpdkContext(Context):
     }
 
     DEFAULT_OVS = '2.6.0'
-
-    PKILL_TEMPLATE = "pkill %s %s"
+    CMD_TIMEOUT = 30
+    DEFAULT_USER_PATH = '/usr/local'
 
     def __init__(self):
         self.file_path = None
         self.sriov = []
         self.first_run = True
-        self.dpdk_devbind = ''
+        self.dpdk_devbind = os.path.join(get_nsb_option('bin_path'),
+                                         'dpdk-devbind.py')
         self.vm_names = []
         self.nfvi_host = []
         self.nodes = []
@@ -65,8 +65,8 @@ class OvsDpdkContext(Context):
         self.attrs = {}
         self.vm_flavor = None
         self.servers = None
-        self.helper = StandaloneContextHelper()
-        self.vnf_node = Server()
+        self.helper = model.StandaloneContextHelper()
+        self.vnf_node = model.Server()
         self.ovs_properties = {}
         self.wait_for_vswitchd = 10
         super(OvsDpdkContext, self).__init__()
@@ -93,34 +93,32 @@ class OvsDpdkContext(Context):
         LOG.debug("Networks: %r", self.networks)
 
     def setup_ovs(self):
-        vpath = self.ovs_properties.get("vpath", "/usr/local")
-        xargs_kill_cmd = self.PKILL_TEMPLATE % ('-9', 'ovs')
-
+        """Initialize OVS-DPDK"""
+        vpath = self.ovs_properties.get('vpath', self.DEFAULT_USER_PATH)
         create_from = os.path.join(vpath, 'etc/openvswitch/conf.db')
         create_to = os.path.join(vpath, 'share/openvswitch/vswitch.ovsschema')
 
         cmd_list = [
-            "chmod 0666 /dev/vfio/*",
-            "chmod a+x /dev/vfio",
-            "pkill -9 ovs",
-            xargs_kill_cmd,
-            "killall -r 'ovs*'",
-            "mkdir -p {0}/etc/openvswitch".format(vpath),
-            "mkdir -p {0}/var/run/openvswitch".format(vpath),
-            "rm {0}/etc/openvswitch/conf.db".format(vpath),
-            "ovsdb-tool create {0} {1}".format(create_from, create_to),
-            "modprobe vfio-pci",
-            "chmod a+x /dev/vfio",
-            "chmod 0666 /dev/vfio/*",
+            'killall -r "ovs.*" -q | true',
+            'mkdir -p {0}/etc/openvswitch'.format(vpath),
+            'mkdir -p {0}/var/run/openvswitch'.format(vpath),
+            'rm {0}/etc/openvswitch/conf.db | true'.format(vpath),
+            'ovsdb-tool create {0} {1}'.format(create_from, create_to),
+            'modprobe vfio-pci',
+            'chmod a+x /dev/vfio',
+            'chmod 0666 /dev/vfio/*',
         ]
-        for cmd in cmd_list:
-            self.connection.execute(cmd)
-        bind_cmd = "{dpdk_devbind} --force -b {driver} {port}"
-        phy_driver = "vfio-pci"
+
+        bind_cmd = '%s --force -b vfio-pci {port}' % self.dpdk_devbind
         for port in self.networks.values():
-            vpci = port.get("phy_port")
-            self.connection.execute(bind_cmd.format(
-                dpdk_devbind=self.dpdk_devbind, driver=phy_driver, port=vpci))
+            cmd_list.append(bind_cmd.format(port=port.get('phy_port')))
+
+        for cmd in cmd_list:
+            LOG.info(cmd)
+            exit_status, _, stderr = self.connection.execute(
+                cmd, timeout=self.CMD_TIMEOUT)
+            if exit_status:
+                raise exceptions.OVSSetupError(command=cmd, error=stderr)
 
     def start_ovs_serverswitch(self):
         vpath = self.ovs_properties.get("vpath")
@@ -166,56 +164,78 @@ class OvsDpdkContext(Context):
         vpath = self.ovs_properties.get("vpath", "/usr/local")
         version = self.ovs_properties.get('version', {})
         ovs_ver = [int(x) for x in version.get('ovs', self.DEFAULT_OVS).split('.')]
-        ovs_add_port = \
-            "ovs-vsctl add-port {br} {port} -- set Interface {port} type={type_}{dpdk_args}"
-        ovs_add_queue = "ovs-vsctl set Interface {port} options:n_rxq={queue}"
-        chmod_vpath = "chmod 0777 {0}/var/run/openvswitch/dpdkvhostuser*"
-
-        cmd_dpdk_list = [
-            "ovs-vsctl del-br br0",
-            "rm -rf {0}/var/run/openvswitch/dpdkvhostuser*".format(vpath),
-            "ovs-vsctl add-br br0 -- set bridge br0 datapath_type=netdev",
+        ovs_add_port = ('ovs-vsctl add-port {br} {port} -- '
+                        'set Interface {port} type={type_}{dpdk_args}')
+        ovs_add_queue = 'ovs-vsctl set Interface {port} options:n_rxq={queue}'
+        chmod_vpath = 'chmod 0777 {0}/var/run/openvswitch/dpdkvhostuser*'
+
+        cmd_list = [
+            'ovs-vsctl --if-exists del-br {0}'.format(MAIN_BRIDGE),
+            'rm -rf {0}/var/run/openvswitch/dpdkvhostuser*'.format(vpath),
+            'ovs-vsctl add-br {0} -- set bridge {0} datapath_type=netdev'.
+            format(MAIN_BRIDGE)
         ]
 
-        ordered_network = OrderedDict(self.networks)
+        ordered_network = collections.OrderedDict(self.networks)
         for index, vnf in enumerate(ordered_network.values()):
             if ovs_ver >= [2, 7, 0]:
                 dpdk_args = " options:dpdk-devargs=%s" % vnf.get("phy_port")
-            dpdk_list.append(ovs_add_port.format(br='br0', port='dpdk%s' % vnf.get("port_num", 0),
-                                                 type_='dpdk', dpdk_args=dpdk_args))
-            dpdk_list.append(ovs_add_queue.format(port='dpdk%s' % vnf.get("port_num", 0),
-                                                  queue=self.ovs_properties.get("queues", 1)))
+            dpdk_list.append(ovs_add_port.format(
+                br=MAIN_BRIDGE, port='dpdk%s' % vnf.get("port_num", 0),
+                type_='dpdk', dpdk_args=dpdk_args))
+            dpdk_list.append(ovs_add_queue.format(
+                port='dpdk%s' % vnf.get("port_num", 0),
+                queue=self.ovs_properties.get("queues", 1)))
 
         # Sorting the array to make sure we execute dpdk0... in the order
         list.sort(dpdk_list)
-        cmd_dpdk_list.extend(dpdk_list)
+        cmd_list.extend(dpdk_list)
 
         # Need to do two for loop to maintain the dpdk/vhost ports.
         for index, _ in enumerate(ordered_network):
-            cmd_dpdk_list.append(ovs_add_port.format(br='br0', port='dpdkvhostuser%s' % index,
-                                                     type_='dpdkvhostuser', dpdk_args=""))
-
-        for cmd in cmd_dpdk_list:
-            LOG.info(cmd)
-            self.connection.execute(cmd)
-
-        # Fixme: add flows code
-        ovs_flow = "ovs-ofctl add-flow br0 in_port=%s,action=output:%s"
+            cmd_list.append(ovs_add_port.format(
+                br=MAIN_BRIDGE, port='dpdkvhostuser%s' % index,
+                type_='dpdkvhostuser', dpdk_args=""))
 
+        ovs_flow = ("ovs-ofctl add-flow {0} in_port=%s,action=output:%s".
+                    format(MAIN_BRIDGE))
         network_count = len(ordered_network) + 1
         for in_port, out_port in zip(range(1, network_count),
                                      range(network_count, network_count * 2)):
-            self.connection.execute(ovs_flow % (in_port, out_port))
-            self.connection.execute(ovs_flow % (out_port, in_port))
+            cmd_list.append(ovs_flow % (in_port, out_port))
+            cmd_list.append(ovs_flow % (out_port, in_port))
 
-        self.connection.execute(chmod_vpath.format(vpath))
+        cmd_list.append(chmod_vpath.format(vpath))
+
+        for cmd in cmd_list:
+            LOG.info(cmd)
+            exit_status, _, stderr = self.connection.execute(
+                cmd, timeout=self.CMD_TIMEOUT)
+            if exit_status:
+                raise exceptions.OVSSetupError(command=cmd, error=stderr)
+
+    def _check_hugepages(self):
+        meminfo = io.BytesIO()
+        self.connection.get_file_obj('/proc/meminfo', meminfo)
+        regex = re.compile(r"HugePages_Total:\s+(?P<hp_total>\d+)[\n\r]"
+                           r"HugePages_Free:\s+(?P<hp_free>\d+)")
+        match = regex.search(meminfo.getvalue().decode('utf-8'))
+        if not match:
+            raise exceptions.OVSHugepagesInfoError()
+        if int(match.group('hp_total')) == 0:
+            raise exceptions.OVSHugepagesNotConfigured()
+        if int(match.group('hp_free')) == 0:
+            raise exceptions.OVSHugepagesZeroFree(
+                total_hugepages=int(match.group('hp_total')))
 
     def cleanup_ovs_dpdk_env(self):
-        self.connection.execute("ovs-vsctl del-br br0")
+        self.connection.execute(
+            'ovs-vsctl --if-exists del-br {0}'.format(MAIN_BRIDGE))
         self.connection.execute("pkill -9 ovs")
 
     def check_ovs_dpdk_env(self):
         self.cleanup_ovs_dpdk_env()
+        self._check_hugepages()
 
         version = self.ovs_properties.get("version", {})
         ovs_ver = version.get("ovs", self.DEFAULT_OVS)
@@ -223,13 +243,15 @@ class OvsDpdkContext(Context):
 
         supported_version = self.SUPPORTED_OVS_TO_DPDK_MAP.get(ovs_ver, None)
         if supported_version is None or supported_version.split('.')[:2] != dpdk_ver[:2]:
-            raise Exception("Unsupported ovs '{}'. Please check the config...".format(ovs_ver))
+            raise exceptions.OVSUnsupportedVersion(
+                ovs_version=ovs_ver,
+                ovs_to_dpdk_map=self.SUPPORTED_OVS_TO_DPDK_MAP)
 
         status = self.connection.execute("ovs-vsctl -V | grep -i '%s'" % ovs_ver)[0]
         if status:
-            deploy = OvsDeploy(self.connection,
-                               get_nsb_option("bin_path"),
-                               self.ovs_properties)
+            deploy = model.OvsDeploy(self.connection,
+                                     utils.get_nsb_option("bin_path"),
+                                     self.ovs_properties)
             deploy.ovs_deploy()
 
     def deploy(self):
@@ -240,15 +262,12 @@ class OvsDpdkContext(Context):
             return
 
         self.connection = ssh.SSH.from_node(self.host_mgmt)
-        self.dpdk_devbind = provision_tool(
-            self.connection,
-            os.path.join(get_nsb_option("bin_path"), "dpdk-devbind.py"))
 
         # Check dpdk/ovs version, if not present install
         self.check_ovs_dpdk_env()
         #    Todo: NFVi deploy (sriov, vswitch, ovs etc) based on the config.
-        StandaloneContextHelper.install_req_libs(self.connection)
-        self.networks = StandaloneContextHelper.get_nic_details(
+        model.StandaloneContextHelper.install_req_libs(self.connection)
+        self.networks = model.StandaloneContextHelper.get_nic_details(
             self.connection, self.networks, self.dpdk_devbind)
 
         self.setup_ovs()
@@ -256,9 +275,8 @@ class OvsDpdkContext(Context):
         self.setup_ovs_bridge_add_flows()
         self.nodes = self.setup_ovs_dpdk_context()
         LOG.debug("Waiting for VM to come up...")
-        self.nodes = StandaloneContextHelper.wait_for_vnfs_to_start(self.connection,
-                                                                    self.servers,
-                                                                    self.nodes)
+        self.nodes = model.StandaloneContextHelper.wait_for_vnfs_to_start(
+            self.connection, self.servers, self.nodes)
 
     def undeploy(self):
 
@@ -278,7 +296,7 @@ class OvsDpdkContext(Context):
 
         # Todo: NFVi undeploy (sriov, vswitch, ovs etc) based on the config.
         for vm in self.vm_names:
-            Libvirt.check_if_vm_exists_and_delete(vm, self.connection)
+            model.Libvirt.check_if_vm_exists_and_delete(vm, self.connection)
 
     def _get_server(self, attr_name):
         """lookup server info by name from context
@@ -333,50 +351,53 @@ class OvsDpdkContext(Context):
         return result
 
     def configure_nics_for_ovs_dpdk(self):
-        portlist = OrderedDict(self.networks)
+        portlist = collections.OrderedDict(self.networks)
         for key in portlist:
-            mac = StandaloneContextHelper.get_mac_address()
+            mac = model.StandaloneContextHelper.get_mac_address()
             portlist[key].update({'mac': mac})
         self.networks = portlist
         LOG.info("Ports %s", self.networks)
 
-    def _enable_interfaces(self, index, vfs, cfg):
+    def _enable_interfaces(self, index, vfs, xml_str):
         vpath = self.ovs_properties.get("vpath", "/usr/local")
         vf = self.networks[vfs[0]]
         port_num = vf.get('port_num', 0)
-        vpci = PciAddress(vf['vpci'].strip())
+        vpci = utils.PciAddress(vf['vpci'].strip())
         # Generate the vpci for the interfaces
         slot = index + port_num + 10
         vf['vpci'] = \
             "{}:{}:{:02x}.{}".format(vpci.domain, vpci.bus, slot, vpci.function)
-        Libvirt.add_ovs_interface(vpath, port_num, vf['vpci'], vf['mac'], str(cfg))
+        return model.Libvirt.add_ovs_interface(
+            vpath, port_num, vf['vpci'], vf['mac'], xml_str)
 
     def setup_ovs_dpdk_context(self):
         nodes = []
 
         self.configure_nics_for_ovs_dpdk()
 
-        for index, (key, vnf) in enumerate(OrderedDict(self.servers).items()):
+        for index, (key, vnf) in enumerate(collections.OrderedDict(
+                self.servers).items()):
             cfg = '/tmp/vm_ovs_%d.xml' % index
             vm_name = "vm_%d" % index
 
             # 1. Check and delete VM if already exists
-            Libvirt.check_if_vm_exists_and_delete(vm_name, self.connection)
+            model.Libvirt.check_if_vm_exists_and_delete(vm_name,
+                                                        self.connection)
+            xml_str, mac = model.Libvirt.build_vm_xml(
+                self.connection, self.vm_flavor, vm_name, index)
 
-            _, mac = Libvirt.build_vm_xml(self.connection, self.vm_flavor,
-                                          cfg, vm_name, index)
             # 2: Cleanup already available VMs
-            for vkey, vfs in OrderedDict(vnf["network_ports"]).items():
-                if vkey == "mgmt":
-                    continue
-                self._enable_interfaces(index, vfs, cfg)
+            for vfs in [vfs for vfs_name, vfs in vnf["network_ports"].items()
+                        if vfs_name != 'mgmt']:
+                xml_str = self._enable_interfaces(index, vfs, xml_str)
 
             # copy xml to target...
+            model.Libvirt.write_file(cfg, xml_str)
             self.connection.put(cfg, cfg)
 
             # NOTE: launch through libvirt
             LOG.info("virsh create ...")
-            Libvirt.virsh_create_vm(self.connection, cfg)
+            model.Libvirt.virsh_create_vm(self.connection, cfg)
 
             self.vm_names.append(vm_name)
 
index d762055..95472fd 100644 (file)
@@ -16,15 +16,11 @@ from __future__ import absolute_import
 import os
 import logging
 import collections
-from collections import OrderedDict
 
 from yardstick import ssh
 from yardstick.network_services.utils import get_nsb_option
-from yardstick.network_services.utils import provision_tool
 from yardstick.benchmark.contexts.base import Context
-from yardstick.benchmark.contexts.standalone.model import Libvirt
-from yardstick.benchmark.contexts.standalone.model import StandaloneContextHelper
-from yardstick.benchmark.contexts.standalone.model import Server
+from yardstick.benchmark.contexts.standalone import model
 from yardstick.network_services.utils import PciAddress
 
 LOG = logging.getLogger(__name__)
@@ -41,7 +37,8 @@ class SriovContext(Context):
         self.file_path = None
         self.sriov = []
         self.first_run = True
-        self.dpdk_devbind = ''
+        self.dpdk_devbind = os.path.join(get_nsb_option('bin_path'),
+                                         'dpdk-devbind.py')
         self.vm_names = []
         self.nfvi_host = []
         self.nodes = []
@@ -49,8 +46,8 @@ class SriovContext(Context):
         self.attrs = {}
         self.vm_flavor = None
         self.servers = None
-        self.helper = StandaloneContextHelper()
-        self.vnf_node = Server()
+        self.helper = model.StandaloneContextHelper()
+        self.vnf_node = model.Server()
         self.drivers = []
         super(SriovContext, self).__init__()
 
@@ -82,20 +79,16 @@ class SriovContext(Context):
             return
 
         self.connection = ssh.SSH.from_node(self.host_mgmt)
-        self.dpdk_devbind = provision_tool(
-            self.connection,
-            os.path.join(get_nsb_option("bin_path"), "dpdk-devbind.py"))
 
         #    Todo: NFVi deploy (sriov, vswitch, ovs etc) based on the config.
-        StandaloneContextHelper.install_req_libs(self.connection)
-        self.networks = StandaloneContextHelper.get_nic_details(
+        model.StandaloneContextHelper.install_req_libs(self.connection)
+        self.networks = model.StandaloneContextHelper.get_nic_details(
             self.connection, self.networks, self.dpdk_devbind)
         self.nodes = self.setup_sriov_context()
 
         LOG.debug("Waiting for VM to come up...")
-        self.nodes = StandaloneContextHelper.wait_for_vnfs_to_start(self.connection,
-                                                                    self.servers,
-                                                                    self.nodes)
+        self.nodes = model.StandaloneContextHelper.wait_for_vnfs_to_start(
+            self.connection, self.servers, self.nodes)
 
     def undeploy(self):
         """don't need to undeploy"""
@@ -105,7 +98,7 @@ class SriovContext(Context):
 
         # Todo: NFVi undeploy (sriov, vswitch, ovs etc) based on the config.
         for vm in self.vm_names:
-            Libvirt.check_if_vm_exists_and_delete(vm, self.connection)
+            model.Libvirt.check_if_vm_exists_and_delete(vm, self.connection)
 
         # Bind nics back to kernel
         for ports in self.networks.values():
@@ -136,8 +129,8 @@ class SriovContext(Context):
         except StopIteration:
             pass
         else:
-            raise ValueError("Duplicate nodes!!! Nodes: %s %s" %
-                             (node, duplicate))
+            raise ValueError("Duplicate nodes!!! Nodes: %s %s"
+                             % (node, duplicate))
 
         node["name"] = attr_name
         return node
@@ -179,7 +172,7 @@ class SriovContext(Context):
             self.connection.execute(build_vfs.format(ports.get('phy_port')))
 
             # configure VFs...
-            mac = StandaloneContextHelper.get_mac_address()
+            mac = model.StandaloneContextHelper.get_mac_address()
             interface = ports.get('interface')
             if interface is not None:
                 self.connection.execute(vf_cmd.format(interface, mac))
@@ -201,7 +194,7 @@ class SriovContext(Context):
         slot = index + idx + 10
         vf['vpci'] = \
             "{}:{}:{:02x}.{}".format(vpci.domain, vpci.bus, slot, vpci.function)
-        Libvirt.add_sriov_interfaces(
+        model.Libvirt.add_sriov_interfaces(
             vf['vpci'], vf['vf_pci']['vf_pci'], vf['mac'], str(cfg))
         self.connection.execute("ifconfig %s up" % vf['interface'])
         self.connection.execute(vf_spoofchk.format(vf['interface']))
@@ -212,34 +205,37 @@ class SriovContext(Context):
         #   1 : modprobe host_driver with num_vfs
         self.configure_nics_for_sriov()
 
-        for index, (key, vnf) in enumerate(OrderedDict(self.servers).items()):
+        for index, (key, vnf) in enumerate(collections.OrderedDict(
+                self.servers).items()):
             cfg = '/tmp/vm_sriov_%s.xml' % str(index)
             vm_name = "vm_%s" % str(index)
 
             # 1. Check and delete VM if already exists
-            Libvirt.check_if_vm_exists_and_delete(vm_name, self.connection)
+            model.Libvirt.check_if_vm_exists_and_delete(vm_name,
+                                                        self.connection)
+            xml_str, mac = model.Libvirt.build_vm_xml(
+                self.connection, self.vm_flavor, vm_name, index)
 
-            _, mac = Libvirt.build_vm_xml(self.connection, self.vm_flavor, cfg, vm_name, index)
             # 2: Cleanup already available VMs
-            for idx, (vkey, vfs) in enumerate(OrderedDict(vnf["network_ports"]).items()):
-                if vkey == "mgmt":
-                    continue
+            network_ports = collections.OrderedDict(
+                {k: v for k, v in vnf["network_ports"].items() if k != 'mgmt'})
+            for idx, vfs in enumerate(network_ports.values()):
                 self._enable_interfaces(index, idx, vfs, cfg)
 
             # copy xml to target...
+            model.Libvirt.write_file(cfg, xml_str)
             self.connection.put(cfg, cfg)
 
             # NOTE: launch through libvirt
             LOG.info("virsh create ...")
-            Libvirt.virsh_create_vm(self.connection, cfg)
+            model.Libvirt.virsh_create_vm(self.connection, cfg)
 
             self.vm_names.append(vm_name)
 
             # build vnf node details
-            nodes.append(self.vnf_node.generate_vnf_instance(self.vm_flavor,
-                                                             self.networks,
-                                                             self.host_mgmt.get('ip'),
-                                                             key, vnf, mac))
+            nodes.append(self.vnf_node.generate_vnf_instance(
+                self.vm_flavor, self.networks, self.host_mgmt.get('ip'),
+                key, vnf, mac))
 
         return nodes
 
@@ -248,7 +244,8 @@ class SriovContext(Context):
             "mac": vfmac,
             "pf_if": pfif
         }
-        vfs = StandaloneContextHelper.get_virtual_devices(self.connection, value)
+        vfs = model.StandaloneContextHelper.get_virtual_devices(
+            self.connection, value)
         for k, v in vfs.items():
             m = PciAddress(k.strip())
             m1 = PciAddress(value.strip())
index 4272a6d..697cc00 100644 (file)
@@ -112,9 +112,9 @@ class Task(object):     # pragma: no cover
                 continue
 
             try:
-                data = self._run(tasks[i]['scenarios'],
-                                 tasks[i]['run_in_parallel'],
-                                 output_config)
+                success, data = self._run(tasks[i]['scenarios'],
+                                          tasks[i]['run_in_parallel'],
+                                          output_config)
             except KeyboardInterrupt:
                 raise
             except Exception:  # pylint: disable=broad-except
@@ -123,9 +123,15 @@ class Task(object):     # pragma: no cover
                 testcases[tasks[i]['case_name']] = {'criteria': 'FAIL',
                                                     'tc_data': []}
             else:
-                LOG.info('Testcase: "%s" SUCCESS!!!', tasks[i]['case_name'])
-                testcases[tasks[i]['case_name']] = {'criteria': 'PASS',
-                                                    'tc_data': data}
+                if success:
+                    LOG.info('Testcase: "%s" SUCCESS!!!', tasks[i]['case_name'])
+                    testcases[tasks[i]['case_name']] = {'criteria': 'PASS',
+                                                        'tc_data': data}
+                else:
+                    LOG.error('Testcase: "%s" FAILED!!!', tasks[i]['case_name'],
+                              exc_info=True)
+                    testcases[tasks[i]['case_name']] = {'criteria': 'FAIL',
+                                                        'tc_data': data}
 
             if args.keep_deploy:
                 # keep deployment, forget about stack
@@ -240,6 +246,7 @@ class Task(object):     # pragma: no cover
 
         background_runners = []
 
+        task_success = True
         result = []
         # Start all background scenarios
         for scenario in filter(_is_background_scenario, scenarios):
@@ -258,8 +265,8 @@ class Task(object):     # pragma: no cover
             for runner in runners:
                 status = runner_join(runner, background_runners, self.outputs, result)
                 if status != 0:
-                    raise RuntimeError(
-                        "{0} runner status {1}".format(runner.__execution_type__, status))
+                    LOG.error("%s runner status %s", runner.__execution_type__, status)
+                    task_success = False
                 LOG.info("Runner ended")
         else:
             # run serially
@@ -271,8 +278,8 @@ class Task(object):     # pragma: no cover
                         LOG.error('Scenario NO.%s: "%s" ERROR!',
                                   scenarios.index(scenario) + 1,
                                   scenario.get('type'))
-                        raise RuntimeError(
-                            "{0} runner status {1}".format(runner.__execution_type__, status))
+                        LOG.error("%s runner status %s", runner.__execution_type__, status)
+                        task_success = False
                     LOG.info("Runner ended")
 
         # Abort background runners
@@ -289,7 +296,7 @@ class Task(object):     # pragma: no cover
             base_runner.Runner.release(runner)
 
             print("Background task ended")
-        return result
+        return task_success, result
 
     def atexit_handler(self):
         """handler for process termination"""
@@ -614,15 +621,25 @@ class TaskParser(object):       # pragma: no cover
             vnf__0: vnf_0.yardstick
         """
         def qualified_name(name):
-            node_name, context_name = name.split('.')
+            try:
+                # for openstack
+                node_name, context_name = name.split('.')
+                sep = '.'
+            except ValueError:
+                # for kubernetes, some kubernetes resources don't support
+                # name format like 'xxx.xxx', so we use '-' instead
+                # need unified later
+                node_name, context_name = name.split('-')
+                sep = '-'
+
             try:
                 ctx = next((context for context in contexts
-                       if context.assigned_name == context_name))
+                            if context.assigned_name == context_name))
             except StopIteration:
                 raise y_exc.ScenarioConfigContextNameNotFound(
                     context_name=context_name)
 
-            return '{}.{}'.format(node_name, ctx.name)
+            return '{}{}{}'.format(node_name, sep, ctx.name)
 
         if 'host' in scenario:
             scenario['host'] = qualified_name(scenario['host'])
index 99386a4..fbdf6c2 100755 (executable)
@@ -121,7 +121,7 @@ class Runner(object):
     @staticmethod
     def terminate_all():
         """Terminate all runners (subprocesses)"""
-        log.debug("Terminating all runners", exc_info=True)
+        log.debug("Terminating all runners")
 
         # release dumper process as some errors before any runner is created
         if not Runner.runners:
index fbf72a7..60b0348 100644 (file)
@@ -66,6 +66,8 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
         data = {}
         errors = ""
 
+        benchmark.pre_run_wait_time(interval)
+
         try:
             result = method(data)
         except AssertionError as assertion:
@@ -77,7 +79,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
                 errors = assertion.args
         # catch all exceptions because with multiprocessing we can have un-picklable exception
         # problems  https://bugs.python.org/issue9400
-        except Exception:
+        except Exception:  # pylint: disable=broad-except
             errors = traceback.format_exc()
             LOG.exception("")
         else:
@@ -86,7 +88,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
                 # if we do timeout we don't care about dropping individual KPIs
                 output_queue.put(result, True, QUEUE_PUT_TIMEOUT)
 
-        time.sleep(interval)
+        benchmark.post_run_wait_time(interval)
 
         benchmark_output = {
             'timestamp': time.time(),
index cb04243..20d6da0 100644 (file)
@@ -71,6 +71,8 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
             data = {}
             errors = ""
 
+            benchmark.pre_run_wait_time(interval)
+
             try:
                 result = method(data)
             except AssertionError as assertion:
@@ -90,7 +92,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
                     scenario_cfg['options']['rate'] -= delta
                     sequence = 1
                     continue
-            except Exception:
+            except Exception:  # pylint: disable=broad-except
                 errors = traceback.format_exc()
                 LOG.exception("")
             else:
@@ -99,7 +101,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
                     # if we do timeout we don't care about dropping individual KPIs
                     output_queue.put(result, True, QUEUE_PUT_TIMEOUT)
 
-            time.sleep(interval)
+            benchmark.post_run_wait_time(interval)
 
             benchmark_output = {
                 'timestamp': time.time(),
index 9ac5547..1fadd25 100644 (file)
@@ -26,7 +26,6 @@ class ScenarioGeneral(base.Scenario):
         self.scenario_cfg = scenario_cfg
         self.context_cfg = context_cfg
         self.intermediate_variables = {}
-        self.pass_flag = True
 
     def setup(self):
         self.director = Director(self.scenario_cfg, self.context_cfg)
@@ -47,7 +46,7 @@ class ScenarioGeneral(base.Scenario):
                     step['actionType'], step['actionKey'])
                 if actionRollbacker:
                     self.director.executionSteps.append(actionRollbacker)
-            except Exception:
+            except Exception:  # pylint: disable=broad-except
                 LOG.exception("Exception")
                 LOG.debug(
                     "\033[91m exception when running step: %s .... \033[0m",
@@ -59,31 +58,16 @@ class ScenarioGeneral(base.Scenario):
         self.director.stopMonitors()
 
         verify_result = self.director.verify()
-
-        self.director.store_result(result)
-
         for k, v in self.director.data.items():
             if v == 0:
                 result['sla_pass'] = 0
                 verify_result = False
-                self.pass_flag = False
-                LOG.info(
-                    "\033[92m The service process not found in the host \
-envrioment, the HA test case NOT pass")
+                LOG.info("\033[92m The service process (%s) not found in the host environment", k)
 
-        if verify_result:
-            result['sla_pass'] = 1
-            LOG.info(
-                "\033[92m Congratulations, "
-                "the HA test case PASS! \033[0m")
-        else:
-            result['sla_pass'] = 0
-            self.pass_flag = False
-            LOG.info(
-                "\033[91m Aoh, the HA test case FAIL,"
-                "please check the detail debug information! \033[0m")
+        result['sla_pass'] = 1 if verify_result else 0
+        self.director.store_result(result)
+
+        assert verify_result is True, "The HA test case NOT passed"
 
     def teardown(self):
         self.director.knockoff()
-
-        assert self.pass_flag, "The HA test case NOT passed"
index 6d0d812..dcd0fe5 100755 (executable)
@@ -29,7 +29,6 @@ class ServiceHA(base.Scenario):
         self.context_cfg = context_cfg
         self.setup_done = False
         self.data = {}
-        self.pass_flag = True
 
     def setup(self):
         """scenario setup"""
@@ -73,18 +72,12 @@ class ServiceHA(base.Scenario):
         sla_pass = self.monitorMgr.verify_SLA()
         for k, v in self.data.items():
             if v == 0:
-                result['sla_pass'] = 0
-                self.pass_flag = False
-                LOG.info("The service process not found in the host envrioment, \
-the HA test case NOT pass")
-                return
+                sla_pass = False
+                LOG.info("The service process (%s) not found in the host envrioment", k)
+
+        result['sla_pass'] = 1 if sla_pass else 0
         self.monitorMgr.store_result(result)
-        if sla_pass:
-            result['sla_pass'] = 1
-            LOG.info("The HA test case PASS the SLA")
-        else:
-            result['sla_pass'] = 0
-            self.pass_flag = False
+
         assert sla_pass is True, "The HA test case NOT pass the SLA"
 
         return
@@ -94,8 +87,6 @@ the HA test case NOT pass")
         for attacker in self.attackers:
             attacker.recover()
 
-        assert self.pass_flag, "The HA test case NOT passed"
-
 
 def _test():    # pragma: no cover
     """internal test function"""
index 10a7288..58a0280 100644 (file)
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-# yardstick comment: this is a modified copy of
-# rally/rally/benchmark/scenarios/base.py
+import abc
+import time
 
+import six
 from stevedore import extension
 
 import yardstick.common.utils as utils
@@ -37,20 +38,29 @@ def _iter_scenario_classes(scenario_type=None):
             yield scenario
 
 
+@six.add_metaclass(abc.ABCMeta)
 class Scenario(object):
 
     def setup(self):
-        """ default impl for scenario setup """
+        """Default setup implementation for Scenario classes"""
         pass
 
+    @abc.abstractmethod
     def run(self, *args):
-        """ catcher for not implemented run methods in subclasses """
-        raise RuntimeError("run method not implemented")
+        """Entry point for scenario classes, called from runner worker"""
 
     def teardown(self):
-        """ default impl for scenario teardown """
+        """Default teardown implementation for Scenario classes"""
         pass
 
+    def pre_run_wait_time(self, time_seconds):
+        """Time waited before executing the run method"""
+        pass
+
+    def post_run_wait_time(self, time_seconds):
+        """Time waited after executing the run method"""
+        time.sleep(time_seconds)
+
     @staticmethod
     def get_types():
         """return a list of known runner type (class) names"""
@@ -88,10 +98,14 @@ class Scenario(object):
         """
         return cls.__doc__.splitlines()[0] if cls.__doc__ else str(None)
 
-    def _push_to_outputs(self, keys, values):
+    @staticmethod
+    def _push_to_outputs(keys, values):
+        """Return a dictionary given the keys and the values"""
         return dict(zip(keys, values))
 
-    def _change_obj_to_dict(self, obj):
+    @staticmethod
+    def _change_obj_to_dict(obj):
+        """Return a dictionary from the __dict__ attribute of an object"""
         dic = {}
         for k, v in vars(obj).items():
             try:
index 5a5dbc3..9f18048 100644 (file)
@@ -18,7 +18,7 @@ OUTPUT_FILE=/tmp/unixbench-out.log
 # run unixbench test
 run_unixbench()
 {
-    cd /opt/tempT/UnixBench/
+    cd /opt/tempT/UnixBench/UnixBench/
     ./Run $OPTIONS > $OUTPUT_FILE
 }
 
index 8812496..96dd130 100644 (file)
@@ -6,30 +6,31 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
-
-from __future__ import print_function
-from __future__ import absolute_import
-
 import logging
 
 from yardstick.benchmark.scenarios import base
-import yardstick.common.openstack_utils as op_utils
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
 
 LOG = logging.getLogger(__name__)
 
 
 class AttachVolume(base.Scenario):
-    """Attach a volmeu to an instance"""
+    """Attach a volume to an instance"""
 
     __scenario_type__ = "AttachVolume"
 
     def __init__(self, scenario_cfg, context_cfg):
         self.scenario_cfg = scenario_cfg
         self.context_cfg = context_cfg
-        self.options = self.scenario_cfg['options']
+        self.options = self.scenario_cfg["options"]
 
-        self.server_id = self.options.get("server_id", "TestServer")
-        self.volume_id = self.options.get("volume_id", None)
+        self.server_name_or_id = self.options["server_name_or_id"]
+        self.volume_name_or_id = self.options["volume_name_or_id"]
+        self.device = self.options.get("device")
+        self.wait = self.options.get("wait", True)
+        self.timeout = self.options.get("timeout")
+        self.shade_client = openstack_utils.get_shade_client()
 
         self.setup_done = False
 
@@ -44,10 +45,14 @@ class AttachVolume(base.Scenario):
         if not self.setup_done:
             self.setup()
 
-        status = op_utils.attach_server_volume(self.server_id,
-                                               self.volume_id)
+        status = openstack_utils.attach_volume_to_server(
+            self.shade_client, self.server_name_or_id, self.volume_name_or_id,
+            device=self.device, wait=self.wait, timeout=self.timeout)
+
+        if not status:
+            result.update({"attach_volume": 0})
+            LOG.error("Attach volume to server failed!")
+            raise exceptions.ScenarioAttachVolumeError
 
-        if status:
-            LOG.info("Attach volume to server successful!")
-        else:
-            LOG.info("Attach volume to server failed!")
+        result.update({"attach_volume": 1})
+        LOG.info("Attach volume to server successful!")
index 7108722..e29f9d1 100644 (file)
@@ -11,7 +11,8 @@ import logging
 import os
 
 from yardstick.benchmark.scenarios import base
-import yardstick.common.openstack_utils as op_utils
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
 
 
 LOG = logging.getLogger(__name__)
@@ -26,9 +27,18 @@ class CreateFloatingIp(base.Scenario):
         self.scenario_cfg = scenario_cfg
         self.context_cfg = context_cfg
         self.ext_net_id = os.getenv("EXTERNAL_NETWORK", "external")
+        self.options = self.scenario_cfg["options"]
+
+        self.network_name_or_id = self.options.get("network_name_or_id", self.ext_net_id)
+        self.server = self.options.get("server")
+        self.fixed_address = self.options.get("fixed_address")
+        self.nat_destination = self.options.get("nat_destination")
+        self.port = self.options.get("port")
+        self.wait = self.options.get("wait", False)
+        self.timeout = self.options.get("timeout", 60)
+
+        self.shade_client = openstack_utils.get_shade_client()
 
-        self.neutron_client = op_utils.get_neutron_client()
-        self.shade_client = op_utils.get_shade_client()
         self.setup_done = False
 
     def setup(self):
@@ -36,21 +46,25 @@ class CreateFloatingIp(base.Scenario):
 
         self.setup_done = True
 
-    def run(self, *args):
+    def run(self, result):
         """execute the test"""
 
         if not self.setup_done:
             self.setup()
 
-        net_id = op_utils.get_network_id(self.shade_client, self.ext_net_id)
-        floating_info = op_utils.create_floating_ip(self.neutron_client,
-                                                    extnet_id=net_id)
+        floating_info = openstack_utils.create_floating_ip(
+            self.shade_client, network_name_or_id=self.network_name_or_id,
+            server=self.server, fixed_address=self.fixed_address,
+            nat_destination=self.nat_destination, port=self.port,
+            wait=self.wait, timeout=self.timeout)
 
         if not floating_info:
+            result.update({"floating_ip_create": 0})
             LOG.error("Creating floating ip failed!")
-            return
+            raise exceptions.ScenarioCreateFloatingIPError
 
+        result.update({"floating_ip_create": 1})
         LOG.info("Creating floating ip successful!")
-        keys = self.scenario_cfg.get('output', '').split()
+        keys = self.scenario_cfg.get("output", '').split()
         values = [floating_info["fip_id"], floating_info["fip_addr"]]
         return self._push_to_outputs(keys, values)
index f5b1fff..ee9bc44 100644 (file)
@@ -6,15 +6,11 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
-
-from __future__ import print_function
-from __future__ import absolute_import
-
 import logging
-import paramiko
 
 from yardstick.benchmark.scenarios import base
-import yardstick.common.openstack_utils as op_utils
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
 
 LOG = logging.getLogger(__name__)
 
@@ -27,10 +23,11 @@ class CreateKeypair(base.Scenario):
     def __init__(self, scenario_cfg, context_cfg):
         self.scenario_cfg = scenario_cfg
         self.context_cfg = context_cfg
-        self.options = self.scenario_cfg['options']
+        self.options = self.scenario_cfg["options"]
 
-        self.key_name = self.options.get("key_name", "yardstick_key")
-        self.key_filename = self.options.get("key_path", "/tmp/yardstick_key")
+        self.name = self.options["key_name"]
+        self.public_key = self.options.get("public_key")
+        self.shade_client = openstack_utils.get_shade_client()
 
         self.setup_done = False
 
@@ -45,27 +42,17 @@ class CreateKeypair(base.Scenario):
         if not self.setup_done:
             self.setup()
 
-        rsa_key = paramiko.RSAKey.generate(bits=2048, progress_func=None)
-        rsa_key.write_private_key_file(self.key_filename)
-        LOG.info("Writing key_file %s ...", self.key_filename)
-        with open(self.key_filename + ".pub", "w") as pubkey_file:
-            pubkey_file.write(
-                "%s %s\n" % (rsa_key.get_name(), rsa_key.get_base64()))
-        del rsa_key
-
-        keypair = op_utils.create_keypair(self.key_name,
-                                          self.key_filename + ".pub")
+        keypair = openstack_utils.create_keypair(
+            self.shade_client, self.name, public_key=self.public_key)
 
-        if keypair:
-            result.update({"keypair_create": 1})
-            LOG.info("Create keypair successful!")
-        else:
+        if not keypair:
             result.update({"keypair_create": 0})
-            LOG.info("Create keypair failed!")
-        try:
-            keys = self.scenario_cfg.get('output', '').split()
-        except KeyError:
-            pass
-        else:
-            values = [keypair.id]
-            return self._push_to_outputs(keys, values)
+            LOG.error("Create keypair failed!")
+            raise exceptions.ScenarioCreateKeypairError
+
+        result.update({"keypair_create": 1})
+        LOG.info("Create keypair successful!")
+        keys = self.scenario_cfg.get("output", '').split()
+        keypair_id = keypair["id"]
+        values = [keypair_id]
+        return self._push_to_outputs(keys, values)
index 3d1aec9..1d2e364 100644 (file)
@@ -7,13 +7,11 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 
-from __future__ import print_function
-from __future__ import absolute_import
-
 import logging
 
 from yardstick.benchmark.scenarios import base
-import yardstick.common.openstack_utils as op_utils
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
 
 LOG = logging.getLogger(__name__)
 
@@ -26,11 +24,12 @@ class CreateSecgroup(base.Scenario):
     def __init__(self, scenario_cfg, context_cfg):
         self.scenario_cfg = scenario_cfg
         self.context_cfg = context_cfg
-        self.options = self.scenario_cfg['options']
+        self.options = self.scenario_cfg["options"]
 
-        self.sg_name = self.options.get("sg_name", "yardstick_sec_group")
-        self.description = self.options.get("description", None)
-        self.neutron_client = op_utils.get_neutron_client()
+        self.sg_name = self.options["sg_name"]
+        self.description = self.options.get("description", "")
+        self.project_id = self.options.get("project_id")
+        self.shade_client = openstack_utils.get_shade_client()
 
         self.setup_done = False
 
@@ -45,21 +44,16 @@ class CreateSecgroup(base.Scenario):
         if not self.setup_done:
             self.setup()
 
-        sg_id = op_utils.create_security_group_full(self.neutron_client,
-                                                    sg_name=self.sg_name,
-                                                    sg_description=self.description)
-
-        if sg_id:
-            result.update({"sg_create": 1})
-            LOG.info("Create security group successful!")
-        else:
+        sg_id = openstack_utils.create_security_group_full(
+            self.shade_client, self.sg_name, sg_description=self.description,
+            project_id=self.project_id)
+        if not sg_id:
             result.update({"sg_create": 0})
             LOG.error("Create security group failed!")
+            raise exceptions.ScenarioCreateSecurityGroupError
 
-        try:
-            keys = self.scenario_cfg.get('output', '').split()
-        except KeyError:
-            pass
-        else:
-            values = [sg_id]
-            return self._push_to_outputs(keys, values)
+        result.update({"sg_create": 1})
+        LOG.info("Create security group successful!")
+        keys = self.scenario_cfg.get("output", '').split()
+        values = [sg_id]
+        return self._push_to_outputs(keys, values)
index 31ba18e..e2748ae 100644 (file)
@@ -6,14 +6,11 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
-
-from __future__ import print_function
-from __future__ import absolute_import
-
 import logging
 
 from yardstick.benchmark.scenarios import base
-import yardstick.common.openstack_utils as op_utils
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
 
 LOG = logging.getLogger(__name__)
 
@@ -26,15 +23,27 @@ class CreateServer(base.Scenario):
     def __init__(self, scenario_cfg, context_cfg):
         self.scenario_cfg = scenario_cfg
         self.context_cfg = context_cfg
-        self.options = self.scenario_cfg['options']
-
-        self.image_name = self.options.get("image_name", None)
-        self.flavor_name = self.options.get("flavor_name", None)
-        self.openstack = self.options.get("openstack_paras", None)
-
-        self.glance_client = op_utils.get_glance_client()
-        self.neutron_client = op_utils.get_neutron_client()
-        self.nova_client = op_utils.get_nova_client()
+        self.options = self.scenario_cfg["options"]
+
+        self.name = self.options["name"]
+        self.image = self.options["image"]
+        self.flavor = self.options["flavor"]
+        self.auto_ip = self.options.get("auto_ip", True)
+        self.ips = self.options.get("ips")
+        self.ip_pool = self.options.get("ip_pool")
+        self.root_volume = self.options.get("root_volume")
+        self.terminate_volume = self.options.get("terminate_volume", False)
+        self.wait = self.options.get("wait", True)
+        self.timeout = self.options.get("timeout", 180)
+        self.reuse_ips = self.options.get("reuse_ips", True)
+        self.network = self.options.get("network")
+        self.boot_from_volume = self.options.get("boot_from_volume", False)
+        self.volume_size = self.options.get("volume_size", "20")
+        self.boot_volume = self.options.get("boot_volume")
+        self.volumes = self.options.get("volumes")
+        self.nat_destination = self.options.get("nat_destination")
+
+        self.shade_client = openstack_utils.get_shade_client()
 
         self.setup_done = False
 
@@ -49,26 +58,23 @@ class CreateServer(base.Scenario):
         if not self.setup_done:
             self.setup()
 
-        if self.image_name is not None:
-            self.openstack['image'] = op_utils.get_image_id(self.glance_client,
-                                                            self.image_name)
-        if self.flavor_name is not None:
-            self.openstack['flavor'] = op_utils.get_flavor_id(self.nova_client,
-                                                              self.flavor_name)
-
-        vm = op_utils.create_instance_and_wait_for_active(self.openstack)
-
-        if vm:
-            result.update({"instance_create": 1})
-            LOG.info("Create server successful!")
-        else:
+        server = openstack_utils.create_instance_and_wait_for_active(
+            self.shade_client, self.name, self.image,
+            self.flavor, auto_ip=self.auto_ip, ips=self.ips,
+            ip_pool=self.ip_pool, root_volume=self.root_volume,
+            terminate_volume=self.terminate_volume, wait=self.wait,
+            timeout=self.timeout, reuse_ips=self.reuse_ips,
+            network=self.network, boot_from_volume=self.boot_from_volume,
+            volume_size=self.volume_size, boot_volume=self.boot_volume,
+            volumes=self.volumes, nat_destination=self.nat_destination)
+
+        if not server:
             result.update({"instance_create": 0})
             LOG.error("Create server failed!")
+            raise exceptions.ScenarioCreateServerError
 
-        try:
-            keys = self.scenario_cfg.get('output', '').split()
-        except KeyError:
-            pass
-        else:
-            values = [vm.id]
-            return self._push_to_outputs(keys, values)
+        result.update({"instance_create": 1})
+        LOG.info("Create instance successful!")
+        keys = self.scenario_cfg.get("output", '').split()
+        values = [server["id"]]
+        return self._push_to_outputs(keys, values)
index 4314952..a35445f 100644 (file)
@@ -7,13 +7,12 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 
-from __future__ import print_function
-from __future__ import absolute_import
-
 import logging
 
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
 from yardstick.benchmark.scenarios import base
-import yardstick.common.openstack_utils as op_utils
+
 
 LOG = logging.getLogger(__name__)
 
@@ -28,9 +27,10 @@ class DeleteFloatingIp(base.Scenario):
         self.context_cfg = context_cfg
         self.options = self.scenario_cfg['options']
 
-        self.floating_ip_id = self.options.get("floating_ip_id", None)
+        self.floating_ip_id = self.options["floating_ip_id"]
+        self.retry = self.options.get("retry", 1)
 
-        self.nova_client = op_utils.get_nova_client()
+        self.shade_client = openstack_utils.get_shade_client()
         self.setup_done = False
 
     def setup(self):
@@ -44,11 +44,13 @@ class DeleteFloatingIp(base.Scenario):
         if not self.setup_done:
             self.setup()
 
-        status = op_utils.delete_floating_ip(nova_client=self.nova_client,
-                                             floatingip_id=self.floating_ip_id)
-        if status:
-            result.update({"delete_floating_ip": 1})
-            LOG.info("Delete floating ip successful!")
-        else:
+        status = openstack_utils.delete_floating_ip(
+            self.shade_client, self.floating_ip_id,
+            retry=self.retry)
+        if not status:
             result.update({"delete_floating_ip": 0})
             LOG.error("Delete floating ip failed!")
+            raise exceptions.ScenarioDeleteFloatingIPError
+
+        result.update({"delete_floating_ip": 1})
+        LOG.info("Delete floating ip successful!")
index 1351399..a52a385 100644 (file)
@@ -6,14 +6,12 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
-
-from __future__ import print_function
-from __future__ import absolute_import
-
 import logging
 
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
 from yardstick.benchmark.scenarios import base
-import yardstick.common.openstack_utils as op_utils
+
 
 LOG = logging.getLogger(__name__)
 
@@ -26,11 +24,11 @@ class DeleteKeypair(base.Scenario):
     def __init__(self, scenario_cfg, context_cfg):
         self.scenario_cfg = scenario_cfg
         self.context_cfg = context_cfg
-        self.options = self.scenario_cfg['options']
+        self.options = self.scenario_cfg["options"]
 
-        self.key_name = self.options.get("key_name", "yardstick_key")
+        self.key_name = self.options["key_name"]
 
-        self.nova_client = op_utils.get_nova_client()
+        self.shade_client = openstack_utils.get_shade_client()
 
         self.setup_done = False
 
@@ -45,12 +43,13 @@ class DeleteKeypair(base.Scenario):
         if not self.setup_done:
             self.setup()
 
-        status = op_utils.delete_keypair(self.nova_client,
-                                         self.key_name)
+        status = openstack_utils.delete_keypair(self.shade_client,
+                                                self.key_name)
 
-        if status:
-            result.update({"delete_keypair": 1})
-            LOG.info("Delete keypair successful!")
-        else:
+        if not status:
             result.update({"delete_keypair": 0})
-            LOG.info("Delete keypair failed!")
+            LOG.error("Delete keypair failed!")
+            raise exceptions.ScenarioDeleteKeypairError
+
+        result.update({"delete_keypair": 1})
+        LOG.info("Delete keypair successful!")
index 2e8b595..8874e8b 100644 (file)
@@ -10,7 +10,8 @@
 import logging
 
 from yardstick.benchmark.scenarios import base
-import yardstick.common.openstack_utils as op_utils
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
 
 
 LOG = logging.getLogger(__name__)
@@ -24,11 +25,11 @@ class DeleteNetwork(base.Scenario):
     def __init__(self, scenario_cfg, context_cfg):
         self.scenario_cfg = scenario_cfg
         self.context_cfg = context_cfg
-        self.options = self.scenario_cfg['options']
+        self.options = self.scenario_cfg["options"]
 
-        self.network_id = self.options.get("network_id", None)
+        self.network_name_or_id = self.options["network_name_or_id"]
 
-        self.shade_client = op_utils.get_shade_client()
+        self.shade_client = openstack_utils.get_shade_client()
 
         self.setup_done = False
 
@@ -43,12 +44,13 @@ class DeleteNetwork(base.Scenario):
         if not self.setup_done:
             self.setup()
 
-        status = op_utils.delete_neutron_net(self.shade_client,
-                                             network_id=self.network_id)
-        if status:
-            result.update({"delete_network": 1})
-            LOG.info("Delete network successful!")
-        else:
+        status = openstack_utils.delete_neutron_net(self.shade_client,
+                                                    self.network_name_or_id)
+
+        if not status:
             result.update({"delete_network": 0})
             LOG.error("Delete network failed!")
-        return status
+            raise exceptions.ScenarioDeleteNetworkError
+
+        result.update({"delete_network": 1})
+        LOG.info("Delete network successful!")
index 117c808..e71aed3 100644 (file)
@@ -7,13 +7,11 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 
-from __future__ import print_function
-from __future__ import absolute_import
-
 import logging
 
 from yardstick.benchmark.scenarios import base
-import yardstick.common.openstack_utils as op_utils
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
 
 LOG = logging.getLogger(__name__)
 
@@ -28,10 +26,11 @@ class DeleteRouterInterface(base.Scenario):
         self.context_cfg = context_cfg
         self.options = self.scenario_cfg['options']
 
-        self.subnet_id = self.options.get("subnet_id", None)
-        self.router_id = self.options.get("router_id", None)
+        self.router = self.options["router"]
+        self.subnet_id = self.options.get("subnet_id")
+        self.port_id = self.options.get("port_id")
 
-        self.neutron_client = op_utils.get_neutron_client()
+        self.shade_client = openstack_utils.get_shade_client()
 
         self.setup_done = False
 
@@ -46,12 +45,13 @@ class DeleteRouterInterface(base.Scenario):
         if not self.setup_done:
             self.setup()
 
-        status = op_utils.remove_interface_router(self.neutron_client,
-                                                  router_id=self.router_id,
-                                                  subnet_id=self.subnet_id)
-        if status:
-            result.update({"delete_router_interface": 1})
-            LOG.info("Delete router interface successful!")
-        else:
+        status = openstack_utils.remove_router_interface(
+            self.shade_client, self.router, subnet_id=self.subnet_id,
+            port_id=self.port_id)
+        if not status:
             result.update({"delete_router_interface": 0})
             LOG.error("Delete router interface failed!")
+            raise exceptions.ScenarioRemoveRouterIntError
+
+        result.update({"delete_router_interface": 1})
+        LOG.info("Delete router interface successful!")
index bcd8fab..46229ff 100644 (file)
@@ -6,14 +6,11 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
-
-from __future__ import print_function
-from __future__ import absolute_import
-
 import logging
 
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
 from yardstick.benchmark.scenarios import base
-import yardstick.common.openstack_utils as op_utils
 
 LOG = logging.getLogger(__name__)
 
@@ -26,9 +23,13 @@ class DeleteServer(base.Scenario):
     def __init__(self, scenario_cfg, context_cfg):
         self.scenario_cfg = scenario_cfg
         self.context_cfg = context_cfg
-        self.options = self.scenario_cfg['options']
-        self.server_id = self.options.get("server_id", None)
-        self.nova_client = op_utils.get_nova_client()
+        self.options = self.scenario_cfg["options"]
+        self.server_name_or_id = self.options["name_or_id"]
+        self.wait = self.options.get("wait", False)
+        self.timeout = self.options.get("timeout", 180)
+        self.delete_ips = self.options.get("delete_ips", False)
+        self.delete_ip_retry = self.options.get("delete_ip_retry", 1)
+        self.shade_client = openstack_utils.get_shade_client()
 
         self.setup_done = False
 
@@ -43,9 +44,15 @@ class DeleteServer(base.Scenario):
         if not self.setup_done:
             self.setup()
 
-        status = op_utils.delete_instance(self.nova_client,
-                                          instance_id=self.server_id)
-        if status:
-            LOG.info("Delete server successful!")
-        else:
+        status = openstack_utils.delete_instance(
+            self.shade_client, self.server_name_or_id, wait=self.wait,
+            timeout=self.timeout, delete_ips=self.delete_ips,
+            delete_ip_retry=self.delete_ip_retry)
+
+        if not status:
+            result.update({"delete_server": 0})
             LOG.error("Delete server failed!")
+            raise exceptions.ScenarioDeleteServerError
+
+        result.update({"delete_server": 1})
+        LOG.info("Delete server successful!")
index d5e3394..6727a73 100644 (file)
@@ -6,14 +6,11 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
-
-from __future__ import print_function
-from __future__ import absolute_import
-
 import logging
 
 from yardstick.benchmark.scenarios import base
-import yardstick.common.openstack_utils as op_utils
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
 
 LOG = logging.getLogger(__name__)
 
@@ -26,8 +23,12 @@ class GetFlavor(base.Scenario):
     def __init__(self, scenario_cfg, context_cfg):
         self.scenario_cfg = scenario_cfg
         self.context_cfg = context_cfg
-        self.options = self.scenario_cfg['options']
-        self.flavor_name = self.options.get("flavor_name", "TestFlavor")
+        self.options = self.scenario_cfg["options"]
+        self.name_or_id = self.options["name_or_id"]
+        self.filters = self.options.get("filters")
+        self.get_extra = self.options.get("get_extra", True)
+        self.shade_client = openstack_utils.get_shade_client()
+
         self.setup_done = False
 
     def setup(self):
@@ -41,14 +42,18 @@ class GetFlavor(base.Scenario):
         if not self.setup_done:
             self.setup()
 
-        LOG.info("Querying flavor: %s", self.flavor_name)
-        flavor = op_utils.get_flavor_by_name(self.flavor_name)
-        if flavor:
-            LOG.info("Get flavor successful!")
-            values = [self._change_obj_to_dict(flavor)]
-        else:
-            LOG.info("Get flavor: no flavor matched!")
-            values = []
+        LOG.info("Querying flavor: %s", self.name_or_id)
+        flavor = openstack_utils.get_flavor(
+            self.shade_client, self.name_or_id, filters=self.filters,
+            get_extra=self.get_extra)
+
+        if not flavor:
+            result.update({"get_flavor": 0})
+            LOG.error("Get flavor failed!")
+            raise exceptions.ScenarioGetFlavorError
 
-        keys = self.scenario_cfg.get('output', '').split()
+        result.update({"get_flavor": 1})
+        LOG.info("Get flavor successful!")
+        values = [flavor]
+        keys = self.scenario_cfg.get("output", '').split()
         return self._push_to_outputs(keys, values)
index fcf47c8..f65fa9e 100644 (file)
@@ -6,14 +6,11 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
-
-from __future__ import print_function
-from __future__ import absolute_import
-
 import logging
 
 from yardstick.benchmark.scenarios import base
-import yardstick.common.openstack_utils as op_utils
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
 
 LOG = logging.getLogger(__name__)
 
@@ -21,63 +18,58 @@ LOG = logging.getLogger(__name__)
 class GetServer(base.Scenario):
     """Get a server instance
 
-  Parameters
-    server_id - ID of the server
-        type:    string
-        unit:    N/A
-        default: null
-    server_name - name of the server
-        type:    string
-        unit:    N/A
-        default: null
-
-    Either server_id or server_name is required.
-
-  Outputs
+    Parameters:
+    name_or_id - Name or ID of the server
+        type: string
+    filters - meta data to use for further filtering
+        type: dict
+    detailed: Whether or not to add detailed additional information.
+        type: bool
+    bare: Whether to skip adding any additional information to the server
+          record.
+        type: bool
+    all_projects: Whether to get server from all projects or just the current
+                  auth scoped project.
+        type: bool
+
+    Outputs:
     rc - response code of getting server instance
-        0 for success
-        1 for failure
+        1 for success
+        0 for failure
         type:    int
-        unit:    N/A
     server - instance of the server
         type:    dict
-        unit:    N/A
+
     """
 
-    __scenario_type__ = "GetServer"
+    __scenario_type__ = 'GetServer'
 
     def __init__(self, scenario_cfg, context_cfg):
         self.scenario_cfg = scenario_cfg
         self.context_cfg = context_cfg
-        self.options = self.scenario_cfg.get('options', {})
+        self.options = self.scenario_cfg['options']
 
-        self.server_id = self.options.get("server_id")
-        if self.server_id:
-            LOG.debug('Server id is %s', self.server_id)
+        self.server_name_or_id = self.options.get('name_or_id')
+        self.filters = self.options.get('filters')
+        self.detailed = self.options.get('detailed', False)
+        self.bare = self.options.get('bare', False)
 
-        default_name = self.scenario_cfg.get('host',
-                                             self.scenario_cfg.get('target'))
-        self.server_name = self.options.get('server_name', default_name)
-        if self.server_name:
-            LOG.debug('Server name is %s', self.server_name)
-
-        self.nova_client = op_utils.get_nova_client()
+        self.shade_client = openstack_utils.get_shade_client()
 
     def run(self, result):
         """execute the test"""
 
-        if self.server_id:
-            server = self.nova_client.servers.get(self.server_id)
-        else:
-            server = op_utils.get_server_by_name(self.server_name)
-
-        keys = self.scenario_cfg.get('output', '').split()
+        server = openstack_utils.get_server(
+            self.shade_client, name_or_id=self.server_name_or_id,
+            filters=self.filters, detailed=self.detailed, bare=self.bare)
 
-        if server:
-            LOG.info("Get server successful!")
-            values = [0, self._change_obj_to_dict(server)]
-        else:
-            LOG.info("Get server failed!")
-            values = [1]
+        if not server:
+            result.update({'get_server': 0})
+            LOG.error('Get Server failed!')
+            raise exceptions.ScenarioGetServerError
 
+        result.update({'get_server': 1})
+        LOG.info('Get Server successful!')
+        keys = self.scenario_cfg.get('output', '').split()
+        values = [server]
         return self._push_to_outputs(keys, values)
index ce8a7f4..9a7b975 100644 (file)
@@ -70,39 +70,42 @@ class PktgenDPDKLatency(base.Scenario):
     def run(self, result):
         """execute the benchmark"""
 
+        options = self.scenario_cfg['options']
+        eth1 = options.get("eth1", "ens4")
+        eth2 = options.get("eth2", "ens5")
         if not self.setup_done:
             self.setup()
 
         if not self.testpmd_args:
-            self.testpmd_args = utils.get_port_mac(self.client, 'eth2')
+            self.testpmd_args = utils.get_port_mac(self.client, eth2)
 
         if not self.pktgen_args:
-            server_rev_mac = utils.get_port_mac(self.server, 'eth1')
-            server_send_mac = utils.get_port_mac(self.server, 'eth2')
-            client_src_ip = utils.get_port_ip(self.client, 'eth1')
-            client_dst_ip = utils.get_port_ip(self.client, 'eth2')
+            server_rev_mac = utils.get_port_mac(self.server, eth1)
+            server_send_mac = utils.get_port_mac(self.server, eth2)
+            client_src_ip = utils.get_port_ip(self.client, eth1)
+            client_dst_ip = utils.get_port_ip(self.client, eth2)
 
             self.pktgen_args = [client_src_ip, client_dst_ip,
                                 server_rev_mac, server_send_mac]
 
-        options = self.scenario_cfg['options']
         packetsize = options.get("packetsize", 64)
         rate = options.get("rate", 100)
 
-        cmd = "screen sudo -E bash ~/testpmd_fwd.sh %s " % (self.testpmd_args)
+        cmd = "screen sudo -E bash ~/testpmd_fwd.sh %s %s %s" % \
+            (self.testpmd_args, eth1, eth2)
         LOG.debug("Executing command: %s", cmd)
         self.server.send_command(cmd)
 
         time.sleep(1)
 
-        cmd = "screen sudo -E bash ~/pktgen_dpdk.sh %s %s %s %s %s %s" % \
+        cmd = "screen sudo -E bash ~/pktgen_dpdk.sh %s %s %s %s %s %s %s %s" % \
             (self.pktgen_args[0], self.pktgen_args[1], self.pktgen_args[2],
-             self.pktgen_args[3], rate, packetsize)
+             self.pktgen_args[3], rate, packetsize, eth1, eth2)
         LOG.debug("Executing command: %s", cmd)
         self.client.send_command(cmd)
 
         # wait for finishing test
-        time.sleep(1)
+        time.sleep(60)
 
         cmd = r"""\
 cat ~/result.log -vT \
index b872aa3..dcd5a9b 100644 (file)
@@ -7,7 +7,7 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
-!/bin/sh
+#!/bin/sh
 
 set -e
 
@@ -18,6 +18,11 @@ FWD_REV_MAC=$3    # MAC address of forwarding receiver in VM B
 FWD_SEND_MAC=$4   # MAC address of forwarding sender in VM B
 RATE=$5           # packet rate in percentage
 PKT_SIZE=$6       # packet size
+ETH1=$7
+ETH2=$8
+
+DPDK_VERSION="dpdk-17.02"
+PKTGEN_VERSION="pktgen-3.2.12"
 
 
 load_modules()
@@ -31,13 +36,13 @@ load_modules()
     if lsmod | grep "igb_uio" &> /dev/null ; then
     echo "igb_uio module is loaded"
     else
-    insmod /dpdk/x86_64-native-linuxapp-gcc/kmod/igb_uio.ko
+    insmod /opt/tempT/$DPDK_VERSION/x86_64-native-linuxapp-gcc/kmod/igb_uio.ko
     fi
 
     if lsmod | grep "rte_kni" &> /dev/null ; then
     echo "rte_kni module is loaded"
     else
-    insmod /dpdk/x86_64-native-linuxapp-gcc/kmod/rte_kni.ko
+    insmod /opt/tempT/$DPDK_VERSION/x86_64-native-linuxapp-gcc/kmod/rte_kni.ko
     fi
 }
 
@@ -48,8 +53,10 @@ change_permissions()
 }
 
 add_interface_to_dpdk(){
+    ip link set $ETH1 down
+    ip link set $ETH2 down
     interfaces=$(lspci |grep Eth |tail -n +2 |awk '{print $1}')
-    /dpdk/tools/dpdk-devbind.py --bind=igb_uio $interfaces
+    /opt/tempT/$DPDK_VERSION/usertools/dpdk-devbind.py --bind=igb_uio $interfaces
 
 }
 
@@ -106,20 +113,14 @@ spawn ./app/app/x86_64-native-linuxapp-gcc/pktgen -c 0x07 -n 4 -b $blacklist --
 expect "Pktgen>"
 send "\n"
 expect "Pktgen>"
-send "screen on\n"
+send "on\n"
 expect "Pktgen>"
 set count 10
 while { $count } {
     send "page latency\n"
-    expect {
-        timeout { send "\n" }
-        -regexp {..*} {
-            set result "${result}$expect_out(0,string)"
-            set timeout 1
-            exp_continue
-         }
-        "Pktgen>"
-    }
+    expect -re "(..*)"
+    set result "${result}$expect_out(0,string)"
+    set timeout 1
     set count [expr $count-1]
 }
 send "stop 0\n"
@@ -136,7 +137,7 @@ EOF
 run_pktgen()
 {
     blacklist=$(lspci |grep Eth |awk '{print $1}'|head -1)
-    cd /pktgen-dpdk
+    cd /opt/tempT/$PKTGEN_VERSION
     touch /home/ubuntu/result.log
     result_log="/home/ubuntu/result.log"
     sudo expect /home/ubuntu/pktgen.exp $blacklist $result_log
@@ -153,4 +154,3 @@ main()
 }
 
 main
-
index 247a8a8..30b63a7 100644 (file)
@@ -13,6 +13,10 @@ set -e
 
 # Commandline arguments
 DST_MAC=$1         # MAC address of the peer port
+ETH1=$2
+ETH2=$3
+
+DPDK_VERSION="dpdk-17.02"
 
 load_modules()
 {
@@ -25,13 +29,13 @@ load_modules()
     if lsmod | grep "igb_uio" &> /dev/null ; then
     echo "igb_uio module is loaded"
     else
-    insmod /dpdk/x86_64-native-linuxapp-gcc/kmod/igb_uio.ko
+    insmod /opt/tempT/$DPDK_VERSION/x86_64-native-linuxapp-gcc/kmod/igb_uio.ko
     fi
 
     if lsmod | grep "rte_kni" &> /dev/null ; then
     echo "rte_kni module is loaded"
     else
-    insmod /dpdk/x86_64-native-linuxapp-gcc/kmod/rte_kni.ko
+    insmod /opt/tempT/$DPDK_VERSION/x86_64-native-linuxapp-gcc/kmod/rte_kni.ko
     fi
 }
 
@@ -42,15 +46,17 @@ change_permissions()
 }
 
 add_interface_to_dpdk(){
+    ip link set $ETH1 down
+    ip link set $ETH2 down
     interfaces=$(lspci |grep Eth |tail -n +2 |awk '{print $1}')
-    /dpdk/tools/dpdk-devbind.py --bind=igb_uio $interfaces
+    /opt/tempT/$DPDK_VERSION/usertools/dpdk-devbind.py --bind=igb_uio $interfaces
 }
 
 run_testpmd()
 {
     blacklist=$(lspci |grep Eth |awk '{print $1}'|head -1)
-    cd /dpdk
-    sudo ./destdir/bin/testpmd -c 0x07 -n 4 -b $blacklist -- -a --eth-peer=1,$DST_MAC --forward-mode=mac
+    cd /opt/tempT/$DPDK_VERSION/x86_64-native-linuxapp-gcc/app
+    sudo ./testpmd -c 0x07 -n 4 -b $blacklist -- -a --eth-peer=1,$DST_MAC --forward-mode=mac
 }
 
 main()
index 0e47852..be2fa3f 100644 (file)
@@ -14,6 +14,7 @@
 
 import copy
 import logging
+import time
 
 import ipaddress
 from itertools import chain
@@ -484,3 +485,11 @@ class NetworkServiceTestCase(scenario_base.Scenario):
             # https://bugs.python.org/issue9400
             LOG.exception("")
             raise RuntimeError("Error in teardown")
+
+    def pre_run_wait_time(self, time_seconds):
+        """Time waited before executing the run method"""
+        time.sleep(time_seconds)
+
+    def post_run_wait_time(self, time_seconds):
+        """Time waited after executing the run method"""
+        pass
index a3488a2..c6379e5 100644 (file)
@@ -25,6 +25,7 @@ class TaskCommands(object):     # pragma: no cover
 
        Set of commands to manage benchmark tasks.
        """
+    EXIT_TEST_FAILED = 2
 
     @cliargs("inputfile", type=str, help="path to task or suite file", nargs=1)
     @cliargs("--task-args", dest="task_args",
@@ -48,18 +49,20 @@ class TaskCommands(object):     # pragma: no cover
         param = change_osloobj_to_paras(args)
         self.output_file = param.output_file
 
-        result = {}
         LOG.info('Task START')
         try:
             result = Task().start(param, **kwargs)
         except Exception as e:  # pylint: disable=broad-except
             self._write_error_data(e)
-
-        if result.get('result', {}).get('criteria') == 'PASS':
-            LOG.info('Task SUCCESS')
-        else:
             LOG.info('Task FAILED')
-            raise RuntimeError('Task Failed')
+            raise
+        else:
+            if result.get('result', {}).get('criteria') == 'PASS':
+                LOG.info('Task SUCCESS')
+            else:
+                LOG.info('Task FAILED')
+                # exit without backtrace
+                raise SystemExit(self.EXIT_TEST_FAILED)
 
     def _write_error_data(self, error):
         data = {'status': 2, 'result': str(error)}
index 38d2dd7..ca5a110 100644 (file)
@@ -514,7 +514,7 @@ class AnsibleCommon(object):
         parser.add_section('defaults')
         parser.set('defaults', 'host_key_checking', 'False')
 
-        cfg_path = os.path.join(directory, 'setup.cfg')
+        cfg_path = os.path.join(directory, 'ansible.cfg')
         with open(cfg_path, 'w') as f:
             parser.write(f)
 
index 153bd4b..8640afb 100644 (file)
@@ -152,3 +152,6 @@ IS_PUBLIC = 'is_public'
 # general
 TESTCASE_PRE = 'opnfv_yardstick_'
 TESTSUITE_PRE = 'opnfv_'
+
+# OpenStack cloud default config parameters
+OS_CLOUD_DEFAULT_CONFIG = {'verify': False}
index 8160c5b..0492094 100644 (file)
@@ -54,6 +54,10 @@ class YardstickException(Exception):
         return False
 
 
+class ResourceCommandError(YardstickException):
+    message = 'Command: "%(command)s" Failed, stderr: "%(stderr)s"'
+
+
 class FunctionNotImplemented(YardstickException):
     message = ('The function "%(function_name)s" is not implemented in '
                '"%(class_name)" class.')
@@ -64,6 +68,11 @@ class YardstickBannedModuleImported(YardstickException):
     message = 'Module "%(module)s" cannnot be imported. Reason: "%(reason)s"'
 
 
+class PayloadMissingAttributes(YardstickException):
+    message = ('Error instantiating a Payload class, missing attributes: '
+               '%(missing_attributes)s')
+
+
 class HeatTemplateError(YardstickException):
     """Error in Heat during the stack deployment"""
     message = ('Error in Heat during the creation of the OpenStack stack '
@@ -82,6 +91,47 @@ class DPDKSetupDriverError(YardstickException):
     message = '"igb_uio" driver is not loaded'
 
 
+class OVSUnsupportedVersion(YardstickException):
+    message = ('Unsupported OVS version "%(ovs_version)s". Please check the '
+               'config. OVS to DPDK version map: %(ovs_to_dpdk_map)s.')
+
+
+class OVSHugepagesInfoError(YardstickException):
+    message = 'MemInfo cannot be retrieved.'
+
+
+class OVSHugepagesNotConfigured(YardstickException):
+    message = 'HugePages are not configured in this system.'
+
+
+class OVSHugepagesZeroFree(YardstickException):
+    message = ('There are no HugePages free in this system. Total HugePages '
+               'configured: %(total_hugepages)s')
+
+
+class OVSDeployError(YardstickException):
+    message = 'OVS deploy tool failed with error: %(stderr)s.'
+
+
+class OVSSetupError(YardstickException):
+    message = 'OVS setup error. Command: %(command)s. Error: %(error)s.'
+
+
+class LibvirtCreateError(YardstickException):
+    message = 'Error creating the virtual machine. Error: %(error)s.'
+
+
+class LibvirtQemuImageBaseImageNotPresent(YardstickException):
+    message = ('Error creating the qemu image for %(vm_image)s. Base image: '
+               '%(base_image)s. Base image not present in execution host or '
+               'remote host.')
+
+
+class LibvirtQemuImageCreateError(YardstickException):
+    message = ('Error creating the qemu image for %(vm_image)s. Base image: '
+               '%(base_image)s. Error: %(error)s.')
+
+
 class ScenarioConfigContextNameNotFound(YardstickException):
     message = 'Context name "%(context_name)s" not found'
 
@@ -102,6 +152,14 @@ class TaskRenderError(YardstickException):
     message = 'Failed to render template:\n%(input_task)s'
 
 
+class TimerTimeout(YardstickException):
+    message = 'Timer timeout expired, %(timeout)s seconds'
+
+
+class WaitTimeout(YardstickException):
+    message = 'Wait timeout while waiting for condition'
+
+
 class ScenarioCreateNetworkError(YardstickException):
     message = 'Create Neutron Network Scenario failed'
 
@@ -124,3 +182,51 @@ class UnsupportedPodFormatError(YardstickException):
 
 class ScenarioCreateRouterError(YardstickException):
     message = 'Create Neutron Router Scenario failed'
+
+
+class ScenarioRemoveRouterIntError(YardstickException):
+    message = 'Remove Neutron Router Interface Scenario failed'
+
+
+class ScenarioCreateFloatingIPError(YardstickException):
+    message = 'Create Neutron Floating IP Scenario failed'
+
+
+class ScenarioDeleteFloatingIPError(YardstickException):
+    message = 'Delete Neutron Floating IP Scenario failed'
+
+
+class ScenarioCreateSecurityGroupError(YardstickException):
+    message = 'Create Neutron Security Group Scenario failed'
+
+
+class ScenarioDeleteNetworkError(YardstickException):
+    message = 'Delete Neutron Network Scenario failed'
+
+
+class ScenarioCreateServerError(YardstickException):
+    message = 'Nova Create Server Scenario failed'
+
+
+class ScenarioDeleteServerError(YardstickException):
+    message = 'Delete Server Scenario failed'
+
+
+class ScenarioCreateKeypairError(YardstickException):
+    message = 'Nova Create Keypair Scenario failed'
+
+
+class ScenarioDeleteKeypairError(YardstickException):
+    message = 'Nova Delete Keypair Scenario failed'
+
+
+class ScenarioAttachVolumeError(YardstickException):
+    message = 'Nova Attach Volume Scenario failed'
+
+
+class ScenarioGetServerError(YardstickException):
+    message = 'Nova Get Server Scenario failed'
+
+
+class ScenarioGetFlavorError(YardstickException):
+    message = 'Nova Get Flavor Scenario failed'
diff --git a/yardstick/common/messaging/__init__.py b/yardstick/common/messaging/__init__.py
new file mode 100644 (file)
index 0000000..f0f012e
--- /dev/null
@@ -0,0 +1,36 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# MQ is statically configured:
+#   - MQ service: RabbitMQ
+#   - user/password: yardstick/yardstick
+#   - host:port: localhost:5672
+MQ_USER = 'yardstick'
+MQ_PASS = 'yardstick'
+MQ_SERVICE = 'rabbit'
+SERVER = 'localhost'
+PORT = 5672
+TRANSPORT_URL = (MQ_SERVICE + '://' + MQ_USER + ':' + MQ_PASS + '@' + SERVER +
+                 ':' + str(PORT) + '/')
+
+# RPC server.
+RPC_SERVER_EXECUTOR = 'threading'
+
+# Topics.
+RUNNER = 'runner'
+
+# Methods.
+# RUNNER methods:
+RUNNER_INFO = 'runner_info'
+RUNNER_LOOP = 'runner_loop'
diff --git a/yardstick/common/messaging/consumer.py b/yardstick/common/messaging/consumer.py
new file mode 100644 (file)
index 0000000..24ec6f1
--- /dev/null
@@ -0,0 +1,85 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import abc
+import logging
+
+from oslo_config import cfg
+import oslo_messaging
+import six
+
+from yardstick.common import messaging
+
+
+LOG = logging.getLogger(__name__)
+
+
+@six.add_metaclass(abc.ABCMeta)
+class NotificationHandler(object):
+    """Abstract class to define a endpoint object for a MessagingConsumer"""
+
+    def __init__(self, _id, ctx_pids, queue):
+        self._id = _id
+        self._ctx_pids = ctx_pids
+        self._queue = queue
+
+
+@six.add_metaclass(abc.ABCMeta)
+class MessagingConsumer(object):
+    """Abstract class to implement a MQ consumer
+
+    This abstract class allows a class implementing this interface to receive
+    the messages published by a `MessagingNotifier`.
+    """
+
+    def __init__(self, topic, pids, endpoints, fanout=True):
+        """Init function.
+
+        :param topic: (string) MQ exchange topic
+        :param pids: (list of int) list of PIDs of the processes implementing
+                     the MQ Notifier which will be in the message context
+        :param endpoints: (list of class) list of classes implementing the
+                          methods (see `MessagingNotifier.send_message) used by
+                          the Notifier
+        :param fanout: (bool) MQ clients may request that a copy of the message
+                       be delivered to all servers listening on a topic by
+                       setting fanout to ``True``, rather than just one of them
+        :returns: `MessagingConsumer` class object
+        """
+
+        self._pids = pids
+        self._endpoints = endpoints
+        self._transport = oslo_messaging.get_rpc_transport(
+            cfg.CONF, url=messaging.TRANSPORT_URL)
+        self._target = oslo_messaging.Target(topic=topic, fanout=fanout,
+                                             server=messaging.SERVER)
+        self._server = oslo_messaging.get_rpc_server(
+            self._transport, self._target, self._endpoints,
+            executor=messaging.RPC_SERVER_EXECUTOR,
+            access_policy=oslo_messaging.DefaultRPCAccessPolicy)
+
+    def start_rpc_server(self):
+        """Start the RPC server."""
+        if self._server:
+            self._server.start()
+
+    def stop_rpc_server(self):
+        """Stop the RPC server."""
+        if self._server:
+            self._server.stop()
+
+    def wait(self):
+        """Wait for message processing to complete (blocking)."""
+        if self._server:
+            self._server.wait()
diff --git a/yardstick/common/messaging/payloads.py b/yardstick/common/messaging/payloads.py
new file mode 100644 (file)
index 0000000..d29d798
--- /dev/null
@@ -0,0 +1,53 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import abc
+
+import six
+
+from yardstick.common import exceptions
+
+
+@six.add_metaclass(abc.ABCMeta)
+class Payload(object):
+    """Base Payload class to transfer data through the MQ service"""
+
+    REQUIRED_FIELDS = {'version'}
+
+    def __init__(self, **kwargs):
+        """Init method
+
+        :param kwargs: (dictionary) attributes and values of the object
+        :returns: Payload object
+        """
+
+        if not all(req_field in kwargs for req_field in self.REQUIRED_FIELDS):
+            _attrs = set(kwargs) - self.REQUIRED_FIELDS
+            missing_attributes = ', '.join(str(_attr) for _attr in _attrs)
+            raise exceptions.PayloadMissingAttributes(
+                missing_attributes=missing_attributes)
+
+        for name, value in kwargs.items():
+            setattr(self, name, value)
+
+        self._fields = set(kwargs.keys())
+
+    def obj_to_dict(self):
+        """Returns a dictionary with the attributes of the object"""
+        return {field: getattr(self, field) for field in self._fields}
+
+    @classmethod
+    def dict_to_obj(cls, _dict):
+        """Returns a Payload object built from the dictionary elements"""
+        return cls(**_dict)
diff --git a/yardstick/common/messaging/producer.py b/yardstick/common/messaging/producer.py
new file mode 100644 (file)
index 0000000..b6adc0c
--- /dev/null
@@ -0,0 +1,70 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import abc
+import logging
+import os
+
+from oslo_config import cfg
+import oslo_messaging
+import six
+
+from yardstick.common import messaging
+
+
+LOG = logging.getLogger(__name__)
+
+
+@six.add_metaclass(abc.ABCMeta)
+class MessagingProducer(object):
+    """Abstract class to implement a MQ producer
+
+    This abstract class allows a class implementing this interface to publish
+    messages in a message queue.
+    """
+
+    def __init__(self, topic, pid=os.getpid(), fanout=True):
+        """Init function.
+
+        :param topic: (string) MQ exchange topic
+        :param pid: (int) PID of the process implementing this MQ Notifier
+        :param fanout: (bool) MQ clients may request that a copy of the message
+                       be delivered to all servers listening on a topic by
+                       setting fanout to ``True``, rather than just one of them
+        :returns: `MessagingNotifier` class object
+        """
+        self._topic = topic
+        self._pid = pid
+        self._fanout = fanout
+        self._transport = oslo_messaging.get_rpc_transport(
+            cfg.CONF, url=messaging.TRANSPORT_URL)
+        self._target = oslo_messaging.Target(topic=topic, fanout=fanout,
+                                             server=messaging.SERVER)
+        self._notifier = oslo_messaging.RPCClient(self._transport,
+                                                  self._target)
+
+    def send_message(self, method, payload):
+        """Send a cast message, that will invoke a method without blocking.
+
+        The cast() method is used to invoke an RPC method that does not return
+        a value.  cast() RPC requests may be broadcast to all Servers listening
+        on a given topic by setting the fanout Target property to ``True``.
+
+        :param method: (string) method name that must be implemented in the
+                       consumer endpoints
+        :param payload: (subclass `Payload`) payload content
+        """
+        self._notifier.cast({'pid': self._pid},
+                            method,
+                            **payload.obj_to_dict())
index a4fd4e5..53f0ccc 100644 (file)
@@ -7,20 +7,21 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 
+import copy
+import logging
 import os
-import time
 import sys
-import logging
 
+from cinderclient import client as cinderclient
+from novaclient import client as novaclient
+from glanceclient import client as glanceclient
 from keystoneauth1 import loading
 from keystoneauth1 import session
+from neutronclient.neutron import client as neutronclient
 import shade
 from shade import exc
 
-from cinderclient import client as cinderclient
-from novaclient import client as novaclient
-from glanceclient import client as glanceclient
-from neutronclient.neutron import client as neutronclient
+from yardstick.common import constants
 
 
 log = logging.getLogger(__name__)
@@ -156,204 +157,205 @@ def get_glance_client():    # pragma: no cover
     return glanceclient.Client(get_glance_client_version(), session=sess)
 
 
-def get_shade_client():
-    return shade.openstack_cloud()
+def get_shade_client(**os_cloud_config):
+    """Get Shade OpenStack cloud client
+
+    By default, the input parameters given to "shade.openstack_cloud" method
+    are stored in "constants.OS_CLOUD_DEFAULT_CONFIG". The input parameters
+    passed in this function, "os_cloud_config", will overwrite the default
+    ones.
+
+    :param os_cloud_config: (kwargs) input arguments for
+                            "shade.openstack_cloud" method.
+    :return: ``shade.OpenStackCloud`` object.
+    """
+    params = copy.deepcopy(constants.OS_CLOUD_DEFAULT_CONFIG)
+    params.update(os_cloud_config)
+    return shade.openstack_cloud(**params)
 
 
 # *********************************************
 #   NOVA
 # *********************************************
-def get_instances(nova_client):
-    try:
-        return nova_client.servers.list(search_opts={'all_tenants': 1})
-    except Exception:  # pylint: disable=broad-except
-        log.exception("Error [get_instances(nova_client)]")
-
-
-def get_instance_status(nova_client, instance):     # pragma: no cover
-    try:
-        return nova_client.servers.get(instance.id).status
-    except Exception:  # pylint: disable=broad-except
-        log.exception("Error [get_instance_status(nova_client)]")
+def create_keypair(shade_client, name, public_key=None):
+    """Create a new keypair.
 
+    :param name: Name of the keypair being created.
+    :param public_key: Public key for the new keypair.
 
-def get_instance_by_name(nova_client, instance_name):   # pragma: no cover
-    try:
-        return nova_client.servers.find(name=instance_name)
-    except Exception:  # pylint: disable=broad-except
-        log.exception("Error [get_instance_by_name(nova_client, '%s')]",
-                      instance_name)
-
-
-def get_aggregates(nova_client):    # pragma: no cover
-    try:
-        return nova_client.aggregates.list()
-    except Exception:  # pylint: disable=broad-except
-        log.exception("Error [get_aggregates(nova_client)]")
-
-
-def get_availability_zones(nova_client):    # pragma: no cover
+    :return: Created keypair.
+    """
     try:
-        return nova_client.availability_zones.list()
-    except Exception:  # pylint: disable=broad-except
-        log.exception("Error [get_availability_zones(nova_client)]")
+        return shade_client.create_keypair(name, public_key=public_key)
+    except exc.OpenStackCloudException as o_exc:
+        log.error("Error [create_keypair(shade_client)]. "
+                  "Exception message, '%s'", o_exc.orig_message)
 
 
-def get_availability_zone_names(nova_client):   # pragma: no cover
+def create_instance_and_wait_for_active(shade_client, name, image,
+                                        flavor, auto_ip=True, ips=None,
+                                        ip_pool=None, root_volume=None,
+                                        terminate_volume=False, wait=True,
+                                        timeout=180, reuse_ips=True,
+                                        network=None, boot_from_volume=False,
+                                        volume_size='20', boot_volume=None,
+                                        volumes=None, nat_destination=None,
+                                        **kwargs):
+    """Create a virtual server instance.
+
+    :param name:(string) Name of the server.
+    :param image:(dict) Image dict, name or ID to boot with. Image is required
+                 unless boot_volume is given.
+    :param flavor:(dict) Flavor dict, name or ID to boot onto.
+    :param auto_ip: Whether to take actions to find a routable IP for
+                    the server.
+    :param ips: List of IPs to attach to the server.
+    :param ip_pool:(string) Name of the network or floating IP pool to get an
+                   address from.
+    :param root_volume:(string) Name or ID of a volume to boot from.
+                       (defaults to None - deprecated, use boot_volume)
+    :param boot_volume:(string) Name or ID of a volume to boot from.
+    :param terminate_volume:(bool) If booting from a volume, whether it should
+                            be deleted when the server is destroyed.
+    :param volumes:(optional) A list of volumes to attach to the server.
+    :param wait:(optional) Wait for the address to appear as assigned to the server.
+    :param timeout: Seconds to wait, defaults to 180.
+    :param reuse_ips:(bool)Whether to attempt to reuse pre-existing
+                     floating ips should a floating IP be needed.
+    :param network:(dict) Network dict or name or ID to attach the server to.
+                   Mutually exclusive with the nics parameter. Can also be be
+                   a list of network names or IDs or network dicts.
+    :param boot_from_volume:(bool) Whether to boot from volume. 'boot_volume'
+                            implies True, but boot_from_volume=True with
+                            no boot_volume is valid and will create a
+                            volume from the image and use that.
+    :param volume_size: When booting an image from volume, how big should
+                        the created volume be?
+    :param nat_destination: Which network should a created floating IP
+                            be attached to, if it's not possible to infer from
+                            the cloud's configuration.
+    :param meta:(optional) A dict of arbitrary key/value metadata to store for
+                this server. Both keys and values must be <=255 characters.
+    :param reservation_id: A UUID for the set of servers being requested.
+    :param min_count:(optional extension) The minimum number of servers to
+                     launch.
+    :param max_count:(optional extension) The maximum number of servers to
+                     launch.
+    :param security_groups: A list of security group names.
+    :param userdata: User data to pass to be exposed by the metadata server
+                     this can be a file type object as well or a string.
+    :param key_name:(optional extension) Name of previously created keypair to
+                    inject into the instance.
+    :param availability_zone: Name of the availability zone for instance
+                              placement.
+    :param block_device_mapping:(optional) A dict of block device mappings for
+                                this server.
+    :param block_device_mapping_v2:(optional) A dict of block device mappings
+                                   for this server.
+    :param nics:(optional extension) An ordered list of nics to be added to
+                 this server, with information about connected networks, fixed
+                 IPs, port etc.
+    :param scheduler_hints:(optional extension) Arbitrary key-value pairs
+                           specified by the client to help boot an instance.
+    :param config_drive:(optional extension) Value for config drive either
+                         boolean, or volume-id.
+    :param disk_config:(optional extension) Control how the disk is partitioned
+                       when the server is created. Possible values are 'AUTO'
+                       or 'MANUAL'.
+    :param admin_pass:(optional extension) Add a user supplied admin password.
+
+    :returns: The created server.
+    """
     try:
-        return [az.zoneName for az in get_availability_zones(nova_client)]
-    except Exception:  # pylint: disable=broad-except
-        log.exception("Error [get_availability_zone_names(nova_client)]")
+        return shade_client.create_server(
+            name, image, flavor, auto_ip=auto_ip, ips=ips, ip_pool=ip_pool,
+            root_volume=root_volume, terminate_volume=terminate_volume,
+            wait=wait, timeout=timeout, reuse_ips=reuse_ips, network=network,
+            boot_from_volume=boot_from_volume, volume_size=volume_size,
+            boot_volume=boot_volume, volumes=volumes,
+            nat_destination=nat_destination, **kwargs)
+    except exc.OpenStackCloudException as o_exc:
+        log.error("Error [create_instance(shade_client)]. "
+                  "Exception message, '%s'", o_exc.orig_message)
 
 
-def create_aggregate(nova_client, aggregate_name, av_zone):  # pragma: no cover
-    try:
-        nova_client.aggregates.create(aggregate_name, av_zone)
-    except Exception:  # pylint: disable=broad-except
-        log.exception("Error [create_aggregate(nova_client, %s, %s)]",
-                      aggregate_name, av_zone)
-        return False
-    else:
-        return True
+def attach_volume_to_server(shade_client, server_name_or_id, volume_name_or_id,
+                            device=None, wait=True, timeout=None):
+    """Attach a volume to a server.
 
+    This will attach a volume, described by the passed in volume
+    dict, to the server described by the passed in server dict on the named
+    device on the server.
 
-def get_aggregate_id(nova_client, aggregate_name):      # pragma: no cover
-    try:
-        aggregates = get_aggregates(nova_client)
-        _id = next((ag.id for ag in aggregates if ag.name == aggregate_name))
-    except Exception:  # pylint: disable=broad-except
-        log.exception("Error [get_aggregate_id(nova_client, %s)]",
-                      aggregate_name)
-    else:
-        return _id
+    If the volume is already attached to the server, or generally not
+    available, then an exception is raised. To re-attach to a server,
+    but under a different device, the user must detach it first.
 
+    :param server_name_or_id:(string) The server name or id to attach to.
+    :param volume_name_or_id:(string) The volume name or id to attach.
+    :param device:(string) The device name where the volume will attach.
+    :param wait:(bool) If true, waits for volume to be attached.
+    :param timeout: Seconds to wait for volume attachment. None is forever.
 
-def add_host_to_aggregate(nova_client, aggregate_name,
-                          compute_host):    # pragma: no cover
+    :returns: True if attached successful, False otherwise.
+    """
     try:
-        aggregate_id = get_aggregate_id(nova_client, aggregate_name)
-        nova_client.aggregates.add_host(aggregate_id, compute_host)
-    except Exception:  # pylint: disable=broad-except
-        log.exception("Error [add_host_to_aggregate(nova_client, %s, %s)]",
-                      aggregate_name, compute_host)
-        return False
-    else:
+        server = shade_client.get_server(name_or_id=server_name_or_id)
+        volume = shade_client.get_volume(volume_name_or_id)
+        shade_client.attach_volume(
+            server, volume, device=device, wait=wait, timeout=timeout)
         return True
-
-
-def create_aggregate_with_host(nova_client, aggregate_name, av_zone,
-                               compute_host):    # pragma: no cover
-    try:
-        create_aggregate(nova_client, aggregate_name, av_zone)
-        add_host_to_aggregate(nova_client, aggregate_name, compute_host)
-    except Exception:  # pylint: disable=broad-except
-        log.exception("Error [create_aggregate_with_host("
-                      "nova_client, %s, %s, %s)]",
-                      aggregate_name, av_zone, compute_host)
+    except exc.OpenStackCloudException as o_exc:
+        log.error("Error [attach_volume_to_server(shade_client)]. "
+                  "Exception message: %s", o_exc.orig_message)
         return False
-    else:
-        return True
 
 
-def create_keypair(name, key_path=None):    # pragma: no cover
-    try:
-        with open(key_path) as fpubkey:
-            keypair = get_nova_client().keypairs.create(
-                name=name, public_key=fpubkey.read())
-            return keypair
-    except Exception:  # pylint: disable=broad-except
-        log.exception("Error [create_keypair(nova_client)]")
-
-
-def create_instance(json_body):    # pragma: no cover
-    try:
-        return get_nova_client().servers.create(**json_body)
-    except Exception:  # pylint: disable=broad-except
-        log.exception("Error create instance failed")
-        return None
-
-
-def create_instance_and_wait_for_active(json_body):    # pragma: no cover
-    SLEEP = 3
-    VM_BOOT_TIMEOUT = 180
-    nova_client = get_nova_client()
-    instance = create_instance(json_body)
-    for _ in range(int(VM_BOOT_TIMEOUT / SLEEP)):
-        status = get_instance_status(nova_client, instance)
-        if status.lower() == "active":
-            return instance
-        elif status.lower() == "error":
-            log.error("The instance went to ERROR status.")
-            return None
-        time.sleep(SLEEP)
-    log.error("Timeout booting the instance.")
-    return None
-
-
-def attach_server_volume(server_id, volume_id,
-                         device=None):    # pragma: no cover
-    try:
-        get_nova_client().volumes.create_server_volume(server_id,
-                                                       volume_id, device)
-    except Exception:  # pylint: disable=broad-except
-        log.exception("Error [attach_server_volume(nova_client, '%s', '%s')]",
-                      server_id, volume_id)
-        return False
-    else:
-        return True
-
+def delete_instance(shade_client, name_or_id, wait=False, timeout=180,
+                    delete_ips=False, delete_ip_retry=1):
+    """Delete a server instance.
 
-def delete_instance(nova_client, instance_id):      # pragma: no cover
-    try:
-        nova_client.servers.force_delete(instance_id)
-    except Exception:  # pylint: disable=broad-except
-        log.exception("Error [delete_instance(nova_client, '%s')]",
-                      instance_id)
-        return False
-    else:
-        return True
-
-
-def remove_host_from_aggregate(nova_client, aggregate_name,
-                               compute_host):  # pragma: no cover
+    :param name_or_id: name or ID of the server to delete
+    :param wait:(bool) If true, waits for server to be deleted.
+    :param timeout:(int) Seconds to wait for server deletion.
+    :param delete_ips:(bool) If true, deletes any floating IPs associated with
+                      the instance.
+    :param delete_ip_retry:(int) Number of times to retry deleting
+                           any floating ips, should the first try be
+                           unsuccessful.
+    :returns: True if delete succeeded, False otherwise.
+    """
     try:
-        aggregate_id = get_aggregate_id(nova_client, aggregate_name)
-        nova_client.aggregates.remove_host(aggregate_id, compute_host)
-    except Exception:  # pylint: disable=broad-except
-        log.exception("Error remove_host_from_aggregate(nova_client, %s, %s)",
-                      aggregate_name, compute_host)
+        return shade_client.delete_server(
+            name_or_id, wait=wait, timeout=timeout, delete_ips=delete_ips,
+            delete_ip_retry=delete_ip_retry)
+    except exc.OpenStackCloudException as o_exc:
+        log.error("Error [delete_instance(shade_client, '%s')]. "
+                  "Exception message: %s", name_or_id,
+                  o_exc.orig_message)
         return False
-    else:
-        return True
 
 
-def remove_hosts_from_aggregate(nova_client,
-                                aggregate_name):   # pragma: no cover
-    aggregate_id = get_aggregate_id(nova_client, aggregate_name)
-    hosts = nova_client.aggregates.get(aggregate_id).hosts
-    assert(
-        all(remove_host_from_aggregate(nova_client, aggregate_name, host)
-            for host in hosts))
+def get_server(shade_client, name_or_id=None, filters=None, detailed=False,
+               bare=False):
+    """Get a server by name or ID.
 
+    :param name_or_id: Name or ID of the server.
+    :param filters:(dict) A dictionary of meta data to use for further
+                   filtering.
+    :param detailed:(bool) Whether or not to add detailed additional
+                    information.
+    :param bare:(bool) Whether to skip adding any additional information to the
+                server record.
 
-def delete_aggregate(nova_client, aggregate_name):  # pragma: no cover
-    try:
-        remove_hosts_from_aggregate(nova_client, aggregate_name)
-        nova_client.aggregates.delete(aggregate_name)
-    except Exception:  # pylint: disable=broad-except
-        log.exception("Error [delete_aggregate(nova_client, %s)]",
-                      aggregate_name)
-        return False
-    else:
-        return True
-
-
-def get_server_by_name(name):   # pragma: no cover
+    :returns: A server ``munch.Munch`` or None if no matching server is found.
+    """
     try:
-        return get_nova_client().servers.list(search_opts={'name': name})[0]
-    except IndexError:
-        log.exception('Failed to get nova client')
-        raise
+        return shade_client.get_server(name_or_id=name_or_id, filters=filters,
+                                       detailed=detailed, bare=bare)
+    except exc.OpenStackCloudException as o_exc:
+        log.error("Error [get_server(shade_client, '%s')]. "
+                  "Exception message: %s", name_or_id, o_exc.orig_message)
 
 
 def create_flavor(name, ram, vcpus, disk, **kwargs):   # pragma: no cover
@@ -366,14 +368,6 @@ def create_flavor(name, ram, vcpus, disk, **kwargs):   # pragma: no cover
         return None
 
 
-def get_image_by_name(name):    # pragma: no cover
-    images = get_nova_client().images.list()
-    try:
-        return next((a for a in images if a.name == name))
-    except StopIteration:
-        log.exception('No image matched')
-
-
 def get_flavor_id(nova_client, flavor_name):    # pragma: no cover
     flavors = nova_client.flavors.list(detailed=True)
     flavor_id = ''
@@ -384,27 +378,22 @@ def get_flavor_id(nova_client, flavor_name):    # pragma: no cover
     return flavor_id
 
 
-def get_flavor_by_name(name):   # pragma: no cover
-    flavors = get_nova_client().flavors.list()
-    try:
-        return next((a for a in flavors if a.name == name))
-    except StopIteration:
-        log.exception('No flavor matched')
-
+def get_flavor(shade_client, name_or_id, filters=None, get_extra=True):
+    """Get a flavor by name or ID.
 
-def check_status(status, name, iterations, interval):   # pragma: no cover
-    for _ in range(iterations):
-        try:
-            server = get_server_by_name(name)
-        except IndexError:
-            log.error('Cannot found %s server', name)
-            raise
+    :param name_or_id: Name or ID of the flavor.
+    :param filters: A dictionary of meta data to use for further filtering.
+    :param get_extra: Whether or not the list_flavors call should get the extra
+    flavor specs.
 
-        if server.status == status:
-            return True
-
-        time.sleep(interval)
-    return False
+    :returns: A flavor ``munch.Munch`` or None if no matching flavor is found.
+    """
+    try:
+        return shade_client.get_flavor(name_or_id, filters=filters,
+                                       get_extra=get_extra)
+    except exc.OpenStackCloudException as o_exc:
+        log.error("Error [get_flavor(shade_client, '%s')]. "
+                  "Exception message: %s", name_or_id, o_exc.orig_message)
 
 
 def delete_flavor(flavor_id):    # pragma: no cover
@@ -417,24 +406,24 @@ def delete_flavor(flavor_id):    # pragma: no cover
         return True
 
 
-def delete_keypair(nova_client, key):     # pragma: no cover
+def delete_keypair(shade_client, name):
+    """Delete a keypair.
+
+    :param name: Name of the keypair to delete.
+
+    :returns: True if delete succeeded, False otherwise.
+    """
     try:
-        nova_client.keypairs.delete(key=key)
-        return True
-    except Exception:  # pylint: disable=broad-except
-        log.exception("Error [delete_keypair(nova_client)]")
+        return shade_client.delete_keypair(name)
+    except exc.OpenStackCloudException as o_exc:
+        log.error("Error [delete_neutron_router(shade_client, '%s')]. "
+                  "Exception message: %s", name, o_exc.orig_message)
         return False
 
 
 # *********************************************
 #   NEUTRON
 # *********************************************
-def get_network_id(shade_client, network_name):
-    networks = shade_client.list_networks({'name': network_name})
-    if networks:
-        return networks[0]['id']
-
-
 def create_neutron_net(shade_client, network_name, shared=False,
                        admin_state_up=True, external=False, provider=None,
                        project_id=None):
@@ -563,151 +552,174 @@ def remove_gateway_router(neutron_client, router_id):      # pragma: no cover
         return False
 
 
-def remove_interface_router(neutron_client, router_id, subnet_id,
-                            **json_body):      # pragma: no cover
-    json_body.update({"subnet_id": subnet_id})
-    try:
-        neutron_client.remove_interface_router(router=router_id,
-                                               body=json_body)
-        return True
-    except Exception:  # pylint: disable=broad-except
-        log.error("Error [remove_interface_router(neutron_client, '%s', "
-                  "'%s')]", router_id, subnet_id)
-        return False
+def remove_router_interface(shade_client, router, subnet_id=None,
+                            port_id=None):
+    """Detach a subnet from an internal router interface.
 
+    At least one of subnet_id or port_id must be supplied. If you specify both
+    subnet and port ID, the subnet ID must correspond to the subnet ID of the
+    first IP address on the port specified by the port ID.
+    Otherwise an error occurs.
 
-def create_floating_ip(neutron_client, extnet_id):      # pragma: no cover
-    props = {'floating_network_id': extnet_id}
-    try:
-        ip_json = neutron_client.create_floatingip({'floatingip': props})
-        fip_addr = ip_json['floatingip']['floating_ip_address']
-        fip_id = ip_json['floatingip']['id']
-    except Exception:  # pylint: disable=broad-except
-        log.error("Error [create_floating_ip(neutron_client)]")
-        return None
-    return {'fip_addr': fip_addr, 'fip_id': fip_id}
-
-
-def delete_floating_ip(nova_client, floatingip_id):      # pragma: no cover
+    :param router: The dict object of the router being changed
+    :param subnet_id:(string) The ID of the subnet to use for the interface
+    :param port_id:(string) The ID of the port to use for the interface
+    :returns: True on success
+    """
     try:
-        nova_client.floating_ips.delete(floatingip_id)
+        shade_client.remove_router_interface(
+            router, subnet_id=subnet_id, port_id=port_id)
         return True
-    except Exception:  # pylint: disable=broad-except
-        log.error("Error [delete_floating_ip(nova_client, '%s')]",
-                  floatingip_id)
+    except exc.OpenStackCloudException as o_exc:
+        log.error("Error [remove_interface_router(shade_client)]. "
+                  "Exception message: %s", o_exc.orig_message)
         return False
 
 
-def get_security_groups(neutron_client):      # pragma: no cover
+def create_floating_ip(shade_client, network_name_or_id=None, server=None,
+                       fixed_address=None, nat_destination=None,
+                       port=None, wait=False, timeout=60):
+    """Allocate a new floating IP from a network or a pool.
+
+    :param network_name_or_id: Name or ID of the network
+                               that the floating IP should come from.
+    :param server: Server dict for the server to create
+                  the IP for and to which it should be attached.
+    :param fixed_address: Fixed IP to attach the floating ip to.
+    :param nat_destination: Name or ID of the network
+                           that the fixed IP to attach the floating
+                           IP to should be on.
+    :param port: The port ID that the floating IP should be
+                attached to. Specifying a port conflicts with specifying a
+                server, fixed_address or nat_destination.
+    :param wait: Whether to wait for the IP to be active. Only applies
+                if a server is provided.
+    :param timeout: How long to wait for the IP to be active. Only
+                   applies if a server is provided.
+
+    :returns: Floating IP id and address
+    """
     try:
-        security_groups = neutron_client.list_security_groups()[
-            'security_groups']
-        return security_groups
-    except Exception:  # pylint: disable=broad-except
-        log.error("Error [get_security_groups(neutron_client)]")
-        return None
-
-
-def get_security_group_id(neutron_client, sg_name):      # pragma: no cover
-    security_groups = get_security_groups(neutron_client)
-    id = ''
-    for sg in security_groups:
-        if sg['name'] == sg_name:
-            id = sg['id']
-            break
-    return id
+        fip = shade_client.create_floating_ip(
+            network=network_name_or_id, server=server,
+            fixed_address=fixed_address, nat_destination=nat_destination,
+            port=port, wait=wait, timeout=timeout)
+        return {'fip_addr': fip['floating_ip_address'], 'fip_id': fip['id']}
+    except exc.OpenStackCloudException as o_exc:
+        log.error("Error [create_floating_ip(shade_client)]. "
+                  "Exception message: %s", o_exc.orig_message)
 
 
-def create_security_group(neutron_client, sg_name,
-                          sg_description):      # pragma: no cover
-    json_body = {'security_group': {'name': sg_name,
-                                    'description': sg_description}}
+def delete_floating_ip(shade_client, floating_ip_id, retry=1):
     try:
-        secgroup = neutron_client.create_security_group(json_body)
-        return secgroup['security_group']
-    except Exception:  # pylint: disable=broad-except
-        log.error("Error [create_security_group(neutron_client, '%s', "
-                  "'%s')]", sg_name, sg_description)
-        return None
+        return shade_client.delete_floating_ip(floating_ip_id=floating_ip_id,
+                                               retry=retry)
+    except exc.OpenStackCloudException as o_exc:
+        log.error("Error [delete_floating_ip(shade_client,'%s')]. "
+                  "Exception message: %s", floating_ip_id, o_exc.orig_message)
+        return False
 
 
-def create_secgroup_rule(neutron_client, sg_id, direction, protocol,
-                         port_range_min=None, port_range_max=None,
-                         **json_body):      # pragma: no cover
-    # We create a security group in 2 steps
-    # 1 - we check the format and set the json body accordingly
-    # 2 - we call neturon client to create the security group
-
-    # Format check
-    json_body.update({'security_group_rule': {'direction': direction,
-                     'security_group_id': sg_id, 'protocol': protocol}})
-    # parameters may be
-    # - both None => we do nothing
-    # - both Not None => we add them to the json description
-    # but one cannot be None is the other is not None
-    if (port_range_min is not None and port_range_max is not None):
-        # add port_range in json description
-        json_body['security_group_rule']['port_range_min'] = port_range_min
-        json_body['security_group_rule']['port_range_max'] = port_range_max
-        log.debug("Security_group format set (port range included)")
-    else:
-        # either both port range are set to None => do nothing
-        # or one is set but not the other => log it and return False
-        if port_range_min is None and port_range_max is None:
-            log.debug("Security_group format set (no port range mentioned)")
-        else:
-            log.error("Bad security group format."
-                      "One of the port range is not properly set:"
-                      "range min: %s, range max: %s", port_range_min,
-                      port_range_max)
-            return False
+def create_security_group_rule(shade_client, secgroup_name_or_id,
+                               port_range_min=None, port_range_max=None,
+                               protocol=None, remote_ip_prefix=None,
+                               remote_group_id=None, direction='ingress',
+                               ethertype='IPv4', project_id=None):
+    """Create a new security group rule
+
+    :param secgroup_name_or_id:(string) The security group name or ID to
+                               associate with this security group rule. If a
+                               non-unique group name is given, an exception is
+                               raised.
+    :param port_range_min:(int) The minimum port number in the range that is
+                          matched by the security group rule. If the protocol
+                          is TCP or UDP, this value must be less than or equal
+                          to the port_range_max attribute value. If nova is
+                          used by the cloud provider for security groups, then
+                          a value of None will be transformed to -1.
+    :param port_range_max:(int) The maximum port number in the range that is
+                          matched by the security group rule. The
+                          port_range_min attribute constrains the
+                          port_range_max attribute. If nova is used by the
+                          cloud provider for security groups, then a value of
+                          None will be transformed to -1.
+    :param protocol:(string) The protocol that is matched by the security group
+                    rule. Valid values are None, tcp, udp, and icmp.
+    :param remote_ip_prefix:(string) The remote IP prefix to be associated with
+                            this security group rule. This attribute matches
+                            the specified IP prefix as the source IP address of
+                            the IP packet.
+    :param remote_group_id:(string) The remote group ID to be associated with
+                           this security group rule.
+    :param direction:(string) Ingress or egress: The direction in which the
+                     security group rule is applied.
+    :param ethertype:(string) Must be IPv4 or IPv6, and addresses represented
+                     in CIDR must match the ingress or egress rules.
+    :param project_id:(string) Specify the project ID this security group will
+                      be created on (admin-only).
+
+    :returns: True on success.
+    """
 
-    # Create security group using neutron client
     try:
-        neutron_client.create_security_group_rule(json_body)
+        shade_client.create_security_group_rule(
+            secgroup_name_or_id, port_range_min=port_range_min,
+            port_range_max=port_range_max, protocol=protocol,
+            remote_ip_prefix=remote_ip_prefix, remote_group_id=remote_group_id,
+            direction=direction, ethertype=ethertype, project_id=project_id)
         return True
-    except Exception:  # pylint: disable=broad-except
-        log.exception("Impossible to create_security_group_rule,"
-                      "security group rule probably already exists")
+    except exc.OpenStackCloudException as op_exc:
+        log.error("Failed to create_security_group_rule(shade_client). "
+                  "Exception message: %s", op_exc.orig_message)
         return False
 
 
-def create_security_group_full(neutron_client, sg_name,
-                               sg_description):      # pragma: no cover
-    sg_id = get_security_group_id(neutron_client, sg_name)
-    if sg_id != '':
+def create_security_group_full(shade_client, sg_name,
+                               sg_description, project_id=None):
+    security_group = shade_client.get_security_group(sg_name)
+
+    if security_group:
         log.info("Using existing security group '%s'...", sg_name)
-    else:
-        log.info("Creating security group  '%s'...", sg_name)
-        SECGROUP = create_security_group(neutron_client,
-                                         sg_name,
-                                         sg_description)
-        if not SECGROUP:
-            log.error("Failed to create the security group...")
-            return None
-
-        sg_id = SECGROUP['id']
-
-        log.debug("Security group '%s' with ID=%s created successfully.",
-                  SECGROUP['name'], sg_id)
-
-        log.debug("Adding ICMP rules in security group '%s'...", sg_name)
-        if not create_secgroup_rule(neutron_client, sg_id,
-                                    'ingress', 'icmp'):
-            log.error("Failed to create the security group rule...")
-            return None
-
-        log.debug("Adding SSH rules in security group '%s'...", sg_name)
-        if not create_secgroup_rule(
-                neutron_client, sg_id, 'ingress', 'tcp', '22', '22'):
-            log.error("Failed to create the security group rule...")
-            return None
-
-        if not create_secgroup_rule(
-                neutron_client, sg_id, 'egress', 'tcp', '22', '22'):
-            log.error("Failed to create the security group rule...")
-            return None
-    return sg_id
+        return security_group['id']
+
+    log.info("Creating security group  '%s'...", sg_name)
+    try:
+        security_group = shade_client.create_security_group(
+            sg_name, sg_description, project_id=project_id)
+    except (exc.OpenStackCloudException,
+            exc.OpenStackCloudUnavailableFeature) as op_exc:
+        log.error("Error [create_security_group(shade_client, %s, %s)]. "
+                  "Exception message: %s", sg_name, sg_description,
+                  op_exc.orig_message)
+        return
+
+    log.debug("Security group '%s' with ID=%s created successfully.",
+              security_group['name'], security_group['id'])
+
+    log.debug("Adding ICMP rules in security group '%s'...", sg_name)
+    if not create_security_group_rule(shade_client, security_group['id'],
+                                      direction='ingress', protocol='icmp'):
+        log.error("Failed to create the security group rule...")
+        shade_client.delete_security_group(sg_name)
+        return
+
+    log.debug("Adding SSH rules in security group '%s'...", sg_name)
+    if not create_security_group_rule(shade_client, security_group['id'],
+                                      direction='ingress', protocol='tcp',
+                                      port_range_min='22',
+                                      port_range_max='22'):
+        log.error("Failed to create the security group rule...")
+        shade_client.delete_security_group(sg_name)
+        return
+
+    if not create_security_group_rule(shade_client, security_group['id'],
+                                      direction='egress', protocol='tcp',
+                                      port_range_min='22',
+                                      port_range_max='22'):
+        log.error("Failed to create the security group rule...")
+        shade_client.delete_security_group(sg_name)
+        return
+    return security_group['id']
 
 
 # *********************************************
@@ -757,6 +769,18 @@ def delete_image(glance_client, image_id):    # pragma: no cover
         return True
 
 
+def list_images(shade_client=None):
+    if shade_client is None:
+        shade_client = get_shade_client()
+
+    try:
+        return shade_client.list_images()
+    except exc.OpenStackCloudException as o_exc:
+        log.error("Error [list_images(shade_client)]."
+                  "Exception message, '%s'", o_exc.orig_message)
+        return False
+
+
 # *********************************************
 #   CINDER
 # *********************************************
index 357f66b..108ee17 100644 (file)
@@ -23,9 +23,11 @@ import logging
 import os
 import random
 import re
+import signal
 import socket
 import subprocess
 import sys
+import time
 
 import six
 from flask import jsonify
@@ -34,6 +36,8 @@ from oslo_serialization import jsonutils
 from oslo_utils import encodeutils
 
 import yardstick
+from yardstick.common import exceptions
+
 
 logger = logging.getLogger(__name__)
 logger.setLevel(logging.DEBUG)
@@ -136,6 +140,11 @@ def source_env(env_file):
     p = subprocess.Popen(". %s; env" % env_file, stdout=subprocess.PIPE,
                          shell=True)
     output = p.communicate()[0]
+
+    # Sometimes the output is of binary_type, which doesn't have a
+    # splitlines method, so we need to decode it first.
+    if isinstance(output, six.binary_type):
+        output = encodeutils.safe_decode(output)
     env = dict(line.split('=', 1) for line in output.splitlines() if '=' in line)
     os.environ.update(env)
     return env
@@ -400,15 +409,24 @@ class ErrorClass(object):
 
 
 class Timer(object):
-    def __init__(self):
+    def __init__(self, timeout=None):
         super(Timer, self).__init__()
         self.start = self.delta = None
+        self._timeout = int(timeout) if timeout else None
+
+    def _timeout_handler(self, *args):
+        raise exceptions.TimerTimeout(timeout=self._timeout)
 
     def __enter__(self):
         self.start = datetime.datetime.now()
+        if self._timeout:
+            signal.signal(signal.SIGALRM, self._timeout_handler)
+            signal.alarm(self._timeout)
         return self
 
     def __exit__(self, *_):
+        if self._timeout:
+            signal.alarm(0)
         self.delta = datetime.datetime.now() - self.start
 
     def __getattr__(self, item):
@@ -455,3 +473,22 @@ def open_relative_file(path, task_path):
         if e.errno == errno.ENOENT:
             return open(os.path.join(task_path, path))
         raise
+
+
+def wait_until_true(predicate, timeout=60, sleep=1, exception=None):
+    """Wait until callable predicate is evaluated as True
+
+    :param predicate: (func) callable deciding whether waiting should continue
+    :param timeout: (int) timeout in seconds how long should function wait
+    :param sleep: (int) polling interval for results in seconds
+    :param exception: exception class to raise on timeout. If None is passed
+                      (default) then WaitTimeout exception is raised.
+    """
+    try:
+        with Timer(timeout=timeout):
+            while not predicate():
+                time.sleep(sleep)
+    except exceptions.TimerTimeout:
+        if exception and issubclass(exception, Exception):
+            raise exception  # pylint: disable=raising-bad-type
+        raise exceptions.WaitTimeout
index 70ce4ff..c538cee 100644 (file)
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from __future__ import absolute_import
-from __future__ import print_function
-import sys
 import logging
 
 import re
 from itertools import product
+import IxNetwork
+
 
 log = logging.getLogger(__name__)
 
@@ -135,7 +134,6 @@ class IxNextgen(object):
             port.append(port0)
 
         cfg = {
-            'py_lib_path': tg_cfg["mgmt-interface"]["tg-config"]["py_lib_path"],
             'machine': tg_cfg["mgmt-interface"]["ip"],
             'port': tg_cfg["mgmt-interface"]["tg-config"]["tcl_port"],
             'chassis': tg_cfg["mgmt-interface"]["tg-config"]["ixchassis"],
@@ -186,7 +184,7 @@ class IxNextgen(object):
             self.set_random_ip_multi_attribute(ip, seeds[1], fixed_bits, random_mask, l3_count)
 
     def add_ip_header(self, params, version):
-        for it, ep, i in self.iter_over_get_lists('/traffic', 'trafficItem', "configElement", 1):
+        for _, ep, i in self.iter_over_get_lists('/traffic', 'trafficItem', "configElement", 1):
             iter1 = (v['outer_l3'] for v in params.values() if str(v['id']) == str(i))
             try:
                 l3 = next(iter1, {})
@@ -194,21 +192,13 @@ class IxNextgen(object):
             except (KeyError, IndexError):
                 continue
 
-            for ip, ip_bits, _ in self.iter_over_get_lists(ep, 'stack', 'field'):
+            for _, ip_bits, _ in self.iter_over_get_lists(ep, 'stack', 'field'):
                 self.set_random_ip_multi_attributes(ip_bits, version, seeds, l3)
 
         self.ixnet.commit()
 
     def _connect(self, tg_cfg):
         self._cfg = self.get_config(tg_cfg)
-
-        sys.path.append(self._cfg["py_lib_path"])
-        # Import IxNetwork after getting ixia lib path
-        try:
-            import IxNetwork
-        except ImportError:
-            raise
-
         self.ixnet = IxNetwork.IxNet()
 
         machine = self._cfg['machine']
@@ -292,7 +282,7 @@ class IxNextgen(object):
             self.update_ether_multi_attribute(ether, str(l2.get('srcmac', "00:00:00:00:00:01")))
 
     def ix_update_ether(self, params):
-        for ti, ep, index in self.iter_over_get_lists('/traffic', 'trafficItem',
+        for _, ep, index in self.iter_over_get_lists('/traffic', 'trafficItem',
                                                       "configElement", 1):
             iter1 = (v['outer_l2'] for v in params.values() if str(v['id']) == str(index))
             try:
@@ -300,7 +290,7 @@ class IxNextgen(object):
             except KeyError:
                 continue
 
-            for ip, ether, _ in self.iter_over_get_lists(ep, 'stack', 'field'):
+            for _, ether, _ in self.iter_over_get_lists(ep, 'stack', 'field'):
                 self.update_ether_multi_attributes(ether, l2)
 
         self.ixnet.commit()
index dc5c46a..0c0bf22 100644 (file)
@@ -27,6 +27,7 @@ from oslo_config import cfg
 from oslo_utils.encodeutils import safe_decode
 
 from yardstick import ssh
+from yardstick.common.exceptions import ResourceCommandError
 from yardstick.common.task_template import finalize_for_yaml
 from yardstick.common.utils import validate_non_string_sequence
 from yardstick.network_services.nfvi.collectd import AmqpConsumer
@@ -249,45 +250,46 @@ class ResourceProfile(object):
         if status != 0:
             LOG.error("cannot find OVS socket %s", socket_path)
 
+    def _start_rabbitmq(self, connection):
+        # Reset amqp queue
+        LOG.debug("reset and setup amqp to collect data from collectd")
+        # ensure collectd.conf.d exists to avoid error/warning
+        cmd_list = ["sudo mkdir -p /etc/collectd/collectd.conf.d",
+                    "sudo service rabbitmq-server restart",
+                    "sudo rabbitmqctl stop_app",
+                    "sudo rabbitmqctl reset",
+                    "sudo rabbitmqctl start_app",
+                    "sudo rabbitmqctl add_user admin admin",
+                    "sudo rabbitmqctl authenticate_user admin admin",
+                    "sudo rabbitmqctl set_permissions -p / admin '.*' '.*' '.*'"
+                    ]
+        for cmd in cmd_list:
+            exit_status, stdout, stderr = connection.execute(cmd)
+            if exit_status != 0:
+                raise ResourceCommandError(command=cmd, stderr=stderr)
+
+        # check stdout for "sudo rabbitmqctl status" command
+        cmd = "sudo rabbitmqctl status"
+        _, stdout, stderr = connection.execute(cmd)
+        if not re.search("RabbitMQ", stdout):
+            LOG.error("rabbitmqctl status don't have RabbitMQ in running apps")
+            raise ResourceCommandError(command=cmd, stderr=stderr)
+
     def _start_collectd(self, connection, bin_path):
         LOG.debug("Starting collectd to collect NFVi stats")
-        connection.execute('sudo pkill -x -9 collectd')
         collectd_path = os.path.join(bin_path, "collectd", "sbin", "collectd")
         config_file_path = os.path.join(bin_path, "collectd", "etc")
+        self._prepare_collectd_conf(config_file_path)
+
+        connection.execute('sudo pkill -x -9 collectd')
         exit_status = connection.execute("which %s > /dev/null 2>&1" % collectd_path)[0]
         if exit_status != 0:
             LOG.warning("%s is not present disabling", collectd_path)
-            # disable auto-provisioning because it requires Internet access
-            # collectd_installer = os.path.join(bin_path, "collectd.sh")
-            # provision_tool(connection, collectd)
-            # http_proxy = os.environ.get('http_proxy', '')
-            # https_proxy = os.environ.get('https_proxy', '')
-            # connection.execute("sudo %s '%s' '%s'" % (
-            #     collectd_installer, http_proxy, https_proxy))
             return
         if "ovs_stats" in self.plugins:
             self._setup_ovs_stats(connection)
 
         LOG.debug("Starting collectd to collect NFVi stats")
-        # ensure collectd.conf.d exists to avoid error/warning
-        connection.execute("sudo mkdir -p /etc/collectd/collectd.conf.d")
-        self._prepare_collectd_conf(config_file_path)
-
-        # Reset amqp queue
-        LOG.debug("reset and setup amqp to collect data from collectd")
-        connection.execute("sudo rm -rf /var/lib/rabbitmq/mnesia/rabbit*")
-        connection.execute("sudo service rabbitmq-server start")
-        connection.execute("sudo rabbitmqctl stop_app")
-        connection.execute("sudo rabbitmqctl reset")
-        connection.execute("sudo rabbitmqctl start_app")
-        connection.execute("sudo service rabbitmq-server restart")
-
-        LOG.debug("Creating admin user for rabbitmq in order to collect data from collectd")
-        connection.execute("sudo rabbitmqctl delete_user guest")
-        connection.execute("sudo rabbitmqctl add_user admin admin")
-        connection.execute("sudo rabbitmqctl authenticate_user admin admin")
-        connection.execute("sudo rabbitmqctl set_permissions -p / admin '.*' '.*' '.*'")
-
         LOG.debug("Start collectd service..... %s second timeout", self.timeout)
         # intel_pmu plug requires large numbers of files open, so try to set
         # ulimit -n to a large value
@@ -299,9 +301,10 @@ class ResourceProfile(object):
         """ Start system agent for NFVi collection on host """
         if self.enable:
             try:
+                self._start_rabbitmq(self.connection)
                 self._start_collectd(self.connection, bin_path)
-            except Exception:
-                LOG.exception("Exception during collectd start")
+            except ResourceCommandError as e:
+                LOG.exception("Exception during collectd and rabbitmq start: %s", str(e))
                 raise
 
     def start(self):
index addbd9a..d8b9625 100644 (file)
@@ -35,7 +35,6 @@ from yardstick.common import utils
 from yardstick.network_services import constants
 from yardstick.network_services.helpers.dpdkbindnic_helper import DpdkBindHelper, DpdkNode
 from yardstick.network_services.helpers.samplevnf_helper import MultiPortConfig
-from yardstick.network_services.helpers.samplevnf_helper import PortPairs
 from yardstick.network_services.nfvi.resource import ResourceProfile
 from yardstick.network_services.utils import get_nsb_option
 from yardstick.network_services.vnf_generic.vnf.base import GenericTrafficGen
@@ -79,7 +78,6 @@ class DpdkVnfSetupEnvHelper(SetupEnvHelper):
     APP_NAME = 'DpdkVnf'
     FIND_NET_CMD = "find /sys/class/net -lname '*{}*' -printf '%f'"
     NR_HUGEPAGES_PATH = '/proc/sys/vm/nr_hugepages'
-    HUGEPAGES_KB = 1024 * 1024 * 16
 
     @staticmethod
     def _update_packet_type(ip_pipeline_cfg, traffic_options):
@@ -118,7 +116,8 @@ class DpdkVnfSetupEnvHelper(SetupEnvHelper):
     def _setup_hugepages(self):
         meminfo = utils.read_meminfo(self.ssh_helper)
         hp_size_kb = int(meminfo['Hugepagesize'])
-        nr_hugepages = int(abs(self.HUGEPAGES_KB / hp_size_kb))
+        hugepages_gb = self.scenario_helper.all_options.get('hugepages_gb', 16)
+        nr_hugepages = int(abs(hugepages_gb * 1024 * 1024 / hp_size_kb))
         self.ssh_helper.execute('echo %s | sudo tee %s' %
                                 (nr_hugepages, self.NR_HUGEPAGES_PATH))
         hp = six.BytesIO()
@@ -657,49 +656,6 @@ class SampleVNF(GenericVNF):
         self.vnf_port_pairs = None
         self._vnf_process = None
 
-    def _build_ports(self):
-        self._port_pairs = PortPairs(self.vnfd_helper.interfaces)
-        self.networks = self._port_pairs.networks
-        self.uplink_ports = self.vnfd_helper.port_nums(self._port_pairs.uplink_ports)
-        self.downlink_ports = self.vnfd_helper.port_nums(self._port_pairs.downlink_ports)
-        self.my_ports = self.vnfd_helper.port_nums(self._port_pairs.all_ports)
-
-    def _get_route_data(self, route_index, route_type):
-        route_iter = iter(self.vnfd_helper.vdu0.get('nd_route_tbl', []))
-        for _ in range(route_index):
-            next(route_iter, '')
-        return next(route_iter, {}).get(route_type, '')
-
-    def _get_port0localip6(self):
-        return_value = self._get_route_data(0, 'network')
-        LOG.info("_get_port0localip6 : %s", return_value)
-        return return_value
-
-    def _get_port1localip6(self):
-        return_value = self._get_route_data(1, 'network')
-        LOG.info("_get_port1localip6 : %s", return_value)
-        return return_value
-
-    def _get_port0prefixlen6(self):
-        return_value = self._get_route_data(0, 'netmask')
-        LOG.info("_get_port0prefixlen6 : %s", return_value)
-        return return_value
-
-    def _get_port1prefixlen6(self):
-        return_value = self._get_route_data(1, 'netmask')
-        LOG.info("_get_port1prefixlen6 : %s", return_value)
-        return return_value
-
-    def _get_port0gateway6(self):
-        return_value = self._get_route_data(0, 'network')
-        LOG.info("_get_port0gateway6 : %s", return_value)
-        return return_value
-
-    def _get_port1gateway6(self):
-        return_value = self._get_route_data(1, 'network')
-        LOG.info("_get_port1gateway6 : %s", return_value)
-        return return_value
-
     def _start_vnf(self):
         self.queue_wrapper = QueueFileWrapper(self.q_in, self.q_out, self.VNF_PROMPT)
         name = "{}-{}-{}".format(self.name, self.APP_NAME, os.getpid())
index d69f860..e0c0db2 100644 (file)
@@ -15,18 +15,20 @@ import datetime
 import getpass
 import logging
 import pkg_resources
+import pprint
 import socket
 import tempfile
 import time
 
 from oslo_serialization import jsonutils
 from oslo_utils import encodeutils
-import shade
+from shade._heat import event_utils
 
-import yardstick.common.openstack_utils as op_utils
+from yardstick.common import constants as consts
 from yardstick.common import exceptions
 from yardstick.common import template_format
-from yardstick.common import constants as consts
+from yardstick.common import openstack_utils as op_utils
+
 
 log = logging.getLogger(__name__)
 
@@ -39,10 +41,11 @@ _DEPLOYED_STACKS = {}
 class HeatStack(object):
     """Represents a Heat stack (deployed template) """
 
-    def __init__(self, name):
+    def __init__(self, name, os_cloud_config=None):
         self.name = name
         self.outputs = {}
-        self._cloud = shade.openstack_cloud()
+        os_cloud_config = {} if not os_cloud_config else os_cloud_config
+        self._cloud = op_utils.get_shade_client(**os_cloud_config)
         self._stack = None
 
     def _update_stack_tracking(self):
@@ -63,6 +66,10 @@ class HeatStack(object):
 
         self._update_stack_tracking()
 
+    def get_failures(self):
+        return event_utils.get_events(self._cloud, self._stack.id,
+                                      event_args={'resource_status': 'FAILED'})
+
     def get(self):
         """Retrieves an existing stack from the target cloud
 
@@ -146,10 +153,12 @@ name (i.e. %s).
         # short hand for resources part of template
         self.resources = self._template['resources']
 
-    def __init__(self, name, template_file=None, heat_parameters=None):
+    def __init__(self, name, template_file=None, heat_parameters=None,
+                 os_cloud_config=None):
         self.name = name
         self.keystone_client = None
         self.heat_parameters = {}
+        self._os_cloud_config = {} if not os_cloud_config else os_cloud_config
 
         # heat_parameters is passed to heat in stack create, empty dict when
         # yardstick creates the template (no get_param in resources part)
@@ -616,7 +625,7 @@ name (i.e. %s).
         log.info("Creating stack '%s' START", self.name)
 
         start_time = time.time()
-        stack = HeatStack(self.name)
+        stack = HeatStack(self.name, os_cloud_config=self._os_cloud_config)
         stack.create(self._template, self.heat_parameters, block, timeout)
 
         if not block:
@@ -625,6 +634,9 @@ name (i.e. %s).
             return stack
 
         if stack.status != self.HEAT_STATUS_COMPLETE:
+            for event in stack.get_failures():
+                log.error("%s", event.resource_status_reason)
+            log.error(pprint.pformat(self._template))
             raise exceptions.HeatTemplateError(stack_name=self.name)
 
         log.info("Creating stack '%s' DONE in %d secs",
diff --git a/yardstick/tests/functional/common/messaging/test_messaging.py b/yardstick/tests/functional/common/messaging/test_messaging.py
new file mode 100644 (file)
index 0000000..9987434
--- /dev/null
@@ -0,0 +1,99 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import multiprocessing
+import time
+
+from yardstick.common.messaging import consumer
+from yardstick.common.messaging import payloads
+from yardstick.common.messaging import producer
+from yardstick.tests.functional import base
+
+
+TOPIC = 'topic_MQ'
+METHOD_INFO = 'info'
+
+
+class DummyPayload(payloads.Payload):
+    REQUIRED_FIELDS = {'version', 'data'}
+
+
+class DummyEndpoint(consumer.NotificationHandler):
+
+    def info(self, ctxt, **kwargs):
+        if ctxt['pid'] in self._ctx_pids:
+            self._queue.put('ID {}, data: {}, pid: {}'.format(
+                self._id, kwargs['data'], ctxt['pid']))
+
+
+class DummyConsumer(consumer.MessagingConsumer):
+
+    def __init__(self, _id, ctx_pids, queue):
+        self._id = _id
+        endpoints = [DummyEndpoint(_id, ctx_pids, queue)]
+        super(DummyConsumer, self).__init__(TOPIC, ctx_pids, endpoints)
+
+
+class DummyProducer(producer.MessagingProducer):
+    pass
+
+
+def _run_consumer(_id, ctx_pids, queue):
+    _consumer = DummyConsumer(_id, ctx_pids, queue)
+    _consumer.start_rpc_server()
+    _consumer.wait()
+
+
+class MessagingTestCase(base.BaseFunctionalTestCase):
+
+    @staticmethod
+    def _terminate_consumers(num_consumers, processes):
+        for i in range(num_consumers):
+            processes[i].terminate()
+
+    def test_run_five_consumers(self):
+        output_queue = multiprocessing.Queue()
+        num_consumers = 10
+        ctx_1 = 100001
+        ctx_2 = 100002
+        producers = [DummyProducer(TOPIC, pid=ctx_1),
+                     DummyProducer(TOPIC, pid=ctx_2)]
+
+        processes = []
+        for i in range(num_consumers):
+            processes.append(multiprocessing.Process(
+                name='consumer_{}'.format(i),
+                target=_run_consumer,
+                args=(i, [ctx_1, ctx_2], output_queue)))
+            processes[i].start()
+        self.addCleanup(self._terminate_consumers, num_consumers, processes)
+
+        time.sleep(2)  # Let the consumers create the listeners
+        for producer in producers:
+            for message in ['message 0', 'message 1']:
+                producer.send_message(METHOD_INFO,
+                                      DummyPayload(version=1, data=message))
+
+        time.sleep(2)  # Let consumers attend the calls
+        output = []
+        while not output_queue.empty():
+            output.append(output_queue.get(True, 1))
+
+        self.assertEqual(num_consumers * 4, len(output))
+        msg_template = 'ID {}, data: {}, pid: {}'
+        for i in range(num_consumers):
+            for ctx in [ctx_1, ctx_2]:
+                for message in ['message 0', 'message 1']:
+                    msg = msg_template.format(i, message, ctx)
+                    self.assertIn(msg, output)
diff --git a/yardstick/tests/integration/dummy-scenario-heat-context.yaml b/yardstick/tests/integration/dummy-scenario-heat-context.yaml
new file mode 100644 (file)
index 0000000..45a3995
--- /dev/null
@@ -0,0 +1,38 @@
+##############################################################################
+# Copyright (c) 2018 Intel
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+{% set context_name = context_name or "demo" %}
+---
+# Sample Heat context config with Dummy context
+
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+  type: Dummy
+
+  runner:
+    type: Duration
+    duration: 5
+    interval: 1
+
+context:
+  name: {{ context_name }}
+  image: yardstick-image
+  flavor: yardstick-flavor
+  user: ubuntu
+
+  servers:
+    athena:
+      name: athena
+    ares:
+      name: ares
+
+  networks:
+    test:
+      name: test
diff --git a/yardstick/tests/unit/apiserver/resources/v1/test_testsuites.py b/yardstick/tests/unit/apiserver/resources/v1/test_testsuites.py
new file mode 100644 (file)
index 0000000..85c045f
--- /dev/null
@@ -0,0 +1,35 @@
+##############################################################################
+# Copyright (c) 2018 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import mock
+
+import unittest
+
+from yardstick.tests.unit.apiserver import APITestCase
+from api.utils.thread import TaskThread
+
+
+class TestsuiteTestCase(APITestCase):
+
+    def test_run_test_suite(self):
+        if self.app is None:
+            unittest.skip('host config error')
+            return
+
+        TaskThread.start = mock.MagicMock()
+
+        url = 'yardstick/testsuites/action'
+        data = {
+            'action': 'run_test_suite',
+            'args': {
+                'opts': {},
+                'testsuite': 'opnfv_smoke'
+            }
+        }
+        resp = self._post(url, data)
+        self.assertEqual(resp.get('status'), 1)
diff --git a/yardstick/tests/unit/apiserver/resources/v2/test_images.py b/yardstick/tests/unit/apiserver/resources/v2/test_images.py
new file mode 100644 (file)
index 0000000..ab131ee
--- /dev/null
@@ -0,0 +1,46 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import mock
+
+import unittest
+
+from yardstick.tests.unit.apiserver import APITestCase
+from api.resources.v2.images import format_image_info
+
+
+class V2ImagesTestCase(APITestCase):
+    @mock.patch('yardstick.common.openstack_utils.list_images')
+    @mock.patch('yardstick.common.utils.source_env')
+    def test_get(self, _, mock_list_images):
+        if self.app is None:
+            unittest.skip('host config error')
+            return
+
+        single_image = mock.MagicMock()
+        single_image.name = 'yardstick-image'
+        single_image.size = 16384
+        single_image.status = 'active'
+        single_image.updated_at = '2018-04-08'
+
+        mock_list_images.return_value = [single_image]
+        url = 'api/v2/yardstick/images'
+        resp = self._get(url)
+        self.assertEqual(resp.get('status'), 1)
+
+
+class FormatImageInfoTestCase(unittest.TestCase):
+    def test_format_image_info(self):
+        image = mock.MagicMock()
+        image.name = 'yardstick-image'
+        image.size = 1048576
+        image.status = 'active'
+        image.updated_at = '2018-04-08'
+
+        image_dict = format_image_info(image)
+        self.assertEqual(image_dict.get('size'), 1)
index 18ea3c4..72e684a 100644 (file)
 
 import copy
 import os
-import unittest
-import mock
+import uuid
 
+import mock
+import unittest
 from xml.etree import ElementTree
+
+from yardstick import ssh
 from yardstick.benchmark.contexts.standalone import model
+from yardstick.common import exceptions
+from yardstick import constants
 from yardstick.network_services import utils
 
 
@@ -38,22 +43,19 @@ XML_SAMPLE_INTERFACE = """<?xml version="1.0"?>
 </domain>
 """
 
+
 class ModelLibvirtTestCase(unittest.TestCase):
 
     def setUp(self):
-        self.xml = ElementTree.ElementTree(
-            element=ElementTree.fromstring(XML_SAMPLE))
         self.pci_address_str = '0001:04:03.2'
         self.pci_address = utils.PciAddress(self.pci_address_str)
         self.mac = '00:00:00:00:00:01'
-        self._mock_write_xml = mock.patch.object(ElementTree.ElementTree,
-                                                 'write')
-        self.mock_write_xml = self._mock_write_xml.start()
-
+        self._mock_ssh = mock.Mock()
+        self.mock_ssh = self._mock_ssh.start()
         self.addCleanup(self._cleanup)
 
     def _cleanup(self):
-        self._mock_write_xml.stop()
+        self._mock_ssh.stop()
 
     # TODO: Remove mocking of yardstick.ssh.SSH (here and elsewhere)
     # In this case, we are mocking a param to be passed into other methods
@@ -67,20 +69,31 @@ class ModelLibvirtTestCase(unittest.TestCase):
         model.Libvirt.check_if_vm_exists_and_delete("vm_0", ssh_mock)
 
     def test_virsh_create_vm(self):
-        with mock.patch("yardstick.ssh.SSH") as ssh:
-            ssh_mock = mock.Mock(autospec=ssh.SSH)
-            ssh_mock.execute = mock.Mock(return_value=(0, "a", ""))
-            ssh.return_value = ssh_mock
-        # NOTE(ralonsoh): this test doesn't cover function execution.
-        model.Libvirt.virsh_create_vm(ssh_mock, "vm_0")
+        self.mock_ssh.execute = mock.Mock(return_value=(0, 0, 0))
+        model.Libvirt.virsh_create_vm(self.mock_ssh, 'vm_0')
+        self.mock_ssh.execute.assert_called_once_with('virsh create vm_0')
+
+    def test_virsh_create_vm_error(self):
+        self.mock_ssh.execute = mock.Mock(return_value=(1, 0, 'error_create'))
+        with self.assertRaises(exceptions.LibvirtCreateError) as exc:
+            model.Libvirt.virsh_create_vm(self.mock_ssh, 'vm_0')
+        self.assertEqual('Error creating the virtual machine. Error: '
+                         'error_create.', str(exc.exception))
+        self.mock_ssh.execute.assert_called_once_with('virsh create vm_0')
 
     def test_virsh_destroy_vm(self):
-        with mock.patch("yardstick.ssh.SSH") as ssh:
-            ssh_mock = mock.Mock(autospec=ssh.SSH)
-            ssh_mock.execute = mock.Mock(return_value=(0, "a", ""))
-            ssh.return_value = ssh_mock
-        # NOTE(ralonsoh): this test doesn't cover function execution.
-        model.Libvirt.virsh_destroy_vm("vm_0", ssh_mock)
+        self.mock_ssh.execute = mock.Mock(return_value=(0, 0, 0))
+        model.Libvirt.virsh_destroy_vm('vm_0', self.mock_ssh)
+        self.mock_ssh.execute.assert_called_once_with('virsh destroy vm_0')
+
+    @mock.patch.object(model, 'LOG')
+    def test_virsh_destroy_vm_error(self, mock_logger):
+        self.mock_ssh.execute = mock.Mock(return_value=(1, 0, 'error_destroy'))
+        mock_logger.warning = mock.Mock()
+        model.Libvirt.virsh_destroy_vm('vm_0', self.mock_ssh)
+        mock_logger.warning.assert_called_once_with(
+            'Error destroying VM %s. Error: %s', 'vm_0', 'error_destroy')
+        self.mock_ssh.execute.assert_called_once_with('virsh destroy vm_0')
 
     def test_add_interface_address(self):
         xml = ElementTree.ElementTree(
@@ -98,101 +111,164 @@ class ModelLibvirtTestCase(unittest.TestCase):
                          result.get('function'))
 
     def test_add_ovs_interfaces(self):
-        xml_input = mock.Mock()
-        with mock.patch.object(ElementTree, 'parse', return_value=self.xml) \
-                as mock_parse:
-            xml = copy.deepcopy(self.xml)
-            mock_parse.return_value = xml
-            model.Libvirt.add_ovs_interface(
-                '/usr/local', 0, self.pci_address_str, self.mac, xml_input)
-            mock_parse.assert_called_once_with(xml_input)
-            self.mock_write_xml.assert_called_once_with(xml_input)
-            interface = xml.find('devices').find('interface')
-            self.assertEqual('vhostuser', interface.get('type'))
-            mac = interface.find('mac')
-            self.assertEqual(self.mac, mac.get('address'))
-            source = interface.find('source')
-            self.assertEqual('unix', source.get('type'))
-            self.assertEqual('/usr/local/var/run/openvswitch/dpdkvhostuser0',
-                             source.get('path'))
-            self.assertEqual('client', source.get('mode'))
-            _model = interface.find('model')
-            self.assertEqual('virtio', _model.get('type'))
-            driver = interface.find('driver')
-            self.assertEqual('4', driver.get('queues'))
-            host = driver.find('host')
-            self.assertEqual('off', host.get('mrg_rxbuf'))
-            self.assertIsNotNone(interface.find('address'))
+        xml_input = copy.deepcopy(XML_SAMPLE)
+        xml_output = model.Libvirt.add_ovs_interface(
+            '/usr/local', 0, self.pci_address_str, self.mac, xml_input)
+
+        root = ElementTree.fromstring(xml_output)
+        et_out = ElementTree.ElementTree(element=root)
+        interface = et_out.find('devices').find('interface')
+        self.assertEqual('vhostuser', interface.get('type'))
+        mac = interface.find('mac')
+        self.assertEqual(self.mac, mac.get('address'))
+        source = interface.find('source')
+        self.assertEqual('unix', source.get('type'))
+        self.assertEqual('/usr/local/var/run/openvswitch/dpdkvhostuser0',
+                         source.get('path'))
+        self.assertEqual('client', source.get('mode'))
+        _model = interface.find('model')
+        self.assertEqual('virtio', _model.get('type'))
+        driver = interface.find('driver')
+        self.assertEqual('4', driver.get('queues'))
+        host = driver.find('host')
+        self.assertEqual('off', host.get('mrg_rxbuf'))
+        self.assertIsNotNone(interface.find('address'))
 
     def test_add_sriov_interfaces(self):
-        xml_input = mock.Mock()
-        with mock.patch.object(ElementTree, 'parse', return_value=self.xml) \
-                as mock_parse:
-            xml = copy.deepcopy(self.xml)
-            mock_parse.return_value = xml
-            vm_pci = '0001:05:04.2'
-            model.Libvirt.add_sriov_interfaces(
-                vm_pci, self.pci_address_str, self.mac, xml_input)
-            mock_parse.assert_called_once_with(xml_input)
-            self.mock_write_xml.assert_called_once_with(xml_input)
-            interface = xml.find('devices').find('interface')
-            self.assertEqual('yes', interface.get('managed'))
-            self.assertEqual('hostdev', interface.get('type'))
-            mac = interface.find('mac')
-            self.assertEqual(self.mac, mac.get('address'))
-            source = interface.find('source')
-            source_address = source.find('address')
-            self.assertIsNotNone(source.find('address'))
-
-            self.assertEqual('pci', source_address.get('type'))
-            self.assertEqual('0x' + self.pci_address_str.split(':')[0],
-                             source_address.get('domain'))
-            self.assertEqual('0x' + self.pci_address_str.split(':')[1],
-                             source_address.get('bus'))
-            self.assertEqual('0x' + self.pci_address_str.split(':')[2].split('.')[0],
-                             source_address.get('slot'))
-            self.assertEqual('0x' + self.pci_address_str.split(':')[2].split('.')[1],
-                             source_address.get('function'))
-
-            interface_address = interface.find('address')
-            self.assertEqual('pci', interface_address.get('type'))
-            self.assertEqual('0x' + vm_pci.split(':')[0],
-                             interface_address.get('domain'))
-            self.assertEqual('0x' + vm_pci.split(':')[1],
-                             interface_address.get('bus'))
-            self.assertEqual('0x' + vm_pci.split(':')[2].split('.')[0],
-                             interface_address.get('slot'))
-            self.assertEqual('0x' + vm_pci.split(':')[2].split('.')[1],
-                             interface_address.get('function'))
+        xml_input = copy.deepcopy(XML_SAMPLE)
+        vm_pci = '0001:05:04.2'
+        xml_output = model.Libvirt.add_sriov_interfaces(
+            vm_pci, self.pci_address_str, self.mac, xml_input)
+        root = ElementTree.fromstring(xml_output)
+        et_out = ElementTree.ElementTree(element=root)
+        interface = et_out.find('devices').find('interface')
+        self.assertEqual('yes', interface.get('managed'))
+        self.assertEqual('hostdev', interface.get('type'))
+        mac = interface.find('mac')
+        self.assertEqual(self.mac, mac.get('address'))
+        source = interface.find('source')
+        source_address = source.find('address')
+        self.assertIsNotNone(source.find('address'))
+
+        self.assertEqual('pci', source_address.get('type'))
+        self.assertEqual('0x' + self.pci_address_str.split(':')[0],
+                         source_address.get('domain'))
+        self.assertEqual('0x' + self.pci_address_str.split(':')[1],
+                         source_address.get('bus'))
+        self.assertEqual('0x' + self.pci_address_str.split(':')[2].split('.')[0],
+                         source_address.get('slot'))
+        self.assertEqual('0x' + self.pci_address_str.split(':')[2].split('.')[1],
+                         source_address.get('function'))
+
+        interface_address = interface.find('address')
+        self.assertEqual('pci', interface_address.get('type'))
+        self.assertEqual('0x' + vm_pci.split(':')[0],
+                         interface_address.get('domain'))
+        self.assertEqual('0x' + vm_pci.split(':')[1],
+                         interface_address.get('bus'))
+        self.assertEqual('0x' + vm_pci.split(':')[2].split('.')[0],
+                         interface_address.get('slot'))
+        self.assertEqual('0x' + vm_pci.split(':')[2].split('.')[1],
+                         interface_address.get('function'))
 
     def test_create_snapshot_qemu(self):
-        result = "/var/lib/libvirt/images/0.qcow2"
-        with mock.patch("yardstick.ssh.SSH") as ssh:
-            ssh_mock = mock.Mock(autospec=ssh.SSH)
-            ssh_mock.execute = \
-                mock.Mock(return_value=(0, "a", ""))
-            ssh.return_value = ssh_mock
-        image = model.Libvirt.create_snapshot_qemu(ssh_mock, "0", "ubuntu.img")
-        self.assertEqual(image, result)
-
-    @mock.patch.object(model.Libvirt, 'pin_vcpu_for_perf')
-    @mock.patch.object(model.Libvirt, 'create_snapshot_qemu')
+        self.mock_ssh.execute = mock.Mock(return_value=(0, 0, 0))
+        index = 1
+        vm_image = '/var/lib/libvirt/images/%s.qcow2' % index
+        base_image = '/tmp/base_image'
+
+        model.Libvirt.create_snapshot_qemu(self.mock_ssh, index, base_image)
+        self.mock_ssh.execute.assert_has_calls([
+            mock.call('rm -- "%s"' % vm_image),
+            mock.call('test -r %s' % base_image),
+            mock.call('qemu-img create -f qcow2 -o backing_file=%s %s' %
+                      (base_image, vm_image))
+        ])
+
+    @mock.patch.object(os.path, 'basename', return_value='base_image')
+    @mock.patch.object(os.path, 'normpath')
+    @mock.patch.object(os, 'access', return_value=True)
+    def test_create_snapshot_qemu_no_image_remote(self,
+            mock_os_access, mock_normpath, mock_basename):
+        self.mock_ssh.execute = mock.Mock(
+            side_effect=[(0, 0, 0), (1, 0, 0), (0, 0, 0), (0, 0, 0)])
+        index = 1
+        vm_image = '/var/lib/libvirt/images/%s.qcow2' % index
+        base_image = '/tmp/base_image'
+        mock_normpath.return_value = base_image
+
+        model.Libvirt.create_snapshot_qemu(self.mock_ssh, index, base_image)
+        self.mock_ssh.execute.assert_has_calls([
+            mock.call('rm -- "%s"' % vm_image),
+            mock.call('test -r %s' % base_image),
+            mock.call('mv -- "/tmp/%s" "%s"' % ('base_image', base_image)),
+            mock.call('qemu-img create -f qcow2 -o backing_file=%s %s' %
+                      (base_image, vm_image))
+        ])
+        mock_os_access.assert_called_once_with(base_image, os.R_OK)
+        mock_normpath.assert_called_once_with(base_image)
+        mock_basename.assert_has_calls([mock.call(base_image)])
+        self.mock_ssh.put_file.assert_called_once_with(base_image,
+                                                       '/tmp/base_image')
+
+    @mock.patch.object(os, 'access', return_value=False)
+    def test_create_snapshot_qemu_no_image_local(self, mock_os_access):
+        self.mock_ssh.execute = mock.Mock(side_effect=[(0, 0, 0), (1, 0, 0)])
+        base_image = '/tmp/base_image'
+
+        with self.assertRaises(exceptions.LibvirtQemuImageBaseImageNotPresent):
+            model.Libvirt.create_snapshot_qemu(self.mock_ssh, 3, base_image)
+        mock_os_access.assert_called_once_with(base_image, os.R_OK)
+
+    def test_create_snapshot_qemu_error_qemuimg_command(self):
+        self.mock_ssh.execute = mock.Mock(
+            side_effect=[(0, 0, 0), (0, 0, 0), (1, 0, 0)])
+        index = 1
+        vm_image = '/var/lib/libvirt/images/%s.qcow2' % index
+        base_image = '/tmp/base_image'
+
+        with self.assertRaises(exceptions.LibvirtQemuImageCreateError):
+            model.Libvirt.create_snapshot_qemu(self.mock_ssh, index,
+                                               base_image)
+        self.mock_ssh.execute.assert_has_calls([
+            mock.call('rm -- "%s"' % vm_image),
+            mock.call('test -r %s' % base_image),
+            mock.call('qemu-img create -f qcow2 -o backing_file=%s %s' %
+                      (base_image, vm_image))
+        ])
+
+    @mock.patch.object(model.Libvirt, 'pin_vcpu_for_perf', return_value='4,5')
+    @mock.patch.object(model.Libvirt, 'create_snapshot_qemu',
+                       return_value='qemu_image')
     def test_build_vm_xml(self, mock_create_snapshot_qemu,
-                          *args):
-        # NOTE(ralonsoh): this test doesn't cover function execution. This test
-        # should also check mocked function calls.
-        cfg_file = 'test_config_file.cfg'
-        self.addCleanup(os.remove, cfg_file)
-        result = [4]
-        with mock.patch("yardstick.ssh.SSH") as ssh:
-            ssh_mock = mock.Mock(autospec=ssh.SSH)
-            ssh_mock.execute = \
-                mock.Mock(return_value=(0, "a", ""))
-            ssh.return_value = ssh_mock
-        mock_create_snapshot_qemu.return_value = "0.img"
-
-        status = model.Libvirt.build_vm_xml(ssh_mock, {}, cfg_file, 'vm_0', 0)
-        self.assertEqual(status[0], result[0])
+                          mock_pin_vcpu_for_perf):
+        extra_specs = {'hw:cpu_cores': '4',
+                       'hw:cpu_sockets': '3',
+                       'hw:cpu_threads': '2',
+                       'cputune': 'cool'}
+        flavor = {'ram': '1024',
+                  'extra_specs': extra_specs,
+                  'hw_socket': '1',
+                  'images': 'images'}
+        mac = model.StandaloneContextHelper.get_mac_address(0x00)
+        _uuid = uuid.uuid4()
+        connection = mock.Mock()
+        with mock.patch.object(model.StandaloneContextHelper,
+                               'get_mac_address', return_value=mac) as \
+                mock_get_mac_address, \
+                mock.patch.object(uuid, 'uuid4', return_value=_uuid):
+            xml_out, mac = model.Libvirt.build_vm_xml(
+                connection, flavor, 'vm_name', 100)
+
+        xml_ref = model.VM_TEMPLATE.format(vm_name='vm_name',
+            random_uuid=_uuid, mac_addr=mac, memory='1024', vcpu='8', cpu='4',
+            numa_cpus='0-7', socket='3', threads='2',
+            vm_image='qemu_image', cpuset='4,5', cputune='cool')
+        self.assertEqual(xml_ref, xml_out)
+        mock_get_mac_address.assert_called_once_with(0x00)
+        mock_create_snapshot_qemu.assert_called_once_with(
+            connection, 100, 'images')
+        mock_pin_vcpu_for_perf.assert_called_once_with(connection, '1')
 
     # TODO: Edit this test to test state instead of output
     # update_interrupts_hugepages_perf does not return anything
@@ -398,43 +474,80 @@ class ServerTestCase(unittest.TestCase):
 
 class OvsDeployTestCase(unittest.TestCase):
 
-    NETWORKS = {
-        'mgmt': {'cidr': '152.16.100.10/24'},
-        'private_0': {
-            'phy_port': "0000:05:00.0",
-            'vpci': "0000:00:07.0",
-            'driver': 'i40e',
-            'mac': '',
-            'cidr': '152.16.100.10/24',
-            'gateway_ip': '152.16.100.20'},
-        'public_0': {
-            'phy_port': "0000:05:00.1",
-            'vpci': "0000:00:08.0",
-            'driver': 'i40e',
-            'mac': '',
-            'cidr': '152.16.40.10/24',
-            'gateway_ip': '152.16.100.20'}
-    }
-
-    @mock.patch('yardstick.ssh.SSH')
-    def setUp(self, mock_ssh):
-        self.ovs_deploy = model.OvsDeploy(mock_ssh, '/tmp/dpdk-devbind.py', {})
+    OVS_DETAILS = {'version': {'ovs': 'ovs_version', 'dpdk': 'dpdk_version'}}
 
-    def test___init__(self):
-        self.assertIsNotNone(self.ovs_deploy.connection)
-
-    @mock.patch('yardstick.benchmark.contexts.standalone.model.os')
-    def test_prerequisite(self, *args):
-        # NOTE(ralonsoh): this test should check mocked function calls.
-        self.ovs_deploy.helper = mock.Mock()
-        self.assertIsNone(self.ovs_deploy.prerequisite())
-
-    @mock.patch('yardstick.benchmark.contexts.standalone.model.os')
-    def test_prerequisite_2(self, *args):
-        # NOTE(ralonsoh): this test should check mocked function calls. Rename
-        # this test properly.
-        self.ovs_deploy.helper = mock.Mock()
-        self.ovs_deploy.connection.execute = mock.Mock(
-            return_value=(1, '1.2.3.4 00:00:00:00:00:01', ''))
-        self.ovs_deploy.prerequisite = mock.Mock()
-        self.assertIsNone(self.ovs_deploy.ovs_deploy())
+    def setUp(self):
+        self._mock_ssh = mock.patch.object(ssh, 'SSH')
+        self.mock_ssh = self._mock_ssh.start()
+        self.ovs_deploy = model.OvsDeploy(self.mock_ssh,
+                                          '/tmp/dpdk-devbind.py',
+                                          self.OVS_DETAILS)
+        self._mock_path_isfile = mock.patch.object(os.path, 'isfile')
+        self._mock_path_join = mock.patch.object(os.path, 'join')
+        self.mock_path_isfile = self._mock_path_isfile.start()
+        self.mock_path_join = self._mock_path_join.start()
+
+        self.addCleanup(self._stop_mock)
+
+    def _stop_mock(self):
+        self._mock_ssh.stop()
+        self._mock_path_isfile.stop()
+        self._mock_path_join.stop()
+
+    @mock.patch.object(model.StandaloneContextHelper, 'install_req_libs')
+    def test_prerequisite(self, mock_install_req_libs):
+        pkgs = ["git", "build-essential", "pkg-config", "automake",
+                "autotools-dev", "libltdl-dev", "cmake", "libnuma-dev",
+                "libpcap-dev"]
+        self.ovs_deploy.prerequisite()
+        mock_install_req_libs.assert_called_once_with(
+            self.ovs_deploy.connection, pkgs)
+
+    def test_ovs_deploy_no_file(self):
+        self.mock_path_isfile.return_value = False
+        mock_file = mock.Mock()
+        self.mock_path_join.return_value = mock_file
+
+        self.ovs_deploy.ovs_deploy()
+        self.mock_path_isfile.assert_called_once_with(mock_file)
+        self.mock_path_join.assert_called_once_with(
+            constants.YARDSTICK_ROOT_PATH,
+            'yardstick/resources/scripts/install/',
+            self.ovs_deploy.OVS_DEPLOY_SCRIPT)
+
+    @mock.patch.object(os.environ, 'get', return_value='test_proxy')
+    def test_ovs_deploy(self, mock_env_get):
+        self.mock_path_isfile.return_value = True
+        mock_deploy_file = mock.Mock()
+        mock_remove_ovs_deploy = mock.Mock()
+        self.mock_path_join.side_effect = [mock_deploy_file,
+                                           mock_remove_ovs_deploy]
+        dpdk_version = self.OVS_DETAILS['version']['dpdk']
+        ovs_version = self.OVS_DETAILS['version']['ovs']
+
+        with mock.patch.object(self.ovs_deploy.connection, 'put') as \
+                mock_put, \
+                mock.patch.object(self.ovs_deploy.connection, 'execute') as \
+                mock_execute, \
+                mock.patch.object(self.ovs_deploy, 'prerequisite'):
+            mock_execute.return_value = (0, 0, 0)
+            self.ovs_deploy.ovs_deploy()
+
+            self.mock_path_isfile.assert_called_once_with(mock_deploy_file)
+            self.mock_path_join.assert_has_calls([
+                mock.call(constants.YARDSTICK_ROOT_PATH,
+                          'yardstick/resources/scripts/install/',
+                          self.ovs_deploy.OVS_DEPLOY_SCRIPT),
+                mock.call(self.ovs_deploy.bin_path,
+                          self.ovs_deploy.OVS_DEPLOY_SCRIPT)
+            ])
+            mock_put.assert_called_once_with(mock_deploy_file,
+                                             mock_remove_ovs_deploy)
+            cmd = ("sudo -E %(remote_ovs_deploy)s --ovs='%(ovs_version)s' "
+                   "--dpdk='%(dpdk_version)s' -p='%(proxy)s'" %
+                   {'remote_ovs_deploy': mock_remove_ovs_deploy,
+                    'ovs_version': ovs_version,
+                    'dpdk_version': dpdk_version,
+                    'proxy': 'test_proxy'})
+            mock_execute.assert_called_once_with(cmd)
+            mock_env_get.assert_called_once_with('http_proxy', '')
index 0223fd3..bc3bb73 100644 (file)
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import io
 import os
 
 import mock
+import six
 import unittest
 
+from yardstick.benchmark.contexts.standalone import model
 from yardstick.benchmark.contexts.standalone import ovs_dpdk
+from yardstick.common import exceptions
+from yardstick.network_services import utils
 
 
 class OvsDpdkContextTestCase(unittest.TestCase):
@@ -27,7 +32,6 @@ class OvsDpdkContextTestCase(unittest.TestCase):
     NODES_DUPLICATE_SAMPLE = "nodes_duplicate_sample.yaml"
 
     NETWORKS = {
-        'mgmt': {'cidr': '152.16.100.10/24'},
         'private_0': {
             'phy_port': "0000:05:00.0",
             'vpci': "0000:00:07.0",
@@ -82,15 +86,43 @@ class OvsDpdkContextTestCase(unittest.TestCase):
         self.assertIsNone(self.ovs_dpdk.init(ATTRS))
 
     def test_setup_ovs(self):
-        with mock.patch("yardstick.ssh.SSH") as ssh:
-            ssh_mock = mock.Mock(autospec=ssh.SSH)
-            ssh_mock.execute = \
-                mock.Mock(return_value=(0, "a", ""))
-            ssh.return_value = ssh_mock
-            self.ovs_dpdk.connection = ssh_mock
-            self.ovs_dpdk.networks = self.NETWORKS
-            self.ovs_dpdk.ovs_properties = {}
-            self.assertIsNone(self.ovs_dpdk.setup_ovs())
+        fake_path = '/fake_path'
+        fake_dpdk_nic_bind = 'dpdk_tool.py'
+        self.ovs_dpdk.ovs_properties = {'vpath': fake_path}
+        self.ovs_dpdk.dpdk_devbind = fake_dpdk_nic_bind
+        self.ovs_dpdk.networks = self.NETWORKS
+        self.ovs_dpdk.connection = mock.Mock()
+        self.ovs_dpdk.connection.execute = mock.Mock(return_value=(0, 0, 0))
+        create_from = fake_path + '/etc/openvswitch/conf.db'
+        create_to = fake_path + '/share/openvswitch/vswitch.ovsschema'
+        cmd_list = [
+            'killall -r "ovs.*" -q | true',
+            'mkdir -p {0}/etc/openvswitch'.format(fake_path),
+            'mkdir -p {0}/var/run/openvswitch'.format(fake_path),
+            'rm {0}/etc/openvswitch/conf.db | true'.format(fake_path),
+            'ovsdb-tool create {0} {1}'.format(create_from, create_to),
+            'modprobe vfio-pci',
+            'chmod a+x /dev/vfio',
+            'chmod 0666 /dev/vfio/*',
+            '{0} --force -b vfio-pci {1}'.format(fake_dpdk_nic_bind,
+                self.ovs_dpdk.networks['private_0']['phy_port']),
+            '{0} --force -b vfio-pci {1}'.format(fake_dpdk_nic_bind,
+                self.ovs_dpdk.networks['public_0']['phy_port'])
+        ]
+        calls = [mock.call(cmd, timeout=self.ovs_dpdk.CMD_TIMEOUT)
+                 for cmd in cmd_list]
+
+        self.ovs_dpdk.setup_ovs()
+        self.ovs_dpdk.connection.execute.assert_has_calls(calls,
+                                                          any_order=True)
+
+    def test_setup_ovs_exception(self):
+        self.ovs_dpdk.networks = self.NETWORKS
+        self.ovs_dpdk.connection = mock.Mock()
+        self.ovs_dpdk.connection.execute = mock.Mock(return_value=(1, 0, 0))
+
+        with self.assertRaises(exceptions.OVSSetupError):
+            self.ovs_dpdk.setup_ovs()
 
     def test_start_ovs_serverswitch(self):
         with mock.patch("yardstick.ssh.SSH") as ssh:
@@ -129,34 +161,45 @@ class OvsDpdkContextTestCase(unittest.TestCase):
        self.ovs_dpdk.wait_for_vswitchd = 0
        self.assertIsNone(self.ovs_dpdk.cleanup_ovs_dpdk_env())
 
-    @mock.patch('yardstick.benchmark.contexts.standalone.model.OvsDeploy')
-    def test_check_ovs_dpdk_env(self, mock_ovs):
-        with mock.patch("yardstick.ssh.SSH") as ssh:
-            ssh_mock = mock.Mock(autospec=ssh.SSH)
-            ssh_mock.execute = \
-                mock.Mock(return_value=(1, "a", ""))
-            ssh.return_value = ssh_mock
-            self.ovs_dpdk.connection = ssh_mock
-            self.ovs_dpdk.networks = self.NETWORKS
-            self.ovs_dpdk.ovs_properties = {
-                'version': {'ovs': '2.7.0', 'dpdk': '16.11.1'}
-            }
-            self.ovs_dpdk.wait_for_vswitchd = 0
-            self.ovs_dpdk.cleanup_ovs_dpdk_env = mock.Mock()
-            self.assertIsNone(self.ovs_dpdk.check_ovs_dpdk_env())
-            self.ovs_dpdk.ovs_properties = {
-                'version': {'ovs': '2.0.0'}
-            }
-            self.ovs_dpdk.wait_for_vswitchd = 0
-            self.cleanup_ovs_dpdk_env = mock.Mock()
-            mock_ovs.deploy = mock.Mock()
-            # NOTE(elfoley): Check for a specific Exception
-            self.assertRaises(Exception, self.ovs_dpdk.check_ovs_dpdk_env)
+    @mock.patch.object(ovs_dpdk.OvsDpdkContext, '_check_hugepages')
+    @mock.patch.object(utils, 'get_nsb_option')
+    @mock.patch.object(model.OvsDeploy, 'ovs_deploy')
+    def test_check_ovs_dpdk_env(self, mock_ovs_deploy, mock_get_nsb_option,
+                                mock_check_hugepages):
+        self.ovs_dpdk.connection = mock.Mock()
+        self.ovs_dpdk.connection.execute = mock.Mock(
+            return_value=(1, 0, 0))
+        self.ovs_dpdk.networks = self.NETWORKS
+        self.ovs_dpdk.ovs_properties = {
+            'version': {'ovs': '2.7.0', 'dpdk': '16.11.1'}
+        }
+        self.ovs_dpdk.wait_for_vswitchd = 0
+        self.ovs_dpdk.cleanup_ovs_dpdk_env = mock.Mock()
+        mock_get_nsb_option.return_value = 'fake_path'
+
+        self.ovs_dpdk.check_ovs_dpdk_env()
+        mock_ovs_deploy.assert_called_once()
+        mock_check_hugepages.assert_called_once()
+        mock_get_nsb_option.assert_called_once_with('bin_path')
+
+    @mock.patch.object(ovs_dpdk.OvsDpdkContext, '_check_hugepages')
+    def test_check_ovs_dpdk_env_wrong_version(self, mock_check_hugepages):
+        self.ovs_dpdk.connection = mock.Mock()
+        self.ovs_dpdk.connection.execute = mock.Mock(
+            return_value=(1, 0, 0))
+        self.ovs_dpdk.networks = self.NETWORKS
+        self.ovs_dpdk.ovs_properties = {
+            'version': {'ovs': '0.0.1', 'dpdk': '9.8.7'}
+        }
+        self.ovs_dpdk.wait_for_vswitchd = 0
+        self.ovs_dpdk.cleanup_ovs_dpdk_env = mock.Mock()
 
-    @mock.patch('yardstick.ssh.SSH')
-    def test_deploy(self, mock_ssh):
-        mock_ssh.execute.return_value = 0, "a", ""
+        with self.assertRaises(exceptions.OVSUnsupportedVersion):
+            self.ovs_dpdk.check_ovs_dpdk_env()
+        mock_check_hugepages.assert_called_once()
 
+    @mock.patch('yardstick.ssh.SSH')
+    def test_deploy(self, *args):
         self.ovs_dpdk.vm_deploy = False
         self.assertIsNone(self.ovs_dpdk.deploy())
 
@@ -174,21 +217,19 @@ class OvsDpdkContextTestCase(unittest.TestCase):
         # output.
         self.assertIsNone(self.ovs_dpdk.deploy())
 
-    @mock.patch('yardstick.benchmark.contexts.standalone.model.Libvirt')
-    @mock.patch('yardstick.ssh.SSH')
-    def test_undeploy(self, mock_ssh, *args):
-        mock_ssh.execute.return_value = 0, "a", ""
-
-        self.ovs_dpdk.vm_deploy = False
-        self.assertIsNone(self.ovs_dpdk.undeploy())
-
+    @mock.patch.object(model.Libvirt, 'check_if_vm_exists_and_delete')
+    def test_undeploy(self, mock_libvirt):
         self.ovs_dpdk.vm_deploy = True
-        self.ovs_dpdk.connection = mock_ssh
+        self.ovs_dpdk.connection = mock.Mock()
         self.ovs_dpdk.vm_names = ['vm_0', 'vm_1']
         self.ovs_dpdk.drivers = ['vm_0', 'vm_1']
         self.ovs_dpdk.cleanup_ovs_dpdk_env = mock.Mock()
         self.ovs_dpdk.networks = self.NETWORKS
-        self.assertIsNone(self.ovs_dpdk.undeploy())
+        self.ovs_dpdk.undeploy()
+        mock_libvirt.assert_has_calls([
+            mock.call(self.ovs_dpdk.vm_names[0], self.ovs_dpdk.connection),
+            mock.call(self.ovs_dpdk.vm_names[1], self.ovs_dpdk.connection)
+        ])
 
     def _get_file_abspath(self, filename):
         curr_path = os.path.dirname(os.path.abspath(__file__))
@@ -310,34 +351,28 @@ class OvsDpdkContextTestCase(unittest.TestCase):
         self.ovs_dpdk.get_vf_datas = mock.Mock(return_value="")
         self.assertIsNone(self.ovs_dpdk.configure_nics_for_ovs_dpdk())
 
-    @mock.patch('yardstick.benchmark.contexts.standalone.ovs_dpdk.Libvirt')
-    def test__enable_interfaces(self, *args):
-        with mock.patch("yardstick.ssh.SSH") as ssh:
-            ssh_mock = mock.Mock(autospec=ssh.SSH)
-            ssh_mock.execute = \
-                mock.Mock(return_value=(0, "a", ""))
-            ssh.return_value = ssh_mock
+    @mock.patch.object(model.Libvirt, 'add_ovs_interface')
+    def test__enable_interfaces(self, mock_add_ovs_interface):
         self.ovs_dpdk.vm_deploy = True
-        self.ovs_dpdk.connection = ssh_mock
+        self.ovs_dpdk.connection = mock.Mock()
         self.ovs_dpdk.vm_names = ['vm_0', 'vm_1']
         self.ovs_dpdk.drivers = []
         self.ovs_dpdk.networks = self.NETWORKS
+        self.ovs_dpdk.ovs_properties = {'vpath': 'fake_path'}
         self.ovs_dpdk.get_vf_datas = mock.Mock(return_value="")
-        self.assertIsNone(self.ovs_dpdk._enable_interfaces(
-            0, ["private_0"], 'test'))
-
-    @mock.patch('yardstick.benchmark.contexts.standalone.model.Server')
-    @mock.patch('yardstick.benchmark.contexts.standalone.ovs_dpdk.Libvirt')
-    def test_setup_ovs_dpdk_context(self, mock_libvirt, *args):
-        with mock.patch("yardstick.ssh.SSH") as ssh:
-            ssh_mock = mock.Mock(autospec=ssh.SSH)
-            ssh_mock.execute = \
-                mock.Mock(return_value=(0, "a", ""))
-            ssh_mock.put = \
-                mock.Mock(return_value=(0, "a", ""))
-            ssh.return_value = ssh_mock
+        self.ovs_dpdk._enable_interfaces(0, ["private_0"], 'test')
+        mock_add_ovs_interface.assert_called_once_with(
+            'fake_path', 0, self.NETWORKS['private_0']['vpci'],
+            self.NETWORKS['private_0']['mac'], 'test')
+
+    @mock.patch.object(model.Libvirt, 'write_file')
+    @mock.patch.object(model.Libvirt, 'build_vm_xml')
+    @mock.patch.object(model.Libvirt, 'check_if_vm_exists_and_delete')
+    @mock.patch.object(model.Libvirt, 'virsh_create_vm')
+    def test_setup_ovs_dpdk_context(self, mock_create_vm, mock_check_if_exists,
+                                    mock_build_xml, mock_write_file):
         self.ovs_dpdk.vm_deploy = True
-        self.ovs_dpdk.connection = ssh_mock
+        self.ovs_dpdk.connection = mock.Mock()
         self.ovs_dpdk.vm_names = ['vm_0', 'vm_1']
         self.ovs_dpdk.drivers = []
         self.ovs_dpdk.servers = {
@@ -353,11 +388,64 @@ class OvsDpdkContextTestCase(unittest.TestCase):
         self.ovs_dpdk.host_mgmt = {}
         self.ovs_dpdk.flavor = {}
         self.ovs_dpdk.configure_nics_for_ovs_dpdk = mock.Mock(return_value="")
-        mock_libvirt.build_vm_xml.return_value = [6, "00:00:00:00:00:01"]
-        self.ovs_dpdk._enable_interfaces = mock.Mock(return_value="")
-        mock_libvirt.virsh_create_vm.return_value = ""
-        mock_libvirt.pin_vcpu_for_perf.return_value = ""
+        xml_str = mock.Mock()
+        mock_build_xml.return_value = (xml_str, '00:00:00:00:00:01')
+        self.ovs_dpdk._enable_interfaces = mock.Mock(return_value=xml_str)
+        vnf_instance = mock.Mock()
         self.ovs_dpdk.vnf_node.generate_vnf_instance = mock.Mock(
-            return_value={})
-
-        self.assertIsNotNone(self.ovs_dpdk.setup_ovs_dpdk_context())
+            return_value=vnf_instance)
+
+        self.assertEqual([vnf_instance],
+                         self.ovs_dpdk.setup_ovs_dpdk_context())
+        mock_create_vm.assert_called_once_with(
+            self.ovs_dpdk.connection, '/tmp/vm_ovs_0.xml')
+        mock_check_if_exists.assert_called_once_with(
+            'vm_0', self.ovs_dpdk.connection)
+        mock_build_xml.assert_called_once_with(
+            self.ovs_dpdk.connection, self.ovs_dpdk.vm_flavor, 'vm_0', 0)
+        mock_write_file.assert_called_once_with('/tmp/vm_ovs_0.xml', xml_str)
+
+    @mock.patch.object(io, 'BytesIO')
+    def test__check_hugepages(self, mock_bytesio):
+        data = six.BytesIO('HugePages_Total:      20\n'
+                           'HugePages_Free:       20\n'
+                           'HugePages_Rsvd:        0\n'
+                           'HugePages_Surp:        0\n'
+                           'Hugepagesize:    1048576 kB'.encode())
+        mock_bytesio.return_value = data
+        self.ovs_dpdk.connection = mock.Mock()
+        self.ovs_dpdk._check_hugepages()
+
+    @mock.patch.object(io, 'BytesIO')
+    def test__check_hugepages_no_info(self, mock_bytesio):
+        data = six.BytesIO(''.encode())
+        mock_bytesio.return_value = data
+        self.ovs_dpdk.connection = mock.Mock()
+        with self.assertRaises(exceptions.OVSHugepagesInfoError):
+            self.ovs_dpdk._check_hugepages()
+
+    @mock.patch.object(io, 'BytesIO')
+    def test__check_hugepages_no_total_hp(self, mock_bytesio):
+        data = six.BytesIO('HugePages_Total:       0\n'
+                           'HugePages_Free:        0\n'
+                           'HugePages_Rsvd:        0\n'
+                           'HugePages_Surp:        0\n'
+                           'Hugepagesize:    1048576 kB'.encode())
+        mock_bytesio.return_value = data
+        self.ovs_dpdk.connection = mock.Mock()
+        with self.assertRaises(exceptions.OVSHugepagesNotConfigured):
+            self.ovs_dpdk._check_hugepages()
+
+    @mock.patch.object(io, 'BytesIO')
+    def test__check_hugepages_no_free_hp(self, mock_bytesio):
+        data = six.BytesIO('HugePages_Total:      20\n'
+                           'HugePages_Free:        0\n'
+                           'HugePages_Rsvd:        0\n'
+                           'HugePages_Surp:        0\n'
+                           'Hugepagesize:    1048576 kB'.encode())
+        mock_bytesio.return_value = data
+        self.ovs_dpdk.connection = mock.Mock()
+        with self.assertRaises(exceptions.OVSHugepagesZeroFree) as exc:
+            self.ovs_dpdk._check_hugepages()
+        self.assertEqual('There are no HugePages free in this system. Total '
+                         'HugePages configured: 20', exc.exception.msg)
index f0953ef..e70ab0a 100644 (file)
@@ -18,6 +18,7 @@ import mock
 import unittest
 
 from yardstick import ssh
+from yardstick.benchmark.contexts.standalone import model
 from yardstick.benchmark.contexts.standalone import sriov
 
 
@@ -69,10 +70,11 @@ class SriovContextTestCase(unittest.TestCase):
         if self.sriov in self.sriov.list:
             self.sriov._delete_context()
 
-    @mock.patch('yardstick.benchmark.contexts.standalone.sriov.Libvirt')
-    @mock.patch('yardstick.benchmark.contexts.standalone.model.StandaloneContextHelper')
-    @mock.patch('yardstick.benchmark.contexts.standalone.model.Server')
-    def test___init__(self, mock_helper, mock_server, *args):
+    @mock.patch.object(model, 'StandaloneContextHelper')
+    @mock.patch.object(model, 'Libvirt')
+    @mock.patch.object(model, 'Server')
+    def test___init__(self, mock_server, mock_libvirt, mock_helper):
+        # pylint: disable=unused-argument
         # NOTE(ralonsoh): this test doesn't cover function execution.
         self.sriov.helper = mock_helper
         self.sriov.vnf_node = mock_server
@@ -97,9 +99,11 @@ class SriovContextTestCase(unittest.TestCase):
         self.sriov.wait_for_vnfs_to_start = mock.Mock(return_value={})
         self.assertIsNone(self.sriov.deploy())
 
-    @mock.patch('yardstick.benchmark.contexts.standalone.sriov.Libvirt')
     @mock.patch.object(ssh, 'SSH', return_value=(0, "a", ""))
-    def test_undeploy(self, mock_ssh, *args):
+    @mock.patch.object(model, 'Libvirt')
+    def test_undeploy(self, mock_libvirt, mock_ssh):
+        # pylint: disable=unused-argument
+        # NOTE(ralonsoh): the pylint exception should be removed.
         self.sriov.vm_deploy = False
         self.assertIsNone(self.sriov.undeploy())
 
@@ -237,11 +241,11 @@ class SriovContextTestCase(unittest.TestCase):
         self.sriov._get_vf_data = mock.Mock(return_value="")
         self.assertIsNone(self.sriov.configure_nics_for_sriov())
 
-    @mock.patch('yardstick.benchmark.contexts.standalone.sriov.Libvirt')
-    @mock.patch.object(ssh, 'SSH')
-    def test__enable_interfaces(self, mock_ssh, *args):
-        mock_ssh.return_value = 0, "a", ""
-
+    @mock.patch.object(ssh, 'SSH', return_value=(0, "a", ""))
+    @mock.patch.object(model, 'Libvirt')
+    def test__enable_interfaces(self, mock_libvirt, mock_ssh):
+        # pylint: disable=unused-argument
+        # NOTE(ralonsoh): the pylint exception should be removed.
         self.sriov.vm_deploy = True
         self.sriov.connection = mock_ssh
         self.sriov.vm_names = ['vm_0', 'vm_1']
@@ -251,20 +255,12 @@ class SriovContextTestCase(unittest.TestCase):
         self.assertIsNone(self.sriov._enable_interfaces(
             0, 0, ["private_0"], 'test'))
 
-    @mock.patch('yardstick.benchmark.contexts.standalone.model.Server')
-    @mock.patch('yardstick.benchmark.contexts.standalone.sriov.Libvirt')
-    def test_setup_sriov_context(self, mock_libvirt, *args):
-        with mock.patch("yardstick.ssh.SSH") as ssh:
-            ssh_mock = mock.Mock(autospec=ssh.SSH)
-            ssh_mock.execute = \
-                mock.Mock(return_value=(0, "a", ""))
-            ssh_mock.put = \
-                mock.Mock(return_value=(0, "a", ""))
-            ssh.return_value = ssh_mock
-        self.sriov.vm_deploy = True
-        self.sriov.connection = ssh_mock
-        self.sriov.vm_names = ['vm_0', 'vm_1']
-        self.sriov.drivers = []
+    @mock.patch.object(model.Libvirt, 'build_vm_xml')
+    @mock.patch.object(model.Libvirt, 'check_if_vm_exists_and_delete')
+    @mock.patch.object(model.Libvirt, 'write_file')
+    @mock.patch.object(model.Libvirt, 'virsh_create_vm')
+    def test_setup_sriov_context(self, mock_create_vm, mock_write_file,
+                                 mock_check, mock_build_vm_xml):
         self.sriov.servers = {
             'vnf_0': {
                 'network_ports': {
@@ -274,15 +270,31 @@ class SriovContextTestCase(unittest.TestCase):
                 }
             }
         }
-        self.sriov.networks = self.NETWORKS
-        self.sriov.host_mgmt = {}
-        self.sriov.flavor = {}
-        self.sriov.configure_nics_for_sriov = mock.Mock(return_value="")
-        mock_libvirt.build_vm_xml = mock.Mock(
-            return_value=[6, "00:00:00:00:00:01"])
-        self.sriov._enable_interfaces = mock.Mock(return_value="")
-        self.sriov.vnf_node.generate_vnf_instance = mock.Mock(return_value={})
-        self.assertIsNotNone(self.sriov.setup_sriov_context())
+        connection = mock.Mock()
+        self.sriov.connection = connection
+        self.sriov.host_mgmt = {'ip': '1.2.3.4'}
+        self.sriov.vm_flavor = 'flavor'
+        self.sriov.networks = 'networks'
+        self.sriov.configure_nics_for_sriov = mock.Mock()
+        cfg = '/tmp/vm_sriov_0.xml'
+        vm_name = 'vm_0'
+        xml_out = mock.Mock()
+        mock_build_vm_xml.return_value = (xml_out, '00:00:00:00:00:01')
+
+        with mock.patch.object(self.sriov, 'vnf_node') as mock_vnf_node, \
+                mock.patch.object(self.sriov, '_enable_interfaces'):
+            mock_vnf_node.generate_vnf_instance = mock.Mock(
+                return_value='node')
+            nodes_out = self.sriov.setup_sriov_context()
+        self.assertEqual(['node'], nodes_out)
+        mock_vnf_node.generate_vnf_instance.assert_called_once_with(
+            'flavor', 'networks', '1.2.3.4', 'vnf_0',
+            self.sriov.servers['vnf_0'], '00:00:00:00:00:01')
+        mock_build_vm_xml.assert_called_once_with(
+            connection, 'flavor', vm_name, 0)
+        mock_create_vm.assert_called_once_with(connection, cfg)
+        mock_check.assert_called_once_with(vm_name, connection)
+        mock_write_file.assert_called_once_with(cfg, xml_out)
 
     def test__get_vf_data(self):
         with mock.patch("yardstick.ssh.SSH") as ssh:
index 153c6a5..b198834 100644 (file)
@@ -25,6 +25,7 @@ class FlagsTestCase(unittest.TestCase):
     def test___init__(self):
         self.assertFalse(self.flags.no_setup)
         self.assertFalse(self.flags.no_teardown)
+        self.assertEqual({'verify': False}, self.flags.os_cloud_config)
 
     def test___init__with_flags(self):
         flags = base.Flags(no_setup=True)
@@ -32,10 +33,12 @@ class FlagsTestCase(unittest.TestCase):
         self.assertFalse(flags.no_teardown)
 
     def test_parse(self):
-        self.flags.parse(no_setup=True, no_teardown="False")
+        self.flags.parse(no_setup=True, no_teardown='False',
+                         os_cloud_config={'verify': True})
 
         self.assertTrue(self.flags.no_setup)
-        self.assertEqual(self.flags.no_teardown, "False")
+        self.assertEqual('False', self.flags.no_teardown)
+        self.assertEqual({'verify': True}, self.flags.os_cloud_config)
 
     def test_parse_forbidden_flags(self):
         self.flags.parse(foo=42)
index 625f97b..ebb1d69 100644 (file)
@@ -229,12 +229,12 @@ class HeatContextTestCase(unittest.TestCase):
         self.assertRaises(y_exc.HeatTemplateError,
                           self.test_context.deploy)
 
-        mock_path_exists.assert_called_once()
+        mock_path_exists.assert_called()
         mock_resources_template.assert_called_once()
 
     @mock.patch.object(os.path, 'exists', return_value=False)
     @mock.patch.object(ssh.SSH, 'gen_keys')
-    @mock.patch('yardstick.benchmark.contexts.heat.HeatTemplate')
+    @mock.patch.object(heat, 'HeatTemplate')
     def test_deploy(self, mock_template, mock_genkeys, mock_path_exists):
         self.test_context._name = 'foo'
         self.test_context._task_id = '1234567890'
@@ -245,16 +245,17 @@ class HeatContextTestCase(unittest.TestCase):
         self.test_context.get_neutron_info = mock.MagicMock()
         self.test_context.deploy()
 
-        mock_template.assert_called_with('foo-12345678',
-                                         '/bar/baz/some-heat-file',
-                                         {'image': 'cirros'})
+        mock_template.assert_called_with(
+            'foo-12345678', template_file='/bar/baz/some-heat-file',
+            heat_parameters={'image': 'cirros'},
+            os_cloud_config=self.test_context._flags.os_cloud_config)
         self.assertIsNotNone(self.test_context.stack)
         key_filename = ''.join(
             [consts.YARDSTICK_ROOT_PATH,
              'yardstick/resources/files/yardstick_key-',
              self.test_context._name_task_id])
         mock_genkeys.assert_called_once_with(key_filename)
-        mock_path_exists.assert_called_once_with(key_filename)
+        mock_path_exists.assert_any_call(key_filename)
 
     @mock.patch.object(heat, 'HeatTemplate')
     @mock.patch.object(os.path, 'exists', return_value=False)
@@ -280,7 +281,7 @@ class HeatContextTestCase(unittest.TestCase):
              'yardstick/resources/files/yardstick_key-',
              self.test_context._name])
         mock_genkeys.assert_called_once_with(key_filename)
-        mock_path_exists.assert_called_once_with(key_filename)
+        mock_path_exists.assert_any_call(key_filename)
 
     @mock.patch.object(heat, 'HeatTemplate')
     @mock.patch.object(os.path, 'exists', return_value=False)
@@ -296,7 +297,6 @@ class HeatContextTestCase(unittest.TestCase):
         self.test_context._flags.no_setup = True
         self.test_context.template_file = '/bar/baz/some-heat-file'
         self.test_context.get_neutron_info = mock.MagicMock()
-
         self.test_context.deploy()
 
         mock_retrieve_stack.assert_called_once_with(self.test_context._name)
@@ -306,7 +306,7 @@ class HeatContextTestCase(unittest.TestCase):
              'yardstick/resources/files/yardstick_key-',
              self.test_context._name])
         mock_genkeys.assert_called_once_with(key_filename)
-        mock_path_exists.assert_called_once_with(key_filename)
+        mock_path_exists.assert_any_call(key_filename)
 
     @mock.patch.object(heat, 'HeatTemplate', return_value='heat_template')
     @mock.patch.object(heat.HeatContext, '_add_resources_to_template')
@@ -334,7 +334,7 @@ class HeatContextTestCase(unittest.TestCase):
              'yardstick/resources/files/yardstick_key-',
              self.test_context._name_task_id])
         mock_genkeys.assert_called_once_with(key_filename)
-        mock_path_exists.assert_called_with(key_filename)
+        mock_path_exists.assert_any_call(key_filename)
 
         mock_call_gen_keys = mock.call.gen_keys(key_filename)
         mock_call_add_resources = (
index 45840d5..d1172d5 100644 (file)
@@ -14,7 +14,8 @@ from yardstick.benchmark.scenarios.availability import scenario_general
 
 class ScenarioGeneralTestCase(unittest.TestCase):
 
-    def setUp(self):
+    @mock.patch.object(scenario_general, 'Director')
+    def setUp(self, *args):
         self.scenario_cfg = {
             'type': "general_scenario",
             'options': {
@@ -36,32 +37,28 @@ class ScenarioGeneralTestCase(unittest.TestCase):
             }
         }
         self.instance = scenario_general.ScenarioGeneral(self.scenario_cfg, None)
-
-        self._mock_director = mock.patch.object(scenario_general, 'Director')
-        self.mock_director = self._mock_director.start()
-        self.addCleanup(self._stop_mock)
-
-    def _stop_mock(self):
-        self._mock_director.stop()
+        self.instance.setup()
+        self.instance.director.verify.return_value = True
 
     def test_scenario_general_all_successful(self):
-        self.instance.setup()
-        self.instance.run({})
+
+        ret = {}
+        self.instance.run(ret)
         self.instance.teardown()
+        self.assertEqual(ret['sla_pass'], 1)
 
     def test_scenario_general_exception(self):
-        mock_obj = mock.Mock()
-        mock_obj.createActionPlayer.side_effect = KeyError('Wrong')
-        self.instance.director = mock_obj
+        self.instance.director.createActionPlayer.side_effect = KeyError('Wrong')
         self.instance.director.data = {}
-        self.instance.run({})
+        ret = {}
+        self.instance.run(ret)
         self.instance.teardown()
+        self.assertEqual(ret['sla_pass'], 1)
 
     def test_scenario_general_case_fail(self):
-        mock_obj = mock.Mock()
-        mock_obj.verify.return_value = False
-        self.instance.director = mock_obj
+        self.instance.director.verify.return_value = False
         self.instance.director.data = {}
-        self.instance.run({})
-        self.instance.pass_flag = True
+        ret = {}
+        self.assertRaises(AssertionError, self.instance.run, ret)
         self.instance.teardown()
+        self.assertEqual(ret['sla_pass'], 0)
index 6bb3ec6..dd656fb 100644 (file)
@@ -60,15 +60,16 @@ class ServicehaTestCase(unittest.TestCase):
         p.setup()
         self.assertTrue(p.setup_done)
 
-    # def test__serviceha_run_sla_error(self, mock_attacker, mock_monitor):
-    #     p = serviceha.ServiceHA(self.args, self.ctx)
+    @mock.patch.object(serviceha, 'baseattacker')
+    @mock.patch.object(serviceha, 'basemonitor')
+    def test__serviceha_run_sla_error(self, mock_monitor, *args):
+        p = serviceha.ServiceHA(self.args, self.ctx)
 
-        p.setup()
-        self.assertEqual(p.setup_done, True)
+        p.setup()
+        self.assertEqual(p.setup_done, True)
 
-    #     result = {}
-    #     result["outage_time"] = 10
-    #     mock_monitor.Monitor().get_result.return_value = result
+        mock_monitor.MonitorMgr().verify_SLA.return_value = False
 
-    #     ret = {}
-    #     self.assertRaises(AssertionError, p.run, ret)
+        ret = {}
+        self.assertRaises(AssertionError, p.run, ret)
+        self.assertEqual(ret['sla_pass'], 0)
index 2964ecc..bb7fa45 100644 (file)
@@ -6,21 +6,51 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
+from oslo_utils import uuidutils
 import unittest
 import mock
 
-from yardstick.benchmark.scenarios.lib.attach_volume import AttachVolume
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
+from yardstick.benchmark.scenarios.lib import attach_volume
 
 
 class AttachVolumeTestCase(unittest.TestCase):
 
-    @mock.patch('yardstick.common.openstack_utils.attach_server_volume')
-    def test_attach_volume(self, mock_attach_server_volume):
-        options = {
-            'volume_id': '123-456-000',
-            'server_id': '000-123-456'
-        }
-        args = {"options": options}
-        obj = AttachVolume(args, {})
-        obj.run({})
-        mock_attach_server_volume.assert_called_once()
+    def setUp(self):
+
+        self._mock_attach_volume_to_server = mock.patch.object(
+            openstack_utils, 'attach_volume_to_server')
+        self.mock_attach_volume_to_server = (
+            self._mock_attach_volume_to_server.start())
+        self._mock_get_shade_client = mock.patch.object(
+            openstack_utils, 'get_shade_client')
+        self.mock_get_shade_client = self._mock_get_shade_client.start()
+        self._mock_log = mock.patch.object(attach_volume, 'LOG')
+        self.mock_log = self._mock_log.start()
+        _uuid = uuidutils.generate_uuid()
+        self.args = {'options': {'server_name_or_id': _uuid,
+                                 'volume_name_or_id': _uuid}}
+        self.result = {}
+        self.addCleanup(self._stop_mock)
+        self.attachvol_obj = attach_volume.AttachVolume(self.args, mock.ANY)
+
+    def _stop_mock(self):
+        self._mock_attach_volume_to_server.stop()
+        self._mock_get_shade_client.stop()
+        self._mock_log.stop()
+
+    def test_run(self):
+        self.mock_attach_volume_to_server.return_value = True
+        self.assertIsNone(self.attachvol_obj.run(self.result))
+        self.assertEqual({'attach_volume': 1}, self.result)
+        self.mock_log.info.assert_called_once_with(
+            'Attach volume to server successful!')
+
+    def test_run_fail(self):
+        self.mock_attach_volume_to_server.return_value = False
+        with self.assertRaises(exceptions.ScenarioAttachVolumeError):
+            self.attachvol_obj.run(self.result)
+        self.assertEqual({'attach_volume': 0}, self.result)
+        self.mock_log.error.assert_called_once_with(
+            'Attach volume to server failed!')
index a7286f5..894cc1c 100644 (file)
@@ -11,48 +11,47 @@ import unittest
 import mock
 
 from yardstick.benchmark.scenarios.lib import create_floating_ip
-import yardstick.common.openstack_utils as op_utils
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
 
 
 class CreateFloatingIpTestCase(unittest.TestCase):
 
     def setUp(self):
-        self._mock_get_network_id = mock.patch.object(
-            op_utils, 'get_network_id')
-        self.mock_get_network_id = self._mock_get_network_id.start()
         self._mock_create_floating_ip = mock.patch.object(
-            op_utils, 'create_floating_ip')
+            openstack_utils, 'create_floating_ip')
         self.mock_create_floating_ip = self._mock_create_floating_ip.start()
-        self._mock_get_neutron_client = mock.patch.object(
-            op_utils, 'get_neutron_client')
-        self.mock_get_neutron_client = self._mock_get_neutron_client.start()
         self._mock_get_shade_client = mock.patch.object(
-            op_utils, 'get_shade_client')
+            openstack_utils, 'get_shade_client')
         self.mock_get_shade_client = self._mock_get_shade_client.start()
         self._mock_log = mock.patch.object(create_floating_ip, 'LOG')
         self.mock_log = self._mock_log.start()
+        self.args = {'options': {'network_name_or_id': 'yardstick_net'}}
+        self.result = {}
 
-        self._fip_obj = create_floating_ip.CreateFloatingIp(mock.ANY, mock.ANY)
-        self._fip_obj.scenario_cfg = {'output': 'key1\nkey2'}
+        self.fip_obj = create_floating_ip.CreateFloatingIp(self.args, mock.ANY)
+        self.fip_obj.scenario_cfg = {'output': 'key1\nkey2'}
 
         self.addCleanup(self._stop_mock)
 
     def _stop_mock(self):
-        self._mock_get_network_id.stop()
         self._mock_create_floating_ip.stop()
-        self._mock_get_neutron_client.stop()
         self._mock_get_shade_client.stop()
         self._mock_log.stop()
 
     def test_run(self):
         self.mock_create_floating_ip.return_value = {'fip_id': 'value1',
                                                      'fip_addr': 'value2'}
-        output = self._fip_obj.run(mock.ANY)
-        self.assertDictEqual({'key1': 'value1', 'key2': 'value2'}, output)
+        output = self.fip_obj.run(self.result)
+        self.assertEqual({'floating_ip_create': 1}, self.result)
+        self.assertEqual({'key1': 'value1', 'key2': 'value2'}, output)
+        self.mock_log.info.assert_called_once_with(
+            'Creating floating ip successful!')
 
     def test_run_no_fip(self):
         self.mock_create_floating_ip.return_value = None
-        output = self._fip_obj.run(mock.ANY)
-        self.assertIsNone(output)
+        with self.assertRaises(exceptions.ScenarioCreateFloatingIPError):
+            self.fip_obj.run(self.result)
+        self.assertEqual({'floating_ip_create': 0}, self.result)
         self.mock_log.error.assert_called_once_with(
             'Creating floating ip failed!')
index 1c3d6ce..a7b683f 100644 (file)
@@ -6,22 +6,52 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
-
-import mock
+from oslo_utils import uuidutils
 import unittest
+import mock
 
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
 from yardstick.benchmark.scenarios.lib import create_keypair
 
 
 class CreateKeypairTestCase(unittest.TestCase):
-    @mock.patch.object(create_keypair, 'paramiko')
-    @mock.patch.object(create_keypair, 'op_utils')
-    def test_create_keypair(self, mock_op_utils, *args):
-        options = {
-            'key_name': 'yardstick_key',
-            'key_path': '/tmp/yardstick_key'
-        }
-        args = {"options": options}
-        obj = create_keypair.CreateKeypair(args, {})
-        obj.run({})
-        mock_op_utils.create_keypair.assert_called_once()
+
+    def setUp(self):
+
+        self._mock_create_keypair = mock.patch.object(
+            openstack_utils, 'create_keypair')
+        self.mock_create_keypair = (
+            self._mock_create_keypair.start())
+        self._mock_get_shade_client = mock.patch.object(
+            openstack_utils, 'get_shade_client')
+        self.mock_get_shade_client = self._mock_get_shade_client.start()
+        self._mock_log = mock.patch.object(create_keypair, 'LOG')
+        self.mock_log = self._mock_log.start()
+        self.args = {'options': {'key_name': 'yardstick_key'}}
+        self.result = {}
+
+        self.ckeypair_obj = create_keypair.CreateKeypair(self.args, mock.ANY)
+        self.addCleanup(self._stop_mock)
+
+    def _stop_mock(self):
+        self._mock_create_keypair.stop()
+        self._mock_get_shade_client.stop()
+        self._mock_log.stop()
+
+    def test_run(self):
+        _uuid = uuidutils.generate_uuid()
+        self.ckeypair_obj.scenario_cfg = {'output': 'id'}
+        self.mock_create_keypair.return_value = {
+            'name': 'key-name', 'type': 'ssh', 'id': _uuid}
+        output = self.ckeypair_obj.run(self.result)
+        self.assertDictEqual({'keypair_create': 1}, self.result)
+        self.assertDictEqual({'id': _uuid}, output)
+        self.mock_log.info.assert_called_once_with('Create keypair successful!')
+
+    def test_run_fail(self):
+        self.mock_create_keypair.return_value = None
+        with self.assertRaises(exceptions.ScenarioCreateKeypairError):
+            self.ckeypair_obj.run(self.result)
+        self.assertDictEqual({'keypair_create': 0}, self.result)
+        self.mock_log.error.assert_called_once_with('Create keypair failed!')
index 21158ab..0477a49 100644 (file)
@@ -6,25 +6,54 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
+
+from oslo_utils import uuidutils
 import unittest
 import mock
 
-from yardstick.benchmark.scenarios.lib.create_sec_group import CreateSecgroup
-
-
-class CreateSecGroupTestCase(unittest.TestCase):
-
-    @mock.patch('yardstick.common.openstack_utils.get_neutron_client')
-    @mock.patch('yardstick.common.openstack_utils.create_security_group_full')
-    def test_create_sec_group(self, mock_get_neutron_client, mock_create_security_group_full):
-        options = {
-            'openstack_paras': {
-                'sg_name': 'yardstick_sec_group',
-                'description': 'security group for yardstick manual VM'
-            }
-        }
-        args = {"options": options}
-        obj = CreateSecgroup(args, {})
-        obj.run({})
-        mock_get_neutron_client.assert_called_once()
-        mock_create_security_group_full.assert_called_once()
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
+from yardstick.benchmark.scenarios.lib import create_sec_group
+
+
+class CreateSecurityGroupTestCase(unittest.TestCase):
+
+    def setUp(self):
+
+        self._mock_create_security_group_full = mock.patch.object(
+            openstack_utils, 'create_security_group_full')
+        self.mock_create_security_group_full = (
+            self._mock_create_security_group_full.start())
+        self._mock_get_shade_client = mock.patch.object(
+            openstack_utils, 'get_shade_client')
+        self.mock_get_shade_client = self._mock_get_shade_client.start()
+        self._mock_log = mock.patch.object(create_sec_group, 'LOG')
+        self.mock_log = self._mock_log.start()
+        self.args = {'options': {'sg_name': 'yardstick_sg'}}
+        self.result = {}
+
+        self.csecgp_obj = create_sec_group.CreateSecgroup(self.args, mock.ANY)
+        self.addCleanup(self._stop_mock)
+
+    def _stop_mock(self):
+        self._mock_create_security_group_full.stop()
+        self._mock_get_shade_client.stop()
+        self._mock_log.stop()
+
+    def test_run(self):
+        _uuid = uuidutils.generate_uuid()
+        self.csecgp_obj.scenario_cfg = {'output': 'id'}
+        self.mock_create_security_group_full.return_value = _uuid
+        output = self.csecgp_obj.run(self.result)
+        self.assertEqual({'sg_create': 1}, self.result)
+        self.assertEqual({'id': _uuid}, output)
+        self.mock_log.info.assert_called_once_with(
+            'Create security group successful!')
+
+    def test_run_fail(self):
+        self.mock_create_security_group_full.return_value = None
+        with self.assertRaises(exceptions.ScenarioCreateSecurityGroupError):
+            self.csecgp_obj.run(self.result)
+        self.assertEqual({'sg_create': 0}, self.result)
+        self.mock_log.error.assert_called_once_with(
+            'Create security group failed!')
index 9d6d8cb..b587851 100644 (file)
@@ -6,29 +6,54 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
+from oslo_utils import uuidutils
 import unittest
 import mock
 
-from yardstick.benchmark.scenarios.lib.create_server import CreateServer
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
+from yardstick.benchmark.scenarios.lib import create_server
 
 
 class CreateServerTestCase(unittest.TestCase):
 
-    @mock.patch('yardstick.common.openstack_utils.create_instance_and_wait_for_active')
-    @mock.patch('yardstick.common.openstack_utils.get_nova_client')
-    @mock.patch('yardstick.common.openstack_utils.get_glance_client')
-    @mock.patch('yardstick.common.openstack_utils.get_neutron_client')
-    def test_create_server(self, mock_get_nova_client, mock_get_neutron_client,
-                           mock_get_glance_client, mock_create_instance_and_wait_for_active):
-        scenario_cfg = {
-            'options': {
-                'openstack_paras': 'example'
-            },
-            'output': 'server'
-        }
-        obj = CreateServer(scenario_cfg, {})
-        obj.run({})
-        mock_get_nova_client.assert_called_once()
-        mock_get_glance_client.assert_called_once()
-        mock_get_neutron_client.assert_called_once()
-        mock_create_instance_and_wait_for_active.assert_called_once()
+    def setUp(self):
+
+        self._mock_create_instance_and_wait_for_active = mock.patch.object(
+            openstack_utils, 'create_instance_and_wait_for_active')
+        self.mock_create_instance_and_wait_for_active = (
+            self._mock_create_instance_and_wait_for_active.start())
+        self._mock_get_shade_client = mock.patch.object(
+            openstack_utils, 'get_shade_client')
+        self.mock_get_shade_client = self._mock_get_shade_client.start()
+        self._mock_log = mock.patch.object(create_server, 'LOG')
+        self.mock_log = self._mock_log.start()
+        self.args = {
+            'options': {'name': 'server-name', 'image': 'image-name',
+                        'flavor': 'flavor-name'}}
+        self.result = {}
+
+        self.addCleanup(self._stop_mock)
+        self.cserver_obj = create_server.CreateServer(self.args, mock.ANY)
+
+    def _stop_mock(self):
+        self._mock_create_instance_and_wait_for_active.stop()
+        self._mock_get_shade_client.stop()
+        self._mock_log.stop()
+
+    def test_run(self):
+        _uuid = uuidutils.generate_uuid()
+        self.cserver_obj.scenario_cfg = {'output': 'id'}
+        self.mock_create_instance_and_wait_for_active.return_value = (
+            {'name': 'server-name', 'flavor': 'flavor-name', 'id': _uuid})
+        output = self.cserver_obj.run(self.result)
+        self.assertEqual({'instance_create': 1}, self.result)
+        self.assertEqual({'id': _uuid}, output)
+        self.mock_log.info.assert_called_once_with('Create server successful!')
+
+    def test_run_fail(self):
+        self.mock_create_instance_and_wait_for_active.return_value = None
+        with self.assertRaises(exceptions.ScenarioCreateServerError):
+            self.cserver_obj.run(self.result)
+        self.assertEqual({'instance_create': 0}, self.result)
+        self.mock_log.error.assert_called_once_with('Create server failed!')
index 3185ec5..45a39eb 100644 (file)
@@ -6,22 +6,50 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
+from oslo_utils import uuidutils
 import unittest
 import mock
 
-from yardstick.benchmark.scenarios.lib.delete_floating_ip import DeleteFloatingIp
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
+from yardstick.benchmark.scenarios.lib import delete_floating_ip
 
 
 class DeleteFloatingIpTestCase(unittest.TestCase):
 
-    @mock.patch('yardstick.common.openstack_utils.get_nova_client')
-    @mock.patch('yardstick.common.openstack_utils.delete_floating_ip')
-    def test_delete_floating_ip(self, mock_get_nova_client, mock_delete_floating_ip):
-        options = {
-            'floating_ip_id': '123-123-123'
-        }
-        args = {"options": options}
-        obj = DeleteFloatingIp(args, {})
-        obj.run({})
-        mock_get_nova_client.assert_called_once()
-        mock_delete_floating_ip.assert_called_once()
+    def setUp(self):
+        self._mock_delete_floating_ip = mock.patch.object(
+            openstack_utils, 'delete_floating_ip')
+        self.mock_delete_floating_ip = self._mock_delete_floating_ip.start()
+        self._mock_get_shade_client = mock.patch.object(
+            openstack_utils, 'get_shade_client')
+        self.mock_get_shade_client = self._mock_get_shade_client.start()
+        self._mock_log = mock.patch.object(delete_floating_ip, 'LOG')
+        self.mock_log = self._mock_log.start()
+        self.args = {'options': {'floating_ip_id': uuidutils.generate_uuid()}}
+        self.result = {}
+
+        self.del_obj = delete_floating_ip.DeleteFloatingIp(
+            self.args, mock.ANY)
+
+        self.addCleanup(self._stop_mock)
+
+    def _stop_mock(self):
+        self._mock_delete_floating_ip.stop()
+        self._mock_get_shade_client.stop()
+        self._mock_log.stop()
+
+    def test_run(self):
+        self.mock_delete_floating_ip.return_value = True
+        self.assertIsNone(self.del_obj.run(self.result))
+        self.assertEqual({"delete_floating_ip": 1}, self.result)
+        self.mock_log.info.assert_called_once_with(
+            "Delete floating ip successful!")
+
+    def test_run_fail(self):
+        self.mock_delete_floating_ip.return_value = False
+        with self.assertRaises(exceptions.ScenarioDeleteFloatingIPError):
+            self.del_obj.run(self.result)
+        self.assertEqual({"delete_floating_ip": 0}, self.result)
+        self.mock_log.error.assert_called_once_with(
+            "Delete floating ip failed!")
index 6e790ba..c794025 100644 (file)
@@ -9,19 +9,43 @@
 import unittest
 import mock
 
-from yardstick.benchmark.scenarios.lib.delete_keypair import DeleteKeypair
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
+from yardstick.benchmark.scenarios.lib import delete_keypair
 
 
 class DeleteKeypairTestCase(unittest.TestCase):
 
-    @mock.patch('yardstick.common.openstack_utils.get_nova_client')
-    @mock.patch('yardstick.common.openstack_utils.delete_keypair')
-    def test_detach_volume(self, mock_get_nova_client, mock_delete_keypair):
-        options = {
-            'key_name': 'yardstick_key'
-        }
-        args = {"options": options}
-        obj = DeleteKeypair(args, {})
-        obj.run({})
-        mock_get_nova_client.assert_called_once()
-        mock_delete_keypair.assert_called_once()
+    def setUp(self):
+        self._mock_delete_keypair = mock.patch.object(
+            openstack_utils, 'delete_keypair')
+        self.mock_delete_keypair = self._mock_delete_keypair.start()
+        self._mock_get_shade_client = mock.patch.object(
+            openstack_utils, 'get_shade_client')
+        self.mock_get_shade_client = self._mock_get_shade_client.start()
+        self._mock_log = mock.patch.object(delete_keypair, 'LOG')
+        self.mock_log = self._mock_log.start()
+        self.args = {'options': {'key_name': 'yardstick_key'}}
+        self.result = {}
+        self.delkey_obj = delete_keypair.DeleteKeypair(self.args, mock.ANY)
+
+        self.addCleanup(self._stop_mock)
+
+    def _stop_mock(self):
+        self._mock_delete_keypair.stop()
+        self._mock_get_shade_client.stop()
+        self._mock_log.stop()
+
+    def test_run(self):
+        self.mock_delete_keypair.return_value = True
+        self.assertIsNone(self.delkey_obj.run(self.result))
+        self.assertEqual({'delete_keypair': 1}, self.result)
+        self.mock_log.info.assert_called_once_with(
+            'Delete keypair successful!')
+
+    def test_run_fail(self):
+        self.mock_delete_keypair.return_value = False
+        with self.assertRaises(exceptions.ScenarioDeleteKeypairError):
+            self.delkey_obj.run(self.result)
+        self.assertEqual({'delete_keypair': 0}, self.result)
+        self.mock_log.error.assert_called_once_with("Delete keypair failed!")
index aef99ee..b6dbf47 100644 (file)
@@ -11,7 +11,8 @@ from oslo_utils import uuidutils
 import unittest
 import mock
 
-import yardstick.common.openstack_utils as op_utils
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
 from yardstick.benchmark.scenarios.lib import delete_network
 
 
@@ -19,16 +20,17 @@ class DeleteNetworkTestCase(unittest.TestCase):
 
     def setUp(self):
         self._mock_delete_neutron_net = mock.patch.object(
-            op_utils, 'delete_neutron_net')
+            openstack_utils, "delete_neutron_net")
         self.mock_delete_neutron_net = self._mock_delete_neutron_net.start()
         self._mock_get_shade_client = mock.patch.object(
-            op_utils, 'get_shade_client')
+            openstack_utils, "get_shade_client")
         self.mock_get_shade_client = self._mock_get_shade_client.start()
-        self._mock_log = mock.patch.object(delete_network, 'LOG')
+        self._mock_log = mock.patch.object(delete_network, "LOG")
         self.mock_log = self._mock_log.start()
-        _uuid = uuidutils.generate_uuid()
-        self.args = {'options': {'network_id': _uuid}}
-        self._del_obj = delete_network.DeleteNetwork(self.args, mock.ANY)
+        self.args = {"options": {"network_name_or_id": (
+            uuidutils.generate_uuid())}}
+        self.result = {}
+        self.del_obj = delete_network.DeleteNetwork(self.args, mock.ANY)
 
         self.addCleanup(self._stop_mock)
 
@@ -39,11 +41,14 @@ class DeleteNetworkTestCase(unittest.TestCase):
 
     def test_run(self):
         self.mock_delete_neutron_net.return_value = True
-        self.assertTrue(self._del_obj.run({}))
+        self.assertIsNone(self.del_obj.run(self.result))
+        self.assertEqual({"delete_network": 1}, self.result)
         self.mock_log.info.assert_called_once_with(
             "Delete network successful!")
 
     def test_run_fail(self):
         self.mock_delete_neutron_net.return_value = False
-        self.assertFalse(self._del_obj.run({}))
+        with self.assertRaises(exceptions.ScenarioDeleteNetworkError):
+            self.del_obj.run(self.result)
+        self.assertEqual({"delete_network": 0}, self.result)
         self.mock_log.error.assert_called_once_with("Delete network failed!")
index 9e9c5a5..823cb95 100644 (file)
@@ -6,23 +6,51 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
+
+from oslo_utils import uuidutils
 import unittest
 import mock
 
-from yardstick.benchmark.scenarios.lib.delete_router_interface import DeleteRouterInterface
+from yardstick.benchmark.scenarios.lib import delete_router_interface
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
 
 
 class DeleteRouterInterfaceTestCase(unittest.TestCase):
 
-    @mock.patch('yardstick.common.openstack_utils.get_neutron_client')
-    @mock.patch('yardstick.common.openstack_utils.remove_interface_router')
-    def test_delete_router_interface(self, mock_get_neutron_client, mock_remove_interface_router):
-        options = {
-            'router_id': '123-123-123',
-            'subnet_id': '321-321-321'
-        }
-        args = {"options": options}
-        obj = DeleteRouterInterface(args, {})
-        obj.run({})
-        mock_get_neutron_client.assert_called_once()
-        mock_remove_interface_router.assert_called_once()
+    def setUp(self):
+        self._mock_remove_router_interface = mock.patch.object(
+            openstack_utils, 'remove_router_interface')
+        self.mock_remove_router_interface = (
+            self._mock_remove_router_interface.start())
+        self._mock_get_shade_client = mock.patch.object(
+            openstack_utils, 'get_shade_client')
+        self.mock_get_shade_client = self._mock_get_shade_client.start()
+        self._mock_log = mock.patch.object(delete_router_interface, 'LOG')
+        self.mock_log = self._mock_log.start()
+        self.args = {'options': {'router': uuidutils.generate_uuid()}}
+        self.result = {}
+        self.delrout_obj = delete_router_interface.DeleteRouterInterface(
+            self.args, mock.ANY)
+
+        self.addCleanup(self._stop_mock)
+
+    def _stop_mock(self):
+        self._mock_remove_router_interface.stop()
+        self._mock_get_shade_client.stop()
+        self._mock_log.stop()
+
+    def test_run(self):
+        self.mock_remove_router_interface.return_value = True
+        self.assertIsNone(self.delrout_obj.run(self.result))
+        self.assertEqual({"delete_router_interface": 1}, self.result)
+        self.mock_log.info.assert_called_once_with(
+            "Delete router interface successful!")
+
+    def test_run_fail(self):
+        self.mock_remove_router_interface.return_value = False
+        with self.assertRaises(exceptions.ScenarioRemoveRouterIntError):
+            self.delrout_obj.run(self.result)
+        self.assertEqual({"delete_router_interface": 0}, self.result)
+        self.mock_log.error.assert_called_once_with(
+            "Delete router interface failed!")
index eee565d..55fe53d 100644 (file)
@@ -6,22 +6,49 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
+from oslo_utils import uuidutils
 import unittest
 import mock
 
-from yardstick.benchmark.scenarios.lib.delete_server import DeleteServer
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
+from yardstick.benchmark.scenarios.lib import delete_server
 
 
 class DeleteServerTestCase(unittest.TestCase):
 
-    @mock.patch('yardstick.common.openstack_utils.delete_instance')
-    @mock.patch('yardstick.common.openstack_utils.get_nova_client')
-    def test_delete_server(self, mock_get_nova_client, mock_delete_instance):
-        options = {
-            'server_id': '1234-4567-0000'
-        }
-        args = {"options": options}
-        obj = DeleteServer(args, {})
-        obj.run({})
-        mock_get_nova_client.assert_called_once()
-        mock_delete_instance.assert_called_once()
+    def setUp(self):
+        self._mock_delete_instance = mock.patch.object(
+            openstack_utils, 'delete_instance')
+        self.mock_delete_instance = (
+            self._mock_delete_instance.start())
+        self._mock_get_shade_client = mock.patch.object(
+            openstack_utils, 'get_shade_client')
+        self.mock_get_shade_client = self._mock_get_shade_client.start()
+        self._mock_log = mock.patch.object(delete_server, 'LOG')
+        self.mock_log = self._mock_log.start()
+        self.args = {'options': {'name_or_id': uuidutils.generate_uuid()
+                                 }}
+        self.result = {}
+
+        self.delserver_obj = delete_server.DeleteServer(self.args, mock.ANY)
+
+        self.addCleanup(self._stop_mock)
+
+    def _stop_mock(self):
+        self._mock_delete_instance.stop()
+        self._mock_get_shade_client.stop()
+        self._mock_log.stop()
+
+    def test_run(self):
+        self.mock_delete_instance.return_value = True
+        self.assertIsNone(self.delserver_obj.run(self.result))
+        self.assertEqual({'delete_server': 1}, self.result)
+        self.mock_log.info.assert_called_once_with('Delete server successful!')
+
+    def test_run_fail(self):
+        self.mock_delete_instance.return_value = False
+        with self.assertRaises(exceptions.ScenarioDeleteServerError):
+            self.delserver_obj.run(self.result)
+        self.assertEqual({'delete_server': 0}, self.result)
+        self.mock_log.error.assert_called_once_with('Delete server failed!')
index 15a6f7c..1c13643 100644 (file)
@@ -6,20 +6,52 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
+from oslo_utils import uuidutils
 import unittest
 import mock
 
-from yardstick.benchmark.scenarios.lib.get_flavor import GetFlavor
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
+from yardstick.benchmark.scenarios.lib import get_flavor
 
 
 class GetFlavorTestCase(unittest.TestCase):
 
-    @mock.patch('yardstick.common.openstack_utils.get_flavor_by_name')
-    def test_get_flavor(self, mock_get_flavor_by_name):
-        options = {
-            'flavor_name': 'yardstick_test_flavor'
-        }
-        args = {"options": options}
-        obj = GetFlavor(args, {})
-        obj.run({})
-        mock_get_flavor_by_name.assert_called_once()
+    def setUp(self):
+
+        self._mock_get_flavor = mock.patch.object(
+            openstack_utils, 'get_flavor')
+        self.mock_get_flavor = self._mock_get_flavor.start()
+        self._mock_get_shade_client = mock.patch.object(
+            openstack_utils, 'get_shade_client')
+        self.mock_get_shade_client = self._mock_get_shade_client.start()
+        self._mock_log = mock.patch.object(get_flavor, 'LOG')
+        self.mock_log = self._mock_log.start()
+        self.args = {'options': {'name_or_id': 'yardstick_flavor'}}
+        self.result = {}
+
+        self.getflavor_obj = get_flavor.GetFlavor(self.args, mock.ANY)
+        self.addCleanup(self._stop_mock)
+
+    def _stop_mock(self):
+        self._mock_get_flavor.stop()
+        self._mock_get_shade_client.stop()
+        self._mock_log.stop()
+
+    def test_run(self):
+        _uuid = uuidutils.generate_uuid()
+        self.getflavor_obj.scenario_cfg = {'output': 'flavor'}
+        self.mock_get_flavor.return_value = (
+            {'name': 'flavor-name', 'id': _uuid})
+        output = self.getflavor_obj.run(self.result)
+        self.assertDictEqual({'get_flavor': 1}, self.result)
+        self.assertDictEqual({'flavor': {'name': 'flavor-name', 'id': _uuid}},
+                             output)
+        self.mock_log.info.assert_called_once_with('Get flavor successful!')
+
+    def test_run_fail(self):
+        self.mock_get_flavor.return_value = None
+        with self.assertRaises(exceptions.ScenarioGetFlavorError):
+            self.getflavor_obj.run(self.result)
+        self.assertDictEqual({'get_flavor': 0}, self.result)
+        self.mock_log.error.assert_called_once_with('Get flavor failed!')
index 83ec903..5b5329c 100644 (file)
@@ -6,37 +6,52 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
+from oslo_utils import uuidutils
 import unittest
 import mock
 
-from yardstick.benchmark.scenarios.lib.get_server import GetServer
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
+from yardstick.benchmark.scenarios.lib import get_server
 
 
 class GetServerTestCase(unittest.TestCase):
 
-    @mock.patch('yardstick.common.openstack_utils.get_server_by_name')
-    @mock.patch('yardstick.common.openstack_utils.get_nova_client')
-    def test_get_server_with_name(self, mock_get_nova_client, mock_get_server_by_name):
-        scenario_cfg = {
-            'options': {
-                'server_name': 'yardstick_server'
-            },
-            'output': 'status server'
-        }
-        obj = GetServer(scenario_cfg, {})
-        obj.run({})
-        mock_get_nova_client.assert_called_once()
-        mock_get_server_by_name.assert_called_once()
-
-    @mock.patch('yardstick.common.openstack_utils.get_nova_client')
-    def test_get_server_with_id(self, mock_get_nova_client):
-        scenario_cfg = {
-            'options': {
-                'server_id': '1'
-            },
-            'output': 'status server'
-        }
-        mock_get_nova_client().servers.get.return_value = None
-        obj = GetServer(scenario_cfg, {})
-        obj.run({})
-        mock_get_nova_client.assert_called()
+    def setUp(self):
+
+        self._mock_get_server = mock.patch.object(
+            openstack_utils, 'get_server')
+        self.mock_get_server = self._mock_get_server.start()
+        self._mock_get_shade_client = mock.patch.object(
+            openstack_utils, 'get_shade_client')
+        self.mock_get_shade_client = self._mock_get_shade_client.start()
+        self._mock_log = mock.patch.object(get_server, 'LOG')
+        self.mock_log = self._mock_log.start()
+        self.args = {'options': {'name_or_id': 'yardstick_key'}}
+        self.result = {}
+
+        self.getserver_obj = get_server.GetServer(self.args, mock.ANY)
+        self.addCleanup(self._stop_mock)
+
+    def _stop_mock(self):
+        self._mock_get_server.stop()
+        self._mock_get_shade_client.stop()
+        self._mock_log.stop()
+
+    def test_run(self):
+        _uuid = uuidutils.generate_uuid()
+        self.getserver_obj.scenario_cfg = {'output': 'server'}
+        self.mock_get_server.return_value = (
+            {'name': 'server-name', 'id': _uuid})
+        output = self.getserver_obj.run(self.result)
+        self.assertDictEqual({'get_server': 1}, self.result)
+        self.assertDictEqual({'server': {'name': 'server-name', 'id': _uuid}},
+                             output)
+        self.mock_log.info.assert_called_once_with('Get Server successful!')
+
+    def test_run_fail(self):
+        self.mock_get_server.return_value = None
+        with self.assertRaises(exceptions.ScenarioGetServerError):
+            self.getserver_obj.run(self.result)
+        self.assertDictEqual({'get_server': 0}, self.result)
+        self.mock_log.error.assert_called_once_with('Get Server failed!')
index 9853385..284a71c 100644 (file)
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import time
+
+import mock
+
 from yardstick.benchmark.scenarios import base
 from yardstick.tests.unit import base as ut_base
 
 
+class _TestScenario(base.Scenario):
+    __scenario_type__ = 'Test Scenario'
+
+    def run(self):
+        pass
+
+
 class ScenarioTestCase(ut_base.BaseUnitTestCase):
 
     def test_get_scenario_type(self):
@@ -85,6 +96,25 @@ class ScenarioTestCase(ut_base.BaseUnitTestCase):
         self.assertEqual('No such scenario type %s' % wrong_scenario_name,
                          str(exc.exception))
 
+    def test_scenario_abstract_class(self):
+        # pylint: disable=abstract-class-instantiated
+        with self.assertRaises(TypeError):
+            base.Scenario()
+
+    @mock.patch.object(time, 'sleep')
+    def test_pre_run_wait_time(self, mock_sleep):
+        """Ensure default behaviour (backwards compatibility): no wait time"""
+        test_scenario = _TestScenario()
+        test_scenario.pre_run_wait_time(mock.ANY)
+        mock_sleep.assert_not_called()
+
+    @mock.patch.object(time, 'sleep')
+    def test_post_run_wait_time(self, mock_sleep):
+        """Ensure default behaviour (backwards compatibility): wait time"""
+        test_scenario = _TestScenario()
+        test_scenario.post_run_wait_time(100)
+        mock_sleep.assert_called_once_with(100)
+
 
 class IterScenarioClassesTestCase(ut_base.BaseUnitTestCase):
 
diff --git a/yardstick/tests/unit/common/messaging/__init__.py b/yardstick/tests/unit/common/messaging/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/yardstick/tests/unit/common/messaging/test_consumer.py b/yardstick/tests/unit/common/messaging/test_consumer.py
new file mode 100644 (file)
index 0000000..612dcae
--- /dev/null
@@ -0,0 +1,54 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import mock
+from oslo_config import cfg
+import oslo_messaging
+
+from yardstick.common import messaging
+from yardstick.common.messaging import consumer
+from yardstick.tests.unit import base as ut_base
+
+
+class TestEndPoint(object):
+    def action_1(self):
+        pass
+
+
+class _MessagingConsumer(consumer.MessagingConsumer):
+    pass
+
+
+class MessagingConsumerTestCase(ut_base.BaseUnitTestCase):
+
+    def test__init(self):
+        with mock.patch.object(oslo_messaging, 'get_rpc_server') as \
+                mock_get_rpc_server, \
+                mock.patch.object(oslo_messaging, 'get_rpc_transport') as \
+                mock_get_rpc_transport, \
+                mock.patch.object(oslo_messaging, 'Target') as \
+                mock_Target:
+            mock_get_rpc_transport.return_value = 'test_rpc_transport'
+            mock_Target.return_value = 'test_Target'
+
+            _MessagingConsumer('test_topic', 'test_pid', [TestEndPoint],
+                               fanout=True)
+            mock_get_rpc_transport.assert_called_once_with(
+                cfg.CONF, url=messaging.TRANSPORT_URL)
+            mock_Target.assert_called_once_with(
+                topic='test_topic', fanout=True, server=messaging.SERVER)
+            mock_get_rpc_server.assert_called_once_with(
+                'test_rpc_transport', 'test_Target', [TestEndPoint],
+                executor=messaging.RPC_SERVER_EXECUTOR,
+                access_policy=oslo_messaging.DefaultRPCAccessPolicy)
diff --git a/yardstick/tests/unit/common/messaging/test_payloads.py b/yardstick/tests/unit/common/messaging/test_payloads.py
new file mode 100644 (file)
index 0000000..00ec220
--- /dev/null
@@ -0,0 +1,46 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from yardstick.common import exceptions
+from yardstick.common.messaging import payloads
+from yardstick.tests.unit import base as ut_base
+
+
+class _DummyPayload(payloads.Payload):
+    REQUIRED_FIELDS = {'version', 'key1', 'key2'}
+
+
+class PayloadTestCase(ut_base.BaseUnitTestCase):
+
+    def test__init(self):
+        payload = _DummyPayload(version=1, key1='value1', key2='value2')
+        self.assertEqual(1, payload.version)
+        self.assertEqual('value1', payload.key1)
+        self.assertEqual('value2', payload.key2)
+        self.assertEqual(3, len(payload._fields))
+
+    def test__init_missing_required_fields(self):
+        with self.assertRaises(exceptions.PayloadMissingAttributes):
+            _DummyPayload(key1='value1', key2='value2')
+
+    def test_obj_to_dict(self):
+        payload = _DummyPayload(version=1, key1='value1', key2='value2')
+        payload_dict = payload.obj_to_dict()
+        self.assertEqual({'version': 1, 'key1': 'value1', 'key2': 'value2'},
+                         payload_dict)
+
+    def test_dict_to_obj(self):
+        _dict = {'version': 2, 'key1': 'value100', 'key2': 'value200'}
+        payload = _DummyPayload.dict_to_obj(_dict)
+        self.assertEqual(set(_dict.keys()), payload._fields)
diff --git a/yardstick/tests/unit/common/messaging/test_producer.py b/yardstick/tests/unit/common/messaging/test_producer.py
new file mode 100644 (file)
index 0000000..0289689
--- /dev/null
@@ -0,0 +1,46 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import mock
+from oslo_config import cfg
+import oslo_messaging
+
+from yardstick.common import messaging
+from yardstick.common.messaging import producer
+from yardstick.tests.unit import base as ut_base
+
+
+class _MessagingProducer(producer.MessagingProducer):
+    pass
+
+
+class MessagingProducerTestCase(ut_base.BaseUnitTestCase):
+
+    def test__init(self):
+        with mock.patch.object(oslo_messaging, 'RPCClient') as \
+                mock_RPCClient, \
+                mock.patch.object(oslo_messaging, 'get_rpc_transport') as \
+                mock_get_rpc_transport, \
+                mock.patch.object(oslo_messaging, 'Target') as \
+                mock_Target:
+            mock_get_rpc_transport.return_value = 'test_rpc_transport'
+            mock_Target.return_value = 'test_Target'
+
+            _MessagingProducer('test_topic', 'test_pid', fanout=True)
+            mock_get_rpc_transport.assert_called_once_with(
+                cfg.CONF, url=messaging.TRANSPORT_URL)
+            mock_Target.assert_called_once_with(
+                topic='test_topic', fanout=True, server=messaging.SERVER)
+            mock_RPCClient.assert_called_once_with('test_rpc_transport',
+                                                   'test_Target')
index e39a13f..67ca826 100644 (file)
 from oslo_utils import uuidutils
 import unittest
 import mock
-
+import shade
 from shade import exc
+
+from yardstick.common import constants
 from yardstick.common import openstack_utils
 
 
@@ -35,44 +37,44 @@ class GetHeatApiVersionTestCase(unittest.TestCase):
             self.assertEqual(api_version, expected_result)
 
 
-class GetNetworkIdTestCase(unittest.TestCase):
-
-    def test_get_network_id(self):
-        _uuid = uuidutils.generate_uuid()
-        mock_shade_client = mock.Mock()
-        mock_shade_client.list_networks = mock.Mock()
-        mock_shade_client.list_networks.return_value = [{'id': _uuid}]
+class GetShadeClientTestCase(unittest.TestCase):
 
-        output = openstack_utils.get_network_id(mock_shade_client,
-                                                'network_name')
-        self.assertEqual(_uuid, output)
+    @mock.patch.object(shade, 'openstack_cloud', return_value='os_client')
+    def test_get_shade_client(self, mock_openstack_cloud):
+        os_cloud_config = {'param1': True, 'param2': 'value2'}
+        self.assertEqual('os_client',
+                         openstack_utils.get_shade_client(**os_cloud_config))
+        os_cloud_config.update(constants.OS_CLOUD_DEFAULT_CONFIG)
+        mock_openstack_cloud.assert_called_once_with(**os_cloud_config)
 
-    def test_get_network_id_no_network(self):
-        mock_shade_client = mock.Mock()
-        mock_shade_client.list_networks = mock.Mock()
-        mock_shade_client.list_networks.return_value = None
+        mock_openstack_cloud.reset_mock()
+        os_cloud_config = {'verify': True, 'param2': 'value2'}
+        self.assertEqual('os_client',
+                         openstack_utils.get_shade_client(**os_cloud_config))
+        mock_openstack_cloud.assert_called_once_with(**os_cloud_config)
 
-        output = openstack_utils.get_network_id(mock_shade_client,
-                                                'network_name')
-        self.assertIsNone(output)
+    @mock.patch.object(shade, 'openstack_cloud', return_value='os_client')
+    def test_get_shade_client_no_parameters(self, mock_openstack_cloud):
+        self.assertEqual('os_client', openstack_utils.get_shade_client())
+        mock_openstack_cloud.assert_called_once_with(
+            **constants.OS_CLOUD_DEFAULT_CONFIG)
 
 
 class DeleteNeutronNetTestCase(unittest.TestCase):
 
     def setUp(self):
         self.mock_shade_client = mock.Mock()
-        self.mock_shade_client.delete_network = mock.Mock()
 
     def test_delete_neutron_net(self):
         self.mock_shade_client.delete_network.return_value = True
         output = openstack_utils.delete_neutron_net(self.mock_shade_client,
-                                                    'network_id')
+                                                    'network_name_or_id')
         self.assertTrue(output)
 
     def test_delete_neutron_net_fail(self):
         self.mock_shade_client.delete_network.return_value = False
         output = openstack_utils.delete_neutron_net(self.mock_shade_client,
-                                                    'network_id')
+                                                    'network_name_or_id')
         self.assertFalse(output)
 
     @mock.patch.object(openstack_utils, 'log')
@@ -80,7 +82,7 @@ class DeleteNeutronNetTestCase(unittest.TestCase):
         self.mock_shade_client.delete_network.side_effect = (
             exc.OpenStackCloudException('error message'))
         output = openstack_utils.delete_neutron_net(self.mock_shade_client,
-                                                    'network_id')
+                                                    'network_name_or_id')
         self.assertFalse(output)
         mock_logger.error.assert_called_once()
 
@@ -185,3 +187,352 @@ class CreateNeutronRouterTestCase(unittest.TestCase):
             self.mock_shade_client)
         mock_logger.error.assert_called_once()
         self.assertIsNone(output)
+
+
+class RemoveRouterInterfaceTestCase(unittest.TestCase):
+
+    def setUp(self):
+        self.mock_shade_client = mock.Mock()
+        self.router = 'router'
+        self.mock_shade_client.remove_router_interface = mock.Mock()
+
+    def test_remove_router_interface(self):
+        self.mock_shade_client.remove_router_interface.return_value = True
+        output = openstack_utils.remove_router_interface(
+            self.mock_shade_client, self.router)
+        self.assertTrue(output)
+
+    @mock.patch.object(openstack_utils, 'log')
+    def test_remove_router_interface_exception(self, mock_logger):
+        self.mock_shade_client.remove_router_interface.side_effect = (
+            exc.OpenStackCloudException('error message'))
+        output = openstack_utils.remove_router_interface(
+            self.mock_shade_client, self.router)
+        mock_logger.error.assert_called_once()
+        self.assertFalse(output)
+
+
+class CreateFloatingIpTestCase(unittest.TestCase):
+
+    def setUp(self):
+        self.mock_shade_client = mock.Mock()
+        self.network_name_or_id = 'name'
+        self.mock_shade_client.create_floating_ip = mock.Mock()
+
+    def test_create_floating_ip(self):
+        self.mock_shade_client.create_floating_ip.return_value = \
+            {'floating_ip_address': 'value1', 'id': 'value2'}
+        output = openstack_utils.create_floating_ip(self.mock_shade_client,
+                                                    self.network_name_or_id)
+        self.assertEqual({'fip_addr': 'value1', 'fip_id': 'value2'}, output)
+
+    @mock.patch.object(openstack_utils, 'log')
+    def test_create_floating_ip_exception(self, mock_logger):
+        self.mock_shade_client.create_floating_ip.side_effect = (
+            exc.OpenStackCloudException('error message'))
+        output = openstack_utils.create_floating_ip(
+            self.mock_shade_client, self.network_name_or_id)
+        mock_logger.error.assert_called_once()
+        self.assertIsNone(output)
+
+
+class DeleteFloatingIpTestCase(unittest.TestCase):
+
+    def setUp(self):
+        self.mock_shade_client = mock.Mock()
+        self.floating_ip_id = 'floating_ip_id'
+        self.mock_shade_client.delete_floating_ip = mock.Mock()
+
+    def test_delete_floating_ip(self):
+        self.mock_shade_client.delete_floating_ip.return_value = True
+        output = openstack_utils.delete_floating_ip(self.mock_shade_client,
+                                                    'floating_ip_id')
+        self.assertTrue(output)
+
+    def test_delete_floating_ip_fail(self):
+        self.mock_shade_client.delete_floating_ip.return_value = False
+        output = openstack_utils.delete_floating_ip(self.mock_shade_client,
+                                                    'floating_ip_id')
+        self.assertFalse(output)
+
+    @mock.patch.object(openstack_utils, 'log')
+    def test_delete_floating_ip_exception(self, mock_logger):
+        self.mock_shade_client.delete_floating_ip.side_effect = (
+            exc.OpenStackCloudException('error message'))
+        output = openstack_utils.delete_floating_ip(self.mock_shade_client,
+                                                    'floating_ip_id')
+        mock_logger.error.assert_called_once()
+        self.assertFalse(output)
+
+
+class CreateSecurityGroupRuleTestCase(unittest.TestCase):
+
+    def setUp(self):
+        self.mock_shade_client = mock.Mock()
+        self.secgroup_name_or_id = 'sg_name_id'
+        self.mock_shade_client.create_security_group_rule = mock.Mock()
+
+    def test_create_security_group_rule(self):
+        self.mock_shade_client.create_security_group_rule.return_value = (
+            {'security_group_rule'})
+        output = openstack_utils.create_security_group_rule(
+            self.mock_shade_client, self.secgroup_name_or_id)
+        self.assertTrue(output)
+
+    @mock.patch.object(openstack_utils, 'log')
+    def test_create_security_group_rule_exception(self, mock_logger):
+        self.mock_shade_client.create_security_group_rule.side_effect = (
+            exc.OpenStackCloudException('error message'))
+
+        output = openstack_utils.create_security_group_rule(
+            self.mock_shade_client, self.secgroup_name_or_id)
+        mock_logger.error.assert_called_once()
+        self.assertFalse(output)
+
+
+class ListImageTestCase(unittest.TestCase):
+
+    def test_list_images(self):
+        mock_shade_client = mock.MagicMock()
+        mock_shade_client.list_images.return_value = []
+        openstack_utils.list_images(mock_shade_client)
+
+    @mock.patch.object(openstack_utils, 'log')
+    def test_list_images_exception(self, mock_logger):
+        mock_shade_client = mock.MagicMock()
+        mock_shade_client.list_images = mock.MagicMock()
+        mock_shade_client.list_images.side_effect = (
+            exc.OpenStackCloudException('error message'))
+        images = openstack_utils.list_images(mock_shade_client)
+        mock_logger.error.assert_called_once()
+        self.assertFalse(images)
+
+
+class SecurityGroupTestCase(unittest.TestCase):
+
+    def setUp(self):
+        self.mock_shade_client = mock.Mock()
+        self.sg_name = 'sg_name'
+        self.sg_description = 'sg_description'
+        self._uuid = uuidutils.generate_uuid()
+
+    def test_create_security_group_full_existing_security_group(self):
+        self.mock_shade_client.get_security_group.return_value = (
+            {'name': 'name', 'id': self._uuid})
+        output = openstack_utils.create_security_group_full(
+            self.mock_shade_client, self.sg_name, self.sg_description)
+        self.mock_shade_client.get_security_group.assert_called_once()
+        self.assertEqual(self._uuid, output)
+
+    @mock.patch.object(openstack_utils, 'log')
+    def test_create_security_group_full_non_existing_security_group(
+            self, mock_logger):
+        self.mock_shade_client.get_security_group.return_value = None
+        self.mock_shade_client.create_security_group.side_effect = (
+            exc.OpenStackCloudException('error message'))
+        output = openstack_utils.create_security_group_full(
+            self.mock_shade_client, self.sg_name, self.sg_description)
+        mock_logger.error.assert_called_once()
+        self.assertIsNone(output)
+
+    @mock.patch.object(openstack_utils, 'create_security_group_rule')
+    @mock.patch.object(openstack_utils, 'log')
+    def test_create_security_group_full_create_rule_fail(
+            self, mock_logger, mock_create_security_group_rule):
+        self.mock_shade_client.get_security_group.return_value = None
+        self.mock_shade_client.create_security_group.return_value = (
+            {'name': 'name', 'id': self._uuid})
+        mock_create_security_group_rule.return_value = False
+        output = openstack_utils.create_security_group_full(
+            self.mock_shade_client, self.sg_name, self.sg_description)
+        mock_create_security_group_rule.assert_called()
+        self.mock_shade_client.delete_security_group(self.sg_name)
+        mock_logger.error.assert_called_once()
+        self.assertIsNone(output)
+
+    @mock.patch.object(openstack_utils, 'create_security_group_rule')
+    def test_create_security_group_full(
+            self, mock_create_security_group_rule):
+        self.mock_shade_client.get_security_group.return_value = None
+        self.mock_shade_client.create_security_group.return_value = (
+            {'name': 'name', 'id': self._uuid})
+        mock_create_security_group_rule.return_value = True
+        output = openstack_utils.create_security_group_full(
+            self.mock_shade_client, self.sg_name, self.sg_description)
+        mock_create_security_group_rule.assert_called()
+        self.mock_shade_client.delete_security_group(self.sg_name)
+        self.assertEqual(self._uuid, output)
+
+# *********************************************
+#   NOVA
+# *********************************************
+
+
+class CreateInstanceTestCase(unittest.TestCase):
+
+    def test_create_instance_and_wait_for_active(self):
+        self.mock_shade_client = mock.Mock()
+        name = 'server_name'
+        image = 'image_name'
+        flavor = 'flavor_name'
+        self.mock_shade_client.create_server.return_value = (
+            {'name': name, 'image': image, 'flavor': flavor})
+        output = openstack_utils.create_instance_and_wait_for_active(
+            self.mock_shade_client, name, image, flavor)
+        self.assertEqual(
+            {'name': name, 'image': image, 'flavor': flavor}, output)
+
+    @mock.patch.object(openstack_utils, 'log')
+    def test_create_instance_and_wait_for_active_fail(self, mock_logger):
+        self.mock_shade_client = mock.Mock()
+        self.mock_shade_client.create_server.side_effect = (
+            exc.OpenStackCloudException('error message'))
+        output = openstack_utils.create_instance_and_wait_for_active(
+            self.mock_shade_client, 'server_name', 'image_name', 'flavor_name')
+        mock_logger.error.assert_called_once()
+        self.assertIsNone(output)
+
+
+class DeleteInstanceTestCase(unittest.TestCase):
+
+    def setUp(self):
+        self.mock_shade_client = mock.Mock()
+
+    def test_delete_instance(self):
+        self.mock_shade_client.delete_server.return_value = True
+        output = openstack_utils.delete_instance(self.mock_shade_client,
+                                                 'instance_name_id')
+        self.assertTrue(output)
+
+    def test_delete_instance_fail(self):
+        self.mock_shade_client.delete_server.return_value = False
+        output = openstack_utils.delete_instance(self.mock_shade_client,
+                                                 'instance_name_id')
+        self.assertFalse(output)
+
+    @mock.patch.object(openstack_utils, 'log')
+    def test_delete_instance_exception(self, mock_logger):
+        self.mock_shade_client.delete_server.side_effect = (
+            exc.OpenStackCloudException('error message'))
+        output = openstack_utils.delete_instance(self.mock_shade_client,
+                                                 'instance_name_id')
+        mock_logger.error.assert_called_once()
+        self.assertFalse(output)
+
+
+class CreateKeypairTestCase(unittest.TestCase):
+
+    def setUp(self):
+        self.mock_shade_client = mock.Mock()
+        self.name = 'key_name'
+
+    def test_create_keypair(self):
+        self.mock_shade_client.create_keypair.return_value = (
+            {'name': 'key-name', 'type': 'ssh'})
+        output = openstack_utils.create_keypair(
+            self.mock_shade_client, self.name)
+        self.assertEqual(
+            {'name': 'key-name', 'type': 'ssh'},
+            output)
+
+    @mock.patch.object(openstack_utils, 'log')
+    def test_create_keypair_fail(self, mock_logger):
+        self.mock_shade_client.create_keypair.side_effect = (
+            exc.OpenStackCloudException('error message'))
+        output = openstack_utils.create_keypair(
+            self.mock_shade_client, self.name)
+        mock_logger.error.assert_called_once()
+        self.assertIsNone(output)
+
+
+class DeleteKeypairTestCase(unittest.TestCase):
+
+    def setUp(self):
+        self.mock_shade_client = mock.Mock()
+
+    def test_delete_keypair(self):
+        self.mock_shade_client.delete_keypair.return_value = True
+        output = openstack_utils.delete_keypair(self.mock_shade_client,
+                                                'key_name')
+        self.assertTrue(output)
+
+    def test_delete_keypair_fail(self):
+        self.mock_shade_client.delete_keypair.return_value = False
+        output = openstack_utils.delete_keypair(self.mock_shade_client,
+                                                'key_name')
+        self.assertFalse(output)
+
+    @mock.patch.object(openstack_utils, 'log')
+    def test_delete_keypair_exception(self, mock_logger):
+        self.mock_shade_client.delete_keypair.side_effect = (
+            exc.OpenStackCloudException('error message'))
+        output = openstack_utils.delete_keypair(self.mock_shade_client,
+                                                'key_name')
+        mock_logger.error.assert_called_once()
+        self.assertFalse(output)
+
+
+class AttachVolumeToServerTestCase(unittest.TestCase):
+
+    def test_attach_volume_to_server(self):
+        self.mock_shade_client = mock.Mock()
+        self.mock_shade_client.get_server.return_value = {'server_dict'}
+        self.mock_shade_client.get_volume.return_value = {'volume_dict'}
+        self.mock_shade_client.attach_volume.return_value = True
+        output = openstack_utils.attach_volume_to_server(
+            self.mock_shade_client, 'server_name_or_id', 'volume_name_or_id')
+        self.assertTrue(output)
+
+    @mock.patch.object(openstack_utils, 'log')
+    def test_attach_volume_to_server_fail(self, mock_logger):
+        self.mock_shade_client = mock.Mock()
+        self.mock_shade_client.attach_volume.side_effect = (
+            exc.OpenStackCloudException('error message'))
+        output = openstack_utils.attach_volume_to_server(
+            self.mock_shade_client, 'server_name_or_id', 'volume_name_or_id')
+        mock_logger.error.assert_called_once()
+        self.assertFalse(output)
+
+
+class GetServerTestCase(unittest.TestCase):
+
+    def test_get_server(self):
+        self.mock_shade_client = mock.Mock()
+        _uuid = uuidutils.generate_uuid()
+        self.mock_shade_client.get_server.return_value = {
+            'name': 'server_name', 'id': _uuid}
+        output = openstack_utils.get_server(self.mock_shade_client,
+                                            'server_name_or_id')
+        self.assertEqual({'name': 'server_name', 'id': _uuid}, output)
+
+    @mock.patch.object(openstack_utils, 'log')
+    def test_get_server_exception(self, mock_logger):
+        self.mock_shade_client = mock.Mock()
+        self.mock_shade_client.get_server.side_effect = (
+            exc.OpenStackCloudException('error message'))
+        output = openstack_utils.get_server(self.mock_shade_client,
+                                            'server_name_or_id')
+        mock_logger.error.assert_called_once()
+        self.assertIsNone(output)
+
+
+class GetFlavorTestCase(unittest.TestCase):
+
+    def test_get_flavor(self):
+        self.mock_shade_client = mock.Mock()
+        _uuid = uuidutils.generate_uuid()
+        self.mock_shade_client.get_flavor.return_value = {
+            'name': 'flavor_name', 'id': _uuid}
+        output = openstack_utils.get_flavor(self.mock_shade_client,
+                                            'flavor_name_or_id')
+        self.assertEqual({'name': 'flavor_name', 'id': _uuid}, output)
+
+    @mock.patch.object(openstack_utils, 'log')
+    def test_get_flavor_exception(self, mock_logger):
+        self.mock_shade_client = mock.Mock()
+        self.mock_shade_client.get_flavor.side_effect = (
+            exc.OpenStackCloudException('error message'))
+        output = openstack_utils.get_flavor(self.mock_shade_client,
+                                            'flavor_name_or_id')
+        mock_logger.error.assert_called_once()
+        self.assertIsNone(output)
index 9540a39..666b29b 100644 (file)
@@ -16,13 +16,15 @@ import mock
 import os
 import six
 from six.moves import configparser
+import time
 import unittest
 
 import yardstick
 from yardstick import ssh
 import yardstick.error
-from yardstick.common import utils
 from yardstick.common import constants
+from yardstick.common import utils
+from yardstick.common import exceptions
 
 
 class IterSubclassesTestCase(unittest.TestCase):
@@ -1158,3 +1160,43 @@ class ReadMeminfoTestCase(unittest.TestCase):
             output = utils.read_meminfo(ssh_client)
             mock_get_client.assert_called_once_with('/proc/meminfo', mock.ANY)
         self.assertEqual(self.MEMINFO_DICT, output)
+
+
+class TimerTestCase(unittest.TestCase):
+
+    def test__getattr(self):
+        with utils.Timer() as timer:
+            time.sleep(1)
+        self.assertEqual(1, round(timer.total_seconds(), 0))
+        self.assertEqual(1, timer.delta.seconds)
+
+    def test__enter_with_timeout(self):
+        with utils.Timer(timeout=10) as timer:
+            time.sleep(1)
+        self.assertEqual(1, round(timer.total_seconds(), 0))
+
+    def test__enter_with_timeout_exception(self):
+        with self.assertRaises(exceptions.TimerTimeout):
+            with utils.Timer(timeout=1):
+                time.sleep(2)
+
+
+class WaitUntilTrueTestCase(unittest.TestCase):
+
+    def test_no_timeout(self):
+        self.assertIsNone(utils.wait_until_true(lambda: True,
+                                                timeout=1, sleep=1))
+
+    def test_timeout_generic_exception(self):
+        with self.assertRaises(exceptions.WaitTimeout):
+            self.assertIsNone(utils.wait_until_true(lambda: False,
+                                                    timeout=1, sleep=1))
+
+    def test_timeout_given_exception(self):
+        class MyTimeoutException(exceptions.YardstickException):
+            message = 'My timeout exception'
+
+        with self.assertRaises(MyTimeoutException):
+            self.assertIsNone(
+                utils.wait_until_true(lambda: False, timeout=1, sleep=1,
+                                      exception=MyTimeoutException))
diff --git a/yardstick/tests/unit/network_services/__init__.py b/yardstick/tests/unit/network_services/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/yardstick/tests/unit/network_services/collector/__init__.py b/yardstick/tests/unit/network_services/collector/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
@@ -13,9 +13,6 @@
 # limitations under the License.
 #
 
-# Unittest for yardstick.network_services.collector.publisher
-
-from __future__ import absolute_import
 import unittest
 
 from yardstick.network_services.collector import publisher
@@ -13,9 +13,6 @@
 # limitations under the License.
 #
 
-# Unittest for yardstick.network_services.collector.subscriber
-
-from __future__ import absolute_import
 import unittest
 import mock
 
@@ -81,7 +78,8 @@ class CollectorTestCase(unittest.TestCase):
         pass
 
     def test_start(self, *_):
-        self.assertIsNone(self.collector.start())
+        with self.assertRaises(Exception):
+            self.collector.start()
 
     def test_stop(self, *_):
         self.assertIsNone(self.collector.stop())
diff --git a/yardstick/tests/unit/network_services/libs/__init__.py b/yardstick/tests/unit/network_services/libs/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/yardstick/tests/unit/network_services/libs/ixia_libs/__init__.py b/yardstick/tests/unit/network_services/libs/ixia_libs/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
 # limitations under the License.
 #
 
-# Unittest for yardstick.network_services.libs.ixia_libs.IxNet
-
-from __future__ import absolute_import
-import unittest
 import mock
+import IxNetwork
+import unittest
 
 from yardstick.network_services.libs.ixia_libs.IxNet.IxNet import IxNextgen
 from yardstick.network_services.libs.ixia_libs.IxNet.IxNet import IP_VERSION_4
 from yardstick.network_services.libs.ixia_libs.IxNet.IxNet import IP_VERSION_6
 
-
 UPLINK = "uplink"
 DOWNLINK = "downlink"
 
+
 class TestIxNextgen(unittest.TestCase):
 
     def test___init__(self):
         ixnet_gen = IxNextgen()
         self.assertIsNone(ixnet_gen._bidir)
 
-    @mock.patch("yardstick.network_services.libs.ixia_libs.IxNet.IxNet.sys")
-    def test_connect(self, *args):
-
+    @mock.patch.object(IxNetwork, 'IxNet')
+    def test_connect(self, mock_ixnet):
+        ixnet_instance = mock.Mock()
+        mock_ixnet.return_value = ixnet_instance
         ixnet_gen = IxNextgen()
-        ixnet_gen.get_config = mock.MagicMock()
-        ixnet_gen.get_ixnet = mock.MagicMock()
+        with mock.patch.object(ixnet_gen, 'get_config') as mock_config:
+            mock_config.return_value = {'machine': 'machine_fake',
+                                        'port': 'port_fake',
+                                        'version': 12345}
+            ixnet_gen._connect(mock.ANY)
 
-        self.assertRaises(ImportError, ixnet_gen._connect, {"py_lib_path": "/tmp"})
+        ixnet_instance.connect.assert_called_once_with(
+            'machine_fake', '-port', 'port_fake', '-version', '12345')
+        mock_config.assert_called_once()
 
     def test_clear_ixia_config(self):
         ixnet = mock.MagicMock()
@@ -628,11 +632,9 @@ class TestIxNextgen(unittest.TestCase):
     def test_set_random_ip_multi_attributes_bad_ip_version(self):
         bad_ip_version = object()
         ixnet_gen = IxNextgen(mock.Mock())
-        mock1 = mock.Mock()
-        mock2 = mock.Mock()
-        mock3 = mock.Mock()
         with self.assertRaises(ValueError):
-            ixnet_gen.set_random_ip_multi_attributes(mock1, bad_ip_version, mock2, mock3)
+            ixnet_gen.set_random_ip_multi_attributes(
+                mock.Mock(), bad_ip_version, mock.Mock(), mock.Mock())
 
     def test_get_config(self):
         tg_cfg = {
@@ -659,13 +661,11 @@ class TestIxNextgen(unittest.TestCase):
                     "version": "test3",
                     "ixchassis": "test4",
                     "tcl_port": "test5",
-                    "py_lib_path": "test6",
                 },
             }
         }
 
         expected = {
-            'py_lib_path': 'test6',
             'machine': 'test1',
             'port': 'test5',
             'chassis': 'test4',
@@ -13,8 +13,6 @@
 # limitations under the License.
 #
 
-# Unittest for yardstick.network_services.utils
-
 import os
 import unittest
 import mock
 # limitations under the License.
 #
 
-# Unittest for yardstick.network_services.utils
-
-from __future__ import absolute_import
-
-import unittest
 import mock
-
-import yaml
+import unittest
 
 from yardstick.network_services.yang_model import YangModel
 
@@ -95,9 +89,9 @@ class YangModelTestCase(unittest.TestCase):
         y._get_entries()
         self.assertEqual(y._rules, '')
 
-    @mock.patch('yardstick.network_services.yang_model.yaml_load')
     @mock.patch('yardstick.network_services.yang_model.open')
-    def test__read_config(self, mock_open, mock_safe_load):
+    @mock.patch('yardstick.network_services.yang_model.yaml_load')
+    def test__read_config(self, mock_safe_load, *args):
         cfg = "yang.yaml"
         y = YangModel(cfg)
         mock_safe_load.return_value = expected = {'key1': 'value1', 'key2': 'value2'}
index aae2487..3ec59a3 100644 (file)
@@ -17,6 +17,7 @@ import shade
 import unittest
 
 from yardstick.benchmark.contexts import node
+from yardstick.common import constants
 from yardstick.common import exceptions
 from yardstick.orchestrator import heat
 
@@ -53,6 +54,14 @@ class HeatStackTestCase(unittest.TestCase):
         self._mock_stack_get.stop()
         heat._DEPLOYED_STACKS = {}
 
+    @mock.patch.object(shade, 'openstack_cloud')
+    def test__init(self, mock_openstack_cloud):
+        os_cloud_config = {'key': 'value'}
+        heatstack = heat.HeatStack('name', os_cloud_config=os_cloud_config)
+        self.assertEqual('name', heatstack.name)
+        os_cloud_config.update(constants.OS_CLOUD_DEFAULT_CONFIG)
+        mock_openstack_cloud.assert_called_once_with(**os_cloud_config)
+
     def test_create(self):
         template = {'tkey': 'tval'}
         heat_parameters = {'pkey': 'pval'}
@@ -192,7 +201,9 @@ class HeatStackTestCase(unittest.TestCase):
 class HeatTemplateTestCase(unittest.TestCase):
 
     def setUp(self):
-        self.template = heat.HeatTemplate('test')
+        self._os_cloud_config = {'key1': 'value1'}
+        self.template = heat.HeatTemplate(
+            'test', os_cloud_config=self._os_cloud_config)
 
     def test_add_tenant_network(self):
         self.template.add_network('some-network')
@@ -337,8 +348,12 @@ class HeatTemplateTestCase(unittest.TestCase):
 
     def test_create_not_block(self):
         heat_stack = mock.Mock()
-        with mock.patch.object(heat, 'HeatStack', return_value=heat_stack):
+        with mock.patch.object(heat, 'HeatStack', return_value=heat_stack) \
+                as mock_heatstack:
             ret = self.template.create(block=False)
+
+        mock_heatstack.assert_called_once_with(
+            self.template.name, os_cloud_config=self.template._os_cloud_config)
         heat_stack.create.assert_called_once_with(
             self.template._template, self.template.heat_parameters, False,
             3600)
@@ -354,13 +369,30 @@ class HeatTemplateTestCase(unittest.TestCase):
             3600)
         self.assertEqual(heat_stack, ret)
 
-
     def test_create_block_status_no_complete(self):
         heat_stack = mock.Mock()
         heat_stack.status = 'other status'
+        heat_stack.get_failures.return_value = []
         with mock.patch.object(heat, 'HeatStack', return_value=heat_stack):
             self.assertRaises(exceptions.HeatTemplateError,
                               self.template.create, block=True)
         heat_stack.create.assert_called_once_with(
             self.template._template, self.template.heat_parameters, True,
             3600)
+
+    def test_create_block_status_no_complete_with_reasons(self):
+        heat_stack = mock.Mock()
+        heat_stack.status = 'other status'
+        heat_stack.get_failures.return_value = [
+            mock.Mock(resource_status_reason="A reason"),
+            mock.Mock(resource_status_reason="Something else")
+        ]
+        with mock.patch.object(heat, 'HeatStack', return_value=heat_stack):
+            with mock.patch.object(heat, 'log') as mock_log:
+                self.assertRaises(exceptions.HeatTemplateError,
+                                  self.template.create, block=True)
+                mock_log.error.assert_any_call("%s", "A reason")
+                mock_log.error.assert_any_call("%s", "Something else")
+        heat_stack.create.assert_called_once_with(
+            self.template._template, self.template.heat_parameters, True,
+            3600)