Merge "cleanup: rm node_ID from yardstick prepare_env file"
authorRoss Brattain <ross.b.brattain@intel.com>
Mon, 12 Feb 2018 03:05:09 +0000 (03:05 +0000)
committerGerrit Code Review <gerrit@opnfv.org>
Mon, 12 Feb 2018 03:05:09 +0000 (03:05 +0000)
39 files changed:
INFO.yaml [new file with mode: 0644]
ansible/infra_deploy.yml
ansible/nsb_setup.yml
ansible/roles/create_samplevnfs_image/tasks/main.yml
ansible/roles/infra_destroy_previous_configuration/tasks/delete_network.yml [new file with mode: 0644]
ansible/roles/infra_destroy_previous_configuration/tasks/main.yml [new file with mode: 0644]
ansible/roles/install_dpdk/tasks/main.yml
ansible/roles/install_trex/tasks/main.yml
docs/testing/user/userguide/15-list-of-tcs.rst
docs/testing/user/userguide/opnfv_yardstick_tc056.rst
etc/infra/infra_deploy.yaml.sample
requirements.txt
samples/vnf_samples/nsut/prox/prox-tg-topology-1.yaml
samples/vnf_samples/nsut/prox/prox-tg-topology-2.yaml
samples/vnf_samples/nsut/prox/prox-tg-topology-4.yaml
samples/vnf_samples/vnf_descriptors/prox_vnf-2.yaml [deleted file]
samples/vnf_samples/vnf_descriptors/prox_vnf-4.yaml [deleted file]
samples/vnf_samples/vnf_descriptors/prox_vnf.yaml [moved from samples/vnf_samples/vnf_descriptors/prox_vnf-1.yaml with 100% similarity]
samples/vnf_samples/vnf_descriptors/tg_prox_tpl-1.yaml [deleted file]
samples/vnf_samples/vnf_descriptors/tg_prox_tpl-4.yaml [deleted file]
samples/vnf_samples/vnf_descriptors/tg_prox_tpl.yaml [moved from samples/vnf_samples/vnf_descriptors/tg_prox_tpl-2.yaml with 100% similarity]
setup.py
test-requirements.txt
tests/unit/benchmark/scenarios/test_base.py
tests/unit/network_services/helpers/test_dpdkbindnic_helper.py
tests/unit/network_services/vnf_generic/vnf/test_acl_vnf.py
tests/unit/network_services/vnf_generic/vnf/test_cgnapt_vnf.py
tests/unit/network_services/vnf_generic/vnf/test_udp_replay.py
tests/unit/network_services/vnf_generic/vnf/test_vfw_vnf.py
tools/virt_ci_rampup.sh
yardstick/benchmark/contexts/standalone/model.py
yardstick/benchmark/contexts/standalone/ovs_dpdk.py
yardstick/benchmark/contexts/standalone/sriov.py
yardstick/benchmark/scenarios/base.py
yardstick/common/exceptions.py
yardstick/network_services/helpers/dpdkbindnic_helper.py
yardstick/orchestrator/heat.py
yardstick/resources/scripts/install/ovs_deploy.bash
yardstick/tests/unit/orchestrator/test_heat.py

diff --git a/INFO.yaml b/INFO.yaml
new file mode 100644 (file)
index 0000000..f84f695
--- /dev/null
+++ b/INFO.yaml
@@ -0,0 +1,80 @@
+---
+project: 'Test framework for verifying infrastructure compliance (yardstick)'
+project_creation_date: 'April 28th, 2015'
+project_category: 'Integration & Testing'
+lifecycle_state: 'Incubation'
+project_lead: &opnfv_yardstick_ptl
+    name: 'Ross Brattain'
+    email: 'ross.b.brattain@intel.com'
+    id: 'rbbratta'
+    company: 'intel.com'
+    timezone: 'PST'
+primary_contact: *opnfv_yardstick_ptl
+issue_tracking:
+    type: 'jira'
+    url: 'https://jira.opnfv.org/projects/Yardstick'
+    key: 'Yardstick'
+mailing_list:
+    type: 'mailman2'
+    url: 'opnfv-tech-discuss@lists.opnfv.org'
+    tag: '[yardstick]'
+realtime_discussion:
+    type: irc
+    server: 'freenode.net'
+    channel: '#opnfv-yardstick'
+meetings:
+    - type: 'gotomeeting+irc'
+      agenda:  'https://wiki.opnfv.org/display/yardstick/Yardstick+Meetings'
+      url:  'https://global.gotomeeting.com/join/819733085'
+      server: 'freenode.net'
+      channel: '#opnfv-yardstick'
+      repeats: 'weekly'
+      time:  '08:30 UTC'
+repositories:
+    - 'yardstick'
+committers:
+    - <<: *opnfv_yardstick_ptl
+    - name: 'Jörgen Karlsson'
+      email: 'jorgen.w.karlsson@ericsson.com'
+      company: 'ericsson.com'
+      id: 'jnon'
+    - name: 'Kubi'
+      email: 'jean.gaoliang@huawei.com'
+      company: 'huawei.com'
+      id: 'kubi'
+    - name: 'Rex Lee'
+      email: 'limingjiang@huawei.com'
+      company: 'huawei.com'
+      id: 'rexlee8776'
+    - name: 'Jing Lu'
+      email: 'lvjing5@huawei.com'
+      company: 'huawei.com'
+      id: 'JingLu5'
+    - name: 'zhihui wu'
+      email: 'wu.zhihui1@zte.com.cn'
+      company: 'zte.com.cn'
+      id: 'wu.zhihui'
+    - name: 'Trevor Cooper'
+      email: 'trevor.cooper@intel.com'
+      company: 'intel.com'
+      id: 'trev'
+    - name: 'Jack Chan'
+      email: 'chenjiankun1@huawei.com'
+      company: 'huawei.com'
+      id: 'chenjiankun'
+    - name: 'Emma Foley'
+      email: 'emma.l.foley@intel.com'
+      company: 'intel.com'
+      id: 'elfoley'
+    - name: 'Rodolfo Alonso Hernandez'
+      email: 'rodolfo.alonso.hernandez@intel.com'
+      company: 'intel.com'
+      id: 'rodolfo.ah'
+    - name: 'Kanglin Yin'
+      email: '14_ykl@tongji.edu.cn'
+      company: 'tongji.edu.cn'
+      id: 'tjuyinkanglin'
+tsc:
+    # yamllint disable rule:line-length
+    approval: 'http://meetbot.opnfv.org/meetings/'
+    # yamllint enable rule:line-length
index 10f53fb..948dd33 100644 (file)
@@ -16,3 +16,4 @@
 
   roles:
     - infra_check_requirements
+    - infra_destroy_previous_configuration
index bfe5d23..98a59f9 100644 (file)
 # See the License for the specific language governing permissions and
 # limitations under the License.
 ---
-#- name: Prepare baremetal machine
-#  include: ubuntu_server_baremetal_deploy_samplevnfs.yml
-#  vars:
-#    YARD_IMG_ARCH: amd64
-#
-#- name: Install jumphost dependencies and configure docker
-#  hosts: jumphost
-#  environment:
-#    "{{ proxy_env }}"
-#  roles:
-#    - install_dependencies
-#    - docker
+- name: Prepare baremetal machine
+  include: ubuntu_server_baremetal_deploy_samplevnfs.yml
+  vars:
+    YARD_IMG_ARCH: amd64
+
+- name: Install jumphost dependencies and configure docker
+  hosts: jumphost
+  environment:
+    "{{ proxy_env }}"
+  roles:
+    - install_dependencies
+    - docker
 
 - name: "handle all openstack stuff when: openrc_file is defined"
   include: prepare_openstack.yml
index c83ccca..ab7371a 100644 (file)
@@ -19,6 +19,6 @@
     is_public: yes
     disk_format: qcow2
     container_format: bare
-    filename: "{{ raw_imgfile }}"
+    filename: "{{ imgfile }}"
     properties:
       hw_vif_multiqueue_enabled: true
diff --git a/ansible/roles/infra_destroy_previous_configuration/tasks/delete_network.yml b/ansible/roles/infra_destroy_previous_configuration/tasks/delete_network.yml
new file mode 100644 (file)
index 0000000..314ee30
--- /dev/null
@@ -0,0 +1,48 @@
+# Copyright (c) 2017-2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: Destroy old networks created by virt
+  virt_net:
+    name: "{{ network_item.name }}"
+    command: destroy
+  when: network_item.name in virt_nets.list_nets
+
+# Ignoring errors as a network can be created without being defined.
+# This can happen if a user manually creates a network using the virsh command.
+# If the network is not defined, the undefine code will throw an error.
+- name: Undefine old networks defined by virt
+  virt_net:
+    name: "{{ network_item.name }}"
+    command: undefine
+  when: network_item.name in virt_nets.list_nets
+  ignore_errors: yes
+
+- name: Check if "ovs-vsctl" command is present
+  command: which ovs-vsctl
+  register: ovs_vsctl_present
+  ignore_errors: yes
+
+- name: Destroy OVS bridge if it exists
+  command: ovs-vsctl --if-exists -- del-br "{{ network_item.name }}"
+  when: ovs_vsctl_present.rc == 0
+
+- name: Check if linux bridge is present
+  stat: path="{{ '/sys/class/net/'+network_item.name+'/brif/' }}"
+  register: check_linux_bridge
+
+- name: Remove linux bridge if it exists
+  shell: |
+    ifconfig "{{ network_item.name }}" down
+    brctl delbr "{{ network_item.name }}"
+  when: check_linux_bridge.stat.exists
diff --git a/ansible/roles/infra_destroy_previous_configuration/tasks/main.yml b/ansible/roles/infra_destroy_previous_configuration/tasks/main.yml
new file mode 100644 (file)
index 0000000..5595cd5
--- /dev/null
@@ -0,0 +1,47 @@
+# Copyright (c) 2017-2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: Include
+  include_vars:
+    file: "{{ rs_file }}"
+    name: infra_deploy_vars
+
+- name: List virt-nets
+  virt_net: command=list_nets
+  register: virt_nets
+
+- name: List VMs
+  virt: command=list_vms
+  register: virt_vms
+
+- name: Destroy old VMs
+  virt:
+    command: destroy
+    name: "{{ item.hostname }}"
+  when: item.hostname in virt_vms.list_vms
+  with_items: "{{ infra_deploy_vars.nodes }}"
+
+- name: Undefine old VMs
+  virt:
+    command: undefine
+    name: "{{ item.hostname }}"
+  when: item.hostname in virt_vms.list_vms
+  with_items: "{{ infra_deploy_vars.nodes }}"
+
+- name: Delete old networks
+  include_tasks: delete_network.yml
+  extra_vars: "{{ virt_nets }}"
+  loop_control:
+    loop_var: network_item
+  with_items: "{{ infra_deploy_vars.networks }}"
index 01ad4ba..e82ad83 100644 (file)
     remote_src: yes
     force: yes
     mode: 0755
-
-- name: make dpdk_nic_bind.py for backwards compatibility
-  copy:
-    src: "{{ dpdk_devbind[dpdk_version] }}"
-    dest: "{{ INSTALL_BIN_PATH }}/dpdk_nic_bind.py"
-    remote_src: yes
-    force: yes
-    mode: 0755
index 7ba1fc8..9113c88 100644 (file)
@@ -31,9 +31,6 @@
     dest: "{{ INSTALL_BIN_PATH }}/trex_client"
     state: link
 
-# Don't use trex/scripts/dpdk_nic_bind.py use DPDK usertools/dpdk-devbind.py
-#- command: cp "{{ INSTALL_BIN_PATH }}/trex/scripts/dpdk_nic_bind.py" "{{ INSTALL_BIN_PATH }}"
-
 - name: add scripts to PYTHONPATH
   lineinfile:
     dest: /etc/environment
index b62bf63..47526cd 100644 (file)
@@ -80,6 +80,9 @@ H A
    opnfv_yardstick_tc052.rst
    opnfv_yardstick_tc053.rst
    opnfv_yardstick_tc054.rst
+   opnfv_yardstick_tc056.rst
+   opnfv_yardstick_tc057.rst
+   opnfv_yardstick_tc058.rst
 
 IPv6
 ----
index 01aa99a..e6e06df 100644 (file)
@@ -98,7 +98,7 @@ Yardstick Test Case Description TC056
 |              |                                                              |
 +--------------+--------------------------------------------------------------+
 |configuration | This test case needs two configuration files:                |
-|              | 1) test case file:opnfv_yardstick_tc056.yaml                 |
+|              | 1) test case file: opnfv_yardstick_tc056.yaml                |
 |              | -Attackers: see above "attackers" description                |
 |              | -waiting_time: which is the time (seconds) from the process  |
|              | being killed to stopping the monitors                        |
index fb162d3..df682ac 100644 (file)
@@ -12,7 +12,7 @@ nodes:
     ram: 8192
     vcpus: 4
 
-  - name Controller_Compute VM
+  - name: Controller_Compute VM
     openstack_node: controller_compute
     hostname: controller_compute
     interfaces:
index 88c0e65..aacafdf 100644 (file)
@@ -49,7 +49,6 @@ pyroute2==0.4.21        # dual license GPLv2+ and Apache v2; OSI Approved  GNU G
 pyrsistent==0.14.1      # LICENSE.mit; OSI Approved  MIT License
 python-cinderclient==3.1.0      # OSI Approved  Apache Software License
 python-glanceclient==2.8.0      # OSI Approved  Apache Software License
-python-heatclient==1.11.1       # OSI Approved  Apache Software License
 python-keystoneclient==3.13.0   # OSI Approved  Apache Software License
 python-neutronclient==6.5.0     # OSI Approved  Apache Software License
 python-novaclient==9.1.1        # OSI Approved  Apache Software License
index 10902a7..f59146c 100644 (file)
@@ -21,10 +21,10 @@ nsd:nsd-catalog:
         constituent-vnfd:
         -   member-vnf-index: '1'
             vnfd-id-ref: tg__0
-            VNF model: ../../vnf_descriptors/tg_prox_tpl-1.yaml
+            VNF model: ../../vnf_descriptors/tg_prox_tpl.yaml
         -   member-vnf-index: '2'
             vnfd-id-ref: vnf__0
-            VNF model: ../../vnf_descriptors/prox_vnf-1.yaml
+            VNF model: ../../vnf_descriptors/prox_vnf.yaml
         vld:
         -   id: uplink_0
             name: tg__0 to vnf__0 link 1
index 11eed52..63d0acc 100644 (file)
@@ -21,10 +21,10 @@ nsd:nsd-catalog:
         constituent-vnfd:
         -   member-vnf-index: '1'
             vnfd-id-ref: tg__0
-            VNF model: ../../vnf_descriptors/tg_prox_tpl-2.yaml
+            VNF model: ../../vnf_descriptors/tg_prox_tpl.yaml
         -   member-vnf-index: '2'
             vnfd-id-ref: vnf__0
-            VNF model: ../../vnf_descriptors/prox_vnf-2.yaml
+            VNF model: ../../vnf_descriptors/prox_vnf.yaml
         vld:
         -   id: uplink_0
             name: tg__0 to vnf__0 link 1
index eda239e..b4b0036 100644 (file)
@@ -21,10 +21,10 @@ nsd:nsd-catalog:
         constituent-vnfd:
         -   member-vnf-index: '1'
             vnfd-id-ref: tg__0
-            VNF model: ../../vnf_descriptors/tg_prox_tpl-4.yaml
+            VNF model: ../../vnf_descriptors/tg_prox_tpl.yaml
         -   member-vnf-index: '2'
             vnfd-id-ref: vnf__0
-            VNF model: ../../vnf_descriptors/prox_vnf-4.yaml
+            VNF model: ../../vnf_descriptors/prox_vnf.yaml
         vld:
         -   id: uplink_0
             name: tg__0 to vnf__0 link 1
diff --git a/samples/vnf_samples/vnf_descriptors/prox_vnf-2.yaml b/samples/vnf_samples/vnf_descriptors/prox_vnf-2.yaml
deleted file mode 100644 (file)
index 13c4e9d..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-# Copyright (c) 2017 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-vnfd:vnfd-catalog:
-    vnfd:
-    -   id: ProxApproxVnf
-        name: ProxVnf
-        short-name: ProxVnf
-        description: PROX approximation using DPDK
-        mgmt-interface:
-            vdu-id: prox-baremetal
-            {% if user is defined %}
-            user: '{{user}}'  # Value filled by vnfdgen
-            {% endif %}
-            {% if password is defined %}
-            password: '{{password}}'  # Value filled by vnfdgen
-            {% endif %}
-            {% if ip is defined %}
-            ip: '{{ip}}'  # Value filled by vnfdgen
-            {% endif %}
-            {% if key_filename is defined %}
-            key_filename: '{{key_filename}}'  # Value filled by vnfdgen
-            {% endif %}
-        vdu:
-        -   id: proxvnf-baremetal
-            name: proxvnf-baremetal
-            description: PROX approximation using DPDK
-            vm-flavor:
-                vcpu-count: '4'
-                memory-mb: '4096'
-            routing_table: {{ routing_table }}
-            nd_route_tbl: {{ nd_route_tbl }}
-        benchmark:
-            kpi:
-                - packets_in
-                - packets_fwd
-                - packets_dropped
diff --git a/samples/vnf_samples/vnf_descriptors/prox_vnf-4.yaml b/samples/vnf_samples/vnf_descriptors/prox_vnf-4.yaml
deleted file mode 100644 (file)
index 13c4e9d..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-# Copyright (c) 2017 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-vnfd:vnfd-catalog:
-    vnfd:
-    -   id: ProxApproxVnf
-        name: ProxVnf
-        short-name: ProxVnf
-        description: PROX approximation using DPDK
-        mgmt-interface:
-            vdu-id: prox-baremetal
-            {% if user is defined %}
-            user: '{{user}}'  # Value filled by vnfdgen
-            {% endif %}
-            {% if password is defined %}
-            password: '{{password}}'  # Value filled by vnfdgen
-            {% endif %}
-            {% if ip is defined %}
-            ip: '{{ip}}'  # Value filled by vnfdgen
-            {% endif %}
-            {% if key_filename is defined %}
-            key_filename: '{{key_filename}}'  # Value filled by vnfdgen
-            {% endif %}
-        vdu:
-        -   id: proxvnf-baremetal
-            name: proxvnf-baremetal
-            description: PROX approximation using DPDK
-            vm-flavor:
-                vcpu-count: '4'
-                memory-mb: '4096'
-            routing_table: {{ routing_table }}
-            nd_route_tbl: {{ nd_route_tbl }}
-        benchmark:
-            kpi:
-                - packets_in
-                - packets_fwd
-                - packets_dropped
diff --git a/samples/vnf_samples/vnf_descriptors/tg_prox_tpl-1.yaml b/samples/vnf_samples/vnf_descriptors/tg_prox_tpl-1.yaml
deleted file mode 100644 (file)
index 7301439..0000000
+++ /dev/null
@@ -1,46 +0,0 @@
-# Copyright (c) 2017 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-vnfd:vnfd-catalog:
-    vnfd:
-    -   id: ProxTrafficGen  # nsb class mapping
-        name: proxverifier
-        short-name: proxverifier
-        description: prox stateless traffic verifier
-        mgmt-interface:
-            vdu-id: proxgen-baremetal
-            {% if user is defined %}
-            user: '{{user}}'  # Value filled by vnfdgen
-            {% endif %}
-            {% if password is defined %}
-            password: '{{password}}'  # Value filled by vnfdgen
-            {% endif %}
-            {% if ip is defined %}
-            ip: '{{ip}}'  # Value filled by vnfdgen
-            {% endif %}
-            {% if key_filename is defined %}
-            key_filename: '{{key_filename}}'  # Value filled by vnfdgen
-            {% endif %}
-        vdu:
-        -   id: proxgen-baremetal
-            name: proxgen-baremetal
-            description: prox stateless traffic verifier
-        benchmark:
-            kpi:
-                - rx_throughput_fps
-                - tx_throughput_fps
-                - tx_throughput_mbps
-                - rx_throughput_mbps
-                - in_packets
-                - out_packets
diff --git a/samples/vnf_samples/vnf_descriptors/tg_prox_tpl-4.yaml b/samples/vnf_samples/vnf_descriptors/tg_prox_tpl-4.yaml
deleted file mode 100644 (file)
index 20bd12c..0000000
+++ /dev/null
@@ -1,47 +0,0 @@
-# Copyright (c) 2017 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-vnfd:vnfd-catalog:
-    vnfd:
-    -   id: ProxTrafficGen  # nsb class mapping
-        name: proxverifier
-        short-name: proxverifier
-        description: prox stateless traffic verifier
-        mgmt-interface:
-            vdu-id: proxgen-baremetal
-            {% if user is defined %}
-            user: '{{user}}'  # Value filled by vnfdgen
-            {% endif %}
-            {% if password is defined %}
-            password: '{{password}}'  # Value filled by vnfdgen
-            {% endif %}
-            {% if ip is defined %}
-            ip: '{{ip}}'  # Value filled by vnfdgen
-            {% endif %}
-            {% if key_filename is defined %}
-            key_filename: '{{key_filename}}'  # Value filled by vnfdgen
-            {% endif %}
-        vdu:
-        -   id: proxgen-baremetal
-            name: proxgen-baremetal
-            description: prox stateless traffic verifier
-
-        benchmark:
-            kpi:
-                - rx_throughput_fps
-                - tx_throughput_fps
-                - tx_throughput_mbps
-                - rx_throughput_mbps
-                - in_packets
-                - out_packets
index 7f6571d..881ef92 100755 (executable)
--- a/setup.py
+++ b/setup.py
@@ -53,6 +53,7 @@ setup(
             'yardstick=yardstick.main:main',
             'yardstick-plot=yardstick.plot.plotter:main [plot]'
         ],
+        'yardstick.scenario': []
     },
     scripts=[
         'tools/yardstick-img-modify',
index f933df2..ee9815c 100644 (file)
@@ -12,6 +12,9 @@ testrepository==0.0.20      # OSI Approved  BSD License; OSI Approved  Apache So
 testtools==2.3.0            # OSI Approved  MIT License
 unittest2==1.1.0            # OSI Approved  BSD License
 
+# NOTE(ralonsoh): to be removed, only for coverage support
+python-heatclient==1.8.1        # OSI Approved  Apache Software License
+
 # Yardstick F release <-> OpenStack Pike release
 openstack_requirements==1.1.0   # OSI Approved  Apache Software License
 -e git+https://github.com/openstack/requirements.git@stable/pike#egg=os_requirements
index 78e3429..a95e6bc 100644 (file)
@@ -51,3 +51,56 @@ class ScenarioTestCase(unittest.TestCase):
             pass
 
         self.assertEqual(str(None), DummyScenario.get_description())
+
+    def test_get_types(self):
+        scenario_names = set(
+            scenario.__scenario_type__ for scenario in
+            base.Scenario.get_types() if hasattr(scenario,
+                                                 '__scenario_type__'))
+        existing_scenario_class_names = {
+            'Iperf3', 'CACHEstat', 'SpecCPU2006', 'Dummy', 'NSPerf', 'Parser'}
+        self.assertTrue(existing_scenario_class_names.issubset(scenario_names))
+
+    def test_get_cls_existing_scenario(self):
+        scenario_name = 'NSPerf'
+        scenario = base.Scenario.get_cls(scenario_name)
+        self.assertEqual(scenario_name, scenario.__scenario_type__)
+
+    def test_get_cls_non_existing_scenario(self):
+        wrong_scenario_name = 'Non-existing-scenario'
+        with self.assertRaises(RuntimeError) as exc:
+            base.Scenario.get_cls(wrong_scenario_name)
+        self.assertEqual('No such scenario type %s' % wrong_scenario_name,
+                         str(exc.exception))
+
+    def test_get_existing_scenario(self):
+        scenario_name = 'NSPerf'
+        scenario_module = ('yardstick.benchmark.scenarios.networking.'
+                           'vnf_generic.NetworkServiceTestCase')
+        self.assertEqual(scenario_module, base.Scenario.get(scenario_name))
+
+    def test_get_non_existing_scenario(self):
+        wrong_scenario_name = 'Non-existing-scenario'
+        with self.assertRaises(RuntimeError) as exc:
+            base.Scenario.get(wrong_scenario_name)
+        self.assertEqual('No such scenario type %s' % wrong_scenario_name,
+                         str(exc.exception))
+
+
+class IterScenarioClassesTestCase(unittest.TestCase):
+
+    def test_no_scenario_type_defined(self):
+        some_existing_scenario_class_names = [
+            'Iperf3', 'CACHEstat', 'SpecCPU2006', 'Dummy', 'NSPerf', 'Parser']
+        scenario_types = [scenario.__scenario_type__ for scenario
+                          in base._iter_scenario_classes()]
+        for class_name in some_existing_scenario_class_names:
+            self.assertIn(class_name, scenario_types)
+
+    def test_scenario_type_defined(self):
+        some_existing_scenario_class_names = [
+            'Iperf3', 'CACHEstat', 'SpecCPU2006', 'Dummy', 'NSPerf', 'Parser']
+        for class_name in some_existing_scenario_class_names:
+            scenario_class = next(base._iter_scenario_classes(
+                scenario_type=class_name))
+            self.assertEqual(class_name, scenario_class.__scenario_type__)
index 9bb5ed3..e30aee8 100644 (file)
@@ -117,7 +117,7 @@ Other crypto devices
         self.assertEqual(conn, dpdk_bind_helper.ssh_helper)
         self.assertEqual(self.CLEAN_STATUS, dpdk_bind_helper.dpdk_status)
         self.assertIsNone(dpdk_bind_helper.status_nic_row_re)
-        self.assertIsNone(dpdk_bind_helper._dpdk_nic_bind_attr)
+        self.assertIsNone(dpdk_bind_helper._dpdk_devbind)
         self.assertIsNone(dpdk_bind_helper._status_cmd_attr)
 
     def test__dpdk_execute(self):
index 2a2647a..f9a1014 100644 (file)
@@ -343,6 +343,6 @@ class TestAclApproxVnf(unittest.TestCase):
         acl_approx_vnf.used_drivers = {"01:01.0": "i40e",
                                        "01:01.1": "i40e"}
         acl_approx_vnf.vnf_execute = mock.MagicMock()
-        acl_approx_vnf.dpdk_nic_bind = "dpdk_nic_bind.py"
+        acl_approx_vnf.dpdk_devbind = "dpdk-devbind.py"
         acl_approx_vnf._resource_collect_stop = mock.Mock()
         self.assertEqual(None, acl_approx_vnf.terminate())
index f2ce18f..62b3c74 100644 (file)
@@ -390,22 +390,6 @@ class TestCgnaptApproxVnf(unittest.TestCase):
         self.assertIsNone(cgnapt_approx_vnf.instantiate(self.scenario_cfg,
                                                         self.context_cfg))
 
-    @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.time")
-    @mock.patch(SSH_HELPER)
-    def test_terminate(self, ssh, *args):
-        mock_ssh(ssh)
-
-        vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
-        cgnapt_approx_vnf = CgnaptApproxVnf(name, vnfd)
-        cgnapt_approx_vnf._vnf_process = mock.MagicMock()
-        cgnapt_approx_vnf._vnf_process.terminate = mock.Mock()
-        cgnapt_approx_vnf.used_drivers = {"01:01.0": "i40e",
-                                          "01:01.1": "i40e"}
-        cgnapt_approx_vnf.vnf_execute = mock.MagicMock()
-        cgnapt_approx_vnf.dpdk_nic_bind = "dpdk_nic_bind.py"
-        cgnapt_approx_vnf._resource_collect_stop = mock.Mock()
-        self.assertEqual(None, cgnapt_approx_vnf.terminate())
-
     @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.time")
     @mock.patch(SSH_HELPER)
     def test__vnf_up_post(self, ssh, *args):
index cda3852..472052b 100644 (file)
@@ -462,15 +462,3 @@ class TestUdpReplayApproxVnf(unittest.TestCase):
         self.assertIsNone(udp_replay_approx_vnf.instantiate(self.SCENARIO_CFG, self.CONTEXT_CFG))
         with self.assertRaises(RuntimeError):
             udp_replay_approx_vnf.wait_for_instantiate()
-
-    @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.time")
-    @mock.patch(SSH_HELPER)
-    def test_terminate(self, ssh, *args):
-        mock_ssh(ssh)
-
-        udp_replay_approx_vnf = UdpReplayApproxVnf(NAME, self.VNFD_0)
-        udp_replay_approx_vnf._vnf_process = mock.MagicMock()
-        udp_replay_approx_vnf._vnf_process.terminate = mock.Mock()
-        udp_replay_approx_vnf.used_drivers = {"01:01.0": "i40e", "01:01.1": "i40e"}
-        udp_replay_approx_vnf.dpdk_nic_bind = "dpdk_nic_bind.py"
-        self.assertEqual(None, udp_replay_approx_vnf.terminate())
index d128db0..f0a5666 100644 (file)
@@ -348,18 +348,3 @@ pipeline>
                                                     'rules': ""}}
         self.scenario_cfg.update({"nodes": {"vnf__1": ""}})
         self.assertIsNone(vfw_approx_vnf.instantiate(self.scenario_cfg, self.context_cfg))
-
-    @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.time")
-    @mock.patch(SSH_HELPER)
-    def test_terminate(self, ssh, *args):
-        mock_ssh(ssh)
-
-        vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
-        vfw_approx_vnf = FWApproxVnf(name, vnfd)
-        vfw_approx_vnf._vnf_process = mock.MagicMock()
-        vfw_approx_vnf.used_drivers = {"01:01.0": "i40e",
-                                       "01:01.1": "i40e"}
-        vfw_approx_vnf.vnf_execute = mock.Mock()
-        vfw_approx_vnf.dpdk_nic_bind = "dpdk_nic_bind.py"
-        vfw_approx_vnf._resource_collect_stop = mock.Mock()
-        self.assertIsNone(vfw_approx_vnf.terminate())
index 210e6ed..6a9f2e7 100755 (executable)
@@ -16,6 +16,6 @@
 ANSIBLE_SCRIPTS="${0%/*}/../ansible"
 
 cd ${ANSIBLE_SCRIPTS} &&\
-ansible-playbook \
+sudo -EH ansible-playbook \
          -e rs_file='../etc/infra/infra_deploy.yaml' \
          -i inventory.ini infra_deploy.yml
index 3017083..14738da 100644 (file)
@@ -310,7 +310,7 @@ class StandaloneContextHelper(object):
         return driver
 
     @classmethod
-    def get_nic_details(cls, connection, networks, dpdk_nic_bind):
+    def get_nic_details(cls, connection, networks, dpdk_devbind):
         for key, ports in networks.items():
             if key == "mgmt":
                 continue
@@ -320,11 +320,11 @@ class StandaloneContextHelper(object):
             driver = cls.get_kernel_module(connection, phy_ports, phy_driver)
 
             # Make sure that ports are bound to kernel drivers e.g. i40e/ixgbe
-            bind_cmd = "{dpdk_nic_bind} --force -b {driver} {port}"
+            bind_cmd = "{dpdk_devbind} --force -b {driver} {port}"
             lshw_cmd = "lshw -c network -businfo | grep '{port}'"
             link_show_cmd = "ip -s link show {interface}"
 
-            cmd = bind_cmd.format(dpdk_nic_bind=dpdk_nic_bind,
+            cmd = bind_cmd.format(dpdk_devbind=dpdk_devbind,
                                   driver=driver, port=ports['phy_port'])
             connection.execute(cmd)
 
index 3755b84..c931d85 100644 (file)
@@ -57,7 +57,7 @@ class OvsDpdkContext(Context):
         self.file_path = None
         self.sriov = []
         self.first_run = True
-        self.dpdk_nic_bind = ""
+        self.dpdk_devbind = ''
         self.vm_names = []
         self.name = None
         self.nfvi_host = []
@@ -116,12 +116,12 @@ class OvsDpdkContext(Context):
         ]
         for cmd in cmd_list:
             self.connection.execute(cmd)
-        bind_cmd = "{dpdk_nic_bind} --force -b {driver} {port}"
+        bind_cmd = "{dpdk_devbind} --force -b {driver} {port}"
         phy_driver = "vfio-pci"
-        for _, port in self.networks.items():
+        for port in self.networks.values():
             vpci = port.get("phy_port")
-            self.connection.execute(bind_cmd.format(dpdk_nic_bind=self.dpdk_nic_bind,
-                                                    driver=phy_driver, port=vpci))
+            self.connection.execute(bind_cmd.format(
+                dpdk_devbind=self.dpdk_devbind, driver=phy_driver, port=vpci))
 
     def start_ovs_serverswitch(self):
         vpath = self.ovs_properties.get("vpath")
@@ -241,7 +241,7 @@ class OvsDpdkContext(Context):
             return
 
         self.connection = ssh.SSH.from_node(self.host_mgmt)
-        self.dpdk_nic_bind = provision_tool(
+        self.dpdk_devbind = provision_tool(
             self.connection,
             os.path.join(get_nsb_option("bin_path"), "dpdk-devbind.py"))
 
@@ -249,9 +249,8 @@ class OvsDpdkContext(Context):
         self.check_ovs_dpdk_env()
         #    Todo: NFVi deploy (sriov, vswitch, ovs etc) based on the config.
         StandaloneContextHelper.install_req_libs(self.connection)
-        self.networks = StandaloneContextHelper.get_nic_details(self.connection,
-                                                                self.networks,
-                                                                self.dpdk_nic_bind)
+        self.networks = StandaloneContextHelper.get_nic_details(
+            self.connection, self.networks, self.dpdk_devbind)
 
         self.setup_ovs()
         self.start_ovs_serverswitch()
@@ -271,12 +270,12 @@ class OvsDpdkContext(Context):
         self.cleanup_ovs_dpdk_env()
 
         # Bind nics back to kernel
-        bind_cmd = "{dpdk_nic_bind} --force -b {driver} {port}"
+        bind_cmd = "{dpdk_devbind} --force -b {driver} {port}"
         for port in self.networks.values():
             vpci = port.get("phy_port")
             phy_driver = port.get("driver")
-            self.connection.execute(bind_cmd.format(dpdk_nic_bind=self.dpdk_nic_bind,
-                                                    driver=phy_driver, port=vpci))
+            self.connection.execute(bind_cmd.format(
+                dpdk_devbind=self.dpdk_devbind, driver=phy_driver, port=vpci))
 
         # Todo: NFVi undeploy (sriov, vswitch, ovs etc) based on the config.
         for vm in self.vm_names:
index 9d8423b..9cca3e1 100644 (file)
@@ -41,7 +41,7 @@ class SriovContext(Context):
         self.file_path = None
         self.sriov = []
         self.first_run = True
-        self.dpdk_nic_bind = ""
+        self.dpdk_devbind = ''
         self.vm_names = []
         self.name = None
         self.nfvi_host = []
@@ -83,15 +83,14 @@ class SriovContext(Context):
             return
 
         self.connection = ssh.SSH.from_node(self.host_mgmt)
-        self.dpdk_nic_bind = provision_tool(
+        self.dpdk_devbind = provision_tool(
             self.connection,
-            os.path.join(get_nsb_option("bin_path"), "dpdk_nic_bind.py"))
+            os.path.join(get_nsb_option("bin_path"), "dpdk-devbind.py"))
 
         #    Todo: NFVi deploy (sriov, vswitch, ovs etc) based on the config.
         StandaloneContextHelper.install_req_libs(self.connection)
-        self.networks = StandaloneContextHelper.get_nic_details(self.connection,
-                                                                self.networks,
-                                                                self.dpdk_nic_bind)
+        self.networks = StandaloneContextHelper.get_nic_details(
+            self.connection, self.networks, self.dpdk_devbind)
         self.nodes = self.setup_sriov_context()
 
         LOG.debug("Waiting for VM to come up...")
@@ -138,7 +137,7 @@ class SriovContext(Context):
         except StopIteration:
             pass
         else:
-            raise ValueError("Duplicate nodes!!! Nodes: %s %s",
+            raise ValueError("Duplicate nodes!!! Nodes: %s %s" %
                              (node, duplicate))
 
         node["name"] = attr_name
index 7af8583..10a7288 100644 (file)
 # yardstick comment: this is a modified copy of
 # rally/rally/benchmark/scenarios/base.py
 
-""" Scenario base class
-"""
+from stevedore import extension
 
-from __future__ import absolute_import
 import yardstick.common.utils as utils
 
 
+def _iter_scenario_classes(scenario_type=None):
+    """Generator over all 'Scenario' subclasses
+
+    This function will iterate over all 'Scenario' subclasses defined in this
+    project and will load any class introduced by any installed plugin project,
+    defined in 'entry_points' section, under 'yardstick.scenarios' subsection.
+    """
+    extension.ExtensionManager(namespace='yardstick.scenarios',
+                               invoke_on_load=False)
+    for scenario in utils.itersubclasses(Scenario):
+        if not scenario_type:
+            yield scenario
+        elif getattr(scenario, '__scenario_type__', None) == scenario_type:
+            yield scenario
+
+
 class Scenario(object):
 
     def setup(self):
         """ default impl for scenario setup """
         pass
 
-    def run(self, args):
+    def run(self, *args):
         """ catcher for not implemented run methods in subclasses """
         raise RuntimeError("run method not implemented")
 
@@ -41,16 +55,15 @@ class Scenario(object):
     def get_types():
         """return a list of known runner type (class) names"""
         scenarios = []
-        for scenario in utils.itersubclasses(Scenario):
+        for scenario in _iter_scenario_classes():
             scenarios.append(scenario)
         return scenarios
 
     @staticmethod
     def get_cls(scenario_type):
         """return class of specified type"""
-        for scenario in utils.itersubclasses(Scenario):
-            if scenario_type == scenario.__scenario_type__:
-                return scenario
+        for scenario in _iter_scenario_classes(scenario_type):
+            return scenario
 
         raise RuntimeError("No such scenario type %s" % scenario_type)
 
@@ -58,11 +71,8 @@ class Scenario(object):
     def get(scenario_type):
         """Returns instance of a scenario runner for execution type.
         """
-        for scenario in utils.itersubclasses(Scenario):
-            if scenario_type == scenario.__scenario_type__:
-                return scenario.__module__ + "." + scenario.__name__
-
-        raise RuntimeError("No such scenario type %s" % scenario_type)
+        scenario = Scenario.get_cls(scenario_type)
+        return scenario.__module__ + "." + scenario.__name__
 
     @classmethod
     def get_scenario_type(cls):
index 4780822..e38dd24 100644 (file)
@@ -57,3 +57,9 @@ class YardstickException(Exception):
 class FunctionNotImplemented(YardstickException):
     message = ('The function "%(function_name)s" is not implemented in '
                '"%(class_name)" class.')
+
+
+class HeatTemplateError(YardstickException):
+    """Error in Heat during the stack deployment"""
+    message = ('Error in Heat during the creation of the OpenStack stack '
+               '"%(stack_name)s"')
index c076131..8c44b26 100644 (file)
@@ -34,11 +34,11 @@ class DpdkBindHelperException(Exception):
 
 
 class DpdkBindHelper(object):
-    DPDK_STATUS_CMD = "{dpdk_nic_bind} --status"
-    DPDK_BIND_CMD = "sudo {dpdk_nic_bind} {force} -b {driver} {vpci}"
+    DPDK_STATUS_CMD = "{dpdk_devbind} --status"
+    DPDK_BIND_CMD = "sudo {dpdk_devbind} {force} -b {driver} {vpci}"
 
-    NIC_ROW_RE = re.compile("([^ ]+) '([^']+)' (?:if=([^ ]+) )?drv=([^ ]+) "
-                            "unused=([^ ]*)(?: (\*Active\*))?")
+    NIC_ROW_RE = re.compile(r"([^ ]+) '([^']+)' (?:if=([^ ]+) )?drv=([^ ]+) "
+                            r"unused=([^ ]*)(?: (\*Active\*))?")
     SKIP_RE = re.compile('(====|<none>|^$)')
     NIC_ROW_FIELDS = ['vpci', 'dev_type', 'iface', 'driver', 'unused', 'active']
 
@@ -64,7 +64,7 @@ class DpdkBindHelper(object):
     def __init__(self, ssh_helper):
         self.dpdk_status = None
         self.status_nic_row_re = None
-        self._dpdk_nic_bind_attr = None
+        self._dpdk_devbind = None
         self._status_cmd_attr = None
 
         self.ssh_helper = ssh_helper
@@ -74,19 +74,19 @@ class DpdkBindHelper(object):
         res = self.ssh_helper.execute(*args, **kwargs)
         if res[0] != 0:
             raise DpdkBindHelperException('{} command failed with rc={}'.format(
-                self._dpdk_nic_bind, res[0]))
+                self.dpdk_devbind, res[0]))
         return res
 
     @property
-    def _dpdk_nic_bind(self):
-        if self._dpdk_nic_bind_attr is None:
-            self._dpdk_nic_bind_attr = self.ssh_helper.provision_tool(tool_file="dpdk-devbind.py")
-        return self._dpdk_nic_bind_attr
+    def dpdk_devbind(self):
+        if self._dpdk_devbind is None:
+            self._dpdk_devbind = self.ssh_helper.provision_tool(tool_file="dpdk-devbind.py")
+        return self._dpdk_devbind
 
     @property
     def _status_cmd(self):
         if self._status_cmd_attr is None:
-            self._status_cmd_attr = self.DPDK_STATUS_CMD.format(dpdk_nic_bind=self._dpdk_nic_bind)
+            self._status_cmd_attr = self.DPDK_STATUS_CMD.format(dpdk_devbind=self.dpdk_devbind)
         return self._status_cmd_attr
 
     def _addline(self, active_list, line):
@@ -139,7 +139,7 @@ class DpdkBindHelper(object):
         # accept single PCI or list of PCI
         if isinstance(pci_addresses, six.string_types):
             pci_addresses = [pci_addresses]
-        cmd = self.DPDK_BIND_CMD.format(dpdk_nic_bind=self._dpdk_nic_bind,
+        cmd = self.DPDK_BIND_CMD.format(dpdk_devbind=self.dpdk_devbind,
                                         driver=driver,
                                         vpci=' '.join(list(pci_addresses)),
                                         force='--force' if force else '')
index d58ae56..1a8beae 100644 (file)
 """Heat template and stack management"""
 
 from __future__ import absolute_import
-from __future__ import print_function
-from six.moves import range
-
 import collections
 import datetime
 import getpass
 import logging
-
+import pkg_resources
 import socket
+import tempfile
 import time
 
-import heatclient.client
-import pkg_resources
-
+from oslo_serialization import jsonutils
 from oslo_utils import encodeutils
+import shade
 
 import yardstick.common.openstack_utils as op_utils
+from yardstick.common import exceptions
 from yardstick.common import template_format
 
 log = logging.getLogger(__name__)
@@ -36,123 +34,82 @@ HEAT_KEY_UUID_LENGTH = 8
 
 PROVIDER_SRIOV = "sriov"
 
+_DEPLOYED_STACKS = {}
+
 
 def get_short_key_uuid(uuid):
     return str(uuid)[:HEAT_KEY_UUID_LENGTH]
 
 
-class HeatObject(object):
-    """base class for template and stack"""
-
-    def __init__(self):
-        self._heat_client = None
-        self.uuid = None
-
-    @property
-    def heat_client(self):
-        """returns a heat client instance"""
-
-        if self._heat_client is None:
-            sess = op_utils.get_session()
-            heat_endpoint = op_utils.get_endpoint(service_type='orchestration')
-            self._heat_client = heatclient.client.Client(
-                op_utils.get_heat_api_version(),
-                endpoint=heat_endpoint, session=sess)
-
-        return self._heat_client
-
-    def status(self):
-        """returns stack state as a string"""
-        heat_client = self.heat_client
-        stack = heat_client.stacks.get(self.uuid)
-        return stack.stack_status
-
-
-class HeatStack(HeatObject):
+class HeatStack(object):
     """Represents a Heat stack (deployed template) """
-    stacks = []
 
     def __init__(self, name):
-        super(HeatStack, self).__init__()
-        self.uuid = None
         self.name = name
-        self.outputs = None
-        HeatStack.stacks.append(self)
+        self.outputs = {}
+        self._cloud = shade.openstack_cloud()
+        self._stack = None
+
+    def create(self, template, heat_parameters, wait, timeout):
+        """Creates an OpenStack stack from a template"""
+        with tempfile.NamedTemporaryFile('w', delete=False) as template_file:
+            template_file.write(jsonutils.dumps(template))
+            template_file.close()
+            self._stack = self._cloud.create_stack(
+                self.name, template_file=template_file.name, wait=wait,
+                timeout=timeout, **heat_parameters)
+        outputs = self._stack.outputs
+        self.outputs = {output['output_key']: output['output_value'] for output
+                        in outputs}
+        if self.uuid:
+            _DEPLOYED_STACKS[self.uuid] = self._stack
 
     @staticmethod
     def stacks_exist():
-        """check if any stack has been deployed"""
-        return len(HeatStack.stacks) > 0
+        """Check if any stack has been deployed"""
+        return len(_DEPLOYED_STACKS) > 0
 
-    def _delete(self):
-        """deletes a stack from the target cloud using heat"""
+    def delete(self, wait=True):
+        """Deletes a stack in the target cloud"""
         if self.uuid is None:
             return
 
-        log.info("Deleting stack '%s' START, uuid:%s", self.name, self.uuid)
-        heat = self.heat_client
-        template = heat.stacks.get(self.uuid)
-        start_time = time.time()
-        template.delete()
-
-        for status in iter(self.status, u'DELETE_COMPLETE'):
-            log.debug("Deleting stack state: %s", status)
-            if status == u'DELETE_FAILED':
-                raise RuntimeError(
-                    heat.stacks.get(self.uuid).stack_status_reason)
-
-            time.sleep(2)
-
-        end_time = time.time()
-        log.info("Deleting stack '%s' DONE in %d secs", self.name,
-                 end_time - start_time)
-        self.uuid = None
-
-    def delete(self, block=True, retries=3):
-        """deletes a stack in the target cloud using heat (with retry)
-        Sometimes delete fail with "InternalServerError" and the next attempt
-        succeeds. So it is worthwhile to test a couple of times.
-        """
-        if self.uuid is None:
-            return
-
-        if not block:
-            self._delete()
-            return
-
-        for _ in range(retries):
-            try:
-                self._delete()
-                break
-            except RuntimeError as err:
-                log.warning(err.args)
-                time.sleep(2)
-
-        # if still not deleted try once more and let it fail everything
-        if self.uuid is not None:
-            self._delete()
-
-        HeatStack.stacks.remove(self)
+        ret = self._cloud.delete_stack(self.uuid, wait=wait)
+        _DEPLOYED_STACKS.pop(self.uuid)
+        self._stack = None
+        return ret
 
     @staticmethod
     def delete_all():
-        for stack in HeatStack.stacks[:]:
+        """Delete all deployed stacks"""
+        for stack in _DEPLOYED_STACKS:
             stack.delete()
 
-    def update(self):
-        """update a stack"""
-        raise RuntimeError("not implemented")
+    @property
+    def status(self):
+        """Retrieve the current stack status"""
+        if self._stack:
+            return self._stack.status
+
+    @property
+    def uuid(self):
+        """Retrieve the current stack ID"""
+        if self._stack:
+            return self._stack.id
 
 
-class HeatTemplate(HeatObject):
+class HeatTemplate(object):
     """Describes a Heat template and a method to deploy template to a stack"""
 
-    DESCRIPTION_TEMPLATE = """\
+    DESCRIPTION_TEMPLATE = """
 Stack built by the yardstick framework for %s on host %s %s.
 All referred generated resources are prefixed with the template
-name (i.e. %s).\
+name (i.e. %s).
 """
 
+    HEAT_WAIT_LOOP_INTERVAL = 2
+    HEAT_STATUS_COMPLETE = 'COMPLETE'
+
     def _init_template(self):
         timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
         self._template = {
@@ -171,9 +128,7 @@ name (i.e. %s).\
         self.resources = self._template['resources']
 
     def __init__(self, name, template_file=None, heat_parameters=None):
-        super(HeatTemplate, self).__init__()
         self.name = name
-        self.state = "NOT_CREATED"
         self.keystone_client = None
         self.heat_parameters = {}
 
@@ -184,16 +139,13 @@ name (i.e. %s).\
 
         if template_file:
             with open(template_file) as stream:
-                print("Parsing external template:", template_file)
+                log.info('Parsing external template: %s', template_file)
                 template_str = stream.read()
             self._template = template_format.parse(template_str)
             self._parameters = heat_parameters
         else:
             self._init_template()
 
-        # holds results of requested output after deployment
-        self.outputs = {}
-
         log.debug("template object '%s' created", name)
 
     def add_flavor(self, name, vcpus=1, ram=1024, disk=1, ephemeral=0,
@@ -202,9 +154,9 @@ name (i.e. %s).\
         """add to the template a Flavor description"""
         if name is None:
             name = 'auto'
-        log.debug("adding Nova::Flavor '%s' vcpus '%d' ram '%d' disk '%d' " +
-                  "ephemeral '%d' is_public '%s' rxtx_factor '%d' " +
-                  "swap '%d' extra_specs '%s' ",
+        log.debug("adding Nova::Flavor '%s' vcpus '%d' ram '%d' disk '%d' "
+                  "ephemeral '%d' is_public '%s' rxtx_factor '%d' "
+                  "swap '%d' extra_specs '%s'",
                   name, vcpus, ram, disk, ephemeral, is_public,
                   rxtx_factor, swap, str(extra_specs))
 
@@ -600,57 +552,28 @@ name (i.e. %s).\
             'value': {'get_resource': name}
         }
 
-    HEAT_WAIT_LOOP_INTERVAL = 2
-    HEAT_CREATE_COMPLETE_STATUS = u'CREATE_COMPLETE'
-
     def create(self, block=True, timeout=3600):
-        """
-        creates a template in the target cloud using heat
-        returns a dict with the requested output values from the template
+        """Creates a stack in the target based on the stored template
 
-        :param block: Wait for Heat create to finish
-        :type block: bool
-        :param: timeout: timeout in seconds for Heat create, default 3600s
-        :type timeout: int
+        :param block: (bool) Wait for Heat create to finish
+        :param timeout: (int) Timeout in seconds for Heat create,
+               default 3600s
+        :return A dict with the requested output values from the template
         """
         log.info("Creating stack '%s' START", self.name)
 
-        # create stack early to support cleanup, e.g. ctrl-c while waiting
-        stack = HeatStack(self.name)
-
-        heat_client = self.heat_client
         start_time = time.time()
-        stack.uuid = self.uuid = heat_client.stacks.create(
-            stack_name=self.name, template=self._template,
-            parameters=self.heat_parameters)['stack']['id']
+        stack = HeatStack(self.name)
+        stack.create(self._template, self.heat_parameters, block, timeout)
 
         if not block:
-            self.outputs = stack.outputs = {}
-            end_time = time.time()
             log.info("Creating stack '%s' DONE in %d secs",
-                     self.name, end_time - start_time)
+                     self.name, time.time() - start_time)
             return stack
 
-        time_limit = start_time + timeout
-        for status in iter(self.status, self.HEAT_CREATE_COMPLETE_STATUS):
-            log.debug("Creating stack state: %s", status)
-            if status == u'CREATE_FAILED':
-                stack_status_reason = heat_client.stacks.get(self.uuid).stack_status_reason
-                heat_client.stacks.delete(self.uuid)
-                raise RuntimeError(stack_status_reason)
-            if time.time() > time_limit:
-                raise RuntimeError("Heat stack create timeout")
-
-            time.sleep(self.HEAT_WAIT_LOOP_INTERVAL)
+        if stack.status != self.HEAT_STATUS_COMPLETE:
+            raise exceptions.HeatTemplateError(stack_name=self.name)
 
-        end_time = time.time()
-        outputs = heat_client.stacks.get(self.uuid).outputs
         log.info("Creating stack '%s' DONE in %d secs",
-                 self.name, end_time - start_time)
-
-        # keep outputs as unicode
-        self.outputs = {output["output_key"]: output["output_value"] for output
-                        in outputs}
-
-        stack.outputs = self.outputs
+                 self.name, time.time() - start_time)
         return stack
index d94f30d..beda9a5 100755 (executable)
@@ -14,6 +14,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+set -e
+
 INSTALL_OVS_BIN="/usr/src"
 cd $INSTALL_OVS_BIN
 
@@ -22,20 +24,6 @@ if [[ $EUID -ne 0 ]]; then
   exit 1;
 fi
 
-prerequisite()
-{
-  echo "Install required libraries to run collectd..."
-  pkg=(git flex bison build-essential pkg-config automake autotools-dev libltdl-dev cmake qemu-kvm libvirt-bin bridge-utils numactl libnuma-dev libpcap-dev)
-  for i in "${pkg[@]}"; do
-  dpkg-query -W --showformat='${Status}\n' "${i}"|grep "install ok installed"
-  if [  "$?" -eq "1" ]; then
-    apt-get update
-    apt-get -y install "${i}";
-  fi
-  done
-  echo "Done"
-}
-
 download_zip()
 {
   url=$1
@@ -53,6 +41,7 @@ download_zip()
 
 dpdk_build()
 {
+  echo "Build DPDK libraries"
   pushd .
   if [[ $DPDK_VERSION != "" ]]; then
     export DPDK_DIR=$INSTALL_OVS_BIN/dpdk-stable-$DPDK_VERSION
@@ -62,13 +51,15 @@ dpdk_build()
     DPDK_DOWNLOAD="http://fast.dpdk.org/rel/dpdk-$DPDK_VERSION.tar.xz"
     download_zip "${DPDK_DOWNLOAD}" "DPDK"
     cd dpdk-stable-"$DPDK_VERSION"
-    make install -j T=$RTE_TARGET
+    make config T=$RTE_TARGET
+    make install -j $(nproc) T=$RTE_TARGET
   fi
   popd
 }
 
 ovs()
 {
+  echo "Build and install OVS with DPDK"
   pushd .
   if [[ $OVS_VERSION != "" ]]; then
     rm -rf openswitch-"$OVS_VERSION"
@@ -82,7 +73,7 @@ ovs()
     else
       ./configure
     fi
-    make install -j
+    make install -j $(nproc)
   fi
   popd
 }
index faf70cd..9164197 100644 (file)
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
 ##############################################################################
 # Copyright (c) 2017 Intel Corporation
 #
@@ -9,62 +7,87 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 
-# Unittest for yardstick.benchmark.orchestrator.heat
-from contextlib import contextmanager
-from itertools import count
-from tempfile import NamedTemporaryFile
-import time
-import uuid
+import tempfile
 
 import mock
+from oslo_serialization import jsonutils
+from oslo_utils import uuidutils
+import shade
 import unittest
 
 from yardstick.benchmark.contexts import node
+from yardstick.common import exceptions
 from yardstick.orchestrator import heat
 
 
-TARGET_MODULE = 'yardstick.orchestrator.heat'
-
-
-def mock_patch_target_module(inner_import):
-    return mock.patch('.'.join([TARGET_MODULE, inner_import]))
-
-
-@contextmanager
-def timer():
-    start = time.time()
-    data = {'start': start}
-    try:
-        yield data
-    finally:
-        data['end'] = end = time.time()
-        data['delta'] = end - start
-
-
-def index_value_iter(index, index_value, base_value=None):
-    for current_index in count():
-        if current_index == index:
-            yield index_value
-        else:
-            yield base_value
-
+class FakeStack(object):
 
-def get_error_message(error):
-    try:
-        # py2
-        return error.message
-    except AttributeError:
-        # py3
-        return next((arg for arg in error.args if isinstance(arg, str)), None)
+    def __init__(self, outputs=None, status=None, id=None):
+        self.outputs = outputs
+        self.status = status
+        self.id = id
 
 
-class HeatContextTestCase(unittest.TestCase):
+class HeatStackTestCase(unittest.TestCase):
 
-    def test_get_short_key_uuid(self):
-        u = uuid.uuid4()
-        k = heat.get_short_key_uuid(u)
-        self.assertEqual(heat.HEAT_KEY_UUID_LENGTH, len(k))
-        self.assertIn(k, str(u))
+    def setUp(self):
+        self.stack_name = 'STACK NAME'
+        with mock.patch.object(shade, 'openstack_cloud'):
+            self.heatstack = heat.HeatStack(self.stack_name)
+        self._mock_stack_create = mock.patch.object(self.heatstack._cloud,
+                                                    'create_stack')
+        self.mock_stack_create = self._mock_stack_create.start()
+        self._mock_stack_delete = mock.patch.object(self.heatstack._cloud,
+                                                    'delete_stack')
+        self.mock_stack_delete = self._mock_stack_delete.start()
+
+        self.addCleanup(self._cleanup)
+
+    def _cleanup(self):
+        self._mock_stack_create.stop()
+        self._mock_stack_delete.stop()
+        heat._DEPLOYED_STACKS = {}
+
+    def test_create(self):
+        template = {'tkey': 'tval'}
+        heat_parameters = {'pkey': 'pval'}
+        outputs = [{'output_key': 'okey', 'output_value': 'oval'}]
+        id = uuidutils.generate_uuid()
+        self.mock_stack_create.return_value = FakeStack(
+            outputs=outputs, status=mock.Mock(), id=id)
+        mock_tfile = mock.Mock()
+        with mock.patch.object(tempfile._TemporaryFileWrapper, '__enter__',
+                               return_value=mock_tfile):
+            self.heatstack.create(template, heat_parameters, True, 100)
+            mock_tfile.write.assert_called_once_with(jsonutils.dumps(template))
+            mock_tfile.close.assert_called_once()
+
+        self.mock_stack_create.assert_called_once_with(
+            self.stack_name, template_file=mock_tfile.name, wait=True,
+            timeout=100, pkey='pval')
+        self.assertEqual({'okey': 'oval'}, self.heatstack.outputs)
+        self.assertEqual(heat._DEPLOYED_STACKS[id], self.heatstack._stack)
+
+    def test_stacks_exist(self):
+        self.assertEqual(0, self.heatstack.stacks_exist())
+        heat._DEPLOYED_STACKS['id'] = 'stack'
+        self.assertEqual(1, self.heatstack.stacks_exist())
+
+    def test_delete_not_uuid(self):
+        self.assertIsNone(self.heatstack.delete())
+
+    def test_delete_existing_uuid(self):
+        id = uuidutils.generate_uuid()
+        self.heatstack._stack = FakeStack(
+            outputs=mock.Mock(), status=mock.Mock(), id=id)
+        heat._DEPLOYED_STACKS[id] = self.heatstack._stack
+        delete_return = mock.Mock()
+        self.mock_stack_delete.return_value = delete_return
+
+        ret = self.heatstack.delete(wait=True)
+        self.assertEqual(delete_return, ret)
+        self.assertFalse(heat._DEPLOYED_STACKS)
+        self.mock_stack_delete.assert_called_once_with(id, wait=True)
 
 
 class HeatTemplateTestCase(unittest.TestCase):
@@ -75,63 +98,53 @@ class HeatTemplateTestCase(unittest.TestCase):
     def test_add_tenant_network(self):
         self.template.add_network('some-network')
 
-        self.assertEqual(
-            self.template.resources['some-network']['type'],
-            'OS::Neutron::Net')
+        self.assertEqual('OS::Neutron::Net',
+                         self.template.resources['some-network']['type'])
 
     def test_add_provider_network(self):
         self.template.add_network('some-network', 'physnet2', 'sriov')
 
-        self.assertEqual(
-            self.template.resources['some-network']['type'],
-            'OS::Neutron::ProviderNet')
-        self.assertEqual(
-            self.template.resources['some-network']['properties']['physical_network'],
-            'physnet2')
+        self.assertEqual(self.template.resources['some-network']['type'],
+                         'OS::Neutron::ProviderNet')
+        self.assertEqual(self.template.resources['some-network'][
+                             'properties']['physical_network'], 'physnet2')
 
     def test_add_subnet(self):
         netattrs = {'cidr': '10.0.0.0/24',
-                    'provider': None, 'external_network': 'ext_net'}
-        self.template.add_subnet(
-            'some-subnet', "some-network", netattrs['cidr'])
+                    'provider': None,
+                    'external_network': 'ext_net'}
+        self.template.add_subnet('some-subnet', "some-network",
+                                 netattrs['cidr'])
 
-        self.assertEqual(
-            self.template.resources['some-subnet']['type'],
-            'OS::Neutron::Subnet')
-        self.assertEqual(
-            self.template.resources['some-subnet']['properties']['cidr'],
-            '10.0.0.0/24')
+        self.assertEqual(self.template.resources['some-subnet']['type'],
+                         'OS::Neutron::Subnet')
+        self.assertEqual(self.template.resources['some-subnet']['properties'][
+                             'cidr'], '10.0.0.0/24')
 
     def test_add_router(self):
         self.template.add_router('some-router', 'ext-net', 'some-subnet')
 
-        self.assertEqual(
-            self.template.resources['some-router']['type'],
-            'OS::Neutron::Router')
-        self.assertIn(
-            'some-subnet',
-            self.template.resources['some-router']['depends_on'])
+        self.assertEqual(self.template.resources['some-router']['type'],
+                         'OS::Neutron::Router')
+        self.assertIn('some-subnet',
+                      self.template.resources['some-router']['depends_on'])
 
     def test_add_router_interface(self):
-        self.template.add_router_interface(
-            'some-router-if', 'some-router', 'some-subnet')
+        self.template.add_router_interface('some-router-if', 'some-router',
+                                           'some-subnet')
 
-        self.assertEqual(
-            self.template.resources['some-router-if']['type'],
-            'OS::Neutron::RouterInterface')
-        self.assertIn(
-            'some-subnet',
-            self.template.resources['some-router-if']['depends_on'])
+        self.assertEqual(self.template.resources['some-router-if']['type'],
+                         'OS::Neutron::RouterInterface')
+        self.assertIn('some-subnet',
+                      self.template.resources['some-router-if']['depends_on'])
 
     def test_add_servergroup(self):
         self.template.add_servergroup('some-server-group', 'anti-affinity')
 
-        self.assertEqual(
-            self.template.resources['some-server-group']['type'],
-            'OS::Nova::ServerGroup')
-        self.assertEqual(
-            self.template.resources['some-server-group']['properties']['policies'],
-            ['anti-affinity'])
+        self.assertEqual(self.template.resources['some-server-group']['type'],
+                         'OS::Nova::ServerGroup')
+        self.assertEqual(self.template.resources['some-server-group'][
+                             'properties']['policies'], ['anti-affinity'])
 
     def test__add_resources_to_template_raw(self):
         test_context = node.NodeContext()
@@ -142,16 +155,13 @@ class HeatTemplateTestCase(unittest.TestCase):
         test_context.keypair_name = "foo-key"
         test_context.secgroup_name = "foo-secgroup"
         test_context.key_uuid = "2f2e4997-0a8e-4eb7-9fa4-f3f8fbbc393b"
-        heat_object = heat.HeatObject()
 
-        heat_stack = heat.HeatStack("tmpStack")
-        self.assertTrue(heat_stack.stacks_exist())
-
-        test_context.tmpfile = NamedTemporaryFile(delete=True, mode='w+t')
+        test_context.tmpfile = tempfile.NamedTemporaryFile(
+            delete=True, mode='w+t')
         test_context.tmpfile.write("heat_template_version: 2015-04-30")
         test_context.tmpfile.flush()
         test_context.tmpfile.seek(0)
-        heat_template = heat.HeatTemplate(heat_object)
+        heat_template = heat.HeatTemplate('template name')
         heat_template.resources = {}
 
         heat_template.add_network("network1")
@@ -163,324 +173,86 @@ class HeatTemplateTestCase(unittest.TestCase):
         heat_template.add_router("router1", "gw1", "subnet1")
         heat_template.add_router_interface("router_if1", "router1", "subnet1")
         heat_template.add_port("port1", "network1", "subnet1", "normal")
-        heat_template.add_port(
-            "port2",
-            "network2",
-            "subnet2",
-            "normal",
-            sec_group_id="sec_group1",
-            provider="not-sriov")
-        heat_template.add_port(
-            "port3",
-            "network2",
-            "subnet2",
-            "normal",
-            sec_group_id="sec_group1",
-            provider="sriov")
-        heat_template.add_floating_ip(
-            "floating_ip1", "network1", "port1", "router_if1")
-        heat_template.add_floating_ip(
-            "floating_ip2", "network2", "port2", "router_if2", "foo-secgroup")
-        heat_template.add_floating_ip_association(
-            "floating_ip1_association", "floating_ip1", "port1")
+        heat_template.add_port("port2", "network2", "subnet2", "normal",
+                               sec_group_id="sec_group1", provider="not-sriov")
+        heat_template.add_port("port3", "network2", "subnet2", "normal",
+                               sec_group_id="sec_group1", provider="sriov")
+        heat_template.add_floating_ip("floating_ip1", "network1", "port1",
+                                      "router_if1")
+        heat_template.add_floating_ip("floating_ip2", "network2", "port2",
+                                      "router_if2", "foo-secgroup")
+        heat_template.add_floating_ip_association("floating_ip1_association",
+                                                  "floating_ip1", "port1")
         heat_template.add_servergroup("server_grp2", "affinity")
         heat_template.add_servergroup("server_grp3", "anti-affinity")
         heat_template.add_security_group("security_group")
+        heat_template.add_server(name="server1", image="image1",
+                                 flavor="flavor1", flavors=[])
+        heat_template.add_server_group(name="servergroup",
+                                       policies=["policy1", "policy2"])
+        heat_template.add_server_group(name="servergroup",
+                                       policies="policy1")
         heat_template.add_server(
-            name="server1", image="image1", flavor="flavor1", flavors=[])
-        heat_template.add_server_group(
-            name="servergroup", policies=["policy1", "policy2"])
-        heat_template.add_server_group(name="servergroup", policies="policy1")
-        heat_template.add_server(
-            name="server2",
-            image="image1",
-            flavor="flavor1",
-            flavors=[],
-            ports=[
-                "port1",
-                "port2"],
-            networks=[
-                "network1",
-                "network2"],
-            scheduler_hints="hints1",
-            user="user1",
-            key_name="foo-key",
-            user_data="user",
-            metadata={
-                "cat": 1,
-                "doc": 2},
-            additional_properties={
-                "prop1": 1,
-                "prop2": 2})
+            name="server2", image="image1", flavor="flavor1", flavors=[],
+            ports=["port1", "port2"], networks=["network1", "network2"],
+            scheduler_hints="hints1", user="user1", key_name="foo-key",
+            user_data="user", metadata={"cat": 1, "doc": 2},
+            additional_properties={"prop1": 1, "prop2": 2})
         heat_template.add_server(
-            name="server2",
-            image="image1",
-            flavor="flavor1",
-            flavors=[
-                "flavor1",
-                "flavor2"],
-            ports=[
-                "port1",
-                "port2"],
-            networks=[
-                "network1",
-                "network2"],
-            scheduler_hints="hints1",
-            user="user1",
-            key_name="foo-key",
-            user_data="user",
-            metadata={
-                "cat": 1,
-                "doc": 2},
-            additional_properties={
-                "prop1": 1,
-                "prop2": 2})
+            name="server2", image="image1", flavor="flavor1",
+            flavors=["flavor1", "flavor2"], ports=["port1", "port2"],
+            networks=["network1", "network2"], scheduler_hints="hints1",
+            user="user1", key_name="foo-key", user_data="user",
+            metadata={"cat": 1, "doc": 2},
+            additional_properties={"prop1": 1, "prop2": 2})
         heat_template.add_server(
-            name="server2",
-            image="image1",
-            flavor="flavor1",
-            flavors=[
-                "flavor3",
-                "flavor4"],
-            ports=[
-                "port1",
-                "port2"],
-            networks=[
-                "network1",
-                "network2"],
-            scheduler_hints="hints1",
-            user="user1",
-            key_name="foo-key",
-            user_data="user",
-            metadata={
-                "cat": 1,
-                "doc": 2},
-            additional_properties={
-                "prop1": 1,
-                "prop2": 2})
-        heat_template.add_flavor(
-            name="flavor1",
-            vcpus=1,
-            ram=2048,
-            disk=1,
-            extra_specs={
-                "cat": 1,
-                "dog": 2})
+            name="server2", image="image1", flavor="flavor1",
+            flavors=["flavor3", "flavor4"], ports=["port1", "port2"],
+            networks=["network1", "network2"], scheduler_hints="hints1",
+            user="user1", key_name="foo-key", user_data="user",
+            metadata={"cat": 1, "doc": 2},
+            additional_properties={"prop1": 1, "prop2": 2})
+        heat_template.add_flavor(name="flavor1", vcpus=1, ram=2048, disk=1,
+                                 extra_specs={"cat": 1, "dog": 2})
         heat_template.add_flavor(name=None, vcpus=1, ram=2048)
         heat_template.add_server(
-            name="server1",
-            image="image1",
-            flavor="flavor1",
-            flavors=[],
-            ports=[
-                "port1",
-                "port2"],
-            networks=[
-                "network1",
-                "network2"],
-            scheduler_hints="hints1",
-            user="user1",
-            key_name="foo-key",
-            user_data="user",
-            metadata={
-                "cat": 1,
-                "doc": 2},
-            additional_properties={
-                "prop1": 1,
-                "prop2": 2})
+            name="server1", image="image1", flavor="flavor1", flavors=[],
+            ports=["port1", "port2"], networks=["network1", "network2"],
+            scheduler_hints="hints1", user="user1", key_name="foo-key",
+            user_data="user", metadata={"cat": 1, "doc": 2},
+            additional_properties={"prop1": 1, "prop2": 2})
         heat_template.add_network("network1")
 
         heat_template.add_flavor("test")
-        self.assertEqual(
-            heat_template.resources['test']['type'], 'OS::Nova::Flavor')
-
-    @mock_patch_target_module('op_utils')
-    @mock_patch_target_module('heatclient')
-    def test_create_negative(self, mock_heat_client_class, mock_op_utils):
-        self.template.HEAT_WAIT_LOOP_INTERVAL = 0
-        mock_heat_client = mock_heat_client_class()  # get the constructed mock
-
-        # populate attributes of the constructed mock
-        mock_heat_client.stacks.get().stack_status_reason = 'the reason'
-
-        expected_status_calls = 0
-        expected_constructor_calls = 1  # above, to get the instance
-        expected_create_calls = 0
-        expected_op_utils_usage = 0
-
-        with mock.patch.object(self.template, 'status', return_value=None) as mock_status:
-            # block with timeout hit
-            timeout = 0
-            with self.assertRaises(RuntimeError) as raised, timer():
-                self.template.create(block=True, timeout=timeout)
-
-            # ensure op_utils was used
-            expected_op_utils_usage += 1
-            self.assertEqual(
-                mock_op_utils.get_session.call_count, expected_op_utils_usage)
-            self.assertEqual(
-                mock_op_utils.get_endpoint.call_count, expected_op_utils_usage)
-            self.assertEqual(
-                mock_op_utils.get_heat_api_version.call_count,
-                expected_op_utils_usage)
-
-            # ensure the constructor and instance were used
-            self.assertEqual(mock_heat_client_class.call_count,
-                             expected_constructor_calls)
-            self.assertEqual(
-                mock_heat_client.stacks.create.call_count,
-                expected_create_calls)
-
-            # ensure that the status was used
-            self.assertGreater(mock_status.call_count, expected_status_calls)
-            expected_status_calls = mock_status.call_count  # synchronize the value
-
-            # ensure the expected exception was raised
-            error_message = get_error_message(raised.exception)
-            self.assertIn('timeout', error_message)
-            self.assertNotIn('the reason', error_message)
-
-            # block with create failed
-            timeout = 10
-            mock_status.side_effect = iter([None, None, u'CREATE_FAILED'])
-            with self.assertRaises(RuntimeError) as raised, timer():
-                self.template.create(block=True, timeout=timeout)
-
-            # ensure the existing heat_client was used and op_utils was used
-            # again
-            self.assertEqual(
-                mock_op_utils.get_session.call_count, expected_op_utils_usage)
-            self.assertEqual(
-                mock_op_utils.get_endpoint.call_count, expected_op_utils_usage)
-            self.assertEqual(
-                mock_op_utils.get_heat_api_version.call_count,
-                expected_op_utils_usage)
-
-            # ensure the constructor was not used but the instance was used
-            self.assertEqual(mock_heat_client_class.call_count,
-                             expected_constructor_calls)
-            self.assertEqual(
-                mock_heat_client.stacks.create.call_count,
-                expected_create_calls)
-
-            # ensure that the status was used three times
-            expected_status_calls += 3
-            self.assertEqual(mock_status.call_count, expected_status_calls)
-
-    # NOTE(elfoley): This needs to be split into multiple tests.
-    # The lines where the template is reset should serve as a guide for where
-    # to split.
-    @mock_patch_target_module('op_utils')
-    @mock_patch_target_module('heatclient')
-    def test_create(self, mock_heat_client_class, mock_op_utils):
-        self.template.HEAT_WAIT_LOOP_INTERVAL = 0.2
-        mock_heat_client = mock_heat_client_class()
-
-        # populate attributes of the constructed mock
-        mock_heat_client.stacks.get().outputs = [
-            {'output_key': 'key1', 'output_value': 'value1'},
-            {'output_key': 'key2', 'output_value': 'value2'},
-            {'output_key': 'key3', 'output_value': 'value3'},
-        ]
-        expected_outputs = {  # pylint: disable=unused-variable
-            'key1': 'value1',
-            'key2': 'value2',
-            'key3': 'value3',
-        }
-
-        expected_status_calls = 0
-        expected_constructor_calls = 1  # above, to get the instance
-        expected_create_calls = 0
-        expected_op_utils_usage = 0
-
-        with mock.patch.object(self.template, 'status') as mock_status:
-            self.template.name = 'no block test'
-            mock_status.return_value = None
-
-            # no block
-            self.assertIsInstance(self.template.create(
-                block=False, timeout=2), heat.HeatStack)
-
-            # ensure op_utils was used
-            expected_op_utils_usage += 1
-            self.assertEqual(
-                mock_op_utils.get_session.call_count, expected_op_utils_usage)
-            self.assertEqual(
-                mock_op_utils.get_endpoint.call_count, expected_op_utils_usage)
-            self.assertEqual(
-                mock_op_utils.get_heat_api_version.call_count,
-                expected_op_utils_usage)
-
-            # ensure the constructor and instance were used
-            self.assertEqual(mock_heat_client_class.call_count,
-                             expected_constructor_calls)
-            self.assertEqual(
-                mock_heat_client.stacks.create.call_count,
-                expected_create_calls)
-
-            # ensure that the status was not used
-            self.assertEqual(mock_status.call_count, expected_status_calls)
-
-            # ensure no outputs because this requires blocking
-            self.assertEqual(self.template.outputs, {})
-
-            # block with immediate complete
-            self.template.name = 'block, immediate complete test'
-
-            mock_status.return_value = self.template.HEAT_CREATE_COMPLETE_STATUS
-            self.assertIsInstance(self.template.create(
-                block=True, timeout=2), heat.HeatStack)
-
-            # ensure existing instance was re-used and op_utils was not used
-            self.assertEqual(mock_heat_client_class.call_count,
-                             expected_constructor_calls)
-            self.assertEqual(
-                mock_heat_client.stacks.create.call_count,
-                expected_create_calls)
-
-            # ensure status was checked once
-            expected_status_calls += 1
-            self.assertEqual(mock_status.call_count, expected_status_calls)
-
-            # reset template outputs
-            self.template.outputs = None
-
-            # block with delayed complete
-            self.template.name = 'block, delayed complete test'
-
-            success_index = 2
-            mock_status.side_effect = index_value_iter(
-                success_index, self.template.HEAT_CREATE_COMPLETE_STATUS)
-            self.assertIsInstance(self.template.create(
-                block=True, timeout=2), heat.HeatStack)
-
-            # ensure existing instance was re-used and op_utils was not used
-            self.assertEqual(mock_heat_client_class.call_count,
-                             expected_constructor_calls)
-            self.assertEqual(
-                mock_heat_client.stacks.create.call_count,
-                expected_create_calls)
-
-            # ensure status was checked three more times
-            expected_status_calls += 1 + success_index
-            self.assertEqual(mock_status.call_count, expected_status_calls)
-
-
-class HeatStackTestCase(unittest.TestCase):
-
-    def test_delete_calls__delete_multiple_times(self):
-        stack = heat.HeatStack('test')
-        stack.uuid = 1
-        with mock.patch.object(stack, "_delete") as delete_mock:
-            stack.delete()
-        # call once and then call again if uuid is not none
-        self.assertGreater(delete_mock.call_count, 1)
-
-    def test_delete_all_calls_delete(self):
-        # we must patch the object before we create an instance
-        # so we can override delete() in all the instances
-        with mock.patch.object(heat.HeatStack, "delete") as delete_mock:
-            stack = heat.HeatStack('test')
-            stack.uuid = 1
-            stack.delete_all()
-            self.assertGreater(delete_mock.call_count, 0)
+        self.assertEqual(heat_template.resources['test']['type'],
+                         'OS::Nova::Flavor')
+
+    def test_create_not_block(self):
+        heat_stack = mock.Mock()
+        with mock.patch.object(heat, 'HeatStack', return_value=heat_stack):
+            ret = self.template.create(block=False)
+        heat_stack.create.assert_called_once_with(
+            self.template._template, self.template.heat_parameters, False,
+            3600)
+        self.assertEqual(heat_stack, ret)
+
+    def test_create_block(self):
+        heat_stack = mock.Mock()
+        heat_stack.status = self.template.HEAT_STATUS_COMPLETE
+        with mock.patch.object(heat, 'HeatStack', return_value=heat_stack):
+            ret = self.template.create(block=True)
+        heat_stack.create.assert_called_once_with(
+            self.template._template, self.template.heat_parameters, True,
+            3600)
+        self.assertEqual(heat_stack, ret)
+
+
+    def test_create_block_status_no_complete(self):
+        heat_stack = mock.Mock()
+        heat_stack.status = 'other status'
+        with mock.patch.object(heat, 'HeatStack', return_value=heat_stack):
+            self.assertRaises(exceptions.HeatTemplateError,
+                              self.template.create, block=True)
+        heat_stack.create.assert_called_once_with(
+            self.template._template, self.template.heat_parameters, True,
+            3600)