From: Emma Foley Date: Sun, 11 Feb 2018 22:08:55 +0000 (+0000) Subject: Merge "Remove references to "dpdk_nic_bind" utility" X-Git-Tag: opnfv-6.0.0~167 X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?a=commitdiff_plain;h=62699b55160b4308e80c0e98ace98ecf4c2eacb9;hp=a3364a6b9e86308007895b69571028386969a17c;p=yardstick.git Merge "Remove references to "dpdk_nic_bind" utility" --- diff --git a/INFO.yaml b/INFO.yaml new file mode 100644 index 000000000..f84f6952e --- /dev/null +++ b/INFO.yaml @@ -0,0 +1,80 @@ +--- +project: 'Test framework for verifying infrastructure compliance (yardstick)' +project_creation_date: 'April 28th, 2015' +project_category: 'Integration & Testing' +lifecycle_state: 'Incubation' +project_lead: &opnfv_yardstick_ptl + name: 'Ross Brattain' + email: 'ross.b.brattain@intel.com' + id: 'rbbratta' + company: 'intel.com' + timezone: 'PST' +primary_contact: *opnfv_yardstick_ptl +issue_tracking: + type: 'jira' + url: 'https://jira.opnfv.org/projects/Yardstick' + key: 'Yardstick' +mailing_list: + type: 'mailman2' + url: 'opnfv-tech-discuss@lists.opnfv.org' + tag: '[yardstick]' +realtime_discussion: + type: irc + server: 'freenode.net' + channel: '#opnfv-yardstick' +meetings: + - type: 'gotomeeting+irc' + agenda: 'https://wiki.opnfv.org/display/yardstick/Yardstick+Meetings' + url: 'https://global.gotomeeting.com/join/819733085' + server: 'freenode.net' + channel: '#opnfv-yardstick' + repeats: 'weekly' + time: '08:30 UTC' +repositories: + - 'yardstick' +committers: + - <<: *opnfv_yardstick_ptl + - name: 'Jörgen Karlsson' + email: 'jorgen.w.karlsson@ericsson.com' + company: 'ericsson.com' + id: 'jnon' + - name: 'Kubi' + email: 'jean.gaoliang@huawei.com' + company: 'huawei.com' + id: 'kubi' + - name: 'Rex Lee' + email: 'limingjiang@huawei.com' + company: 'huawei.com' + id: 'rexlee8776' + - name: 'Jing Lu' + email: 'lvjing5@huawei.com' + company: 'huawei.com' + id: 'JingLu5' + - name: 'zhihui wu' + email: 'wu.zhihui1@zte.com.cn' + company: 'zte.com.cn' + id: 'wu.zhihui' + - name: 'Trevor Cooper' + email: 'trevor.cooper@intel.com' + company: 'intel.com' + id: 'trev' + - name: 'Jack Chan' + email: 'chenjiankun1@huawei.com' + company: 'huawei.com' + id: 'chenjiankun' + - name: 'Emma Foley' + email: 'emma.l.foley@intel.com' + company: 'intel.com' + id: 'elfoley' + - name: 'Rodolfo Alonso Hernandez' + email: 'rodolfo.alonso.hernandez@intel.com' + company: 'intel.com' + id: 'rodolfo.ah' + - name: 'Kanglin Yin' + email: '14_ykl@tongji.edu.cn' + company: 'tongji.edu.cn' + id: 'tjuyinkanglin' +tsc: + # yamllint disable rule:line-length + approval: 'http//meetbot.opnfv.org/meetings/' + # yamllint enable rule:line-length diff --git a/ansible/infra_deploy.yml b/ansible/infra_deploy.yml index 10f53fbad..948dd338a 100644 --- a/ansible/infra_deploy.yml +++ b/ansible/infra_deploy.yml @@ -16,3 +16,4 @@ roles: - infra_check_requirements + - infra_destroy_previous_configuration diff --git a/ansible/nsb_setup.yml b/ansible/nsb_setup.yml index bfe5d2349..98a59f984 100644 --- a/ansible/nsb_setup.yml +++ b/ansible/nsb_setup.yml @@ -12,18 +12,18 @@ # See the License for the specific language governing permissions and # limitations under the License. 
--- -#- name: Prepare baremetal machine -# include: ubuntu_server_baremetal_deploy_samplevnfs.yml -# vars: -# YARD_IMG_ARCH: amd64 -# -#- name: Install jumphost dependencies and configure docker -# hosts: jumphost -# environment: -# "{{ proxy_env }}" -# roles: -# - install_dependencies -# - docker +- name: Prepare baremetal machine + include: ubuntu_server_baremetal_deploy_samplevnfs.yml + vars: + YARD_IMG_ARCH: amd64 + +- name: Install jumphost dependencies and configure docker + hosts: jumphost + environment: + "{{ proxy_env }}" + roles: + - install_dependencies + - docker - name: "handle all openstack stuff when: openrc_file is defined" include: prepare_openstack.yml diff --git a/ansible/roles/create_samplevnfs_image/tasks/main.yml b/ansible/roles/create_samplevnfs_image/tasks/main.yml index c83cccab5..ab7371a12 100644 --- a/ansible/roles/create_samplevnfs_image/tasks/main.yml +++ b/ansible/roles/create_samplevnfs_image/tasks/main.yml @@ -19,6 +19,6 @@ is_public: yes disk_format: qcow2 container_format: bare - filename: "{{ raw_imgfile }}" + filename: "{{ imgfile }}" properties: hw_vif_multiqueue_enabled: true diff --git a/ansible/roles/download_drivers/defaults/main.yml b/ansible/roles/download_drivers/defaults/main.yml index ff4c30785..ab68a3c6d 100644 --- a/ansible/roles/download_drivers/defaults/main.yml +++ b/ansible/roles/download_drivers/defaults/main.yml @@ -14,7 +14,7 @@ --- i40evf_version: "3.4.2" i40evf_gzfile: "i40evf-{{ i40evf_version }}.tar.gz" -i40evf_url: "https://sourceforge.net/projects/e1000/files/i40evf%20stable/{{ i40evf_version }}/{{ i40evf_gzfile }}/download" +i40evf_url: "https://netix.dl.sourceforge.net/project/e1000/i40evf%20stable/{{ i40evf_version }}/{{ i40evf_gzfile }}" i40evf_dest: "{{ clone_dest }}/" i40evf_path: "{{ i40evf_dest }}/{{ i40evf_gzfile|regex_replace('[.]tar[.]gz$', '') }}" i40evf_checksum: diff --git a/ansible/roles/infra_destroy_previous_configuration/tasks/delete_network.yml b/ansible/roles/infra_destroy_previous_configuration/tasks/delete_network.yml new file mode 100644 index 000000000..314ee30af --- /dev/null +++ b/ansible/roles/infra_destroy_previous_configuration/tasks/delete_network.yml @@ -0,0 +1,48 @@ +# Copyright (c) 2017-2018 Intel Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +--- +- name: Destroy old networks created by virt + virt_net: + name: "{{ network_item.name }}" + command: destroy + when: network_item.name in virt_nets.list_nets + +# Ignoring erros as network can be created without being defined. +# This can happen if a user manually creates a network using the virsh command. +# If the network is not defined the undefine code will throw an error. 
+- name: Undefine old networks defined by virt + virt_net: + name: "{{ network_item.name }}" + command: undefine + when: network_item.name in virt_nets.list_nets + ignore_errors: yes + +- name: Check if "ovs-vsctl" command is present + command: which ovs-vsctl + register: ovs_vsctl_present + ignore_errors: yes + +- name: Destroy OVS bridge if it exists + command: ovs-vsctl --if-exists -- del-br "{{ network_item.name }}" + when: ovs_vsctl_present.rc == 0 + +- name: Check if linux bridge is present + stat: path="{{ '/sys/class/net/'+network_item.name+'/brif/' }}" + register: check_linux_bridge + +- name: Remove linux bridge if it exists + shell: | + ifconfig "{{ network_item.name }}" down + brctl delbr "{{ network_item.name }}" + when: check_linux_bridge.stat.exists diff --git a/ansible/roles/infra_destroy_previous_configuration/tasks/main.yml b/ansible/roles/infra_destroy_previous_configuration/tasks/main.yml new file mode 100644 index 000000000..5595cd501 --- /dev/null +++ b/ansible/roles/infra_destroy_previous_configuration/tasks/main.yml @@ -0,0 +1,47 @@ +# Copyright (c) 2017-2018 Intel Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +--- +- name: Include + include_vars: + file: "{{ rs_file }}" + name: infra_deploy_vars + +- name: List virt-nets + virt_net: command=list_nets + register: virt_nets + +- name: List VMs + virt: command=list_vms + register: virt_vms + +- name: Destroy old VMs + virt: + command: destroy + name: "{{ item.hostname }}" + when: item.hostname in virt_vms.list_vms + with_items: "{{ infra_deploy_vars.nodes }}" + +- name: Undefine old VMs + virt: + command: undefine + name: "{{ item.hostname }}" + when: item.hostname in virt_vms.list_vms + with_items: "{{ infra_deploy_vars.nodes }}" + +- name: Delete old networks + include_tasks: delete_network.yml + extra_vars: "{{ virt_nets }}" + loop_control: + loop_var: network_item + with_items: "{{ infra_deploy_vars.networks }}" diff --git a/docs/testing/user/userguide/15-list-of-tcs.rst b/docs/testing/user/userguide/15-list-of-tcs.rst index b62bf6390..47526cdda 100644 --- a/docs/testing/user/userguide/15-list-of-tcs.rst +++ b/docs/testing/user/userguide/15-list-of-tcs.rst @@ -80,6 +80,9 @@ H A opnfv_yardstick_tc052.rst opnfv_yardstick_tc053.rst opnfv_yardstick_tc054.rst + opnfv_yardstick_tc056.rst + opnfv_yardstick_tc057.rst + opnfv_yardstick_tc058.rst IPv6 ---- diff --git a/docs/testing/user/userguide/opnfv_yardstick_tc042.rst b/docs/testing/user/userguide/opnfv_yardstick_tc042.rst index 8660d9297..a0c487c7b 100644 --- a/docs/testing/user/userguide/opnfv_yardstick_tc042.rst +++ b/docs/testing/user/userguide/opnfv_yardstick_tc042.rst @@ -4,7 +4,7 @@ .. (c) OPNFV, ZTE and others. *************************************** -Yardstick Test Case Description TC0042 +Yardstick Test Case Description TC042 *************************************** .. 
_DPDK: http://dpdk.org/doc/guides/index.html diff --git a/docs/testing/user/userguide/opnfv_yardstick_tc056.rst b/docs/testing/user/userguide/opnfv_yardstick_tc056.rst index 01aa99ac2..e6e06df57 100644 --- a/docs/testing/user/userguide/opnfv_yardstick_tc056.rst +++ b/docs/testing/user/userguide/opnfv_yardstick_tc056.rst @@ -98,7 +98,7 @@ Yardstick Test Case Description TC056 | | | +--------------+--------------------------------------------------------------+ |configuration | This test case needs two configuration files: | -| | 1) test case file:opnfv_yardstick_tc056.yaml | +| | 1) test case file: opnfv_yardstick_tc056.yaml | | | -Attackers: see above "attackers" description | | | -waiting_time: which is the time (seconds) from the process | | | being killed to stoping monitors the monitors | diff --git a/etc/infra/infra_deploy.yaml.sample b/etc/infra/infra_deploy.yaml.sample index fb162d35b..df682ac3b 100644 --- a/etc/infra/infra_deploy.yaml.sample +++ b/etc/infra/infra_deploy.yaml.sample @@ -12,7 +12,7 @@ nodes: ram: 8192 vcpus: 4 - - name Controller_Compute VM + - name: Controller_Compute VM openstack_node: controller_compute hostname: controller_compute interfaces: diff --git a/requirements.txt b/requirements.txt index 88c0e659a..aacafdf93 100644 --- a/requirements.txt +++ b/requirements.txt @@ -49,7 +49,6 @@ pyroute2==0.4.21 # dual license GPLv2+ and Apache v2; OSI Approved GNU G pyrsistent==0.14.1 # LICENSE.mit; OSI Approved MIT License python-cinderclient==3.1.0 # OSI Approved Apache Software License python-glanceclient==2.8.0 # OSI Approved Apache Software License -python-heatclient==1.11.1 # OSI Approved Apache Software License python-keystoneclient==3.13.0 # OSI Approved Apache Software License python-neutronclient==6.5.0 # OSI Approved Apache Software License python-novaclient==9.1.1 # OSI Approved Apache Software License diff --git a/samples/vnf_samples/nsut/prox/prox-tg-topology-1.yaml b/samples/vnf_samples/nsut/prox/prox-tg-topology-1.yaml index 10902a7b8..f59146c0b 100644 --- a/samples/vnf_samples/nsut/prox/prox-tg-topology-1.yaml +++ b/samples/vnf_samples/nsut/prox/prox-tg-topology-1.yaml @@ -21,10 +21,10 @@ nsd:nsd-catalog: constituent-vnfd: - member-vnf-index: '1' vnfd-id-ref: tg__0 - VNF model: ../../vnf_descriptors/tg_prox_tpl-1.yaml + VNF model: ../../vnf_descriptors/tg_prox_tpl.yaml - member-vnf-index: '2' vnfd-id-ref: vnf__0 - VNF model: ../../vnf_descriptors/prox_vnf-1.yaml + VNF model: ../../vnf_descriptors/prox_vnf.yaml vld: - id: uplink_0 name: tg__0 to vnf__0 link 1 diff --git a/samples/vnf_samples/nsut/prox/prox-tg-topology-2.yaml b/samples/vnf_samples/nsut/prox/prox-tg-topology-2.yaml index 11eed52fc..63d0acc91 100644 --- a/samples/vnf_samples/nsut/prox/prox-tg-topology-2.yaml +++ b/samples/vnf_samples/nsut/prox/prox-tg-topology-2.yaml @@ -21,10 +21,10 @@ nsd:nsd-catalog: constituent-vnfd: - member-vnf-index: '1' vnfd-id-ref: tg__0 - VNF model: ../../vnf_descriptors/tg_prox_tpl-2.yaml + VNF model: ../../vnf_descriptors/tg_prox_tpl.yaml - member-vnf-index: '2' vnfd-id-ref: vnf__0 - VNF model: ../../vnf_descriptors/prox_vnf-2.yaml + VNF model: ../../vnf_descriptors/prox_vnf.yaml vld: - id: uplink_0 name: tg__0 to vnf__0 link 1 diff --git a/samples/vnf_samples/nsut/prox/prox-tg-topology-4.yaml b/samples/vnf_samples/nsut/prox/prox-tg-topology-4.yaml index eda239e3b..b4b003680 100644 --- a/samples/vnf_samples/nsut/prox/prox-tg-topology-4.yaml +++ b/samples/vnf_samples/nsut/prox/prox-tg-topology-4.yaml @@ -21,10 +21,10 @@ nsd:nsd-catalog: constituent-vnfd: - 
member-vnf-index: '1' vnfd-id-ref: tg__0 - VNF model: ../../vnf_descriptors/tg_prox_tpl-4.yaml + VNF model: ../../vnf_descriptors/tg_prox_tpl.yaml - member-vnf-index: '2' vnfd-id-ref: vnf__0 - VNF model: ../../vnf_descriptors/prox_vnf-4.yaml + VNF model: ../../vnf_descriptors/prox_vnf.yaml vld: - id: uplink_0 name: tg__0 to vnf__0 link 1 diff --git a/samples/vnf_samples/vnf_descriptors/prox_vnf-2.yaml b/samples/vnf_samples/vnf_descriptors/prox_vnf-2.yaml deleted file mode 100644 index 13c4e9db7..000000000 --- a/samples/vnf_samples/vnf_descriptors/prox_vnf-2.yaml +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (c) 2017 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -vnfd:vnfd-catalog: - vnfd: - - id: ProxApproxVnf - name: ProxVnf - short-name: ProxVnf - description: PROX approximation using DPDK - mgmt-interface: - vdu-id: prox-baremetal - {% if user is defined %} - user: '{{user}}' # Value filled by vnfdgen - {% endif %} - {% if password is defined %} - password: '{{password}}' # Value filled by vnfdgen - {% endif %} - {% if ip is defined %} - ip: '{{ip}}' # Value filled by vnfdgen - {% endif %} - {% if key_filename is defined %} - key_filename: '{{key_filename}}' # Value filled by vnfdgen - {% endif %} - vdu: - - id: proxvnf-baremetal - name: proxvnf-baremetal - description: PROX approximation using DPDK - vm-flavor: - vcpu-count: '4' - memory-mb: '4096' - routing_table: {{ routing_table }} - nd_route_tbl: {{ nd_route_tbl }} - benchmark: - kpi: - - packets_in - - packets_fwd - - packets_dropped diff --git a/samples/vnf_samples/vnf_descriptors/prox_vnf-4.yaml b/samples/vnf_samples/vnf_descriptors/prox_vnf-4.yaml deleted file mode 100644 index 13c4e9db7..000000000 --- a/samples/vnf_samples/vnf_descriptors/prox_vnf-4.yaml +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (c) 2017 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -vnfd:vnfd-catalog: - vnfd: - - id: ProxApproxVnf - name: ProxVnf - short-name: ProxVnf - description: PROX approximation using DPDK - mgmt-interface: - vdu-id: prox-baremetal - {% if user is defined %} - user: '{{user}}' # Value filled by vnfdgen - {% endif %} - {% if password is defined %} - password: '{{password}}' # Value filled by vnfdgen - {% endif %} - {% if ip is defined %} - ip: '{{ip}}' # Value filled by vnfdgen - {% endif %} - {% if key_filename is defined %} - key_filename: '{{key_filename}}' # Value filled by vnfdgen - {% endif %} - vdu: - - id: proxvnf-baremetal - name: proxvnf-baremetal - description: PROX approximation using DPDK - vm-flavor: - vcpu-count: '4' - memory-mb: '4096' - routing_table: {{ routing_table }} - nd_route_tbl: {{ nd_route_tbl }} - benchmark: - kpi: - - packets_in - - packets_fwd - - packets_dropped diff --git a/samples/vnf_samples/vnf_descriptors/prox_vnf-1.yaml b/samples/vnf_samples/vnf_descriptors/prox_vnf.yaml similarity index 100% rename from samples/vnf_samples/vnf_descriptors/prox_vnf-1.yaml rename to samples/vnf_samples/vnf_descriptors/prox_vnf.yaml diff --git a/samples/vnf_samples/vnf_descriptors/tg_prox_tpl-1.yaml b/samples/vnf_samples/vnf_descriptors/tg_prox_tpl-1.yaml deleted file mode 100644 index 730143972..000000000 --- a/samples/vnf_samples/vnf_descriptors/tg_prox_tpl-1.yaml +++ /dev/null @@ -1,46 +0,0 @@ -# Copyright (c) 2017 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -vnfd:vnfd-catalog: - vnfd: - - id: ProxTrafficGen # nsb class mapping - name: proxverifier - short-name: proxverifier - description: prox stateless traffic verifier - mgmt-interface: - vdu-id: proxgen-baremetal - {% if user is defined %} - user: '{{user}}' # Value filled by vnfdgen - {% endif %} - {% if password is defined %} - password: '{{password}}' # Value filled by vnfdgen - {% endif %} - {% if ip is defined %} - ip: '{{ip}}' # Value filled by vnfdgen - {% endif %} - {% if key_filename is defined %} - key_filename: '{{key_filename}}' # Value filled by vnfdgen - {% endif %} - vdu: - - id: proxgen-baremetal - name: proxgen-baremetal - description: prox stateless traffic verifier - benchmark: - kpi: - - rx_throughput_fps - - tx_throughput_fps - - tx_throughput_mbps - - rx_throughput_mbps - - in_packets - - out_packets diff --git a/samples/vnf_samples/vnf_descriptors/tg_prox_tpl-4.yaml b/samples/vnf_samples/vnf_descriptors/tg_prox_tpl-4.yaml deleted file mode 100644 index 20bd12ca2..000000000 --- a/samples/vnf_samples/vnf_descriptors/tg_prox_tpl-4.yaml +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright (c) 2017 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -vnfd:vnfd-catalog: - vnfd: - - id: ProxTrafficGen # nsb class mapping - name: proxverifier - short-name: proxverifier - description: prox stateless traffic verifier - mgmt-interface: - vdu-id: proxgen-baremetal - {% if user is defined %} - user: '{{user}}' # Value filled by vnfdgen - {% endif %} - {% if password is defined %} - password: '{{password}}' # Value filled by vnfdgen - {% endif %} - {% if ip is defined %} - ip: '{{ip}}' # Value filled by vnfdgen - {% endif %} - {% if key_filename is defined %} - key_filename: '{{key_filename}}' # Value filled by vnfdgen - {% endif %} - vdu: - - id: proxgen-baremetal - name: proxgen-baremetal - description: prox stateless traffic verifier - - benchmark: - kpi: - - rx_throughput_fps - - tx_throughput_fps - - tx_throughput_mbps - - rx_throughput_mbps - - in_packets - - out_packets diff --git a/samples/vnf_samples/vnf_descriptors/tg_prox_tpl-2.yaml b/samples/vnf_samples/vnf_descriptors/tg_prox_tpl.yaml similarity index 100% rename from samples/vnf_samples/vnf_descriptors/tg_prox_tpl-2.yaml rename to samples/vnf_samples/vnf_descriptors/tg_prox_tpl.yaml diff --git a/setup.py b/setup.py index 7f6571d61..881ef9272 100755 --- a/setup.py +++ b/setup.py @@ -53,6 +53,7 @@ setup( 'yardstick=yardstick.main:main', 'yardstick-plot=yardstick.plot.plotter:main [plot]' ], + 'yardstick.scenario': [] }, scripts=[ 'tools/yardstick-img-modify', diff --git a/test-requirements.txt b/test-requirements.txt index f933df29a..ee9815c45 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -12,6 +12,9 @@ testrepository==0.0.20 # OSI Approved BSD License; OSI Approved Apache So testtools==2.3.0 # OSI Approved MIT License unittest2==1.1.0 # OSI Approved BSD License +# NOTE(ralonsoh): to be removed, only for coverage support +python-heatclient==1.8.1 # OSI Approved Apache Software License + # Yardstick F release <-> OpenStack Pike release openstack_requirements==1.1.0 # OSI Approved Apache Software License -e git+https://github.com/openstack/requirements.git@stable/pike#egg=os_requirements diff --git a/tests/unit/benchmark/scenarios/test_base.py b/tests/unit/benchmark/scenarios/test_base.py index 78e342978..a95e6bc86 100644 --- a/tests/unit/benchmark/scenarios/test_base.py +++ b/tests/unit/benchmark/scenarios/test_base.py @@ -51,3 +51,56 @@ class ScenarioTestCase(unittest.TestCase): pass self.assertEqual(str(None), DummyScenario.get_description()) + + def test_get_types(self): + scenario_names = set( + scenario.__scenario_type__ for scenario in + base.Scenario.get_types() if hasattr(scenario, + '__scenario_type__')) + existing_scenario_class_names = { + 'Iperf3', 'CACHEstat', 'SpecCPU2006', 'Dummy', 'NSPerf', 'Parser'} + self.assertTrue(existing_scenario_class_names.issubset(scenario_names)) + + def test_get_cls_existing_scenario(self): + scenario_name = 'NSPerf' + scenario = base.Scenario.get_cls(scenario_name) + self.assertEqual(scenario_name, scenario.__scenario_type__) + + def test_get_cls_non_existing_scenario(self): + wrong_scenario_name = 'Non-existing-scenario' + with self.assertRaises(RuntimeError) as exc: + 
base.Scenario.get_cls(wrong_scenario_name) + self.assertEqual('No such scenario type %s' % wrong_scenario_name, + str(exc.exception)) + + def test_get_existing_scenario(self): + scenario_name = 'NSPerf' + scenario_module = ('yardstick.benchmark.scenarios.networking.' + 'vnf_generic.NetworkServiceTestCase') + self.assertEqual(scenario_module, base.Scenario.get(scenario_name)) + + def test_get_non_existing_scenario(self): + wrong_scenario_name = 'Non-existing-scenario' + with self.assertRaises(RuntimeError) as exc: + base.Scenario.get(wrong_scenario_name) + self.assertEqual('No such scenario type %s' % wrong_scenario_name, + str(exc.exception)) + + +class IterScenarioClassesTestCase(unittest.TestCase): + + def test_no_scenario_type_defined(self): + some_existing_scenario_class_names = [ + 'Iperf3', 'CACHEstat', 'SpecCPU2006', 'Dummy', 'NSPerf', 'Parser'] + scenario_types = [scenario.__scenario_type__ for scenario + in base._iter_scenario_classes()] + for class_name in some_existing_scenario_class_names: + self.assertIn(class_name, scenario_types) + + def test_scenario_type_defined(self): + some_existing_scenario_class_names = [ + 'Iperf3', 'CACHEstat', 'SpecCPU2006', 'Dummy', 'NSPerf', 'Parser'] + for class_name in some_existing_scenario_class_names: + scenario_class = next(base._iter_scenario_classes( + scenario_type=class_name)) + self.assertEqual(class_name, scenario_class.__scenario_type__) diff --git a/tools/virt_ci_rampup.sh b/tools/virt_ci_rampup.sh index 210e6ed40..6a9f2e7cb 100755 --- a/tools/virt_ci_rampup.sh +++ b/tools/virt_ci_rampup.sh @@ -16,6 +16,6 @@ ANSIBLE_SCRIPTS="${0%/*}/../ansible" cd ${ANSIBLE_SCRIPTS} &&\ -ansible-playbook \ +sudo -EH ansible-playbook \ -e rs_file='../etc/infra/infra_deploy.yaml' \ -i inventory.ini infra_deploy.yml diff --git a/yardstick/benchmark/scenarios/base.py b/yardstick/benchmark/scenarios/base.py index 7af85834c..10a728828 100644 --- a/yardstick/benchmark/scenarios/base.py +++ b/yardstick/benchmark/scenarios/base.py @@ -16,20 +16,34 @@ # yardstick comment: this is a modified copy of # rally/rally/benchmark/scenarios/base.py -""" Scenario base class -""" +from stevedore import extension -from __future__ import absolute_import import yardstick.common.utils as utils +def _iter_scenario_classes(scenario_type=None): + """Generator over all 'Scenario' subclasses + + This function will iterate over all 'Scenario' subclasses defined in this + project and will load any class introduced by any installed plugin project, + defined in 'entry_points' section, under 'yardstick.scenarios' subsection. 
+ """ + extension.ExtensionManager(namespace='yardstick.scenarios', + invoke_on_load=False) + for scenario in utils.itersubclasses(Scenario): + if not scenario_type: + yield scenario + elif getattr(scenario, '__scenario_type__', None) == scenario_type: + yield scenario + + class Scenario(object): def setup(self): """ default impl for scenario setup """ pass - def run(self, args): + def run(self, *args): """ catcher for not implemented run methods in subclasses """ raise RuntimeError("run method not implemented") @@ -41,16 +55,15 @@ class Scenario(object): def get_types(): """return a list of known runner type (class) names""" scenarios = [] - for scenario in utils.itersubclasses(Scenario): + for scenario in _iter_scenario_classes(): scenarios.append(scenario) return scenarios @staticmethod def get_cls(scenario_type): """return class of specified type""" - for scenario in utils.itersubclasses(Scenario): - if scenario_type == scenario.__scenario_type__: - return scenario + for scenario in _iter_scenario_classes(scenario_type): + return scenario raise RuntimeError("No such scenario type %s" % scenario_type) @@ -58,11 +71,8 @@ class Scenario(object): def get(scenario_type): """Returns instance of a scenario runner for execution type. """ - for scenario in utils.itersubclasses(Scenario): - if scenario_type == scenario.__scenario_type__: - return scenario.__module__ + "." + scenario.__name__ - - raise RuntimeError("No such scenario type %s" % scenario_type) + scenario = Scenario.get_cls(scenario_type) + return scenario.__module__ + "." + scenario.__name__ @classmethod def get_scenario_type(cls): diff --git a/yardstick/common/exceptions.py b/yardstick/common/exceptions.py index 4780822a4..e38dd246c 100644 --- a/yardstick/common/exceptions.py +++ b/yardstick/common/exceptions.py @@ -57,3 +57,9 @@ class YardstickException(Exception): class FunctionNotImplemented(YardstickException): message = ('The function "%(function_name)s" is not implemented in ' '"%(class_name)" class.') + + +class HeatTemplateError(YardstickException): + """Error in Heat during the stack deployment""" + message = ('Error in Heat during the creation of the OpenStack stack ' + '"%(stack_name)"') diff --git a/yardstick/orchestrator/heat.py b/yardstick/orchestrator/heat.py index d58ae5618..1a8beaeb6 100644 --- a/yardstick/orchestrator/heat.py +++ b/yardstick/orchestrator/heat.py @@ -10,23 +10,21 @@ """Heat template and stack management""" from __future__ import absolute_import -from __future__ import print_function -from six.moves import range - import collections import datetime import getpass import logging - +import pkg_resources import socket +import tempfile import time -import heatclient.client -import pkg_resources - +from oslo_serialization import jsonutils from oslo_utils import encodeutils +import shade import yardstick.common.openstack_utils as op_utils +from yardstick.common import exceptions from yardstick.common import template_format log = logging.getLogger(__name__) @@ -36,123 +34,82 @@ HEAT_KEY_UUID_LENGTH = 8 PROVIDER_SRIOV = "sriov" +_DEPLOYED_STACKS = {} + def get_short_key_uuid(uuid): return str(uuid)[:HEAT_KEY_UUID_LENGTH] -class HeatObject(object): - """base class for template and stack""" - - def __init__(self): - self._heat_client = None - self.uuid = None - - @property - def heat_client(self): - """returns a heat client instance""" - - if self._heat_client is None: - sess = op_utils.get_session() - heat_endpoint = op_utils.get_endpoint(service_type='orchestration') - self._heat_client = 
heatclient.client.Client( - op_utils.get_heat_api_version(), - endpoint=heat_endpoint, session=sess) - - return self._heat_client - - def status(self): - """returns stack state as a string""" - heat_client = self.heat_client - stack = heat_client.stacks.get(self.uuid) - return stack.stack_status - - -class HeatStack(HeatObject): +class HeatStack(object): """Represents a Heat stack (deployed template) """ - stacks = [] def __init__(self, name): - super(HeatStack, self).__init__() - self.uuid = None self.name = name - self.outputs = None - HeatStack.stacks.append(self) + self.outputs = {} + self._cloud = shade.openstack_cloud() + self._stack = None + + def create(self, template, heat_parameters, wait, timeout): + """Creates an OpenStack stack from a template""" + with tempfile.NamedTemporaryFile('wb', delete=False) as template_file: + template_file.write(jsonutils.dumps(template)) + template_file.close() + self._stack = self._cloud.create_stack( + self.name, template_file=template_file.name, wait=wait, + timeout=timeout, **heat_parameters) + outputs = self._stack.outputs + self.outputs = {output['output_key']: output['output_value'] for output + in outputs} + if self.uuid: + _DEPLOYED_STACKS[self.uuid] = self._stack @staticmethod def stacks_exist(): - """check if any stack has been deployed""" - return len(HeatStack.stacks) > 0 + """Check if any stack has been deployed""" + return len(_DEPLOYED_STACKS) > 0 - def _delete(self): - """deletes a stack from the target cloud using heat""" + def delete(self, wait=True): + """Deletes a stack in the target cloud""" if self.uuid is None: return - log.info("Deleting stack '%s' START, uuid:%s", self.name, self.uuid) - heat = self.heat_client - template = heat.stacks.get(self.uuid) - start_time = time.time() - template.delete() - - for status in iter(self.status, u'DELETE_COMPLETE'): - log.debug("Deleting stack state: %s", status) - if status == u'DELETE_FAILED': - raise RuntimeError( - heat.stacks.get(self.uuid).stack_status_reason) - - time.sleep(2) - - end_time = time.time() - log.info("Deleting stack '%s' DONE in %d secs", self.name, - end_time - start_time) - self.uuid = None - - def delete(self, block=True, retries=3): - """deletes a stack in the target cloud using heat (with retry) - Sometimes delete fail with "InternalServerError" and the next attempt - succeeds. So it is worthwhile to test a couple of times. 
- """ - if self.uuid is None: - return - - if not block: - self._delete() - return - - for _ in range(retries): - try: - self._delete() - break - except RuntimeError as err: - log.warning(err.args) - time.sleep(2) - - # if still not deleted try once more and let it fail everything - if self.uuid is not None: - self._delete() - - HeatStack.stacks.remove(self) + ret = self._cloud.delete_stack(self.uuid, wait=wait) + _DEPLOYED_STACKS.pop(self.uuid) + self._stack = None + return ret @staticmethod def delete_all(): - for stack in HeatStack.stacks[:]: + """Delete all deployed stacks""" + for stack in _DEPLOYED_STACKS: stack.delete() - def update(self): - """update a stack""" - raise RuntimeError("not implemented") + @property + def status(self): + """Retrieve the current stack status""" + if self._stack: + return self._stack.status + + @property + def uuid(self): + """Retrieve the current stack ID""" + if self._stack: + return self._stack.id -class HeatTemplate(HeatObject): +class HeatTemplate(object): """Describes a Heat template and a method to deploy template to a stack""" - DESCRIPTION_TEMPLATE = """\ + DESCRIPTION_TEMPLATE = """ Stack built by the yardstick framework for %s on host %s %s. All referred generated resources are prefixed with the template -name (i.e. %s).\ +name (i.e. %s). """ + HEAT_WAIT_LOOP_INTERVAL = 2 + HEAT_STATUS_COMPLETE = 'COMPLETE' + def _init_template(self): timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") self._template = { @@ -171,9 +128,7 @@ name (i.e. %s).\ self.resources = self._template['resources'] def __init__(self, name, template_file=None, heat_parameters=None): - super(HeatTemplate, self).__init__() self.name = name - self.state = "NOT_CREATED" self.keystone_client = None self.heat_parameters = {} @@ -184,16 +139,13 @@ name (i.e. %s).\ if template_file: with open(template_file) as stream: - print("Parsing external template:", template_file) + log.info('Parsing external template: %s', template_file) template_str = stream.read() self._template = template_format.parse(template_str) self._parameters = heat_parameters else: self._init_template() - # holds results of requested output after deployment - self.outputs = {} - log.debug("template object '%s' created", name) def add_flavor(self, name, vcpus=1, ram=1024, disk=1, ephemeral=0, @@ -202,9 +154,9 @@ name (i.e. %s).\ """add to the template a Flavor description""" if name is None: name = 'auto' - log.debug("adding Nova::Flavor '%s' vcpus '%d' ram '%d' disk '%d' " + - "ephemeral '%d' is_public '%s' rxtx_factor '%d' " + - "swap '%d' extra_specs '%s' ", + log.debug("adding Nova::Flavor '%s' vcpus '%d' ram '%d' disk '%d' " + "ephemeral '%d' is_public '%s' rxtx_factor '%d' " + "swap '%d' extra_specs '%s'", name, vcpus, ram, disk, ephemeral, is_public, rxtx_factor, swap, str(extra_specs)) @@ -600,57 +552,28 @@ name (i.e. 
%s).\ 'value': {'get_resource': name} } - HEAT_WAIT_LOOP_INTERVAL = 2 - HEAT_CREATE_COMPLETE_STATUS = u'CREATE_COMPLETE' - def create(self, block=True, timeout=3600): - """ - creates a template in the target cloud using heat - returns a dict with the requested output values from the template + """Creates a stack in the target based on the stored template - :param block: Wait for Heat create to finish - :type block: bool - :param: timeout: timeout in seconds for Heat create, default 3600s - :type timeout: int + :param block: (bool) Wait for Heat create to finish + :param timeout: (int) Timeout in seconds for Heat create, + default 3600s + :return A dict with the requested output values from the template """ log.info("Creating stack '%s' START", self.name) - # create stack early to support cleanup, e.g. ctrl-c while waiting - stack = HeatStack(self.name) - - heat_client = self.heat_client start_time = time.time() - stack.uuid = self.uuid = heat_client.stacks.create( - stack_name=self.name, template=self._template, - parameters=self.heat_parameters)['stack']['id'] + stack = HeatStack(self.name) + stack.create(self._template, self.heat_parameters, block, timeout) if not block: - self.outputs = stack.outputs = {} - end_time = time.time() log.info("Creating stack '%s' DONE in %d secs", - self.name, end_time - start_time) + self.name, time.time() - start_time) return stack - time_limit = start_time + timeout - for status in iter(self.status, self.HEAT_CREATE_COMPLETE_STATUS): - log.debug("Creating stack state: %s", status) - if status == u'CREATE_FAILED': - stack_status_reason = heat_client.stacks.get(self.uuid).stack_status_reason - heat_client.stacks.delete(self.uuid) - raise RuntimeError(stack_status_reason) - if time.time() > time_limit: - raise RuntimeError("Heat stack create timeout") - - time.sleep(self.HEAT_WAIT_LOOP_INTERVAL) + if stack.status != self.HEAT_STATUS_COMPLETE: + raise exceptions.HeatTemplateError(stack_name=self.name) - end_time = time.time() - outputs = heat_client.stacks.get(self.uuid).outputs log.info("Creating stack '%s' DONE in %d secs", - self.name, end_time - start_time) - - # keep outputs as unicode - self.outputs = {output["output_key"]: output["output_value"] for output - in outputs} - - stack.outputs = self.outputs + self.name, time.time() - start_time) return stack diff --git a/yardstick/resources/scripts/install/ovs_deploy.bash b/yardstick/resources/scripts/install/ovs_deploy.bash index d94f30db1..beda9a5e0 100755 --- a/yardstick/resources/scripts/install/ovs_deploy.bash +++ b/yardstick/resources/scripts/install/ovs_deploy.bash @@ -14,6 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +set -e + INSTALL_OVS_BIN="/usr/src" cd $INSTALL_OVS_BIN @@ -22,20 +24,6 @@ if [[ $EUID -ne 0 ]]; then exit 1; fi -prerequisite() -{ - echo "Install required libraries to run collectd..." - pkg=(git flex bison build-essential pkg-config automake autotools-dev libltdl-dev cmake qemu-kvm libvirt-bin bridge-utils numactl libnuma-dev libpcap-dev) - for i in "${pkg[@]}"; do - dpkg-query -W --showformat='${Status}\n' "${i}"|grep "install ok installed" - if [ "$?" -eq "1" ]; then - apt-get update - apt-get -y install "${i}"; - fi - done - echo "Done" -} - download_zip() { url=$1 @@ -53,6 +41,7 @@ download_zip() dpdk_build() { + echo "Build DPDK libraries" pushd . 
if [[ $DPDK_VERSION != "" ]]; then export DPDK_DIR=$INSTALL_OVS_BIN/dpdk-stable-$DPDK_VERSION @@ -62,13 +51,15 @@ dpdk_build() DPDK_DOWNLOAD="http://fast.dpdk.org/rel/dpdk-$DPDK_VERSION.tar.xz" download_zip "${DPDK_DOWNLOAD}" "DPDK" cd dpdk-stable-"$DPDK_VERSION" - make install -j T=$RTE_TARGET + make config T=$RTE_TARGET + make install -j $(nproc) T=$RTE_TARGET fi popd } ovs() { + echo "Build and install OVS with DPDK" pushd . if [[ $OVS_VERSION != "" ]]; then rm -rf openswitch-"$OVS_VERSION" @@ -82,7 +73,7 @@ ovs() else ./configure fi - make install -j + make install -j $(nproc) fi popd } diff --git a/yardstick/tests/unit/orchestrator/test_heat.py b/yardstick/tests/unit/orchestrator/test_heat.py index faf70cdbc..9164197b8 100644 --- a/yardstick/tests/unit/orchestrator/test_heat.py +++ b/yardstick/tests/unit/orchestrator/test_heat.py @@ -1,5 +1,3 @@ -#!/usr/bin/env python - ############################################################################## # Copyright (c) 2017 Intel Corporation # @@ -9,62 +7,87 @@ # http://www.apache.org/licenses/LICENSE-2.0 ############################################################################## -# Unittest for yardstick.benchmark.orchestrator.heat -from contextlib import contextmanager -from itertools import count -from tempfile import NamedTemporaryFile -import time -import uuid +import tempfile import mock +from oslo_serialization import jsonutils +from oslo_utils import uuidutils +import shade import unittest from yardstick.benchmark.contexts import node +from yardstick.common import exceptions from yardstick.orchestrator import heat -TARGET_MODULE = 'yardstick.orchestrator.heat' - - -def mock_patch_target_module(inner_import): - return mock.patch('.'.join([TARGET_MODULE, inner_import])) - - -@contextmanager -def timer(): - start = time.time() - data = {'start': start} - try: - yield data - finally: - data['end'] = end = time.time() - data['delta'] = end - start - - -def index_value_iter(index, index_value, base_value=None): - for current_index in count(): - if current_index == index: - yield index_value - else: - yield base_value - +class FakeStack(object): -def get_error_message(error): - try: - # py2 - return error.message - except AttributeError: - # py3 - return next((arg for arg in error.args if isinstance(arg, str)), None) + def __init__(self, outputs=None, status=None, id=None): + self.outputs = outputs + self.status = status + self.id = id -class HeatContextTestCase(unittest.TestCase): +class HeatStackTestCase(unittest.TestCase): - def test_get_short_key_uuid(self): - u = uuid.uuid4() - k = heat.get_short_key_uuid(u) - self.assertEqual(heat.HEAT_KEY_UUID_LENGTH, len(k)) - self.assertIn(k, str(u)) + def setUp(self): + self.stack_name = 'STACK NAME' + with mock.patch.object(shade, 'openstack_cloud'): + self.heatstack = heat.HeatStack(self.stack_name) + self._mock_stack_create = mock.patch.object(self.heatstack._cloud, + 'create_stack') + self.mock_stack_create = self._mock_stack_create.start() + self._mock_stack_delete = mock.patch.object(self.heatstack._cloud, + 'delete_stack') + self.mock_stack_delete = self._mock_stack_delete.start() + + self.addCleanup(self._cleanup) + + def _cleanup(self): + self._mock_stack_create.stop() + self._mock_stack_delete.stop() + heat._DEPLOYED_STACKS = {} + + def test_create(self): + template = {'tkey': 'tval'} + heat_parameters = {'pkey': 'pval'} + outputs = [{'output_key': 'okey', 'output_value': 'oval'}] + id = uuidutils.generate_uuid() + self.mock_stack_create.return_value = FakeStack( + outputs=outputs, 
status=mock.Mock(), id=id) + mock_tfile = mock.Mock() + with mock.patch.object(tempfile._TemporaryFileWrapper, '__enter__', + return_value=mock_tfile): + self.heatstack.create(template, heat_parameters, True, 100) + mock_tfile.write.assert_called_once_with(jsonutils.dumps(template)) + mock_tfile.close.assert_called_once() + + self.mock_stack_create.assert_called_once_with( + self.stack_name, template_file=mock_tfile.name, wait=True, + timeout=100, pkey='pval') + self.assertEqual({'okey': 'oval'}, self.heatstack.outputs) + self.assertEqual(heat._DEPLOYED_STACKS[id], self.heatstack._stack) + + def test_stacks_exist(self): + self.assertEqual(0, self.heatstack.stacks_exist()) + heat._DEPLOYED_STACKS['id'] = 'stack' + self.assertEqual(1, self.heatstack.stacks_exist()) + + def test_delete_not_uuid(self): + self.assertIsNone(self.heatstack.delete()) + + def test_delete_existing_uuid(self): + id = uuidutils.generate_uuid() + self.heatstack._stack = FakeStack( + outputs=mock.Mock(), status=mock.Mock(), id=id) + heat._DEPLOYED_STACKS[id] = self.heatstack._stack + delete_return = mock.Mock() + self.mock_stack_delete.return_value = delete_return + + ret = self.heatstack.delete(wait=True) + self.assertEqual(delete_return, ret) + self.assertFalse(heat._DEPLOYED_STACKS) + self.mock_stack_delete.assert_called_once_with(id, wait=True) class HeatTemplateTestCase(unittest.TestCase): @@ -75,63 +98,53 @@ class HeatTemplateTestCase(unittest.TestCase): def test_add_tenant_network(self): self.template.add_network('some-network') - self.assertEqual( - self.template.resources['some-network']['type'], - 'OS::Neutron::Net') + self.assertEqual('OS::Neutron::Net', + self.template.resources['some-network']['type']) def test_add_provider_network(self): self.template.add_network('some-network', 'physnet2', 'sriov') - self.assertEqual( - self.template.resources['some-network']['type'], - 'OS::Neutron::ProviderNet') - self.assertEqual( - self.template.resources['some-network']['properties']['physical_network'], - 'physnet2') + self.assertEqual(self.template.resources['some-network']['type'], + 'OS::Neutron::ProviderNet') + self.assertEqual(self.template.resources['some-network'][ + 'properties']['physical_network'], 'physnet2') def test_add_subnet(self): netattrs = {'cidr': '10.0.0.0/24', - 'provider': None, 'external_network': 'ext_net'} - self.template.add_subnet( - 'some-subnet', "some-network", netattrs['cidr']) + 'provider': None, + 'external_network': 'ext_net'} + self.template.add_subnet('some-subnet', "some-network", + netattrs['cidr']) - self.assertEqual( - self.template.resources['some-subnet']['type'], - 'OS::Neutron::Subnet') - self.assertEqual( - self.template.resources['some-subnet']['properties']['cidr'], - '10.0.0.0/24') + self.assertEqual(self.template.resources['some-subnet']['type'], + 'OS::Neutron::Subnet') + self.assertEqual(self.template.resources['some-subnet']['properties'][ + 'cidr'], '10.0.0.0/24') def test_add_router(self): self.template.add_router('some-router', 'ext-net', 'some-subnet') - self.assertEqual( - self.template.resources['some-router']['type'], - 'OS::Neutron::Router') - self.assertIn( - 'some-subnet', - self.template.resources['some-router']['depends_on']) + self.assertEqual(self.template.resources['some-router']['type'], + 'OS::Neutron::Router') + self.assertIn('some-subnet', + self.template.resources['some-router']['depends_on']) def test_add_router_interface(self): - self.template.add_router_interface( - 'some-router-if', 'some-router', 'some-subnet') + 
self.template.add_router_interface('some-router-if', 'some-router', + 'some-subnet') - self.assertEqual( - self.template.resources['some-router-if']['type'], - 'OS::Neutron::RouterInterface') - self.assertIn( - 'some-subnet', - self.template.resources['some-router-if']['depends_on']) + self.assertEqual(self.template.resources['some-router-if']['type'], + 'OS::Neutron::RouterInterface') + self.assertIn('some-subnet', + self.template.resources['some-router-if']['depends_on']) def test_add_servergroup(self): self.template.add_servergroup('some-server-group', 'anti-affinity') - self.assertEqual( - self.template.resources['some-server-group']['type'], - 'OS::Nova::ServerGroup') - self.assertEqual( - self.template.resources['some-server-group']['properties']['policies'], - ['anti-affinity']) + self.assertEqual(self.template.resources['some-server-group']['type'], + 'OS::Nova::ServerGroup') + self.assertEqual(self.template.resources['some-server-group'][ + 'properties']['policies'], ['anti-affinity']) def test__add_resources_to_template_raw(self): test_context = node.NodeContext() @@ -142,16 +155,13 @@ class HeatTemplateTestCase(unittest.TestCase): test_context.keypair_name = "foo-key" test_context.secgroup_name = "foo-secgroup" test_context.key_uuid = "2f2e4997-0a8e-4eb7-9fa4-f3f8fbbc393b" - heat_object = heat.HeatObject() - heat_stack = heat.HeatStack("tmpStack") - self.assertTrue(heat_stack.stacks_exist()) - - test_context.tmpfile = NamedTemporaryFile(delete=True, mode='w+t') + test_context.tmpfile = tempfile.NamedTemporaryFile( + delete=True, mode='w+t') test_context.tmpfile.write("heat_template_version: 2015-04-30") test_context.tmpfile.flush() test_context.tmpfile.seek(0) - heat_template = heat.HeatTemplate(heat_object) + heat_template = heat.HeatTemplate('template name') heat_template.resources = {} heat_template.add_network("network1") @@ -163,324 +173,86 @@ class HeatTemplateTestCase(unittest.TestCase): heat_template.add_router("router1", "gw1", "subnet1") heat_template.add_router_interface("router_if1", "router1", "subnet1") heat_template.add_port("port1", "network1", "subnet1", "normal") - heat_template.add_port( - "port2", - "network2", - "subnet2", - "normal", - sec_group_id="sec_group1", - provider="not-sriov") - heat_template.add_port( - "port3", - "network2", - "subnet2", - "normal", - sec_group_id="sec_group1", - provider="sriov") - heat_template.add_floating_ip( - "floating_ip1", "network1", "port1", "router_if1") - heat_template.add_floating_ip( - "floating_ip2", "network2", "port2", "router_if2", "foo-secgroup") - heat_template.add_floating_ip_association( - "floating_ip1_association", "floating_ip1", "port1") + heat_template.add_port("port2", "network2", "subnet2", "normal", + sec_group_id="sec_group1", provider="not-sriov") + heat_template.add_port("port3", "network2", "subnet2", "normal", + sec_group_id="sec_group1", provider="sriov") + heat_template.add_floating_ip("floating_ip1", "network1", "port1", + "router_if1") + heat_template.add_floating_ip("floating_ip2", "network2", "port2", + "router_if2", "foo-secgroup") + heat_template.add_floating_ip_association("floating_ip1_association", + "floating_ip1", "port1") heat_template.add_servergroup("server_grp2", "affinity") heat_template.add_servergroup("server_grp3", "anti-affinity") heat_template.add_security_group("security_group") + heat_template.add_server(name="server1", image="image1", + flavor="flavor1", flavors=[]) + heat_template.add_server_group(name="servergroup", + policies=["policy1", "policy2"]) + 
heat_template.add_server_group(name="servergroup", + policies="policy1") heat_template.add_server( - name="server1", image="image1", flavor="flavor1", flavors=[]) - heat_template.add_server_group( - name="servergroup", policies=["policy1", "policy2"]) - heat_template.add_server_group(name="servergroup", policies="policy1") - heat_template.add_server( - name="server2", - image="image1", - flavor="flavor1", - flavors=[], - ports=[ - "port1", - "port2"], - networks=[ - "network1", - "network2"], - scheduler_hints="hints1", - user="user1", - key_name="foo-key", - user_data="user", - metadata={ - "cat": 1, - "doc": 2}, - additional_properties={ - "prop1": 1, - "prop2": 2}) + name="server2", image="image1", flavor="flavor1", flavors=[], + ports=["port1", "port2"], networks=["network1", "network2"], + scheduler_hints="hints1", user="user1", key_name="foo-key", + user_data="user", metadata={"cat": 1, "doc": 2}, + additional_properties={"prop1": 1, "prop2": 2}) heat_template.add_server( - name="server2", - image="image1", - flavor="flavor1", - flavors=[ - "flavor1", - "flavor2"], - ports=[ - "port1", - "port2"], - networks=[ - "network1", - "network2"], - scheduler_hints="hints1", - user="user1", - key_name="foo-key", - user_data="user", - metadata={ - "cat": 1, - "doc": 2}, - additional_properties={ - "prop1": 1, - "prop2": 2}) + name="server2", image="image1", flavor="flavor1", + flavors=["flavor1", "flavor2"], ports=["port1", "port2"], + networks=["network1", "network2"], scheduler_hints="hints1", + user="user1", key_name="foo-key", user_data="user", + metadata={"cat": 1, "doc": 2}, + additional_properties={"prop1": 1, "prop2": 2}) heat_template.add_server( - name="server2", - image="image1", - flavor="flavor1", - flavors=[ - "flavor3", - "flavor4"], - ports=[ - "port1", - "port2"], - networks=[ - "network1", - "network2"], - scheduler_hints="hints1", - user="user1", - key_name="foo-key", - user_data="user", - metadata={ - "cat": 1, - "doc": 2}, - additional_properties={ - "prop1": 1, - "prop2": 2}) - heat_template.add_flavor( - name="flavor1", - vcpus=1, - ram=2048, - disk=1, - extra_specs={ - "cat": 1, - "dog": 2}) + name="server2", image="image1", flavor="flavor1", + flavors=["flavor3", "flavor4"], ports=["port1", "port2"], + networks=["network1", "network2"], scheduler_hints="hints1", + user="user1", key_name="foo-key", user_data="user", + metadata={"cat": 1, "doc": 2}, + additional_properties={"prop1": 1, "prop2": 2}) + heat_template.add_flavor(name="flavor1", vcpus=1, ram=2048, disk=1, + extra_specs={"cat": 1, "dog": 2}) heat_template.add_flavor(name=None, vcpus=1, ram=2048) heat_template.add_server( - name="server1", - image="image1", - flavor="flavor1", - flavors=[], - ports=[ - "port1", - "port2"], - networks=[ - "network1", - "network2"], - scheduler_hints="hints1", - user="user1", - key_name="foo-key", - user_data="user", - metadata={ - "cat": 1, - "doc": 2}, - additional_properties={ - "prop1": 1, - "prop2": 2}) + name="server1", image="image1", flavor="flavor1", flavors=[], + ports=["port1", "port2"], networks=["network1", "network2"], + scheduler_hints="hints1", user="user1", key_name="foo-key", + user_data="user", metadata={"cat": 1, "doc": 2}, + additional_properties={"prop1": 1, "prop2": 2}) heat_template.add_network("network1") heat_template.add_flavor("test") - self.assertEqual( - heat_template.resources['test']['type'], 'OS::Nova::Flavor') - - @mock_patch_target_module('op_utils') - @mock_patch_target_module('heatclient') - def test_create_negative(self, 
mock_heat_client_class, mock_op_utils): - self.template.HEAT_WAIT_LOOP_INTERVAL = 0 - mock_heat_client = mock_heat_client_class() # get the constructed mock - - # populate attributes of the constructed mock - mock_heat_client.stacks.get().stack_status_reason = 'the reason' - - expected_status_calls = 0 - expected_constructor_calls = 1 # above, to get the instance - expected_create_calls = 0 - expected_op_utils_usage = 0 - - with mock.patch.object(self.template, 'status', return_value=None) as mock_status: - # block with timeout hit - timeout = 0 - with self.assertRaises(RuntimeError) as raised, timer(): - self.template.create(block=True, timeout=timeout) - - # ensure op_utils was used - expected_op_utils_usage += 1 - self.assertEqual( - mock_op_utils.get_session.call_count, expected_op_utils_usage) - self.assertEqual( - mock_op_utils.get_endpoint.call_count, expected_op_utils_usage) - self.assertEqual( - mock_op_utils.get_heat_api_version.call_count, - expected_op_utils_usage) - - # ensure the constructor and instance were used - self.assertEqual(mock_heat_client_class.call_count, - expected_constructor_calls) - self.assertEqual( - mock_heat_client.stacks.create.call_count, - expected_create_calls) - - # ensure that the status was used - self.assertGreater(mock_status.call_count, expected_status_calls) - expected_status_calls = mock_status.call_count # synchronize the value - - # ensure the expected exception was raised - error_message = get_error_message(raised.exception) - self.assertIn('timeout', error_message) - self.assertNotIn('the reason', error_message) - - # block with create failed - timeout = 10 - mock_status.side_effect = iter([None, None, u'CREATE_FAILED']) - with self.assertRaises(RuntimeError) as raised, timer(): - self.template.create(block=True, timeout=timeout) - - # ensure the existing heat_client was used and op_utils was used - # again - self.assertEqual( - mock_op_utils.get_session.call_count, expected_op_utils_usage) - self.assertEqual( - mock_op_utils.get_endpoint.call_count, expected_op_utils_usage) - self.assertEqual( - mock_op_utils.get_heat_api_version.call_count, - expected_op_utils_usage) - - # ensure the constructor was not used but the instance was used - self.assertEqual(mock_heat_client_class.call_count, - expected_constructor_calls) - self.assertEqual( - mock_heat_client.stacks.create.call_count, - expected_create_calls) - - # ensure that the status was used three times - expected_status_calls += 3 - self.assertEqual(mock_status.call_count, expected_status_calls) - - # NOTE(elfoley): This needs to be split into multiple tests. - # The lines where the template is reset should serve as a guide for where - # to split. 
- @mock_patch_target_module('op_utils') - @mock_patch_target_module('heatclient') - def test_create(self, mock_heat_client_class, mock_op_utils): - self.template.HEAT_WAIT_LOOP_INTERVAL = 0.2 - mock_heat_client = mock_heat_client_class() - - # populate attributes of the constructed mock - mock_heat_client.stacks.get().outputs = [ - {'output_key': 'key1', 'output_value': 'value1'}, - {'output_key': 'key2', 'output_value': 'value2'}, - {'output_key': 'key3', 'output_value': 'value3'}, - ] - expected_outputs = { # pylint: disable=unused-variable - 'key1': 'value1', - 'key2': 'value2', - 'key3': 'value3', - } - - expected_status_calls = 0 - expected_constructor_calls = 1 # above, to get the instance - expected_create_calls = 0 - expected_op_utils_usage = 0 - - with mock.patch.object(self.template, 'status') as mock_status: - self.template.name = 'no block test' - mock_status.return_value = None - - # no block - self.assertIsInstance(self.template.create( - block=False, timeout=2), heat.HeatStack) - - # ensure op_utils was used - expected_op_utils_usage += 1 - self.assertEqual( - mock_op_utils.get_session.call_count, expected_op_utils_usage) - self.assertEqual( - mock_op_utils.get_endpoint.call_count, expected_op_utils_usage) - self.assertEqual( - mock_op_utils.get_heat_api_version.call_count, - expected_op_utils_usage) - - # ensure the constructor and instance were used - self.assertEqual(mock_heat_client_class.call_count, - expected_constructor_calls) - self.assertEqual( - mock_heat_client.stacks.create.call_count, - expected_create_calls) - - # ensure that the status was not used - self.assertEqual(mock_status.call_count, expected_status_calls) - - # ensure no outputs because this requires blocking - self.assertEqual(self.template.outputs, {}) - - # block with immediate complete - self.template.name = 'block, immediate complete test' - - mock_status.return_value = self.template.HEAT_CREATE_COMPLETE_STATUS - self.assertIsInstance(self.template.create( - block=True, timeout=2), heat.HeatStack) - - # ensure existing instance was re-used and op_utils was not used - self.assertEqual(mock_heat_client_class.call_count, - expected_constructor_calls) - self.assertEqual( - mock_heat_client.stacks.create.call_count, - expected_create_calls) - - # ensure status was checked once - expected_status_calls += 1 - self.assertEqual(mock_status.call_count, expected_status_calls) - - # reset template outputs - self.template.outputs = None - - # block with delayed complete - self.template.name = 'block, delayed complete test' - - success_index = 2 - mock_status.side_effect = index_value_iter( - success_index, self.template.HEAT_CREATE_COMPLETE_STATUS) - self.assertIsInstance(self.template.create( - block=True, timeout=2), heat.HeatStack) - - # ensure existing instance was re-used and op_utils was not used - self.assertEqual(mock_heat_client_class.call_count, - expected_constructor_calls) - self.assertEqual( - mock_heat_client.stacks.create.call_count, - expected_create_calls) - - # ensure status was checked three more times - expected_status_calls += 1 + success_index - self.assertEqual(mock_status.call_count, expected_status_calls) - - -class HeatStackTestCase(unittest.TestCase): - - def test_delete_calls__delete_multiple_times(self): - stack = heat.HeatStack('test') - stack.uuid = 1 - with mock.patch.object(stack, "_delete") as delete_mock: - stack.delete() - # call once and then call again if uuid is not none - self.assertGreater(delete_mock.call_count, 1) - - def test_delete_all_calls_delete(self): - # we 
must patch the object before we create an instance - # so we can override delete() in all the instances - with mock.patch.object(heat.HeatStack, "delete") as delete_mock: - stack = heat.HeatStack('test') - stack.uuid = 1 - stack.delete_all() - self.assertGreater(delete_mock.call_count, 0) + self.assertEqual(heat_template.resources['test']['type'], + 'OS::Nova::Flavor') + + def test_create_not_block(self): + heat_stack = mock.Mock() + with mock.patch.object(heat, 'HeatStack', return_value=heat_stack): + ret = self.template.create(block=False) + heat_stack.create.assert_called_once_with( + self.template._template, self.template.heat_parameters, False, + 3600) + self.assertEqual(heat_stack, ret) + + def test_create_block(self): + heat_stack = mock.Mock() + heat_stack.status = self.template.HEAT_STATUS_COMPLETE + with mock.patch.object(heat, 'HeatStack', return_value=heat_stack): + ret = self.template.create(block=False) + heat_stack.create.assert_called_once_with( + self.template._template, self.template.heat_parameters, False, + 3600) + self.assertEqual(heat_stack, ret) + + + def test_create_block_status_no_complete(self): + heat_stack = mock.Mock() + heat_stack.status = 'other status' + with mock.patch.object(heat, 'HeatStack', return_value=heat_stack): + self.assertRaises(exceptions.HeatTemplateError, + self.template.create, block=True) + heat_stack.create.assert_called_once_with( + self.template._template, self.template.heat_parameters, True, + 3600)
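
A short usage sketch of the refactored HeatTemplate/HeatStack API from the heat.py hunks above. It assumes a reachable OpenStack cloud whose credentials shade can discover (clouds.yaml or OS_* environment variables); the stack, network and CIDR values are illustrative only.

    from yardstick.orchestrator import heat

    template = heat.HeatTemplate('demo-stack')
    template.add_network('demo-net')
    template.add_subnet('demo-subnet', 'demo-net', '10.0.1.0/24')

    # create() returns a HeatStack; with block=True it raises
    # HeatTemplateError unless the stack reaches the COMPLETE status.
    stack = template.create(block=True, timeout=600)
    print(stack.outputs)      # dict of output_key -> output_value
    stack.delete(wait=True)   # also drops the stack from _DEPLOYED_STACKS

With block=False the call returns as soon as the stack request is issued, which matches the non-blocking path exercised by test_create_not_block in the unit tests above.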
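On the scenario plugin loading introduced above: base.py discovers out-of-tree scenarios through a stevedore ExtensionManager, while setup.py registers an (empty) entry-point group; the diff spells the group 'yardstick.scenarios' in base.py and 'yardstick.scenario' in setup.py, so a plugin has to match whichever name the installed tree actually reads. A minimal, hypothetical plugin package could look like the sketch below; the package and module names are illustrative only, and loading the module is enough because discovery then falls back to utils.itersubclasses(Scenario).

    # setup.py of a hypothetical out-of-tree plugin package
    from setuptools import setup

    setup(
        name='yardstick-sample-plugin',      # illustrative name
        py_modules=['sample_scenario'],
        entry_points={
            'yardstick.scenarios': [         # match the namespace read by base.py
                'SampleScenario = sample_scenario',
            ],
        },
    )

    # sample_scenario.py -- imported by the extension manager; defining the
    # subclass is all that itersubclasses() needs to find it.
    from yardstick.benchmark.scenarios import base

    class SampleScenario(base.Scenario):
        """Do-nothing scenario used only to illustrate plugin discovery."""
        __scenario_type__ = 'SampleScenario'

        def run(self, *args):
            pass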