roles:
- infra_check_requirements
- infra_destroy_previous_configuration
+ - infra_create_network
+ - infra_create_vms
--- /dev/null
+# Copyright (c) 2017-2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: Define XML file name
+ set_fact:
+ xml_file: "{{ '/tmp/'+item.name+'.xml' }}"
+
+- name: Delete the file, if exists
+ file:
+ path: "{{ xml_file }}"
+ state: absent
+
+- name: Create a new empty file
+ file:
+ path: "{{ xml_file }}"
+ state: touch
+
+- name: Add root "network" node
+ blockinfile:
+ path: "{{ xml_file }}"
+ marker: ""
+ content: |
+ <network>
+ </network>
+
+- name: Add new children nodes to "network" node
+ xml:
+ path: "{{ xml_file }}"
+ xpath: /network
+ add_children:
+ - name: "{{ item.name }}"
+ - bridge
+ - ip
+ pretty_print: yes
+
+- name: Add "name" attribute to "bridge" node
+ xml:
+ path: "{{ xml_file }}"
+ xpath: /network/bridge
+ attribute: name
+ value: "{{ item.name }}"
+ pretty_print: yes
+
+- name: Add "stp" attribute to "bridge" node
+ xml:
+ path: "{{ xml_file }}"
+ xpath: /network/bridge
+ attribute: stp
+ value: "on"
+ pretty_print: yes
+
+- name: Add "delay" attribute to "bridge" node
+ xml:
+ path: "{{ xml_file }}"
+ xpath: /network/bridge
+ attribute: delay
+ value: "0"
+ pretty_print: yes
+
+- name: Add "address" attribute to "ip" node
+ xml:
+ path: "{{ xml_file }}"
+ xpath: /network/ip
+ attribute: address
+ value: "{{ item.host_ip }}"
+ pretty_print: yes
+
+- name: Add "netmask" attribute to "ip" node
+ xml:
+ path: "{{ xml_file }}"
+ xpath: /network/ip
+ attribute: netmask
+ value: "{{ item.netmask }}"
+ pretty_print: yes
+
+- name: Define the networks
+ virt_net:
+ command: define
+ name: "{{ item.name }}"
+ xml: "{{ lookup('file', xml_file) }}"
+
+- name: Set autostart to yes
+ virt_net:
+ autostart: yes
+ name: "{{ item.name }}"
+
+- name: Start the networks
+ virt_net:
+ command: start
+ name: "{{ item.name }}"
+
+- name: Remove XML file
+ file:
+ path: "{{ xml_file }}"
+ state: absent
--- /dev/null
+# Copyright (c) 2017-2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: Include
+ include_vars:
+ file: "{{ rs_file }}"
+ name: infra_deploy_vars
+
+- name: Create XML file
+ include_tasks: create_xml.yaml
+ with_items: "{{ infra_deploy_vars.networks }}"
--- /dev/null
+# Copyright (c) 2017-2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: Remove directory
+ file:
+ path: "{{ '/tmp/'+node_item.hostname }}"
+ state: absent
+
+- name: Create directory
+ file:
+ path: "{{ '/tmp/'+node_item.hostname }}"
+ state: directory
+ mode: 0755
+
+- name: Define user-data file name
+ set_fact:
+ user_data: "{{ '/tmp/'+node_item.hostname+'/user-data' }}"
+
+- name: Define image-dir
+ set_fact:
+ image_dir: "{{ '/var/lib/libvirt/images/' }}"
+
+- name: Create a new empty file for user-data
+ file:
+ path: "{{ user_data }}"
+ state: touch
+
+- name: Add user-data
+ blockinfile:
+ path: "{{ user_data }}"
+ marker: "MARKER"
+ content: |
+ #cloud-config
+ preserve_hostname: False
+ hostname: {{ node_item.hostname }}
+ output:
+ all: ">> /var/log/cloud-init.log"
+ ssh_pwauth: True
+ bootcmd:
+ - echo 127.0.0.1 {{ node_item.hostname }} >> /etc/hosts
+ users:
+ - name: {{ node_item.user }}
+ lock-passwd: False
+ plain_text_passwd: {{ node_item.password }}
+ chpasswd: { expire: False }
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ ssh_pwauth: True
+
+- name: Remove the marker
+ lineinfile:
+ dest: "{{ user_data }}"
+ state: absent
+ regexp: "MARKER"
+
+- name: Define network-config file name
+ set_fact:
+ network_config: "{{ '/tmp/'+node_item.hostname+'/network-config' }}"
+
+- name: Create a new empty file for network-config
+ file:
+ path: "{{ network_config }}"
+ state: touch
+
+- name: Add network-data
+ blockinfile:
+ path: "{{ network_config }}"
+ marker: "MARKER"
+ content: |
+ version: 2
+ ethernets:
+
+- name: Define meta-data file name
+ set_fact:
+ meta_data: "{{ '/tmp/'+node_item.hostname+'/meta-data' }}"
+
+- name: Create a new empty file for meta-data
+ file:
+ path: "{{ meta_data }}"
+ state: touch
+
+- name: Add meta-data
+ blockinfile:
+ path: "{{ meta_data }}"
+ marker: "MARKER"
+ content: |
+ instance-id: {{ node_item.hostname }}
+ local-hostname: {{ node_item.hostname }}
+
+- name: Remove the marker
+ lineinfile:
+ dest: "{{ meta_data }}"
+ state: absent
+ regexp: "MARKER"
+
+- name: Define xml file name
+ set_fact:
+ xml_file: "{{ '/tmp/'+node_item.hostname+'/'+node_item.hostname+'.xml' }}"
+
+- name: Create a new empty file for xml file
+ file:
+ path: "{{ xml_file }}"
+ state: touch
+
+- name: Add root "domain" node
+ blockinfile:
+ path: "{{ xml_file }}"
+ marker: ""
+ content: |
+ <domain>
+ </domain>
+
+- name: Add "type" attribute to "domain" node
+ xml:
+ path: "{{ xml_file }}"
+ xpath: /domain
+ attribute: type
+ value: "kvm"
+ pretty_print: yes
+
+- name: Add new children nodes to "domain" node
+ xml:
+ path: "{{ xml_file }}"
+ xpath: /domain
+ add_children:
+ - name: "{{ node_item.hostname }}"
+ - memory: "{{ node_item.ram }}"
+ - vcpu: "{{ node_item.vcpus }}"
+ - os
+ - cpu
+ - devices
+ pretty_print: yes
+
+- name: Add "unit" attribute to "memory" node
+ xml:
+ path: "{{ xml_file }}"
+ xpath: /domain/memory
+ attribute: unit
+ value: "MB"
+ pretty_print: yes
+
+- name: Add "placement" attribute to "vcpu" node
+ xml:
+ path: "{{ xml_file }}"
+ xpath: /domain/vcpu
+ attribute: placement
+ value: "static"
+ pretty_print: yes
+
+- name: Add new children nodes to "os" node
+ xml:
+ path: "{{ xml_file }}"
+ xpath: /domain/os
+ add_children:
+ - type: "hvm"
+ - boot
+ pretty_print: yes
+
+- name: Add "arch" attribute to "type" node
+ xml:
+ path: "{{ xml_file }}"
+ xpath: /domain/os/type
+ attribute: arch
+ value: "x86_64"
+ pretty_print: yes
+
+- name: Add "dev" attribute to "boot" node
+ xml:
+ path: "{{ xml_file }}"
+ xpath: /domain/os/boot
+ attribute: dev
+ value: "hd"
+ pretty_print: yes
+
+- name: Add new children nodes to "cpu" node
+ xml:
+ path: "{{ xml_file }}"
+ xpath: /domain/cpu
+ add_children:
+ - cache
+ pretty_print: yes
+
+- name: Add "mode" attribute to "cpu" node
+ xml:
+ path: "{{ xml_file }}"
+ xpath: /domain/cpu
+ attribute: mode
+ value: "host-passthrough"
+ pretty_print: yes
+
+- name: Add "mode" attribute to "cache" node
+ xml:
+ path: "{{ xml_file }}"
+ xpath: /domain/cpu/cache
+ attribute: mode
+ value: "passthrough"
+ pretty_print: yes
+
+- name: Add new children nodes to "devices" node
+ xml:
+ path: "{{ xml_file }}"
+ xpath: /domain/devices
+ add_children:
+ - disk:
+ type: file
+ device: disk
+ - controller:
+ type: virtio-serial
+ index: '0'
+ - serial:
+ type: pty
+ - console:
+ type: pty
+ tty: '/dev/pts/14'
+ pretty_print: yes
+
+- name: Add new children nodes to "disk" node
+ xml:
+ path: "{{ xml_file }}"
+ xpath: /domain/devices/disk
+ add_children:
+ - driver:
+ name: qemu
+ type: qcow2
+ - source:
+ file: "{{ '/var/lib/libvirt/images/'+node_item.hostname+'.qcow2' }}"
+ - target:
+ dev: vda
+ bus: virtio
+ - alias:
+ name: virtio-disk0
+ pretty_print: yes
+
+- name: Add new children nodes to "devices" node
+ xml:
+ path: "{{ xml_file }}"
+ xpath: /domain/devices
+ add_children:
+ - disk:
+ type: file
+ device: cdrom
+ pretty_print: yes
+
+- name: Add new children nodes to "disk" node
+ xml:
+ path: "{{ xml_file }}"
+ xpath: /domain/devices/disk
+ add_children:
+ - source:
+ file: "{{ '/var/lib/libvirt/images/'+node_item.hostname+'-ci-data.img' }}"
+ - target:
+ dev: hdb
+ bus: ide
+ - readonly
+ pretty_print: yes
+
+- name: Configure controller
+ xml:
+ path: "{{ xml_file }}"
+ xpath: /domain/devices/controller
+ add_children:
+ - alias:
+ name: virtio-serial0
+ pretty_print: yes
+
+- name: Configure serial
+ xml:
+ path: "{{ xml_file }}"
+ xpath: /domain/devices/serial
+ add_children:
+ - source:
+ path: '/dev/pts/14'
+ - target:
+ port: '0'
+ - alias:
+ name: 'serial0'
+ pretty_print: yes
+
+- name: Configure console
+ xml:
+ path: "{{ xml_file }}"
+ xpath: /domain/devices/console
+ add_children:
+ - source:
+ path: '/dev/pts/14'
+ - target:
+ port: '0'
+ type: 'serial'
+ - alias:
+ name: 'serial0'
+ pretty_print: yes
+
+- set_fact:
+ slot_address: 5
+
+- name: Populate network-config and add interface to xml file
+  include_tasks: create_interfaces.yml
+  loop_control:
+    loop_var: interface_item
+  with_items: "{{ node_item.interfaces }}"
+
+- name: Create directory
+ file:
+ path: "{{ '/tmp/'+node_item.hostname }}"
+ state: directory
+ mode: 0755
+
+- name: Generate iso image
+ shell: >
+ genisoimage -output {{ image_dir+node_item.hostname+'-ci-data.img' }} -volid cidata -joliet
+ -r {{ '/tmp/'+node_item.hostname+'/network-config' }} {{ '/tmp/'+node_item.hostname+'/user-data' }} {{ '/tmp/'+node_item.hostname+'/meta-data' }}
+ &>> {{ '/tmp/'+node_item.hostname+'/hostname.log' }}
+
+- name: Copy and convert the ubuntu image
+ shell: >
+ qemu-img convert -O qcow2 {{ node_item.image }} {{ image_dir+node_item.hostname+'.qcow2' }}
+
+- name: Copy and convert the ubuntu image
+ shell: >
+ qemu-img resize {{ image_dir+node_item.hostname+'.qcow2' }} {{ node_item.disk }}MB
+
+- name: Define the VMs
+ virt:
+ command: define
+ name: "{{ node_item.hostname }}"
+ xml: "{{ lookup('file', '/tmp/'+node_item.hostname+'/'+node_item.hostname+'.xml') }}"
+
+- name: Start the VMs
+ virt:
+ command: create
+ name: "{{ node_item.hostname }}"
--- /dev/null
+# Copyright (c) 2017-2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: Add network-data
+ blockinfile:
+ path: "{{ network_config }}"
+ insertafter: 'ethernets:'
+ marker: "MARKER"
+ block: |2
+ {{ 'enp0s%d:'| format( slot_address | int) }}
+ match:
+ mac_address: {{ '52:54:00:5d:7d:%02x'| format( mac_address_counter | int) }}
+ addresses:
+ - {{ interface_item.ip }}/{{ interface_item.netmask }}
+
+
+- name: Remove the marker introduced in network-data
+ lineinfile:
+ dest: "{{ network_config }}"
+ state: absent
+ regexp: "MARKER"
+
+- name: Add new children nodes to "domain" node
+ xml:
+ path: "{{ xml_file }}"
+ xpath: /domain/devices
+ add_children:
+ - interface:
+ type: 'bridge'
+ pretty_print: yes
+
+- name: Add new children nodes to "domain" node
+ xml:
+ path: "{{ xml_file }}"
+ xpath: /domain/devices/interface
+ add_children:
+ - source:
+ bridge: "{{ interface_item.network }}"
+ - model:
+ type: 'virtio'
+ - address:
+ type: 'pci'
+ domain: '0x0000'
+ bus: '0x00'
+ slot: "{{ '0x%02x'| format( slot_address | int) }}"
+ function: '0x0'
+ - mac:
+ address: "{{ '52:54:00:5d:7d:%02x'| format( mac_address_counter | int) }}"
+ pretty_print: yes
+
+- set_fact:
+ slot_address: "{{ slot_address | int + 1 }}"
+- set_fact:
+ mac_address_counter: "{{ mac_address_counter | int + 1 }}"
--- /dev/null
+# Copyright (c) 2017-2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+- name: Include infrastructure deployment variables
+  include_vars:
+    file: "{{ rs_file }}"
+    name: infra_deploy_vars
+
+- set_fact:
+ mac_address_counter: 0
+
+- name: Configure and create the VMs
+  include_tasks: configure_vm.yml
+  loop_control:
+    loop_var: node_item
+  with_items: "{{ infra_deploy_vars.nodes }}"
ip: 192.168.1.10
netmask: 255.255.255.0
user: ubuntu
- pasword: password
+ password: password
image: /tmp/image1.qcow
disk: 50000
ram: 8192
ip: 192.20.1.20
netmask: 255.255.255.0
user: ubuntu
- pasword: password
+ password: password
image: /tmp/image_2.qcow
disk: 40000
ram: 32768
networks:
- name: management
- host_ip: 192.168.1.1 # not mandatory
+ host_ip: 192.168.1.1
+ netmask: 255.255.255.0
+++ /dev/null
-##############################################################################
-# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-import unittest
-import mock
-
-from yardstick.benchmark.scenarios.lib.create_floating_ip import CreateFloatingIp
-
-
-class CreateFloatingIpTestCase(unittest.TestCase):
-
- @mock.patch('yardstick.common.openstack_utils.create_floating_ip')
- @mock.patch('yardstick.common.openstack_utils.get_network_id')
- @mock.patch('yardstick.common.openstack_utils.get_neutron_client')
- def test_create_floating_ip(self, mock_create_floating_ip, mock_get_network_id, mock_get_neutron_client):
- options = {}
- args = {"options": options}
- obj = CreateFloatingIp(args, {})
- obj.run({})
- self.assertTrue(mock_create_floating_ip.called)
- self.assertTrue(mock_get_network_id.called)
- self.assertTrue(mock_get_neutron_client.called)
-
-def main():
- unittest.main()
-
-
-if __name__ == '__main__':
- main()
+++ /dev/null
-#!/usr/bin/env python
-
-# Copyright 2017 Nokia
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Unittest for yardstick.benchmark.scenarios.networking.vsperf.VsperfDPDK
-
-from __future__ import absolute_import
-try:
- from unittest import mock
-except ImportError:
- import mock
-import unittest
-
-from yardstick.benchmark.scenarios.networking import vsperf_dpdk
-
-
-@mock.patch('yardstick.benchmark.scenarios.networking.vsperf_dpdk.subprocess')
-class VsperfDPDKTestCase(unittest.TestCase):
-
- def setUp(self):
- self.ctx = {
- "host": {
- "ip": "10.229.47.137",
- "user": "ubuntu",
- "password": "ubuntu",
- },
- }
- self.args = {
- 'task_id': "1234-5678",
- 'options': {
- 'testname': 'pvp_tput',
- 'traffic_type': 'rfc2544_throughput',
- 'frame_size': '64',
- 'test_params': 'TRAFFICGEN_DURATION=30;',
- 'trafficgen_port1': 'ens4',
- 'trafficgen_port2': 'ens5',
- 'conf_file': 'vsperf-yardstick.conf',
- 'setup_script': 'setup_yardstick.sh',
- 'moongen_helper_file': '~/moongen.py',
- 'moongen_host_ip': '10.5.201.151',
- 'moongen_port1_mac': '8c:dc:d4:ae:7c:5c',
- 'moongen_port2_mac': '8c:dc:d4:ae:7c:5d',
- 'trafficgen_port1_nw': 'test2',
- 'trafficgen_port2_nw': 'test3',
- },
- 'sla': {
- 'metrics': 'throughput_rx_fps',
- 'throughput_rx_fps': 500000,
- 'action': 'monitor',
- }
- }
-
- self._mock_ssh = mock.patch(
- 'yardstick.benchmark.scenarios.networking.vsperf_dpdk.ssh')
- self.mock_ssh = self._mock_ssh.start()
-
- self.addCleanup(self._cleanup)
-
- def _cleanup(self):
- self._mock_ssh.stop()
-
- def test_vsperf_dpdk_setup(self, mock_subprocess):
- p = vsperf_dpdk.VsperfDPDK(self.args, self.ctx)
-
- # setup() specific mocks
- mock_subprocess.call().execute.return_value = None
-
- p.setup()
- self.assertIsNotNone(p.client)
- self.assertTrue(p.setup_done)
-
- def test_vsperf_dpdk_teardown(self, mock_subprocess):
- p = vsperf_dpdk.VsperfDPDK(self.args, self.ctx)
-
- # setup() specific mocks
- mock_subprocess.call().execute.return_value = None
-
- p.setup()
- self.assertIsNotNone(p.client)
- self.assertTrue(p.setup_done)
-
- p.teardown()
- self.assertFalse(p.setup_done)
-
- def test_vsperf_dpdk_is_dpdk_setup_no(self, mock_subprocess):
- p = vsperf_dpdk.VsperfDPDK(self.args, self.ctx)
-
- # setup() specific mocks
- mock_subprocess.call().execute.return_value = None
-
- p.setup()
- self.assertIsNotNone(p.client)
- self.assertTrue(p.setup_done)
-
- # is_dpdk_setup() specific mocks
- self.mock_ssh.SSH.from_node().execute.return_value = (0, 'dummy', '')
-
- result = p._is_dpdk_setup()
- self.assertFalse(result)
-
- def test_vsperf_dpdk_is_dpdk_setup_yes(self, mock_subprocess):
- p = vsperf_dpdk.VsperfDPDK(self.args, self.ctx)
-
- # setup() specific mocks
- mock_subprocess.call().execute.return_value = None
-
- p.setup()
- self.assertIsNotNone(p.client)
- self.assertTrue(p.setup_done)
-
- # is_dpdk_setup() specific mocks
- self.mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
-
- result = p._is_dpdk_setup()
- self.assertTrue(result)
-
- @mock.patch('time.sleep')
- def test_vsperf_dpdk_dpdk_setup_first(self, _, mock_subprocess):
- p = vsperf_dpdk.VsperfDPDK(self.args, self.ctx)
-
- # setup() specific mocks
- mock_subprocess.call().execute.return_value = None
-
- p.setup()
- self.assertIsNotNone(p.client)
- self.assertTrue(p.setup_done)
-
- # is_dpdk_setup() specific mocks
- self.mock_ssh.SSH.from_node().execute.return_value = (0, 'dummy', '')
-
- p.dpdk_setup()
- self.assertFalse(p._is_dpdk_setup())
- self.assertTrue(p.dpdk_setup_done)
-
- @mock.patch('time.sleep')
- def test_vsperf_dpdk_dpdk_setup_next(self, _, mock_subprocess):
- p = vsperf_dpdk.VsperfDPDK(self.args, self.ctx)
-
- # setup() specific mocks
- self.mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- mock_subprocess.call().execute.return_value = None
-
- p.setup()
- self.assertIsNotNone(p.client)
- self.assertTrue(p.setup_done)
-
- p.dpdk_setup()
- self.assertTrue(p._is_dpdk_setup())
- self.assertTrue(p.dpdk_setup_done)
-
- @mock.patch('time.sleep')
- def test_vsperf_dpdk_dpdk_setup_fail(self, _, mock_subprocess):
- p = vsperf_dpdk.VsperfDPDK(self.args, self.ctx)
-
- # setup() specific mocks
- self.mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- mock_subprocess.call().execute.return_value = None
-
- p.setup()
- self.assertIsNotNone(p.client)
- self.mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
- self.assertTrue(p.setup_done)
-
- self.assertRaises(RuntimeError, p.dpdk_setup)
-
- @mock.patch('time.sleep')
- def test_vsperf_dpdk_run_ok(self, _, mock_subprocess):
- p = vsperf_dpdk.VsperfDPDK(self.args, self.ctx)
-
- # setup() specific mocks
- self.mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- mock_subprocess.call().execute.return_value = None
-
- p.setup()
- self.assertIsNotNone(p.client)
- self.assertTrue(p.setup_done)
-
- # run() specific mocks
- mock_subprocess.call().execute.return_value = None
- self.mock_ssh.SSH.from_node().execute.return_value = (
- 0, 'throughput_rx_fps\r\n14797660.000\r\n', '')
-
- result = {}
- p.run(result)
-
- self.assertEqual(result['throughput_rx_fps'], '14797660.000')
-
- def test_vsperf_dpdk_run_falied_vsperf_execution(self, mock_subprocess):
- p = vsperf_dpdk.VsperfDPDK(self.args, self.ctx)
-
- # setup() specific mocks
- self.mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- mock_subprocess.call().execute.return_value = None
-
- p.setup()
- self.assertIsNotNone(p.client)
- self.assertTrue(p.setup_done)
-
- # run() specific mocks
- mock_subprocess.call().execute.return_value = None
- mock_subprocess.call().execute.return_value = None
- self.mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
-
- result = {}
- self.assertRaises(RuntimeError, p.run, result)
-
- def test_vsperf_dpdk_run_falied_csv_report(self, mock_subprocess):
- p = vsperf_dpdk.VsperfDPDK(self.args, self.ctx)
-
- # setup() specific mocks
- self.mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- mock_subprocess.call().execute.return_value = None
-
- p.setup()
- self.assertIsNotNone(p.client)
- self.assertTrue(p.setup_done)
-
- # run() specific mocks
- mock_subprocess.call().execute.return_value = None
- mock_subprocess.call().execute.return_value = None
- self.mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- self.mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
-
- result = {}
- self.assertRaises(RuntimeError, p.run, result)
-
-def main():
- unittest.main()
-
-
-if __name__ == '__main__':
- main()
from tests.unit import STL_MOCKS
-
STLClient = mock.MagicMock()
stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
stl_patch.start()
EXAMPLE_IP_ADDR = "10.0.0.1"
EXAMPLE_IPv6_ADDR = "0064:ff9b:0:0:0:0:9810:6414"
- PROFILE = {'description': 'Traffic profile to run RFC2544 latency',
- 'name': 'rfc2544',
- 'traffic_profile': {'traffic_type': 'RFC2544Profile',
- 'frame_rate': 100},
- TrafficProfile.DOWNLINK: {'ipv4': {'outer_l2': {'framesize': {'64B': '100',
- '1518B': '0',
- '128B': '0',
- '1400B': '0',
- '256B': '0',
- '373b': '0',
- '570B': '0'},
- "srcmac": "00:00:00:00:00:02",
- "dstmac": "00:00:00:00:00:01"},
- 'outer_l3v4': {'dstip4': '1.1.1.1-1.1.2.2',
- 'proto': 'udp',
- 'srcip4': '9.9.1.1-90.1.2.2',
- 'dscp': 0, 'ttl': 32,
- 'count': 1},
- 'outer_l4': {'srcport': '2001',
- 'dsrport': '1234',
- 'count': 1}}},
- TrafficProfile.UPLINK: {'ipv4':
- {'outer_l2': {'framesize':
- {'64B': '100', '1518B': '0',
- '128B': '0', '1400B': '0',
- '256B': '0', '373b': '0',
- '570B': '0'},
- "srcmac": "00:00:00:00:00:01",
- "dstmac": "00:00:00:00:00:02"},
- 'outer_l3v4': {'dstip4': '9.9.1.1-90.105.255.255',
- 'proto': 'udp',
- 'srcip4': '1.1.1.1-1.15.255.255',
- 'dscp': 0, 'ttl': 32, 'count': 1},
- 'outer_l4': {'dstport': '2001',
- 'srcport': '1234',
- 'count': 1}}},
- 'schema': 'isb:traffic_profile:0.1'}
- PROFILE_v6 = {'description': 'Traffic profile to run RFC2544 latency',
- 'name': 'rfc2544',
- 'traffic_profile': {'traffic_type': 'RFC2544Profile',
- 'frame_rate': 100},
- TrafficProfile.DOWNLINK: {'ipv6': {'outer_l2': {'framesize':
- {'64B': '100', '1518B': '0',
- '128B': '0', '1400B': '0',
- '256B': '0', '373b': '0',
- '570B': '0'},
- "srcmac": "00:00:00:00:00:02",
- "dstmac": "00:00:00:00:00:01"},
- 'outer_l3v4': {'dstip6': '0064:ff9b:0:0:0:0:9810:6414-0064:ff9b:0:0:0:0:9810:6420',
- 'proto': 'udp',
- 'srcip6': '0064:ff9b:0:0:0:0:9810:2814-0064:ff9b:0:0:0:0:9810:2820',
- 'dscp': 0, 'ttl': 32,
- 'count': 1},
- 'outer_l4': {'srcport': '2001',
- 'dsrport': '1234',
- 'count': 1}}},
- TrafficProfile.UPLINK:
- {'ipv6': {'outer_l2': {'framesize':
- {'64B': '100', '1518B': '0',
- '128B': '0', '1400B': '0',
- '256B': '0', '373b': '0',
- '570B': '0'},
- "srcmac": "00:00:00:00:00:01",
- "dstmac": "00:00:00:00:00:02"},
- 'outer_l3v4': {'dstip6': '0064:ff9b:0:0:0:0:9810:2814-0064:ff9b:0:0:0:0:9810:2820',
- 'proto': 'udp',
- 'srcip6': '0064:ff9b:0:0:0:0:9810:6414-0064:ff9b:0:0:0:0:9810:6420',
- 'dscp': 0, 'ttl': 32,
- 'count': 1},
- 'outer_l4': {'dstport': '2001',
- 'srcport': '1234',
- 'count': 1}}},
- 'schema': 'isb:traffic_profile:0.1'}
+ PROFILE = {
+ 'description': 'Traffic profile to run RFC2544 latency',
+ 'name': 'rfc2544',
+ 'traffic_profile': {'traffic_type': 'RFC2544Profile',
+ 'frame_rate': 100},
+ TrafficProfile.DOWNLINK: {
+ 'ipv4': {'outer_l2': {'framesize': {'64B': '100',
+ '1518B': '0',
+ '128B': '0',
+ '1400B': '0',
+ '256B': '0',
+ '373b': '0',
+ '570B': '0'},
+ "srcmac": "00:00:00:00:00:02",
+ "dstmac": "00:00:00:00:00:01"},
+ 'outer_l3v4': {'dstip4': '1.1.1.1-1.1.2.2',
+ 'proto': 'udp',
+ 'srcip4': '9.9.1.1-90.1.2.2',
+ 'dscp': 0, 'ttl': 32,
+ 'count': 1},
+ 'outer_l4': {'srcport': '2001',
+ 'dsrport': '1234',
+ 'count': 1}}},
+ TrafficProfile.UPLINK: {
+ 'ipv4':
+ {'outer_l2': {'framesize':
+ {'64B': '100', '1518B': '0',
+ '128B': '0', '1400B': '0',
+ '256B': '0', '373b': '0',
+ '570B': '0'},
+ "srcmac": "00:00:00:00:00:01",
+ "dstmac": "00:00:00:00:00:02"},
+ 'outer_l3v4': {'dstip4': '9.9.1.1-90.105.255.255',
+ 'proto': 'udp',
+ 'srcip4': '1.1.1.1-1.15.255.255',
+ 'dscp': 0, 'ttl': 32, 'count': 1},
+ 'outer_l4': {'dstport': '2001',
+ 'srcport': '1234',
+ 'count': 1}}},
+ 'schema': 'isb:traffic_profile:0.1'}
+ PROFILE_v6 = {
+ 'description': 'Traffic profile to run RFC2544 latency',
+ 'name': 'rfc2544',
+ 'traffic_profile': {'traffic_type': 'RFC2544Profile',
+ 'frame_rate': 100},
+ TrafficProfile.DOWNLINK: {
+ 'ipv6': {'outer_l2': {'framesize':
+ {'64B': '100', '1518B': '0',
+ '128B': '0', '1400B': '0',
+ '256B': '0', '373b': '0',
+ '570B': '0'},
+ "srcmac": "00:00:00:00:00:02",
+ "dstmac": "00:00:00:00:00:01"},
+ 'outer_l3v4': {
+ 'dstip6':
+ '0064:ff9b:0:0:0:0:9810:6414-0064:ff9b:0:0:0:0:9810:6420',
+ 'proto': 'udp',
+ 'srcip6':
+ '0064:ff9b:0:0:0:0:9810:2814-0064:ff9b:0:0:0:0:9810:2820',
+ 'dscp': 0, 'ttl': 32,
+ 'count': 1},
+ 'outer_l4': {'srcport': '2001',
+ 'dsrport': '1234',
+ 'count': 1}}},
+ TrafficProfile.UPLINK: {
+ 'ipv6': {'outer_l2': {'framesize':
+ {'64B': '100', '1518B': '0',
+ '128B': '0', '1400B': '0',
+ '256B': '0', '373b': '0',
+ '570B': '0'},
+ "srcmac": "00:00:00:00:00:01",
+ "dstmac": "00:00:00:00:00:02"},
+ 'outer_l3v4': {
+ 'dstip6':
+ '0064:ff9b:0:0:0:0:9810:2814-0064:ff9b:0:0:0:0:9810:2820',
+ 'proto': 'udp',
+ 'srcip6':
+ '0064:ff9b:0:0:0:0:9810:6414-0064:ff9b:0:0:0:0:9810:6420',
+ 'dscp': 0, 'ttl': 32,
+ 'count': 1},
+ 'outer_l4': {'dstport': '2001',
+ 'srcport': '1234',
+ 'count': 1}}},
+ 'schema': 'isb:traffic_profile:0.1'}
def test___init__(self):
TrafficProfile.params = self.PROFILE
self.assertRaises(SystemExit, trex_profile._get_start_end_ipv6,
"1.1.1.3", "1.1.1.1")
+ def test__dscp_range_action_partial_actual_count_zero(self):
+ traffic_profile = TrexProfile(TrafficProfile)
+ dscp_partial = traffic_profile._dscp_range_action_partial()
+
+ flow_vars_initial_length = len(traffic_profile.vm_flow_vars)
+ dscp_partial('1', '1', 'unneeded')
+ self.assertEqual(len(traffic_profile.vm_flow_vars), flow_vars_initial_length + 2)
+
+ def test__dscp_range_action_partial_count_greater_than_actual(self):
+ traffic_profile = TrexProfile(TrafficProfile)
+ dscp_partial = traffic_profile._dscp_range_action_partial()
+
+ flow_vars_initial_length = len(traffic_profile.vm_flow_vars)
+ dscp_partial('1', '10', '100')
+ self.assertEqual(len(traffic_profile.vm_flow_vars), flow_vars_initial_length + 2)
+
+ def test__udp_range_action_partial_actual_count_zero(self):
+ traffic_profile = TrexProfile(TrafficProfile)
+ traffic_profile.udp['field1'] = 'value1'
+ udp_partial = traffic_profile._udp_range_action_partial('field1')
+
+ flow_vars_initial_length = len(traffic_profile.vm_flow_vars)
+ udp_partial('1', '1', 'unneeded')
+ self.assertEqual(len(traffic_profile.vm_flow_vars), flow_vars_initial_length + 2)
+
+ def test__udp_range_action_partial_count_greater_than_actual(self):
+ traffic_profile = TrexProfile(TrafficProfile)
+ traffic_profile.udp['field1'] = 'value1'
+ udp_partial = traffic_profile._udp_range_action_partial('field1', 'not_used_count')
+
+ flow_vars_initial_length = len(traffic_profile.vm_flow_vars)
+ udp_partial('1', '10', '100')
+ self.assertEqual(len(traffic_profile.vm_flow_vars), flow_vars_initial_length + 2)
+
def test__general_single_action_partial(self):
trex_profile = TrexProfile(TrafficProfile)
@mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.call")
@mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.shutil")
def test_instantiate(self, call, shutil, mock_makedirs):
+ # pylint: disable=unused-argument
with mock.patch("yardstick.ssh.SSH") as ssh:
ssh_mock = mock.Mock(autospec=ssh.SSH)
ssh_mock.execute = \
'1C/1T',
'worker_threads': 1}}
}})
- with mock.patch('yardstick.benchmark.scenarios.networking.vnf_generic.open', create=True) as mock_open:
+ with mock.patch('yardstick.benchmark.scenarios.networking.vnf_generic.open',
+ create=True) as mock_open:
mock_open.return_value = mock.MagicMock()
ixload_traffic_gen.instantiate(scenario_cfg, {})
@mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.max")
@mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.len")
def test_run_traffic(self, call, shutil, main_open, min, max, len):
+ # pylint: disable=unused-argument
mock_traffic_profile = mock.Mock(autospec=TrafficProfile)
mock_traffic_profile.get_traffic_definition.return_value = "64"
mock_traffic_profile.params = self.TRAFFIC_PROFILE
@mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.max")
@mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.len")
def test_run_traffic_csv(self, call, shutil, main_open, min, max, len):
+ # pylint: disable=unused-argument
mock_traffic_profile = mock.Mock(autospec=TrafficProfile)
mock_traffic_profile.get_traffic_definition.return_value = "64"
mock_traffic_profile.params = self.TRAFFIC_PROFILE
self.assertIsNone(result)
@mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.call")
- def test_terminate(self, call):
+ def test_terminate(self, *args):
with mock.patch("yardstick.ssh.SSH") as ssh:
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
ssh_mock = mock.Mock(autospec=ssh.SSH)
@mock.patch("yardstick.ssh.SSH")
@mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.call")
def test_parse_csv_read(self, mock_call, mock_ssh):
+ # pylint: disable=unused-argument
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
kpi_data = {
'HTTP Total Throughput (Kbps)': 1,
@mock.patch("yardstick.ssh.SSH")
@mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.call")
def test_parse_csv_read_value_error(self, mock_call, mock_ssh):
+ # pylint: disable=unused-argument
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
http_reader = [{
'HTTP Total Throughput (Kbps)': 1,
@mock.patch("yardstick.ssh.SSH")
@mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.call")
def test_parse_csv_read_error(self, mock_call, mock_ssh):
+ # pylint: disable=unused-argument
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
http_reader = [{
'HTTP Total Throughput (Kbps)': 1,
self.assertEqual(config_create.downlink_ports, ['xe1'])
self.assertEqual(config_create.socket, 2)
+ def test_dpdk_port_to_link_id(self):
+ vnfd_helper = VnfdHelper(self.VNFD_0)
+ config_create = ConfigCreate(vnfd_helper, 2)
+ self.assertEqual(config_create.dpdk_port_to_link_id_map, {'xe0': 0, 'xe1': 1})
+
def test_vpe_initialize(self):
vnfd_helper = VnfdHelper(self.VNFD_0)
config_create = ConfigCreate(vnfd_helper, 2)
def test_build_config(self, ssh, *args):
mock_ssh(ssh)
vpe_approx_vnf = VpeApproxSetupEnvHelper(mock.MagicMock(),
- mock.MagicMock, mock.MagicMock)
+ mock.MagicMock(), mock.MagicMock())
vpe_approx_vnf.tc_file_name = get_file_abspath(TEST_FILE_YAML)
vpe_approx_vnf.generate_port_pairs = mock.Mock()
vpe_approx_vnf.vnf_cfg = {
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-from __future__ import print_function
-from __future__ import absolute_import
-
import logging
import os
from yardstick.benchmark.scenarios import base
import yardstick.common.openstack_utils as op_utils
+
LOG = logging.getLogger(__name__)
self.ext_net_id = os.getenv("EXTERNAL_NETWORK", "external")
self.neutron_client = op_utils.get_neutron_client()
+ self.shade_client = op_utils.get_shade_client()
self.setup_done = False
def setup(self):
self.setup_done = True
- def run(self, result):
+ def run(self, *args):
"""execute the test"""
if not self.setup_done:
self.setup()
- net_id = op_utils.get_network_id(self.neutron_client, self.ext_net_id)
+ net_id = op_utils.get_network_id(self.shade_client, self.ext_net_id)
floating_info = op_utils.create_floating_ip(self.neutron_client,
extnet_id=net_id)
- if floating_info:
- LOG.info("Creating floating ip successful!")
- else:
+
+ if not floating_info:
LOG.error("Creating floating ip failed!")
+ return
- try:
- keys = self.scenario_cfg.get('output', '').split()
- except KeyError:
- pass
- else:
- values = [floating_info["fip_id"], floating_info["fip_addr"]]
- return self._push_to_outputs(keys, values)
+ LOG.info("Creating floating ip successful!")
+ keys = self.scenario_cfg.get('output', '').split()
+ values = [floating_info["fip_id"], floating_info["fip_addr"]]
+ return self._push_to_outputs(keys, values)
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
from __future__ import absolute_import
-import os
-import errno
+import errno
+import os
from functools import reduce
import pkg_resources
try:
with open(conf_file) as f:
value = yaml_load(f)
- except IOError:
- pass
- except OSError as e:
- if e.errno != errno.EEXIST:
+ except (IOError, OSError) as e:
+ if e.errno != errno.ENOENT:
raise
else:
CONF.update(value)
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-from __future__ import absolute_import
-
import os
import time
import sys
from keystoneauth1 import loading
from keystoneauth1 import session
+import shade
+
from cinderclient import client as cinderclient
from novaclient import client as novaclient
from glanceclient import client as glanceclient
from neutronclient.neutron import client as neutronclient
+
log = logging.getLogger(__name__)
DEFAULT_HEAT_API_VERSION = '1'
return glanceclient.Client(get_glance_client_version(), session=sess)
+def get_shade_client():
+ return shade.openstack_cloud()
+
# *********************************************
# NOVA
# *********************************************
-def get_instances(nova_client): # pragma: no cover
+def get_instances(nova_client):
try:
return nova_client.servers.list(search_opts={'all_tenants': 1})
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.exception("Error [get_instances(nova_client)]")
def get_instance_status(nova_client, instance): # pragma: no cover
try:
return nova_client.servers.get(instance.id).status
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.exception("Error [get_instance_status(nova_client)]")
def get_instance_by_name(nova_client, instance_name): # pragma: no cover
try:
return nova_client.servers.find(name=instance_name)
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.exception("Error [get_instance_by_name(nova_client, '%s')]",
instance_name)
def get_aggregates(nova_client): # pragma: no cover
try:
return nova_client.aggregates.list()
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.exception("Error [get_aggregates(nova_client)]")
def get_availability_zones(nova_client): # pragma: no cover
try:
return nova_client.availability_zones.list()
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.exception("Error [get_availability_zones(nova_client)]")
def get_availability_zone_names(nova_client): # pragma: no cover
try:
return [az.zoneName for az in get_availability_zones(nova_client)]
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.exception("Error [get_availability_zone_names(nova_client)]")
def create_aggregate(nova_client, aggregate_name, av_zone): # pragma: no cover
try:
nova_client.aggregates.create(aggregate_name, av_zone)
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.exception("Error [create_aggregate(nova_client, %s, %s)]",
aggregate_name, av_zone)
return False
try:
aggregates = get_aggregates(nova_client)
_id = next((ag.id for ag in aggregates if ag.name == aggregate_name))
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.exception("Error [get_aggregate_id(nova_client, %s)]",
aggregate_name)
else:
try:
aggregate_id = get_aggregate_id(nova_client, aggregate_name)
nova_client.aggregates.add_host(aggregate_id, compute_host)
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.exception("Error [add_host_to_aggregate(nova_client, %s, %s)]",
aggregate_name, compute_host)
return False
try:
create_aggregate(nova_client, aggregate_name, av_zone)
add_host_to_aggregate(nova_client, aggregate_name, compute_host)
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.exception("Error [create_aggregate_with_host("
"nova_client, %s, %s, %s)]",
aggregate_name, av_zone, compute_host)
return True
-def create_keypair(nova_client, name, key_path=None): # pragma: no cover
+def create_keypair(name, key_path=None): # pragma: no cover
try:
with open(key_path) as fpubkey:
keypair = get_nova_client().keypairs.create(name=name, public_key=fpubkey.read())
return keypair
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.exception("Error [create_keypair(nova_client)]")
def create_instance(json_body): # pragma: no cover
try:
return get_nova_client().servers.create(**json_body)
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.exception("Error create instance failed")
return None
nova_client = get_nova_client()
instance = create_instance(json_body)
count = VM_BOOT_TIMEOUT / SLEEP
- for n in range(count, -1, -1):
+ for _ in range(count, -1, -1):
status = get_instance_status(nova_client, instance)
if status.lower() == "active":
return instance
def attach_server_volume(server_id, volume_id, device=None): # pragma: no cover
try:
get_nova_client().volumes.create_server_volume(server_id, volume_id, device)
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.exception("Error [attach_server_volume(nova_client, '%s', '%s')]",
server_id, volume_id)
return False
def delete_instance(nova_client, instance_id): # pragma: no cover
try:
nova_client.servers.force_delete(instance_id)
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.exception("Error [delete_instance(nova_client, '%s')]",
instance_id)
return False
try:
aggregate_id = get_aggregate_id(nova_client, aggregate_name)
nova_client.aggregates.remove_host(aggregate_id, compute_host)
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.exception("Error remove_host_from_aggregate(nova_client, %s, %s)",
aggregate_name, compute_host)
return False
try:
remove_hosts_from_aggregate(nova_client, aggregate_name)
nova_client.aggregates.delete(aggregate_name)
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.exception("Error [delete_aggregate(nova_client, %s)]",
aggregate_name)
return False
def create_flavor(name, ram, vcpus, disk, **kwargs): # pragma: no cover
try:
return get_nova_client().flavors.create(name, ram, vcpus, disk, **kwargs)
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.exception("Error [create_flavor(nova_client, %s, %s, %s, %s, %s)]",
name, ram, disk, vcpus, kwargs['is_public'])
return None
def check_status(status, name, iterations, interval): # pragma: no cover
- for i in range(iterations):
+ for _ in range(iterations):
try:
server = get_server_by_name(name)
except IndexError:
def delete_flavor(flavor_id): # pragma: no cover
try:
get_nova_client().flavors.delete(flavor_id)
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.exception("Error [delete_flavor(nova_client, %s)]", flavor_id)
return False
else:
try:
nova_client.keypairs.delete(key=key)
return True
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.exception("Error [delete_keypair(nova_client)]")
return False
# *********************************************
# NEUTRON
# *********************************************
-def get_network_id(neutron_client, network_name): # pragma: no cover
- networks = neutron_client.list_networks()['networks']
- return next((n['id'] for n in networks if n['name'] == network_name), None)
-
-
-def get_port_id_by_ip(neutron_client, ip_address): # pragma: no cover
- ports = neutron_client.list_ports()['ports']
- return next((i['id'] for i in ports for j in i.get(
- 'fixed_ips') if j['ip_address'] == ip_address), None)
+def get_network_id(shade_client, network_name):
+ networks = shade_client.list_networks({'name': network_name})
+ if networks:
+ return networks[0]['id']
def create_neutron_net(neutron_client, json_body): # pragma: no cover
try:
network = neutron_client.create_network(body=json_body)
return network['network']['id']
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.error("Error [create_neutron_net(neutron_client)]")
raise Exception("operation error")
- return None
def delete_neutron_net(neutron_client, network_id): # pragma: no cover
try:
neutron_client.delete_network(network_id)
return True
- except Exception:
- log.error("Error [delete_neutron_net(neutron_client, '%s')]" % network_id)
+ except Exception: # pylint: disable=broad-except
+ log.error("Error [delete_neutron_net(neutron_client, '%s')]",
+ network_id)
return False
try:
subnet = neutron_client.create_subnet(body=json_body)
return subnet['subnets'][0]['id']
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.error("Error [create_neutron_subnet")
raise Exception("operation error")
- return None
def create_neutron_router(neutron_client, json_body): # pragma: no cover
try:
router = neutron_client.create_router(json_body)
return router['router']['id']
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.error("Error [create_neutron_router(neutron_client)]")
raise Exception("operation error")
- return None
def delete_neutron_router(neutron_client, router_id): # pragma: no cover
try:
neutron_client.delete_router(router=router_id)
return True
- except Exception:
- log.error("Error [delete_neutron_router(neutron_client, '%s')]" % router_id)
+ except Exception: # pylint: disable=broad-except
+ log.error("Error [delete_neutron_router(neutron_client, '%s')]",
+ router_id)
return False
try:
neutron_client.remove_gateway_router(router_id)
return True
- except Exception:
- log.error("Error [remove_gateway_router(neutron_client, '%s')]" % router_id)
+ except Exception: # pylint: disable=broad-except
+ log.error("Error [remove_gateway_router(neutron_client, '%s')]",
+ router_id)
return False
neutron_client.remove_interface_router(router=router_id,
body=json_body)
return True
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.error("Error [remove_interface_router(neutron_client, '%s', "
- "'%s')]" % (router_id, subnet_id))
+ "'%s')]", router_id, subnet_id)
return False
ip_json = neutron_client.create_floatingip({'floatingip': props})
fip_addr = ip_json['floatingip']['floating_ip_address']
fip_id = ip_json['floatingip']['id']
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.error("Error [create_floating_ip(neutron_client)]")
return None
return {'fip_addr': fip_addr, 'fip_id': fip_id}
try:
nova_client.floating_ips.delete(floatingip_id)
return True
- except Exception:
- log.error("Error [delete_floating_ip(nova_client, '%s')]" % floatingip_id)
+ except Exception: # pylint: disable=broad-except
+ log.error("Error [delete_floating_ip(nova_client, '%s')]",
+ floatingip_id)
return False
security_groups = neutron_client.list_security_groups()[
'security_groups']
return security_groups
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.error("Error [get_security_groups(neutron_client)]")
return None
try:
secgroup = neutron_client.create_security_group(json_body)
return secgroup['security_group']
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.error("Error [create_security_group(neutron_client, '%s', "
- "'%s')]" % (sg_name, sg_description))
+ "'%s')]", sg_name, sg_description)
return None
else:
log.error("Bad security group format."
"One of the port range is not properly set:"
- "range min: {},"
- "range max: {}".format(port_range_min,
- port_range_max))
+ "range min: %s, range max: %s", port_range_min,
+ port_range_max)
return False
# Create security group using neutron client
try:
neutron_client.create_security_group_rule(json_body)
return True
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.exception("Impossible to create_security_group_rule,"
"security group rule probably already exists")
return False
sg_name, sg_description): # pragma: no cover
sg_id = get_security_group_id(neutron_client, sg_name)
if sg_id != '':
- log.info("Using existing security group '%s'..." % sg_name)
+ log.info("Using existing security group '%s'...", sg_name)
else:
- log.info("Creating security group '%s'..." % sg_name)
+ log.info("Creating security group '%s'...", sg_name)
SECGROUP = create_security_group(neutron_client,
sg_name,
sg_description)
sg_id = SECGROUP['id']
- log.debug("Security group '%s' with ID=%s created successfully."
- % (SECGROUP['name'], sg_id))
+ log.debug("Security group '%s' with ID=%s created successfully.",
+ SECGROUP['name'], sg_id)
- log.debug("Adding ICMP rules in security group '%s'..."
- % sg_name)
+ log.debug("Adding ICMP rules in security group '%s'...", sg_name)
if not create_secgroup_rule(neutron_client, sg_id,
'ingress', 'icmp'):
log.error("Failed to create the security group rule...")
return None
- log.debug("Adding SSH rules in security group '%s'..."
- % sg_name)
+ log.debug("Adding SSH rules in security group '%s'...", sg_name)
if not create_secgroup_rule(
neutron_client, sg_id, 'ingress', 'tcp', '22', '22'):
log.error("Failed to create the security group rule...")
container_format, min_disk, min_ram, protected, tag,
public, **kwargs): # pragma: no cover
if not os.path.isfile(file_path):
- log.error("Error: file %s does not exist." % file_path)
+ log.error("Error: file %s does not exist.", file_path)
return None
try:
image_id = get_image_id(glance_client, image_name)
if image_id is not None:
- log.info("Image %s already exists." % image_name)
+ log.info("Image %s already exists.", image_name)
else:
log.info("Creating image '%s' from '%s'...", image_name, file_path)
with open(file_path) as image_data:
glance_client.images.upload(image_id, image_data)
return image_id
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.error("Error [create_glance_image(glance_client, '%s', '%s', '%s')]",
image_name, file_path, public)
return None
try:
glance_client.images.delete(image_id)
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.exception("Error [delete_flavor(glance_client, %s)]", image_id)
return False
else:
volume = cinder_client.volumes.create(name=volume_name,
size=volume_size)
return volume
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.exception("Error [create_volume(cinder_client, %s)]",
(volume_name, volume_size))
return None
if forced:
try:
cinder_client.volumes.detach(volume_id)
- except:
+ except Exception: # pylint: disable=broad-except
log.error(sys.exc_info()[0])
cinder_client.volumes.force_delete(volume_id)
else:
break
cinder_client.volumes.delete(volume_id)
return True
- except Exception:
- log.exception("Error [delete_volume(cinder_client, '%s')]" % volume_id)
+ except Exception: # pylint: disable=broad-except
+ log.exception("Error [delete_volume(cinder_client, '%s')]", volume_id)
return False
try:
get_nova_client().volumes.delete_server_volume(server_id, volume_id)
return True
- except Exception:
+ except Exception: # pylint: disable=broad-except
log.exception("Error [detach_server_volume(nova_client, '%s', '%s')]",
server_id, volume_id)
return False
def _ethernet_range_action_partial(self, direction, _):
def partial(min_value, max_value, count):
+ # pylint: disable=unused-argument
stl_vm_flow_var = STLVmFlowVar(name="mac_{}".format(direction),
min_value=1,
max_value=30,
return partial
def _ip_range_action_partial(self, direction, count=1):
+ # pylint: disable=unused-argument
def partial(min_value, max_value, count):
ip1 = int(ipaddress.IPv4Address(min_value))
ip2 = int(ipaddress.IPv4Address(max_value))
def _ip6_range_action_partial(self, direction, _):
def partial(min_value, max_value, count):
+ # pylint: disable=unused-argument
min_value, max_value = self._get_start_end_ipv6(min_value, max_value)
stl_vm_flow_var = STLVmFlowVar(name="ip6_{}".format(direction),
min_value=min_value,
def _dscp_range_action_partial(self, *_):
def partial(min_value, max_value, count):
+ # pylint: disable=unused-argument
stl_vm_flow_var = STLVmFlowVar(name="dscp",
min_value=min_value,
max_value=max_value,
stl_vm_wr_flow_var = STLVmWrFlowVar(fv_name='dscp',
pkt_offset='IP.tos')
self.vm_flow_vars.append(stl_vm_wr_flow_var)
+ return partial
def _udp_range_action_partial(self, field, count=1):
+ # pylint: disable=unused-argument
def partial(min_value, max_value, count):
actual_count = int(max_value) - int(min_value)
if not actual_count:
VPE_PIPELINE_COMMAND = """sudo {tool_path} -p {port_mask_hex} -f {cfg_file} -s {script}"""
VPE_COLLECT_KPI = """\
-Pkts in:\s(\d+)\r\n\
-\tPkts dropped by AH:\s(\d+)\r\n\
-\tPkts dropped by other:\s(\d+)\
+Pkts in:\\s(\\d+)\r\n\
+\tPkts dropped by AH:\\s(\\d+)\r\n\
+\tPkts dropped by other:\\s(\\d+)\
"""
self.downlink_ports = self.vnfd_helper.port_pairs.downlink_ports
self.pipeline_per_port = 9
self.socket = socket
+ self._dpdk_port_to_link_id_map = None
+
+ @property
+ def dpdk_port_to_link_id_map(self):
+ # we need interface name -> DPDK port num (PMD ID) -> LINK ID
+ # LINK ID -> PMD ID is governed by the port mask
+ # LINK instances are created implicitly based on the PORT_MASK application startup
+ # argument. LINK0 is the first port enabled in the PORT_MASK, port 1 is the next one,
+ # etc. The LINK ID is different than the DPDK PMD-level NIC port ID, which is the actual
+ # position in the bitmask mentioned above. For example, if bit 5 is the first bit set
+ # in the bitmask, then LINK0 is having the PMD ID of 5. This mechanism creates a
+ # contiguous LINK ID space and isolates the configuration file against changes in the
+ # board PCIe slots where NICs are plugged in.
+ if self._dpdk_port_to_link_id_map is None:
+ self._dpdk_port_to_link_id_map = {}
+ for link_id, port_name in enumerate(sorted(self.vnfd_helper.port_pairs.all_ports,
+ key=self.vnfd_helper.port_num)):
+ self._dpdk_port_to_link_id_map[port_name] = link_id
+ return self._dpdk_port_to_link_id_map
def vpe_initialize(self, config):
config.add_section('EAL')
def vpe_rxq(self, config):
for port in self.downlink_ports:
- new_section = 'RXQ{0}.0'.format(self.vnfd_helper.port_num(port))
+ new_section = 'RXQ{0}.0'.format(self.dpdk_port_to_link_id_map[port])
config.add_section(new_section)
config.set(new_section, 'mempool', 'MEMPOOL1')
for k, v in parser.items(pipeline):
if k == "pktq_in":
if "RXQ" in v:
- port = self.vnfd_helper.port_num(self.uplink_ports[index])
+ port = self.dpdk_port_to_link_id_map[self.uplink_ports[index]]
value = "RXQ{0}.0".format(port)
else:
value = self.get_sink_swq(parser, pipeline, k, index)
elif k == "pktq_out":
if "TXQ" in v:
- port = self.vnfd_helper.port_num(self.downlink_ports[index])
+ port = self.dpdk_port_to_link_id_map[self.downlink_ports[index]]
value = "TXQ{0}.0".format(port)
else:
self.sw_q += 1
for k, v in parser.items(pipeline):
if k == "pktq_in":
- port = self.vnfd_helper.port_num(self.downlink_ports[index])
+ port = self.dpdk_port_to_link_id_map[self.downlink_ports[index]]
if "RXQ" not in v:
value = self.get_sink_swq(parser, pipeline, k, index)
elif "TM" in v:
parser.set(pipeline, k, value)
if k == "pktq_out":
- port = self.vnfd_helper.port_num(self.uplink_ports[index])
+ port = self.dpdk_port_to_link_id_map[self.uplink_ports[index]]
if "TXQ" not in v:
self.sw_q += 1
value = self.get_sink_swq(parser, pipeline, k, index)
config = self.vpe_initialize(config)
config = self.vpe_rxq(config)
config.write(cfg_file)
- for index in range(0, len(self.uplink_ports)):
+ for index, _ in enumerate(self.uplink_ports):
config = self.vpe_upstream(vnf_cfg, index)
config.write(cfg_file)
config = self.vpe_downstream(vnf_cfg, index)
return rules.get_string()
- def generate_tm_cfg(self, vnf_cfg, index=0):
+ def generate_tm_cfg(self, vnf_cfg):
vnf_cfg = os.path.join(vnf_cfg, "full_tm_profile_10G.cfg")
if os.path.exists(vnf_cfg):
return open(vnf_cfg).read()
def create(self, template, heat_parameters, wait, timeout):
"""Creates an OpenStack stack from a template"""
with tempfile.NamedTemporaryFile('wb', delete=False) as template_file:
- template_file.write(jsonutils.dumps(template))
+ template_file.write(jsonutils.dump_as_bytes(template))
template_file.close()
self._stack = self._cloud.create_stack(
self.name, template_file=template_file.name, wait=wait,
import mock
from xml.etree import ElementTree
-
-from yardstick.benchmark.contexts.standalone.model import Libvirt
from yardstick.benchmark.contexts.standalone import model
from yardstick.network_services import utils
def _cleanup(self):
self._mock_write_xml.stop()
+ # TODO: Remove mocking of yardstick.ssh.SSH (here and elsewhere)
+ # In this case, we are mocking a param to be passed into other methods
+ # It can be a generic Mock() with return values set for the right methods
def test_check_if_vm_exists_and_delete(self):
with mock.patch("yardstick.ssh.SSH") as ssh:
ssh_mock = mock.Mock(autospec=ssh.SSH)
status = model.Libvirt.build_vm_xml(ssh_mock, {}, cfg_file, 'vm_0', 0)
self.assertEqual(status[0], result[0])
+ # TODO: Edit this test to test state instead of output
+ # update_interrupts_hugepages_perf does not return anything
def test_update_interrupts_hugepages_perf(self):
with mock.patch("yardstick.ssh.SSH") as ssh:
ssh_mock = mock.Mock(autospec=ssh.SSH)
# None, this check is trivial.
#status = Libvirt.update_interrupts_hugepages_perf(ssh_mock)
#self.assertIsNone(status)
- Libvirt.update_interrupts_hugepages_perf(ssh_mock)
+ model.Libvirt.update_interrupts_hugepages_perf(ssh_mock)
- @mock.patch("yardstick.benchmark.contexts.standalone.model.CpuSysCores")
+ @mock.patch.object(model, 'CpuSysCores')
@mock.patch.object(model.Libvirt, 'update_interrupts_hugepages_perf')
def test_pin_vcpu_for_perf(self, *args):
# NOTE(ralonsoh): test mocked methods/variables.
ssh_mock.execute = \
mock.Mock(return_value=(0, "a", ""))
ssh.return_value = ssh_mock
- status = Libvirt.pin_vcpu_for_perf(ssh_mock, 4)
+ status = model.Libvirt.pin_vcpu_for_perf(ssh_mock, 4)
self.assertIsNotNone(status)
class StandaloneContextHelperTestCase(unittest.TestCase):
NETWORKS = {
'mgmt': {'cidr': '152.16.100.10/24'},
'private_0': {
- 'phy_port': "0000:05:00.0",
- 'vpci': "0000:00:07.0",
- 'cidr': '152.16.100.10/24',
- 'gateway_ip': '152.16.100.20'},
+ 'phy_port': "0000:05:00.0",
+ 'vpci': "0000:00:07.0",
+ 'cidr': '152.16.100.10/24',
+ 'gateway_ip': '152.16.100.20'},
'public_0': {
- 'phy_port': "0000:05:00.1",
- 'vpci': "0000:00:08.0",
- 'cidr': '152.16.40.10/24',
- 'gateway_ip': '152.16.100.20'}
+ 'phy_port': "0000:05:00.1",
+ 'vpci': "0000:00:08.0",
+ 'cidr': '152.16.40.10/24',
+ 'gateway_ip': '152.16.100.20'}
}
def setUp(self):
with mock.patch("yardstick.ssh.SSH") as ssh:
ssh_mock = mock.Mock(autospec=ssh.SSH)
ssh_mock.execute = \
- mock.Mock(return_value=(1, pattern, ""))
+ mock.Mock(return_value=(1, pattern, ""))
ssh.return_value = ssh_mock
# NOTE(ralonsoh): this test doesn't cover function execution. This test
# should also check mocked function calls.
with mock.patch("yardstick.ssh.SSH") as ssh:
ssh_mock = mock.Mock(autospec=ssh.SSH)
ssh_mock.execute = \
- mock.Mock(return_value=(1, "", ""))
+ mock.Mock(return_value=(1, "", ""))
ssh.return_value = ssh_mock
# NOTE(ralonsoh): this test doesn't cover function execution. This test
# should also check mocked function calls.
NETWORKS = {
'mgmt': {'cidr': '152.16.100.10/24'},
'private_0': {
- 'phy_port': "0000:05:00.0",
- 'vpci': "0000:00:07.0",
- 'driver': 'i40e',
- 'mac': '',
- 'cidr': '152.16.100.10/24',
- 'gateway_ip': '152.16.100.20'},
+ 'phy_port': "0000:05:00.0",
+ 'vpci': "0000:00:07.0",
+ 'driver': 'i40e',
+ 'mac': '',
+ 'cidr': '152.16.100.10/24',
+ 'gateway_ip': '152.16.100.20'},
'public_0': {
- 'phy_port': "0000:05:00.1",
- 'vpci': "0000:00:08.0",
- 'driver': 'i40e',
- 'mac': '',
- 'cidr': '152.16.40.10/24',
- 'gateway_ip': '152.16.100.20'}
+ 'phy_port': "0000:05:00.1",
+ 'vpci': "0000:00:08.0",
+ 'driver': 'i40e',
+ 'mac': '',
+ 'cidr': '152.16.40.10/24',
+ 'gateway_ip': '152.16.100.20'}
}
def setUp(self):
{}, self.NETWORKS, '1.1.1.1/24', 'vm_0', vnf, '00:00:00:00:00:01')
self.assertIsNotNone(status)
+
class OvsDeployTestCase(unittest.TestCase):
NETWORKS = {
'mgmt': {'cidr': '152.16.100.10/24'},
'private_0': {
- 'phy_port': "0000:05:00.0",
- 'vpci': "0000:00:07.0",
- 'driver': 'i40e',
- 'mac': '',
- 'cidr': '152.16.100.10/24',
- 'gateway_ip': '152.16.100.20'},
+ 'phy_port': "0000:05:00.0",
+ 'vpci': "0000:00:07.0",
+ 'driver': 'i40e',
+ 'mac': '',
+ 'cidr': '152.16.100.10/24',
+ 'gateway_ip': '152.16.100.20'},
'public_0': {
- 'phy_port': "0000:05:00.1",
- 'vpci': "0000:00:08.0",
- 'driver': 'i40e',
- 'mac': '',
- 'cidr': '152.16.40.10/24',
- 'gateway_ip': '152.16.100.20'}
+ 'phy_port': "0000:05:00.1",
+ 'vpci': "0000:00:08.0",
+ 'driver': 'i40e',
+ 'mac': '',
+ 'cidr': '152.16.40.10/24',
+ 'gateway_ip': '152.16.100.20'}
}
+
@mock.patch('yardstick.ssh.SSH')
def setUp(self, mock_ssh):
self.ovs_deploy = model.OvsDeploy(mock_ssh, '/tmp/dpdk-devbind.py', {})
# See the License for the specific language governing permissions and
# limitations under the License.
-# Unittest for yardstick.benchmark.contexts.standalone.standaloneovs
-
-from __future__ import absolute_import
import os
-import unittest
+
import mock
+import unittest
from yardstick.benchmark.contexts.standalone import ovs_dpdk
NETWORKS = {
'mgmt': {'cidr': '152.16.100.10/24'},
'private_0': {
- 'phy_port': "0000:05:00.0",
- 'vpci': "0000:00:07.0",
- 'cidr': '152.16.100.10/24',
- 'interface': 'if0',
- 'mac': "00:00:00:00:00:01",
- 'vf_pci': {'vf_pci': 0},
- 'gateway_ip': '152.16.100.20'},
+ 'phy_port': "0000:05:00.0",
+ 'vpci': "0000:00:07.0",
+ 'cidr': '152.16.100.10/24',
+ 'interface': 'if0',
+ 'mac': "00:00:00:00:00:01",
+ 'vf_pci': {'vf_pci': 0},
+ 'gateway_ip': '152.16.100.20'},
'public_0': {
- 'phy_port': "0000:05:00.1",
- 'vpci': "0000:00:08.0",
- 'cidr': '152.16.40.10/24',
- 'interface': 'if0',
- 'vf_pci': {'vf_pci': 0},
- 'mac': "00:00:00:00:00:01",
- 'gateway_ip': '152.16.100.20'},
+ 'phy_port': "0000:05:00.1",
+ 'vpci': "0000:00:08.0",
+ 'cidr': '152.16.40.10/24',
+ 'interface': 'if0',
+ 'vf_pci': {'vf_pci': 0},
+ 'mac': "00:00:00:00:00:01",
+ 'gateway_ip': '152.16.100.20'},
}
def setUp(self):
self.ovs_dpdk = ovs_dpdk.OvsDpdkContext()
- @mock.patch('yardstick.benchmark.contexts.standalone.model.StandaloneContextHelper')
@mock.patch('yardstick.benchmark.contexts.standalone.model.Server')
+ @mock.patch('yardstick.benchmark.contexts.standalone.model.StandaloneContextHelper')
def test___init__(self, mock_helper, mock_server):
self.ovs_dpdk.helper = mock_helper
self.ovs_dpdk.vnf_node = mock_server
self.assertTrue(self.ovs_dpdk.first_run)
def test_init(self):
- self.ovs_dpdk.helper.parse_pod_file = mock.Mock(return_value=[{}, {}, {}])
+ self.ovs_dpdk.helper.parse_pod_file = mock.Mock(
+ return_value=[{}, {}, {}])
self.assertIsNone(self.ovs_dpdk.init(self.ATTRS))
def test_setup_ovs(self):
self.ovs_dpdk.wait_for_vswitchd = 0
self.assertIsNone(self.ovs_dpdk.setup_ovs_bridge_add_flows())
- def test_cleanup_ovs_dpdk_env(self):
- with mock.patch("yardstick.ssh.SSH") as ssh:
- ssh_mock = mock.Mock(autospec=ssh.SSH)
- ssh_mock.execute = \
- mock.Mock(return_value=(0, "a", ""))
- ssh.return_value = ssh_mock
- self.ovs_dpdk.connection = ssh_mock
- self.ovs_dpdk.networks = self.NETWORKS
- self.ovs_dpdk.ovs_properties = {
- 'version': {'ovs': '2.7.0'}
- }
- self.ovs_dpdk.wait_for_vswitchd = 0
- self.assertIsNone(self.ovs_dpdk.cleanup_ovs_dpdk_env())
+ @mock.patch("yardstick.ssh.SSH")
+ def test_cleanup_ovs_dpdk_env(self, mock_ssh):
+ mock_ssh.execute.return_value = 0, "a", ""
+ self.ovs_dpdk.connection = mock_ssh
+ self.ovs_dpdk.networks = self.NETWORKS
+ self.ovs_dpdk.ovs_properties = {
+ 'version': {'ovs': '2.7.0'}
+ }
+ self.ovs_dpdk.wait_for_vswitchd = 0
+ self.assertIsNone(self.ovs_dpdk.cleanup_ovs_dpdk_env())
@mock.patch('yardstick.benchmark.contexts.standalone.model.OvsDeploy')
def test_check_ovs_dpdk_env(self, mock_ovs):
self.ovs_dpdk.connection = ssh_mock
self.ovs_dpdk.networks = self.NETWORKS
self.ovs_dpdk.ovs_properties = {
- 'version': {'ovs': '2.7.0', 'dpdk': '16.11.1'}
+ 'version': {'ovs': '2.7.0', 'dpdk': '16.11.1'}
}
self.ovs_dpdk.wait_for_vswitchd = 0
self.ovs_dpdk.cleanup_ovs_dpdk_env = mock.Mock()
self.ovs_dpdk.wait_for_vswitchd = 0
self.cleanup_ovs_dpdk_env = mock.Mock()
mock_ovs.deploy = mock.Mock()
+ # NOTE(elfoley): Check for a specific Exception
self.assertRaises(Exception, self.ovs_dpdk.check_ovs_dpdk_env)
@mock.patch('yardstick.ssh.SSH')
- def test_deploy(self, ssh_mock):
- ssh_mock.execute.return_value = (0, "a", "")
+ def test_deploy(self, mock_ssh):
+ mock_ssh.execute.return_value = 0, "a", ""
self.ovs_dpdk.vm_deploy = False
self.assertIsNone(self.ovs_dpdk.deploy())
@mock.patch('yardstick.benchmark.contexts.standalone.model.Libvirt')
@mock.patch('yardstick.ssh.SSH')
- def test_undeploy(self, ssh_mock, _):
- ssh_mock.execute.return_value = (0, "a", "")
+ def test_undeploy(self, mock_ssh, *args):
+ mock_ssh.execute.return_value = 0, "a", ""
self.ovs_dpdk.vm_deploy = False
self.assertIsNone(self.ovs_dpdk.undeploy())
self.ovs_dpdk.vm_deploy = True
+ self.ovs_dpdk.connection = mock_ssh
self.ovs_dpdk.vm_names = ['vm_0', 'vm_1']
- self.ovs_dpdk.connection = ssh_mock
self.ovs_dpdk.drivers = ['vm_0', 'vm_1']
self.ovs_dpdk.cleanup_ovs_dpdk_env = mock.Mock()
self.ovs_dpdk.networks = self.NETWORKS
'file': self._get_file_abspath(self.NODES_ovs_dpdk_SAMPLE)
}
- self.ovs_dpdk.helper.parse_pod_file = mock.Mock(return_value=[{}, {}, {}])
+ self.ovs_dpdk.helper.parse_pod_file = mock.Mock(
+ return_value=[{}, {}, {}])
self.ovs_dpdk.init(attrs)
attr_name = 'bar.foo'
self.assertEqual(result['user'], 'root')
self.assertEqual(result['key_filename'], '/root/.yardstick_key')
+ # TODO(elfoley): Split this test for networks that exist and networks that
+ # don't
def test__get_network(self):
network1 = {
'name': 'net_1',
'b': network2,
}
+ # Tests for networks that do not exist
attr_name = {}
self.assertIsNone(self.ovs_dpdk._get_network(attr_name))
self.assertIsNone(self.ovs_dpdk._get_network(None))
+ # TODO(elfoley): Split this test
attr_name = 'vld777'
self.assertIsNone(self.ovs_dpdk._get_network(attr_name))
+ # Tests for networks that exist
attr_name = {'vld_id': 'vld999'}
expected = {
"name": 'net_2',
self.ovs_dpdk.get_vf_datas = mock.Mock(return_value="")
self.assertIsNone(self.ovs_dpdk.configure_nics_for_ovs_dpdk())
- @mock.patch('yardstick.benchmark.contexts.standalone.model.Libvirt.add_ovs_interface')
- def test__enable_interfaces(self, _):
+ @mock.patch('yardstick.benchmark.contexts.standalone.ovs_dpdk.Libvirt')
+ def test__enable_interfaces(self, *args):
with mock.patch("yardstick.ssh.SSH") as ssh:
ssh_mock = mock.Mock(autospec=ssh.SSH)
ssh_mock.execute = \
self.ovs_dpdk.drivers = []
self.ovs_dpdk.networks = self.NETWORKS
self.ovs_dpdk.get_vf_datas = mock.Mock(return_value="")
- self.assertIsNone(self.ovs_dpdk._enable_interfaces(0, ["private_0"], 'test'))
+ self.assertIsNone(self.ovs_dpdk._enable_interfaces(
+ 0, ["private_0"], 'test'))
- @mock.patch('yardstick.benchmark.contexts.standalone.ovs_dpdk.Libvirt')
@mock.patch('yardstick.benchmark.contexts.standalone.model.Server')
- def test_setup_ovs_dpdk_context(self, _, mock_libvirt):
+ @mock.patch('yardstick.benchmark.contexts.standalone.ovs_dpdk.Libvirt')
+ def test_setup_ovs_dpdk_context(self, mock_libvirt, *args):
with mock.patch("yardstick.ssh.SSH") as ssh:
ssh_mock = mock.Mock(autospec=ssh.SSH)
ssh_mock.execute = \
self.ovs_dpdk.host_mgmt = {}
self.ovs_dpdk.flavor = {}
self.ovs_dpdk.configure_nics_for_ovs_dpdk = mock.Mock(return_value="")
- mock_libvirt.check_if_vm_exists_and_delete = mock.Mock(return_value="")
- mock_libvirt.build_vm_xml = mock.Mock(return_value=[6, "00:00:00:00:00:01"])
+ mock_libvirt.build_vm_xml.return_value = [6, "00:00:00:00:00:01"]
self.ovs_dpdk._enable_interfaces = mock.Mock(return_value="")
- mock_libvirt.virsh_create_vm = mock.Mock(return_value="")
- mock_libvirt.pin_vcpu_for_perf = mock.Mock(return_value="")
- self.ovs_dpdk.vnf_node.generate_vnf_instance = mock.Mock(return_value={})
+ mock_libvirt.virsh_create_vm.return_value = ""
+ mock_libvirt.pin_vcpu_for_perf.return_value = ""
+ self.ovs_dpdk.vnf_node.generate_vnf_instance = mock.Mock(
+ return_value={})
+
self.assertIsNotNone(self.ovs_dpdk.setup_ovs_dpdk_context())
# See the License for the specific language governing permissions and
# limitations under the License.
-# Unittest for yardstick.benchmark.contexts.standalone.standalonesriov
-
-from __future__ import absolute_import
import os
-import unittest
+
import mock
+import unittest
from yardstick import ssh
from yardstick.benchmark.contexts.standalone import sriov
NETWORKS = {
'mgmt': {'cidr': '152.16.100.10/24'},
'private_0': {
- 'phy_port': "0000:05:00.0",
- 'vpci': "0000:00:07.0",
- 'cidr': '152.16.100.10/24',
- 'interface': 'if0',
- 'mac': "00:00:00:00:00:01",
- 'vf_pci': {'vf_pci': 0},
- 'gateway_ip': '152.16.100.20'},
+ 'phy_port': "0000:05:00.0",
+ 'vpci': "0000:00:07.0",
+ 'cidr': '152.16.100.10/24',
+ 'interface': 'if0',
+ 'mac': "00:00:00:00:00:01",
+ 'vf_pci': {'vf_pci': 0},
+ 'gateway_ip': '152.16.100.20'},
'public_0': {
- 'phy_port': "0000:05:00.1",
- 'vpci': "0000:00:08.0",
- 'cidr': '152.16.40.10/24',
- 'interface': 'if0',
- 'vf_pci': {'vf_pci': 0},
- 'mac': "00:00:00:00:00:01",
- 'gateway_ip': '152.16.100.20'},
+ 'phy_port': "0000:05:00.1",
+ 'vpci': "0000:00:08.0",
+ 'cidr': '152.16.40.10/24',
+ 'interface': 'if0',
+ 'vf_pci': {'vf_pci': 0},
+ 'mac': "00:00:00:00:00:01",
+ 'gateway_ip': '152.16.100.20'},
}
def setUp(self):
self.sriov = sriov.SriovContext()
- @mock.patch('yardstick.benchmark.contexts.standalone.model.StandaloneContextHelper')
@mock.patch('yardstick.benchmark.contexts.standalone.sriov.Libvirt')
+ @mock.patch('yardstick.benchmark.contexts.standalone.model.StandaloneContextHelper')
@mock.patch('yardstick.benchmark.contexts.standalone.model.Server')
- def test___init__(self, mock_helper, mock_libvirt, mock_server):
- # pylint: disable=unused-argument
+ def test___init__(self, mock_helper, mock_server, *args):
# NOTE(ralonsoh): this test doesn't cover function execution.
- # The pylint exception should be removed.
self.sriov.helper = mock_helper
self.sriov.vnf_node = mock_server
self.assertIsNone(self.sriov.file_path)
self.assertIsNone(self.sriov.init(self.ATTRS))
@mock.patch.object(ssh, 'SSH', return_value=(0, "a", ""))
- def test_deploy(self, mock_ssh):
- # pylint: disable=unused-argument
+ def test_deploy(self, *args):
# NOTE(ralonsoh): this test doesn't cover function execution.
- # The pylint exception should be removed.
self.sriov.vm_deploy = False
self.assertIsNone(self.sriov.deploy())
self.sriov.wait_for_vnfs_to_start = mock.Mock(return_value={})
self.assertIsNone(self.sriov.deploy())
- @mock.patch.object(ssh, 'SSH', return_value=(0, "a", ""))
@mock.patch('yardstick.benchmark.contexts.standalone.sriov.Libvirt')
- def test_undeploy(self, mock_libvirt, mock_ssh):
- # pylint: disable=unused-argument
- # NOTE(ralonsoh): the pylint exception should be removed.
+ @mock.patch.object(ssh, 'SSH', return_value=(0, "a", ""))
+ def test_undeploy(self, mock_ssh, *args):
self.sriov.vm_deploy = False
self.assertIsNone(self.sriov.undeploy())
self.assertEqual(result['user'], 'root')
self.assertEqual(result['key_filename'], '/root/.yardstick_key')
+ # TODO(elfoley): Split this test
+ # There are at least two sets of inputs/outputs
def test__get_network(self):
network1 = {
'name': 'net_1',
self.sriov._get_vf_data = mock.Mock(return_value="")
self.assertIsNone(self.sriov.configure_nics_for_sriov())
- @mock.patch.object(ssh, 'SSH', return_value=(0, "a", ""))
@mock.patch('yardstick.benchmark.contexts.standalone.sriov.Libvirt')
- def test__enable_interfaces(self, mock_libvirt, mock_ssh):
- # pylint: disable=unused-argument
- # NOTE(ralonsoh): the pylint exception should be removed.
+ @mock.patch.object(ssh, 'SSH')
+ def test__enable_interfaces(self, mock_ssh, *args):
+ mock_ssh.return_value = 0, "a", ""
+
self.sriov.vm_deploy = True
self.sriov.connection = mock_ssh
self.sriov.vm_names = ['vm_0', 'vm_1']
self.sriov.drivers = []
self.sriov.networks = self.NETWORKS
- self.sriov._get_vf_data = mock.Mock(return_value="")
- self.assertIsNone(self.sriov._enable_interfaces(0, 0, ["private_0"], 'test'))
+ self.sriov.get_vf_data = mock.Mock(return_value="")
+ self.assertIsNone(self.sriov._enable_interfaces(
+ 0, 0, ["private_0"], 'test'))
@mock.patch('yardstick.benchmark.contexts.standalone.model.Server')
@mock.patch('yardstick.benchmark.contexts.standalone.sriov.Libvirt')
- def test_setup_sriov_context(self, mock_libvirt, mock_server):
- # pylint: disable=unused-argument
- # NOTE(ralonsoh): the pylint exception should be removed.
+ def test_setup_sriov_context(self, mock_libvirt, *args):
with mock.patch("yardstick.ssh.SSH") as ssh:
ssh_mock = mock.Mock(autospec=ssh.SSH)
ssh_mock.execute = \
self.sriov.host_mgmt = {}
self.sriov.flavor = {}
self.sriov.configure_nics_for_sriov = mock.Mock(return_value="")
- mock_libvirt.build_vm_xml = mock.Mock(return_value=[6, "00:00:00:00:00:01"])
+ mock_libvirt.build_vm_xml = mock.Mock(
+ return_value=[6, "00:00:00:00:00:01"])
self.sriov._enable_interfaces = mock.Mock(return_value="")
self.sriov.vnf_node.generate_vnf_instance = mock.Mock(return_value={})
self.assertIsNotNone(self.sriov.setup_sriov_context())
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# Unittest for yardstick.benchmark.contexts.heat
-
-from __future__ import absolute_import
-
+from collections import OrderedDict
+from itertools import count
import logging
import os
-import unittest
import uuid
-from collections import OrderedDict
import mock
+import unittest
-from itertools import count
from yardstick.benchmark.contexts import heat
from yardstick.benchmark.contexts import model
+
LOG = logging.getLogger(__name__)
pgs['pgrp1']['policy'])
mock_sg.assert_called_with('servergroup1', self.test_context,
sgs['servergroup1']['policy'])
- self.assertTrue(len(self.test_context.placement_groups) == 1)
- self.assertTrue(len(self.test_context.server_groups) == 1)
+ self.assertEqual(len(self.test_context.placement_groups), 1)
+ self.assertEqual(len(self.test_context.server_groups), 1)
mock_network.assert_called_with(
'bar', self.test_context, networks['bar'])
- self.assertTrue(len(self.test_context.networks) == 1)
+ self.assertEqual(len(self.test_context.networks), 1)
mock_server.assert_called_with('baz', self.test_context,
servers['baz'])
- self.assertTrue(len(self.test_context.servers) == 1)
+ self.assertEqual(len(self.test_context.servers), 1)
if os.path.exists(self.test_context.key_filename):
try:
"2f2e4997-0a8e-4eb7-9fa4-f3f8fbbc393b")
mock_template.add_security_group.assert_called_with("foo-secgroup")
# mock_template.add_network.assert_called_with("bar-fool-network", 'physnet1', None)
- mock_template.add_router.assert_called_with("bar-fool-network-router",
- netattrs["external_network"],
- "bar-fool-network-subnet")
- mock_template.add_router_interface.assert_called_with("bar-fool-network-router-if0",
- "bar-fool-network-router",
- "bar-fool-network-subnet")
+ mock_template.add_router.assert_called_with(
+ "bar-fool-network-router",
+ netattrs["external_network"],
+ "bar-fool-network-subnet")
+ mock_template.add_router_interface.assert_called_with(
+ "bar-fool-network-router-if0",
+ "bar-fool-network-router",
+ "bar-fool-network-subnet")
@mock.patch('yardstick.benchmark.contexts.heat.HeatTemplate')
- def test_attrs_get(self, mock_template):
+ def test_attrs_get(self, *args):
image, flavor, user = expected_tuple = 'foo1', 'foo2', 'foo3'
self.assertNotEqual(self.test_context.image, image)
self.assertNotEqual(self.test_context.flavor, flavor)
self.assertEqual(attr_tuple, expected_tuple)
@mock.patch('yardstick.benchmark.contexts.heat.HeatTemplate')
- def test_attrs_set_negative(self, mock_template):
+ def test_attrs_set_negative(self, *args):
with self.assertRaises(AttributeError):
self.test_context.image = 'foo'
@mock.patch('yardstick.benchmark.contexts.heat.HeatTemplate')
@mock.patch('yardstick.benchmark.contexts.heat.os')
- def test_undeploy_key_filename(self, mock_template, mock_os):
+ def test_undeploy_key_filename(self, mock_os, mock_template):
self.test_context.stack = mock_template
mock_os.path.exists.return_value = True
self.assertIsNone(self.test_context.undeploy())
@mock.patch("yardstick.benchmark.contexts.heat.pkg_resources")
- def test__get_server_found_dict(self, mock_pkg_resources):
+ def test__get_server_found_dict(self, *args):
"""
Use HeatContext._get_server to get a server that matches
based on a dictionary input.
self.assertEqual(result['private_ip'], '10.0.0.1')
@mock.patch("yardstick.benchmark.contexts.heat.pkg_resources")
- def test__get_server_found_dict_no_attrs(self, mock_pkg_resources):
+ def test__get_server_found_dict_no_attrs(self, *args):
"""
Use HeatContext._get_server to get a server that matches
based on a dictionary input.
self.assertNotIn('ip', result)
@mock.patch("yardstick.benchmark.contexts.heat.pkg_resources")
- def test__get_server_found_not_dict(self, mock_pkg_resources):
+ def test__get_server_found_not_dict(self, *args):
"""
Use HeatContext._get_server to get a server that matches
based on a non-dictionary input
self.assertNotIn('public_ip', result)
@mock.patch("yardstick.benchmark.contexts.heat.pkg_resources")
- def test__get_server_none_found_not_dict(self, mock_pkg_resources):
+ def test__get_server_none_found_not_dict(self, *args):
"""
Use HeatContext._get_server to not get a server due to
None value associated with the match to a non-dictionary
self.assertIsNone(result)
@mock.patch("yardstick.benchmark.contexts.heat.pkg_resources")
- def test__get_server_not_found_dict(self, mock_pkg_resources):
+ def test__get_server_not_found_dict(self, *args):
"""
Use HeatContext._get_server to not get a server for lack
of a match to a dictionary input
self.assertIsNone(result)
@mock.patch("yardstick.benchmark.contexts.heat.pkg_resources")
- def test__get_server_not_found_not_dict(self, mock_pkg_resources):
+ def test__get_server_not_found_not_dict(self, *args):
"""
Use HeatContext._get_server to not get a server for lack
of a match to a non-dictionary input
result = self.test_context._get_server(attr_name)
self.assertIsNone(result)
+ # TODO: Split this into more granular tests
def test__get_network(self):
network1 = mock.MagicMock()
network1.name = 'net_1'
-#!/usr/bin/env python
-
##############################################################################
# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
#
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# Unittest for yardstick.benchmark.contexts.kubernetes
-
-from __future__ import absolute_import
-import unittest
import mock
+import unittest
from yardstick.benchmark.contexts.base import Context
-from yardstick.benchmark.contexts.kubernetes import KubernetesContext
+from yardstick.benchmark.contexts import kubernetes
context_cfg = {
class KubernetesTestCase(unittest.TestCase):
+ def setUp(self):
+ self.k8s_context = kubernetes.KubernetesContext()
+ self.k8s_context.init(context_cfg)
+
def tearDown(self):
# clear kubernetes contexts from global list so we don't break other tests
Context.list = []
- @mock.patch('{}.KubernetesContext._delete_services'.format(prefix))
- @mock.patch('{}.KubernetesContext._delete_ssh_key'.format(prefix))
- @mock.patch('{}.KubernetesContext._delete_rcs'.format(prefix))
- @mock.patch('{}.KubernetesContext._delete_pods'.format(prefix))
+ @mock.patch.object(kubernetes.KubernetesContext, '_delete_services')
+ @mock.patch.object(kubernetes.KubernetesContext, '_delete_ssh_key')
+ @mock.patch.object(kubernetes.KubernetesContext, '_delete_rcs')
+ @mock.patch.object(kubernetes.KubernetesContext, '_delete_pods')
def test_undeploy(self,
mock_delete_pods,
mock_delete_rcs,
mock_delete_ssh,
mock_delete_services):
- k8s_context = KubernetesContext()
- k8s_context.init(context_cfg)
- k8s_context.undeploy()
+ self.k8s_context.undeploy()
self.assertTrue(mock_delete_ssh.called)
self.assertTrue(mock_delete_rcs.called)
self.assertTrue(mock_delete_pods.called)
self.assertTrue(mock_delete_services.called)
- @mock.patch('{}.KubernetesContext._create_services'.format(prefix))
- @mock.patch('{}.KubernetesContext._wait_until_running'.format(prefix))
- @mock.patch('{}.KubernetesTemplate.get_rc_pods'.format(prefix))
- @mock.patch('{}.KubernetesContext._create_rcs'.format(prefix))
- @mock.patch('{}.KubernetesContext._set_ssh_key'.format(prefix))
+ @mock.patch.object(kubernetes.KubernetesContext, '_create_services')
+ @mock.patch.object(kubernetes.KubernetesContext, '_wait_until_running')
+ @mock.patch.object(kubernetes.KubernetesTemplate, 'get_rc_pods')
+ @mock.patch.object(kubernetes.KubernetesContext, '_create_rcs')
+ @mock.patch.object(kubernetes.KubernetesContext, '_set_ssh_key')
def test_deploy(self,
mock_set_ssh_key,
mock_create_rcs,
mock_wait_until_running,
mock_create_services):
- k8s_context = KubernetesContext()
- k8s_context.init(context_cfg)
with mock.patch("yardstick.benchmark.contexts.kubernetes.time"):
- k8s_context.deploy()
+ self.k8s_context.deploy()
self.assertTrue(mock_set_ssh_key.called)
self.assertTrue(mock_create_rcs.called)
self.assertTrue(mock_create_services.called)
self.assertTrue(mock_get_rc_pods.called)
self.assertTrue(mock_wait_until_running.called)
- @mock.patch('{}.paramiko'.format(prefix), **{"resource_filename.return_value": ""})
- @mock.patch('{}.pkg_resources'.format(prefix), **{"resource_filename.return_value": ""})
- @mock.patch('{}.utils'.format(prefix))
- @mock.patch('{}.open'.format(prefix), create=True)
- @mock.patch('{}.k8s_utils.delete_config_map'.format(prefix))
- @mock.patch('{}.k8s_utils.create_config_map'.format(prefix))
- def test_ssh_key(self, mock_create, mock_delete, mock_open, mock_utils, mock_resources,
- mock_paramiko):
-
- k8s_context = KubernetesContext()
- k8s_context.init(context_cfg)
- k8s_context._set_ssh_key()
- k8s_context._delete_ssh_key()
+ @mock.patch.object(kubernetes, 'paramiko', **{"resource_filename.return_value": ""})
+ @mock.patch.object(kubernetes, 'pkg_resources', **{"resource_filename.return_value": ""})
+ @mock.patch.object(kubernetes, 'utils')
+ @mock.patch.object(kubernetes, 'open', create=True)
+ @mock.patch.object(kubernetes.k8s_utils, 'delete_config_map')
+ @mock.patch.object(kubernetes.k8s_utils, 'create_config_map')
+ def test_ssh_key(self, mock_create, mock_delete, *args):
+ self.k8s_context._set_ssh_key()
+ self.k8s_context._delete_ssh_key()
+
self.assertTrue(mock_create.called)
self.assertTrue(mock_delete.called)
- @mock.patch('{}.k8s_utils.read_pod_status'.format(prefix))
+ @mock.patch.object(kubernetes.k8s_utils, 'read_pod_status')
def test_wait_until_running(self, mock_read_pod_status):
- k8s_context = KubernetesContext()
- k8s_context.init(context_cfg)
- k8s_context.template.pods = ['server']
+ self.k8s_context.template.pods = ['server']
mock_read_pod_status.return_value = 'Running'
- k8s_context._wait_until_running()
+ self.k8s_context._wait_until_running()
- @mock.patch('{}.k8s_utils.get_pod_by_name'.format(prefix))
- @mock.patch('{}.KubernetesContext._get_node_ip'.format(prefix))
- @mock.patch('{}.k8s_utils.get_service_by_name'.format(prefix))
+ @mock.patch.object(kubernetes.k8s_utils, 'get_pod_by_name')
+ @mock.patch.object(kubernetes.KubernetesContext, '_get_node_ip')
+ @mock.patch.object(kubernetes.k8s_utils, 'get_service_by_name')
def test_get_server(self,
mock_get_service_by_name,
mock_get_node_ip,
def __init__(self):
self.status = Status()
- k8s_context = KubernetesContext()
- k8s_context.init(context_cfg)
-
mock_get_service_by_name.return_value = Services()
mock_get_pod_by_name.return_value = Pod()
mock_get_node_ip.return_value = '172.16.10.131'
- server = k8s_context._get_server('server')
- self.assertIsNotNone(server)
+ self.assertIsNotNone(self.k8s_context._get_server('server'))
- @mock.patch('{}.KubernetesContext._create_rc'.format(prefix))
+ @mock.patch.object(kubernetes.KubernetesContext, '_create_rc')
def test_create_rcs(self, mock_create_rc):
- k8s_context = KubernetesContext()
- k8s_context.init(context_cfg)
- k8s_context._create_rcs()
+ self.k8s_context._create_rcs()
self.assertTrue(mock_create_rc.called)
- @mock.patch('{}.k8s_utils.create_replication_controller'.format(prefix))
+ @mock.patch.object(kubernetes.k8s_utils, 'create_replication_controller')
def test_create_rc(self, mock_create_replication_controller):
- k8s_context = KubernetesContext()
- k8s_context.init(context_cfg)
- k8s_context._create_rc({})
+ self.k8s_context._create_rc({})
self.assertTrue(mock_create_replication_controller.called)
- @mock.patch('{}.KubernetesContext._delete_rc'.format(prefix))
+ @mock.patch.object(kubernetes.KubernetesContext, '_delete_rc')
def test_delete_rcs(self, mock_delete_rc):
- k8s_context = KubernetesContext()
- k8s_context.init(context_cfg)
- k8s_context._delete_rcs()
+ self.k8s_context._delete_rcs()
self.assertTrue(mock_delete_rc.called)
- @mock.patch('{}.k8s_utils.delete_replication_controller'.format(prefix))
+ @mock.patch.object(kubernetes.k8s_utils, 'delete_replication_controller')
def test_delete_rc(self, mock_delete_replication_controller):
- k8s_context = KubernetesContext()
- k8s_context.init(context_cfg)
- k8s_context._delete_rc({})
+ self.k8s_context._delete_rc({})
self.assertTrue(mock_delete_replication_controller.called)
- @mock.patch('{}.k8s_utils.get_node_list'.format(prefix))
+ @mock.patch.object(kubernetes.k8s_utils, 'get_node_list')
def test_get_node_ip(self, mock_get_node_list):
-
- k8s_context = KubernetesContext()
- k8s_context.init(context_cfg)
- k8s_context._get_node_ip()
+ self.k8s_context._get_node_ip()
self.assertTrue(mock_get_node_list.called)
@mock.patch('yardstick.orchestrator.kubernetes.ServiceObject.create')
def test_create_services(self, mock_create):
- k8s_context = KubernetesContext()
- k8s_context.init(context_cfg)
- k8s_context._create_services()
+ self.k8s_context._create_services()
self.assertTrue(mock_create.called)
@mock.patch('yardstick.orchestrator.kubernetes.ServiceObject.delete')
def test_delete_services(self, mock_delete):
- k8s_context = KubernetesContext()
- k8s_context.init(context_cfg)
- k8s_context._delete_services()
+ self.k8s_context._delete_services()
self.assertTrue(mock_delete.called)
def test_find_external_network(self):
mock_network = mock.Mock()
- mock_network.router = mock.Mock()
+ mock_network.router = mock.Mock() #pylint ignore assignment-from-none
mock_network.router.external_gateway_info = 'ext_net'
model.Network.list = [mock_network]
from yardstick.benchmark.contexts import node
+# pylint: disable=unused-argument
+# disable this for now because I keep forgetting mock patch arg ordering
+
+
class NodeContextTestCase(unittest.TestCase):
PREFIX = 'yardstick.benchmark.contexts.node'
result = self.test_context.get_context_from_server('my.vnf1')
self.assertIs(result, self.test_context)
+ # TODO: Split this into more granular tests
def test__get_network(self):
network1 = {
'name': 'net_1',
# self.input_file = ('plugin/sample_config.yaml',)
self.input_file = [
os.path.join(os.path.abspath(
- dirname(dirname(dirname(dirname(dirname(__file__)))))),
+ dirname(dirname(dirname(dirname(dirname(dirname(__file__))))))),
'plugin/sample_config.yaml')]
self.assertEqual(1, mock_tasks.call_count)
self.assertEqual(1, mock_keys.call_count)
+ # pylint: disable=deprecated-method
def test_invalid_yaml_name(self):
self.assertRaisesRegexp(ValueError, "yaml*", self.rep._validate,
'F@KE_NAME', FAKE_TASK_ID)
+ # pylint: disable=deprecated-method
def test_invalid_task_id(self):
self.assertRaisesRegexp(ValueError, "task*", self.rep._validate,
FAKE_YAML_NAME, DUMMY_TASK_ID)
mock_query.return_value = []
self.rep.yaml_name = FAKE_YAML_NAME
self.rep.task_id = FAKE_TASK_ID
+ # pylint: disable=deprecated-method
self.assertRaisesRegexp(KeyError, "Task ID", self.rep._get_fieldkeys)
self.assertRaisesRegexp(KeyError, "Task ID", self.rep._get_tasks)
+ # pylint: enable=deprecated-method
from yardstick.common import constants as consts
+# pylint: disable=unused-argument
+# disable this for now because I keep forgetting mock patch arg ordering
+
+
class TaskTestCase(unittest.TestCase):
@mock.patch('yardstick.benchmark.core.task.Context')
-#!/usr/bin/env python
-
##############################################################################
# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
#
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-from __future__ import print_function
-from __future__ import absolute_import
-
-import unittest
import time
-from mock import mock
+import mock
+import unittest
from yardstick.benchmark.runners import base
-from yardstick.benchmark.runners.iteration import IterationRunner
+from yardstick.benchmark.runners import iteration
class ActionTestCase(unittest.TestCase):
class RunnerTestCase(unittest.TestCase):
+ def setUp(self):
+ self.runner = iteration.IterationRunner({})
+
@mock.patch("yardstick.benchmark.runners.iteration.multiprocessing")
- def test_get_output(self, mock_process):
- runner = IterationRunner({})
- runner.output_queue.put({'case': 'opnfv_yardstick_tc002'})
- runner.output_queue.put({'criteria': 'PASS'})
+ def test_get_output(self, *args):
+ self.runner.output_queue.put({'case': 'opnfv_yardstick_tc002'})
+ self.runner.output_queue.put({'criteria': 'PASS'})
idle_result = {
'case': 'opnfv_yardstick_tc002',
'criteria': 'PASS'
}
- for retries in range(1000):
+ for _ in range(1000):
time.sleep(0.01)
- if not runner.output_queue.empty():
+ if not self.runner.output_queue.empty():
break
- actual_result = runner.get_output()
+ actual_result = self.runner.get_output()
self.assertEqual(idle_result, actual_result)
@mock.patch("yardstick.benchmark.runners.iteration.multiprocessing")
- def test_get_result(self, mock_process):
- runner = IterationRunner({})
- runner.result_queue.put({'case': 'opnfv_yardstick_tc002'})
- runner.result_queue.put({'criteria': 'PASS'})
+ def test_get_result(self, *args):
+ self.runner.result_queue.put({'case': 'opnfv_yardstick_tc002'})
+ self.runner.result_queue.put({'criteria': 'PASS'})
idle_result = [
{'case': 'opnfv_yardstick_tc002'},
{'criteria': 'PASS'}
]
- for retries in range(1000):
+ for _ in range(1000):
time.sleep(0.01)
- if not runner.result_queue.empty():
+ if not self.runner.result_queue.empty():
break
- actual_result = runner.get_result()
+ actual_result = self.runner.get_result()
self.assertEqual(idle_result, actual_result)
def test__run_benchmark(self):
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-#
-from __future__ import absolute_import
+import time
-import unittest
import mock
+import unittest
-from tests.unit import STL_MOCKS
+from yardstick.tests.unit import STL_MOCKS
STLClient = mock.MagicMock()
stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
benchmark = cls()
method = getattr(benchmark, 'my_method')
- helper = SearchRunnerHelper(cls, 'my_method', scenario_cfg, {}, aborted)
+ helper = SearchRunnerHelper(
+ cls, 'my_method', scenario_cfg, {}, aborted)
with helper.get_benchmark_instance():
helper()
'runner': {},
}
- helper = SearchRunnerHelper(cls, 'my_method', scenario_cfg, {}, aborted)
+ helper = SearchRunnerHelper(
+ cls, 'my_method', scenario_cfg, {}, aborted)
with self.assertRaises(RuntimeError):
helper()
- @mock.patch('yardstick.benchmark.runners.search.time')
- def test_is_not_done(self, mock_time):
+ @mock.patch.object(time, 'sleep')
+ @mock.patch.object(time, 'time')
+ def test_is_not_done(self, mock_time, *args):
cls = mock.MagicMock()
aborted = mock.MagicMock()
scenario_cfg = {
'runner': {},
}
- mock_time.time.side_effect = range(1000)
+ mock_time.side_effect = range(1000)
- helper = SearchRunnerHelper(cls, 'my_method', scenario_cfg, {}, aborted)
+ helper = SearchRunnerHelper(
+ cls, 'my_method', scenario_cfg, {}, aborted)
index = -1
for index in helper.is_not_done():
self.assertGreaterEqual(index, 10)
- @mock.patch('yardstick.benchmark.runners.search.time')
- def test_is_not_done_immediate_stop(self, mock_time):
+ @mock.patch.object(time, 'sleep')
+ def test_is_not_done_immediate_stop(self, *args):
cls = mock.MagicMock()
aborted = mock.MagicMock()
scenario_cfg = {
},
}
- helper = SearchRunnerHelper(cls, 'my_method', scenario_cfg, {}, aborted)
+ helper = SearchRunnerHelper(
+ cls, 'my_method', scenario_cfg, {}, aborted)
index = -1
for index in helper.is_not_done():
self.assertEqual(index, -1)
+
class TestSearchRunner(unittest.TestCase):
def test__worker_run_once(self):
attacker_baremetal
+# pylint: disable=unused-argument
+# disable this for now because I keep forgetting mock patch arg ordering
+
+
@mock.patch('yardstick.benchmark.scenarios.availability.attacker.attacker_baremetal.subprocess')
class ExecuteShellTestCase(unittest.TestCase):
def test__fun_execute_shell_command_successful(self, mock_subprocess):
cmd = "env"
mock_subprocess.check_output.return_value = (0, 'unittest')
- exitcode, output = attacker_baremetal._execute_shell_command(cmd)
+ exitcode, _ = attacker_baremetal._execute_shell_command(cmd)
self.assertEqual(exitcode, 0)
@mock.patch('yardstick.benchmark.scenarios.availability.attacker.attacker_baremetal.LOG')
def test__fun_execute_shell_command_fail_cmd_exception(self, mock_log, mock_subprocess):
cmd = "env"
mock_subprocess.check_output.side_effect = RuntimeError
- exitcode, output = attacker_baremetal._execute_shell_command(cmd)
+ exitcode, _ = attacker_baremetal._execute_shell_command(cmd)
self.assertEqual(exitcode, -1)
mock_log.error.assert_called_once()
-#!/usr/bin/env python
-
##############################################################################
# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
#
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# Unittest for
-# yardstick.benchmark.scenarios.availability.monitor.monitor_command
-
-from __future__ import absolute_import
import mock
import unittest
from yardstick.benchmark.scenarios.availability.monitor import basemonitor
-@mock.patch(
- 'yardstick.benchmark.scenarios.availability.monitor.basemonitor'
- '.BaseMonitor')
class MonitorMgrTestCase(unittest.TestCase):
def setUp(self):
for mo in self.monitor_list:
mo._result = {"outage_time": 10}
- def test__MonitorMgr_setup_successful(self, mock_monitor):
+ @mock.patch.object(basemonitor, 'BaseMonitor')
+ def test__MonitorMgr_setup_successful(self, *args):
instance = basemonitor.MonitorMgr({"nova-api": 10})
instance.init_monitors(self.monitor_configs, None)
instance.start_monitors()
instance.wait_monitors()
- ret = instance.verify_SLA()
+ # TODO(elfoley): Check the return value
+ ret = instance.verify_SLA() # pylint: disable=unused-variable
- def test_MonitorMgr_getitem(self, mock_monitor):
+ @mock.patch.object(basemonitor, 'BaseMonitor')
+ def test_MonitorMgr_getitem(self, *args):
monitorMgr = basemonitor.MonitorMgr({"nova-api": 10})
monitorMgr.init_monitors(self.monitor_configs, None)
- def test_store_result(self, mock_monitor):
+ @mock.patch.object(basemonitor, 'BaseMonitor')
+ def test_store_result(self, *args):
expect = {'process_neutron-server_outage_time': 10,
'openstack-router-list_outage_time': 10}
result = {}
ins.run()
ins.verify_SLA()
- @mock.patch(
- 'yardstick.benchmark.scenarios.availability.monitor.basemonitor'
- '.multiprocessing')
+ @mock.patch.object(basemonitor, 'multiprocessing')
def test__basemonitor_func_false(self, mock_multiprocess):
ins = self.MonitorSimple(self.monitor_cfg, None, {"nova-api": 10})
ins.setup()
ins.run()
ins.verify_SLA()
+ # TODO(elfoley): fix this test to not throw an error
def test__basemonitor_getmonitorcls_successfule(self):
cls = None
try:
cls = basemonitor.BaseMonitor.get_monitor_cls(self.monitor_cfg)
- except Exception:
+ except Exception: # pylint: disable=broad-except
pass
self.assertIsNone(cls)
-#!/usr/bin/env python
-
##############################################################################
# Copyright (c) 2016 Huan Li and others
# lihuansse@tongji.edu.cn
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# Unittest for
-# yardstick.benchmark.scenarios.availability.operation.baseoperation
-
-from __future__ import absolute_import
import mock
import unittest
from yardstick.benchmark.scenarios.availability.operation import baseoperation
-@mock.patch(
- 'yardstick.benchmark.scenarios.availability.operation.baseoperation'
- '.BaseOperation')
class OperationMgrTestCase(unittest.TestCase):
def setUp(self):
self.operation_configs = []
self.operation_configs.append(config)
- def test_all_successful(self, mock_operation):
+ @mock.patch.object(baseoperation, 'BaseOperation')
+ def test_all_successful(self, *args):
mgr_ins = baseoperation.OperationMgr()
mgr_ins.init_operations(self.operation_configs, None)
- operation_ins = mgr_ins["service-status"]
+ _ = mgr_ins["service-status"]
mgr_ins.rollback()
- def test_getitem_fail(self, mock_operation):
+ @mock.patch.object(baseoperation, 'BaseOperation')
+ def test_getitem_fail(self, *args):
mgr_ins = baseoperation.OperationMgr()
mgr_ins.init_operations(self.operation_configs, None)
with self.assertRaises(KeyError):
- operation_ins = mgr_ins["operation-not-exist"]
+ _ = mgr_ins["operation-not-exist"]
class TestOperation(baseoperation.BaseOperation):
'operation_type': 'general-operation',
'key': 'service-status'
}
+ self.base_ins = baseoperation.BaseOperation(self.config, None)
def test_all_successful(self):
- base_ins = baseoperation.BaseOperation(self.config, None)
- base_ins.setup()
- base_ins.run()
- base_ins.rollback()
+ self.base_ins.setup()
+ self.base_ins.run()
+ self.base_ins.rollback()
def test_get_script_fullpath(self):
- base_ins = baseoperation.BaseOperation(self.config, None)
- base_ins.get_script_fullpath("ha_tools/test.bash")
+ self.base_ins.get_script_fullpath("ha_tools/test.bash")
+ # TODO(elfoley): Fix test to check on expected outputs
+ # pylint: disable=unused-variable
def test_get_operation_cls_successful(self):
- base_ins = baseoperation.BaseOperation(self.config, None)
- operation_ins = base_ins.get_operation_cls("test-operation")
+ operation_ins = self.base_ins.get_operation_cls("test-operation")
def test_get_operation_cls_fail(self):
- base_ins = baseoperation.BaseOperation(self.config, None)
with self.assertRaises(RuntimeError):
- operation_ins = base_ins.get_operation_cls("operation-not-exist")
+ self.base_ins.get_operation_cls("operation-not-exist")
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# Unittest for yardstick.benchmark.scenarios.availability.result_checker
-# .baseresultchecker
-
-from __future__ import absolute_import
import mock
import unittest
baseresultchecker
-@mock.patch('yardstick.benchmark.scenarios.availability.result_checker'
- '.baseresultchecker.BaseResultChecker')
class ResultCheckerMgrTestCase(unittest.TestCase):
def setUp(self):
self.checker_configs = []
self.checker_configs.append(config)
- def test_ResultCheckerMgr_setup_successful(self, mock_basechacer):
- mgr_ins = baseresultchecker.ResultCheckerMgr()
- mgr_ins.init_ResultChecker(self.checker_configs, None)
- mgr_ins.verify()
+ self.mgr_ins = baseresultchecker.ResultCheckerMgr()
+
+ self._mock_basechecker = mock.patch.object(baseresultchecker,
+ 'BaseResultChecker')
+ self.mock_basechecker = self._mock_basechecker.start()
+ self.addCleanup(self._stop_mock)
+
+ def _stop_mock(self):
+ self._mock_basechecker.stop()
+
+ def test_ResultCheckerMgr_setup_successful(self):
+ self.mgr_ins.verify()
- def test_getitem_succeessful(self, mock_basechacer):
- mgr_ins = baseresultchecker.ResultCheckerMgr()
- mgr_ins.init_ResultChecker(self.checker_configs, None)
- checker_ins = mgr_ins["process-checker"]
+ def test_getitem_succeessful(self):
+ self.mgr_ins.init_ResultChecker(self.checker_configs, None)
+ _ = self.mgr_ins["process-checker"]
- def test_getitem_fail(self, mock_basechacer):
- mgr_ins = baseresultchecker.ResultCheckerMgr()
- mgr_ins.init_ResultChecker(self.checker_configs, None)
+ def test_getitem_fail(self):
+ self.mgr_ins.init_ResultChecker(self.checker_configs, None)
with self.assertRaises(KeyError):
- checker_ins = mgr_ins["checker-not-exist"]
+ _ = self.mgr_ins["checker-not-exist"]
class BaseResultCheckerTestCase(unittest.TestCase):
'checker_type': 'general-result-checker',
'key': 'process-checker'
}
+ self.ins = baseresultchecker.BaseResultChecker(self.checker_cfg, None)
def test_baseresultchecker_setup_verify_successful(self):
- ins = baseresultchecker.BaseResultChecker(self.checker_cfg, None)
- ins.setup()
- ins.verify()
+ self.ins.setup()
+ self.ins.verify()
def test_baseresultchecker_verfiy_pass(self):
- ins = baseresultchecker.BaseResultChecker(self.checker_cfg, None)
- ins.setup()
- ins.actualResult = True
- ins.expectedResult = True
- ins.verify()
+ self.ins.setup()
+ self.ins.actualResult = True
+ self.ins.expectedResult = True
+ self.ins.verify()
def test_get_script_fullpath(self):
- ins = baseresultchecker.BaseResultChecker(self.checker_cfg, None)
- path = ins.get_script_fullpath("test.bash")
+ self.ins.get_script_fullpath("test.bash")
def test_get_resultchecker_cls_successful(self):
baseresultchecker.BaseResultChecker.get_resultchecker_cls(
from yardstick.benchmark.scenarios.availability.director import Director
+# pylint: disable=unused-argument
+# disable this for now because I keep forgetting mock patch arg ordering
+
+
@mock.patch('yardstick.benchmark.scenarios.availability.director.basemonitor')
@mock.patch('yardstick.benchmark.scenarios.availability.director.baseattacker')
@mock.patch(
-#!/usr/bin/env python
-
##############################################################################
# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
#
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# Unittest for
-# yardstick.benchmark.scenarios.availability.monitor.monitor_command
-
-from __future__ import absolute_import
import mock
import unittest
from yardstick.benchmark.scenarios.availability.monitor import monitor_command
-@mock.patch('subprocess.check_output')
class ExecuteShellTestCase(unittest.TestCase):
- def test__fun_execute_shell_command_successful(self, mock_subprocess_check_output):
+ def setUp(self):
+ self._mock_subprocess = mock.patch.object(monitor_command, 'subprocess')
+ self.mock_subprocess = self._mock_subprocess.start()
+ self.addCleanup(self._stop_mock)
+
+ def _stop_mock(self):
+ self._mock_subprocess.stop()
+
+ def test__fun_execute_shell_command_successful(self):
cmd = "env"
- mock_subprocess_check_output.return_value = (0, 'unittest')
- exitcode, _ = monitor_command._execute_shell_command(cmd)
+ self.mock_subprocess.check_output.return_value = (0, 'unittest')
+ exitcode, _t = monitor_command._execute_shell_command(cmd)
self.assertEqual(exitcode, 0)
- @mock.patch('yardstick.benchmark.scenarios.availability.monitor.monitor_command.LOG')
- def test__fun_execute_shell_command_fail_cmd_exception(self, mock_log,
- mock_subprocess_check_output):
+ @mock.patch.object(monitor_command, 'LOG')
+ def test__fun_execute_shell_command_fail_cmd_exception(self, mock_log):
cmd = "env"
- mock_subprocess_check_output.side_effect = RuntimeError
+ self.mock_subprocess.check_output.side_effect = RuntimeError
exitcode, _ = monitor_command._execute_shell_command(cmd)
self.assertEqual(exitcode, -1)
mock_log.error.assert_called_once()
-@mock.patch('subprocess.check_output')
class MonitorOpenstackCmdTestCase(unittest.TestCase):
def setUp(self):
'monitor_time': 1,
'sla': {'max_outage_time': 5}
}
+ self._mock_subprocess = mock.patch.object(monitor_command, 'subprocess')
+ self.mock_subprocess = self._mock_subprocess.start()
+ self.addCleanup(self._stop_mock)
+
+ def _stop_mock(self):
+ self._mock_subprocess.stop()
- def test__monitor_command_monitor_func_successful(self, mock_subprocess_check_output):
+ def test__monitor_command_monitor_func_successful(self):
instance = monitor_command.MonitorOpenstackCmd(self.config, None, {"nova-api": 10})
instance.setup()
- mock_subprocess_check_output.return_value = (0, 'unittest')
+ self.mock_subprocess.check_output.return_value = (0, 'unittest')
ret = instance.monitor_func()
self.assertTrue(ret)
instance._result = {"outage_time": 0}
instance.verify_SLA()
- @mock.patch('yardstick.benchmark.scenarios.availability.monitor.monitor_command.LOG')
- def test__monitor_command_monitor_func_failure(self, mock_log, mock_subprocess_check_output):
- mock_subprocess_check_output.return_value = (1, 'unittest')
+ @mock.patch.object(monitor_command, 'LOG')
+ def test__monitor_command_monitor_func_failure(self, mock_log):
+ self.mock_subprocess.check_output.return_value = (1, 'unittest')
instance = monitor_command.MonitorOpenstackCmd(self.config, None, {"nova-api": 10})
instance.setup()
- mock_subprocess_check_output.side_effect = RuntimeError
+ self.mock_subprocess.check_output.side_effect = RuntimeError
ret = instance.monitor_func()
self.assertFalse(ret)
mock_log.error.assert_called_once()
instance._result = {"outage_time": 10}
instance.verify_SLA()
- @mock.patch(
- 'yardstick.benchmark.scenarios.availability.monitor.monitor_command'
- '.ssh')
- def test__monitor_command_ssh_monitor_successful(self, mock_ssh, mock_subprocess_check_output):
+ @mock.patch.object(monitor_command, 'ssh')
+ def test__monitor_command_ssh_monitor_successful(self, mock_ssh):
- mock_subprocess_check_output.return_value = (0, 'unittest')
+ self.mock_subprocess.check_output.return_value = (0, 'unittest')
self.config["host"] = "node1"
instance = monitor_command.MonitorOpenstackCmd(
self.config, self.context, {"nova-api": 10})
from yardstick.benchmark.scenarios.availability.monitor import monitor_general
+# pylint: disable=unused-argument
+# disable this for now because I keep forgetting mock patch arg ordering
+
+
@mock.patch('yardstick.benchmark.scenarios.availability.monitor.'
'monitor_general.ssh')
@mock.patch('yardstick.benchmark.scenarios.availability.monitor.'
import unittest
from yardstick.benchmark.scenarios.availability.monitor import monitor_multi
+
+# pylint: disable=unused-argument
+# disable this for now because I keep forgetting mock patch arg ordering
+
+
@mock.patch('yardstick.benchmark.scenarios.availability.monitor.'
'monitor_general.ssh')
@mock.patch('yardstick.benchmark.scenarios.availability.monitor.'
}
def test__monitor_multi_all_successful(self, mock_open, mock_ssh):
- ins = monitor_multi.MultiMonitor(self.monitor_cfg, self.context, {"nova-api": 10})
+ ins = monitor_multi.MultiMonitor(
+ self.monitor_cfg, self.context, {"nova-api": 10})
mock_ssh.SSH.from_node().execute.return_value = (0, "running", '')
ins.verify_SLA()
def test__monitor_multi_all_fail(self, mock_open, mock_ssh):
- ins = monitor_multi.MultiMonitor(self.monitor_cfg, self.context, {"nova-api": 10})
+ ins = monitor_multi.MultiMonitor(
+ self.monitor_cfg, self.context, {"nova-api": 10})
mock_ssh.SSH.from_node().execute.return_value = (0, "running", '')
ins.start_monitor()
ins.wait_monitor()
ins.verify_SLA()
-
operation_general
+# pylint: disable=unused-argument
+# disable this for now because I keep forgetting mock patch arg ordering
+
+
@mock.patch('yardstick.benchmark.scenarios.availability.operation.'
'operation_general.ssh')
@mock.patch('yardstick.benchmark.scenarios.availability.operation.'
result_checker_general
+# pylint: disable=unused-argument
+# disable this for now because I keep forgetting mock patch arg ordering
+
+
@mock.patch('yardstick.benchmark.scenarios.availability.result_checker.'
'result_checker_general.ssh')
@mock.patch('yardstick.benchmark.scenarios.availability.result_checker.'
-#!/usr/bin/env python
-
##############################################################################
# Copyright (c) 2016 Huan Li and others
# lihuansse@tongji.edu.cn
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# Unittest for yardstick.benchmark.scenarios.availability.scenario_general
-
-from __future__ import absolute_import
import mock
import unittest
-from yardstick.benchmark.scenarios.availability.scenario_general import \
- ScenarioGeneral
-
+from yardstick.benchmark.scenarios.availability import scenario_general
-@mock.patch(
- 'yardstick.benchmark.scenarios.availability.scenario_general.Director')
class ScenarioGeneralTestCase(unittest.TestCase):
def setUp(self):
'index': 2}]
}
}
+ self.instance = scenario_general.ScenarioGeneral(self.scenario_cfg, None)
+
+ self._mock_director = mock.patch.object(scenario_general, 'Director')
+ self.mock_director = self._mock_director.start()
+ self.addCleanup(self._stop_mock)
+
+ def _stop_mock(self):
+ self._mock_director.stop()
- def test_scenario_general_all_successful(self, mock_director):
- ins = ScenarioGeneral(self.scenario_cfg, None)
- ins.setup()
- ins.run({})
- ins.teardown()
+ def test_scenario_general_all_successful(self):
+ self.instance.setup()
+ self.instance.run({})
+ self.instance.teardown()
- def test_scenario_general_exception(self, mock_director):
- ins = ScenarioGeneral(self.scenario_cfg, None)
+ def test_scenario_general_exception(self):
mock_obj = mock.Mock()
mock_obj.createActionPlayer.side_effect = KeyError('Wrong')
- ins.director = mock_obj
- ins.director.data = {}
- ins.run({})
- ins.teardown()
+ self.instance.director = mock_obj
+ self.instance.director.data = {}
+ self.instance.run({})
+ self.instance.teardown()
- def test_scenario_general_case_fail(self, mock_director):
- ins = ScenarioGeneral(self.scenario_cfg, None)
+ def test_scenario_general_case_fail(self):
mock_obj = mock.Mock()
mock_obj.verify.return_value = False
- ins.director = mock_obj
- ins.director.data = {}
- ins.run({})
- ins.pass_flag = True
- ins.teardown()
+ self.instance.director = mock_obj
+ self.instance.director.data = {}
+ self.instance.run({})
+ self.instance.pass_flag = True
+ self.instance.teardown()
-#!/usr/bin/env python
-
##############################################################################
# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
#
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# Unittest for yardstick.benchmark.scenarios.availability.serviceha
-
-from __future__ import absolute_import
import mock
import unittest
sla = {"outage_time": 5}
self.args = {"options": options, "sla": sla}
- @mock.patch('yardstick.benchmark.scenarios.availability.serviceha.basemonitor')
- @mock.patch(
- 'yardstick.benchmark.scenarios.availability.serviceha.baseattacker')
- def test__serviceha_setup_run_successful(self, _,
- mock_monitor):
+ # NOTE(elfoley): This should be split into test_setup and test_run
+ # NOTE(elfoley): This should explicitly test outcomes and states
+ @mock.patch.object(serviceha, 'baseattacker')
+ @mock.patch.object(serviceha, 'basemonitor')
+ def test__serviceha_setup_run_successful(self, mock_monitor, *args):
p = serviceha.ServiceHA(self.args, self.ctx)
p.setup()
p.setup()
self.assertTrue(p.setup_done)
-# def test__serviceha_run_sla_error(self, mock_attacker, mock_monitor):
-# p = serviceha.ServiceHA(self.args, self.ctx)
+ # def test__serviceha_run_sla_error(self, mock_attacker, mock_monitor):
+ # p = serviceha.ServiceHA(self.args, self.ctx)
-# p.setup()
-# self.assertTrue(p.setup_done)
-#
-# result = {}
-# result["outage_time"] = 10
-# mock_monitor.Monitor().get_result.return_value = result
+ # p.setup()
+ # self.assertEqual(p.setup_done, True)
+
+ # result = {}
+ # result["outage_time"] = 10
+ # mock_monitor.Monitor().get_result.return_value = result
-# ret = {}
-# self.assertRaises(AssertionError, p.run, ret)
+ # ret = {}
+ # self.assertRaises(AssertionError, p.run, ret)
from yardstick.benchmark.scenarios.compute import lmbench
+# pylint: disable=unused-argument
+# disable this for now because I keep forgetting mock patch arg ordering
+
+
@mock.patch('yardstick.benchmark.scenarios.compute.lmbench.ssh')
class LmbenchTestCase(unittest.TestCase):
def main():
unittest.main()
+
if __name__ == '__main__':
main()
import mock
-from yardstick.common import utils
from yardstick.benchmark.scenarios.compute import spec_cpu
options = {
"SPECint_benchmark": "perlbench",
- "runspec_tune": "all",
"output_format": "all",
"runspec_iterations": "1",
"runspec_tune": "base",
args = {"options": options}
s = spec_cpu.SpecCPU(args, self.ctx)
- sample_output = ''
mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
s.run(self.result)
expected_result = {}
mock_ssh.SSH.from_node().execute.return_value = (1, '', 'FOOBAR')
self.assertRaises(RuntimeError, s.run, self.result)
+
def main():
unittest.main()
+
if __name__ == '__main__':
main()
@mock.patch('yardstick.common.openstack_utils.attach_server_volume')
def test_attach_volume(self, mock_attach_server_volume):
options = {
- 'volume_id': '123-456-000',
- 'server_id': '000-123-456'
+ 'volume_id': '123-456-000',
+ 'server_id': '000-123-456'
}
args = {"options": options}
obj = AttachVolume(args, {})
obj.run({})
self.assertTrue(mock_attach_server_volume.called)
+
def main():
unittest.main()
'target': {
'ipaddr': '172.16.0.138'
}
- }
+ }
@mock.patch('yardstick.benchmark.scenarios.lib.check_connectivity.ssh')
def test_check_connectivity(self, mock_ssh):
'ssh_port': '22',
'ssh_timeout': 600,
'ping_parameter': "-s 2048"
- },
+ },
'sla': {'status': 'True',
'action': 'assert'}
}
- result = {}
+ # TODO(elfoley): Properly check the outputs
+ result = {} # pylint: disable=unused-variable
obj = check_connectivity.CheckConnectivity(args, {})
obj.setup()
mock_ssh.SSH.execute.return_value = (0, '100', '')
-
@mock.patch('yardstick.benchmark.scenarios.lib.check_connectivity.ssh')
def test_check_connectivity_key(self, mock_ssh):
'ssh_port': '22',
'ssh_timeout': 600,
'ping_parameter': "-s 2048"
- },
+ },
'sla': {'status': 'True',
'action': 'assert'}
}
- result = {}
+ # TODO(elfoley): Properly check the outputs
+ result = {} # pylint: disable=unused-variable
obj = check_connectivity.CheckConnectivity(args, self.ctx)
obj.setup()
mock_ssh.SSH.execute.return_value = (0, '100', '')
+
def main():
unittest.main()
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-import unittest
import mock
+import unittest
from yardstick.benchmark.scenarios.lib.check_numa_info import CheckNumaInfo
class CheckNumaInfoTestCase(unittest.TestCase):
- @mock.patch(
- 'yardstick.benchmark.scenarios.lib.check_numa_info.CheckNumaInfo._check_vm2_status')
- def test_check_numa_info(self, mock_check_vm2):
+ @mock.patch.object(CheckNumaInfo, '_check_vm2_status')
+ def test_run(self, mock_check_vm2):
scenario_cfg = {'info1': {}, 'info2': {}}
obj = CheckNumaInfo(scenario_cfg, {})
obj.run({})
from yardstick.benchmark.scenarios.lib.check_value import CheckValue
-
class CheckValueTestCase(unittest.TestCase):
+ def setUp(self):
+ self.result = {}
+
def test_check_value_eq(self):
scenario_cfg = {'options': {'operator': 'eq', 'value1': 1, 'value2': 2}}
obj = CheckValue(scenario_cfg, {})
- try:
- obj.run({})
- except Exception as e:
- self.assertIsInstance(e, AssertionError)
+ self.assertRaises(AssertionError, obj.run, self.result)
+ self.assertEqual({}, self.result)
def test_check_value_eq_pass(self):
scenario_cfg = {'options': {'operator': 'eq', 'value1': 1, 'value2': 1}}
obj = CheckValue(scenario_cfg, {})
- try:
- obj.run({})
- except Exception as e:
- self.assertIsInstance(e, AssertionError)
+
+ obj.run(self.result)
+ self.assertEqual({}, self.result)
def test_check_value_ne(self):
scenario_cfg = {'options': {'operator': 'ne', 'value1': 1, 'value2': 1}}
obj = CheckValue(scenario_cfg, {})
- try:
- obj.run({})
- except Exception as e:
- self.assertIsInstance(e, AssertionError)
-
+ self.assertRaises(AssertionError, obj.run, self.result)
+ self.assertEqual({}, self.result)
def main():
unittest.main()
--- /dev/null
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import unittest
+import mock
+
+from yardstick.benchmark.scenarios.lib import create_floating_ip
+import yardstick.common.openstack_utils as op_utils
+
+
+class CreateFloatingIpTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self._mock_get_network_id = mock.patch.object(
+ op_utils, 'get_network_id')
+ self.mock_get_network_id = self._mock_get_network_id.start()
+ self._mock_create_floating_ip = mock.patch.object(
+ op_utils, 'create_floating_ip')
+ self.mock_create_floating_ip = self._mock_create_floating_ip.start()
+ self._mock_get_neutron_client = mock.patch.object(
+ op_utils, 'get_neutron_client')
+ self.mock_get_neutron_client = self._mock_get_neutron_client.start()
+ self._mock_get_shade_client = mock.patch.object(
+ op_utils, 'get_shade_client')
+ self.mock_get_shade_client = self._mock_get_shade_client.start()
+ self._mock_log = mock.patch.object(create_floating_ip, 'LOG')
+ self.mock_log = self._mock_log.start()
+
+ self._fip_obj = create_floating_ip.CreateFloatingIp(mock.ANY, mock.ANY)
+ self._fip_obj.scenario_cfg = {'output': 'key1\nkey2'}
+
+ self.addCleanup(self._stop_mock)
+
+ def _stop_mock(self):
+ self._mock_get_network_id.stop()
+ self._mock_create_floating_ip.stop()
+ self._mock_get_neutron_client.stop()
+ self._mock_get_shade_client.stop()
+ self._mock_log.stop()
+
+ def test_run(self):
+ self.mock_create_floating_ip.return_value = {'fip_id': 'value1',
+ 'fip_addr': 'value2'}
+ output = self._fip_obj.run(mock.ANY)
+ self.assertDictEqual({'key1': 'value1', 'key2': 'value2'}, output)
+
+ def test_run_no_fip(self):
+ self.mock_create_floating_ip.return_value = None
+ output = self._fip_obj.run(mock.ANY)
+ self.assertIsNone(output)
+ self.mock_log.error.assert_called_once_with(
+ 'Creating floating ip failed!')
import unittest
import mock
-from yardstick.benchmark.scenarios.lib.create_image import CreateImage
-
+from yardstick.benchmark.scenarios.lib import create_image
+from yardstick.common import openstack_utils
+# NOTE(elfoley): There should be more tests here.
class CreateImageTestCase(unittest.TestCase):
- @mock.patch('yardstick.common.openstack_utils.create_image')
- @mock.patch('yardstick.common.openstack_utils.get_glance_client')
+ @mock.patch.object(openstack_utils, 'create_image')
+ @mock.patch.object(openstack_utils, 'get_glance_client')
def test_create_image(self, mock_get_glance_client, mock_create_image):
options = {
- 'image_name': 'yardstick_test_image_01',
- 'disk_format': 'qcow2',
- 'container_format': 'bare',
- 'min_disk': '1',
- 'min_ram': '512',
- 'protected': 'False',
- 'tags': '["yardstick automatic test image"]',
- 'file_path': '/home/opnfv/images/cirros-0.3.5-x86_64-disk.img'
+ 'image_name': 'yardstick_test_image_01',
+ 'disk_format': 'qcow2',
+ 'container_format': 'bare',
+ 'min_disk': '1',
+ 'min_ram': '512',
+ 'protected': 'False',
+ 'tags': '["yardstick automatic test image"]',
+ 'file_path': '/home/opnfv/images/cirros-0.3.5-x86_64-disk.img'
}
args = {"options": options}
- obj = CreateImage(args, {})
+ obj = create_image.CreateImage(args, {})
obj.run({})
- self.assertTrue(mock_create_image.called)
+ mock_create_image.assert_called_once()
+ mock_get_glance_client.assert_called_once()
def main():
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-import unittest
-import mock
-from yardstick.benchmark.scenarios.lib.create_keypair import CreateKeypair
+import mock
+import unittest
-PREFIX = "yardstick.benchmark.scenarios.lib.create_keypair"
+from yardstick.benchmark.scenarios.lib import create_keypair
class CreateKeypairTestCase(unittest.TestCase):
- @mock.patch('{}.paramiko'.format(PREFIX))
- @mock.patch('{}.op_utils'.format(PREFIX))
- def test_create_keypair(self, mock_op_utils, mock_paramiko):
+ @mock.patch.object(create_keypair, 'paramiko')
+ @mock.patch.object(create_keypair, 'op_utils')
+ def test_create_keypair(self, mock_op_utils, *args):
options = {
'key_name': 'yardstick_key',
'key_path': '/tmp/yardstick_key'
}
args = {"options": options}
- obj = CreateKeypair(args, {})
+ obj = create_keypair.CreateKeypair(args, {})
obj.run({})
- self.assertTrue(mock_op_utils.create_keypair.called)
+ mock_op_utils.create_keypair.assert_called_once()
def main():
##############################################################################
import unittest
import mock
-import paramiko
from yardstick.benchmark.scenarios.lib.create_network import CreateNetwork
@mock.patch('yardstick.common.openstack_utils.create_neutron_net')
def test_create_network(self, mock_get_neutron_client, mock_create_neutron_net):
options = {
- 'openstack_paras': {
- 'name': 'yardstick_net',
- 'admin_state_up': 'True'
- }
+ 'openstack_paras': {
+ 'name': 'yardstick_net',
+ 'admin_state_up': 'True'
+ }
}
args = {"options": options}
obj = CreateNetwork(args, {})
##############################################################################
import unittest
import mock
-import paramiko
from yardstick.benchmark.scenarios.lib.create_port import CreatePort
@mock.patch('yardstick.common.openstack_utils.get_neutron_client')
def test_create_port(self, mock_get_neutron_client):
options = {
- 'openstack_paras': {
- 'name': 'yardstick_port'
- }
+ 'openstack_paras': {
+ 'name': 'yardstick_port'
+ }
}
args = {"options": options}
obj = CreatePort(args, {})
##############################################################################
import unittest
import mock
-import paramiko
from yardstick.benchmark.scenarios.lib.create_router import CreateRouter
@mock.patch('yardstick.common.openstack_utils.create_neutron_router')
def test_create_router(self, mock_get_neutron_client, mock_create_neutron_router):
options = {
- 'openstack_paras': {
- 'admin_state_up': 'True',
- 'name': 'yardstick_router'
- }
+ 'openstack_paras': {
+ 'admin_state_up': 'True',
+ 'name': 'yardstick_router'
+ }
}
args = {"options": options}
obj = CreateRouter(args, {})
##############################################################################
import unittest
import mock
-import paramiko
from yardstick.benchmark.scenarios.lib.create_sec_group import CreateSecgroup
@mock.patch('yardstick.common.openstack_utils.create_security_group_full')
def test_create_sec_group(self, mock_get_neutron_client, mock_create_security_group_full):
options = {
- 'openstack_paras': {
- 'sg_name': 'yardstick_sec_group',
- 'description': 'security group for yardstick manual VM'
- }
+ 'openstack_paras': {
+ 'sg_name': 'yardstick_sec_group',
+ 'description': 'security group for yardstick manual VM'
+ }
}
args = {"options": options}
obj = CreateSecgroup(args, {})
def test_create_server(self, mock_get_nova_client, mock_get_neutron_client,
mock_get_glance_client, mock_create_instance_and_wait_for_active):
scenario_cfg = {
- 'options' : {
- 'openstack_paras': 'example'
- },
- 'output': 'server'
+ 'options': {
+ 'openstack_paras': 'example'
+ },
+ 'output': 'server'
}
obj = CreateServer(scenario_cfg, {})
obj.run({})
##############################################################################
import unittest
import mock
-import paramiko
from yardstick.benchmark.scenarios.lib.create_subnet import CreateSubnet
@mock.patch('yardstick.common.openstack_utils.create_neutron_subnet')
def test_create_subnet(self, mock_get_neutron_client, mock_create_neutron_subnet):
options = {
- 'openstack_paras': {
- 'network_id': '123-123-123',
- 'name': 'yardstick_subnet',
- 'cidr': '10.10.10.0/24',
- 'ip_version': '4'
- }
+ 'openstack_paras': {
+ 'network_id': '123-123-123',
+ 'name': 'yardstick_subnet',
+ 'cidr': '10.10.10.0/24',
+ 'ip_version': '4'
+ }
}
args = {"options": options}
obj = CreateSubnet(args, {})
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-import unittest
import mock
+import unittest
-import yardstick.benchmark.scenarios.lib.create_volume
+from yardstick.benchmark.scenarios.lib import create_volume
class CreateVolumeTestCase(unittest.TestCase):
}
}
- self.scenario = (
- yardstick.benchmark.scenarios.lib.create_volume.CreateVolume(
- scenario_cfg=self.scenario_cfg,
- context_cfg={}))
+ self.scenario = create_volume.CreateVolume(
+ scenario_cfg=self.scenario_cfg,
+ context_cfg={})
def _stop_mock(self):
self._mock_cinder_client.stop()
expected_im_name = self.scenario_cfg["options"]["image"]
expected_im_id = None
- scenario = (
- yardstick.benchmark.scenarios.lib.create_volume.CreateVolume(
- scenario_cfg=self.scenario_cfg,
- context_cfg={}))
+ scenario = create_volume.CreateVolume(
+ scenario_cfg=self.scenario_cfg,
+ context_cfg={})
self.assertEqual(expected_vol_name, scenario.volume_name)
self.assertEqual(expected_vol_size, scenario.volume_size)
mock_image_id.assert_called_once()
mock_create_volume.assert_called_once()
- @mock.patch.object(
- yardstick.benchmark.scenarios.lib.create_volume.CreateVolume, 'setup')
+ @mock.patch.object(create_volume.CreateVolume, 'setup')
def test_run_no_setup(self, scenario_setup):
self.scenario.setup_done = False
self.scenario.run()
scenario_setup.assert_called_once()
+ @mock.patch('yardstick.common.openstack_utils.create_volume')
+ @mock.patch('yardstick.common.openstack_utils.get_image_id')
+ @mock.patch('yardstick.common.openstack_utils.get_cinder_client')
+ @mock.patch('yardstick.common.openstack_utils.get_glance_client')
+ def test_create_volume(self, mock_get_glance_client,
+ mock_get_cinder_client, mock_image_id,
+ mock_create_volume):
+ options = {
+ 'volume_name': 'yardstick_test_volume_01',
+ 'size': '256',
+ 'image': 'cirros-0.3.5'
+ }
+ args = {"options": options}
+ scenario = create_volume.CreateVolume(args, {})
+ scenario.run()
+ self.assertTrue(mock_create_volume.called)
+ self.assertTrue(mock_image_id.called)
+ self.assertTrue(mock_get_glance_client.called)
+ self.assertTrue(mock_get_cinder_client.called)
+
def main():
unittest.main()
##############################################################################
import unittest
import mock
-import paramiko
from yardstick.benchmark.scenarios.lib.delete_floating_ip import DeleteFloatingIp
@mock.patch('yardstick.common.openstack_utils.get_glance_client')
def test_delete_image(self, mock_get_glance_client, mock_image_id, mock_delete_image):
options = {
- 'image_name': 'yardstick_test_image_01'
+ 'image_name': 'yardstick_test_image_01'
}
args = {"options": options}
obj = DeleteImage(args, {})
self.assertTrue(mock_image_id.called)
self.assertTrue(mock_get_glance_client.called)
+
def main():
unittest.main()
##############################################################################
import unittest
import mock
-import paramiko
from yardstick.benchmark.scenarios.lib.delete_keypair import DeleteKeypair
##############################################################################
import unittest
import mock
-import paramiko
from yardstick.benchmark.scenarios.lib.delete_network import DeleteNetwork
##############################################################################
import unittest
import mock
-import paramiko
from yardstick.benchmark.scenarios.lib.delete_port import DeletePort
##############################################################################
import unittest
import mock
-import paramiko
from yardstick.benchmark.scenarios.lib.delete_router import DeleteRouter
##############################################################################
import unittest
import mock
-import paramiko
from yardstick.benchmark.scenarios.lib.delete_router_gateway import DeleteRouterGateway
##############################################################################
import unittest
import mock
-import paramiko
from yardstick.benchmark.scenarios.lib.delete_router_interface import DeleteRouterInterface
##############################################################################
import unittest
import mock
-import paramiko
from yardstick.benchmark.scenarios.lib.delete_volume import DeleteVolume
##############################################################################
import unittest
import mock
-import paramiko
from yardstick.benchmark.scenarios.lib.detach_volume import DetachVolume
from yardstick.benchmark.scenarios.lib.get_numa_info import GetNumaInfo
+
+# pylint: disable=unused-argument
+# disable this for now because I keep forgetting mock patch arg ordering
+
+
BASE = 'yardstick.benchmark.scenarios.lib.get_numa_info'
-#!/usr/bin/env python
-
##############################################################################
# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
#
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# Unittest for yardstick.benchmark.scenarios.networking.nstat.Nstat
-
-from __future__ import absolute_import
-
-import unittest
-
import mock
+import unittest
from yardstick.benchmark.scenarios.networking import nstat
-#!/usr/bin/env python
-
##############################################################################
# Copyright (c) 2015 Ericsson AB and others.
#
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# Unittest for yardstick.benchmark.scenarios.networking.pktgen.Pktgen
-
-from __future__ import absolute_import
-
+import mock
import unittest
-import mock
from oslo_serialization import jsonutils
from yardstick.benchmark.scenarios.networking import pktgen
p.server = mock_ssh.SSH.from_node()
p.client = mock_ssh.SSH.from_node()
- mock_iptables_result = mock.Mock()
- mock_iptables_result.return_value = 149300
- p._iptables_get_result = mock_iptables_result
+ p._iptables_get_result = mock.Mock(return_value=149300)
sample_output = '{"packets_per_second": 9753, "errors": 0, \
"packets_sent": 149776, "packetsize": 60, "flows": 110, "ppm": 3179}'
p.server = mock_ssh.SSH.from_node()
p.client = mock_ssh.SSH.from_node()
- mock_iptables_result = mock.Mock()
- mock_iptables_result.return_value = 149300
- p._iptables_get_result = mock_iptables_result
+ p._iptables_get_result = mock.Mock(return_value=149300)
sample_output = '{"packets_per_second": 9753, "errors": 0, \
"packets_sent": 149776, "packetsize": 60, "flows": 110, "ppm": 3179}'
p.server = mock_ssh.SSH.from_node()
p.client = mock_ssh.SSH.from_node()
- mock_iptables_result = mock.Mock()
- mock_iptables_result.return_value = 149300
- p._iptables_get_result = mock_iptables_result
+ p._iptables_get_result = mock.Mock(return_value=149300)
sample_output = '{"packets_per_second": 9753, "errors": 0, \
"packets_sent": 149776, "packetsize": 60, "flows": 110}'
mock_ssh.SSH.from_node().execute.return_value = (0, '4', '')
- mock_result1 = mock.Mock()
- mock_result1.return_value = 1
- p._get_usable_queue_number = mock_result1
-
- mock_result2 = mock.Mock()
- mock_result2.return_value = 4
- p._get_available_queue_number = mock_result2
+ p._get_usable_queue_number = mock.Mock(return_value=1)
+ p._get_available_queue_number = mock.Mock(return_value=4)
p.queue_number = p._enable_ovs_multiqueue()
self.assertEqual(p.queue_number, 4)
mock_ssh.SSH.from_node().execute.return_value = (0, '1', '')
- mock_result1 = mock.Mock()
- mock_result1.return_value = 1
- p._get_usable_queue_number = mock_result1
-
- mock_result2 = mock.Mock()
- mock_result2.return_value = 1
- p._get_available_queue_number = mock_result2
+ p._get_usable_queue_number = mock.Mock(return_value=1)
+ p._get_available_queue_number = mock.Mock(return_value=1)
p.queue_number = p._enable_ovs_multiqueue()
self.assertEqual(p.queue_number, 1)
mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
- mock_result1 = mock.Mock()
- mock_result1.return_value = 1
- p._get_usable_queue_number = mock_result1
-
- mock_result2 = mock.Mock()
- mock_result2.return_value = 4
- p._get_available_queue_number = mock_result2
+ p._get_usable_queue_number = mock.Mock(return_value=1)
+ p._get_available_queue_number = mock.Mock(return_value=4)
self.assertRaises(RuntimeError, p._enable_ovs_multiqueue)
mock_ssh.SSH.from_node().execute.return_value = (0, '4', '')
- mock_result1 = mock.Mock()
- mock_result1.return_value = False
- p._is_irqbalance_disabled = mock_result1
-
- mock_result2 = mock.Mock()
- mock_result2.return_value = "virtio_net"
- p._get_vnic_driver_name = mock_result2
-
- mock_result3 = mock.Mock()
- mock_result3.return_value = 1
- p._get_usable_queue_number = mock_result3
-
- mock_result4 = mock.Mock()
- mock_result4.return_value = 4
- p._get_available_queue_number = mock_result4
+ p._is_irqbalance_disabled = mock.Mock(return_value=False)
+ p._get_vnic_driver_name = mock.Mock(return_value="virtio_net")
+ p._get_usable_queue_number = mock.Mock(return_value=1)
+ p._get_available_queue_number = mock.Mock(return_value=4)
p.multiqueue_setup()
mock_ssh.SSH.from_node().execute.return_value = (0, '1', '')
- mock_result1 = mock.Mock()
- mock_result1.return_value = False
- p._is_irqbalance_disabled = mock_result1
-
- mock_result2 = mock.Mock()
- mock_result2.return_value = "virtio_net"
- p._get_vnic_driver_name = mock_result2
-
- mock_result3 = mock.Mock()
- mock_result3.return_value = 1
- p._get_usable_queue_number = mock_result3
-
- mock_result4 = mock.Mock()
- mock_result4.return_value = 1
- p._get_available_queue_number = mock_result4
+ p._is_irqbalance_disabled = mock.Mock(return_value=False)
+ p._get_vnic_driver_name = mock.Mock(return_value="virtio_net")
+ p._get_usable_queue_number = mock.Mock(return_value=1)
+ p._get_available_queue_number = mock.Mock(return_value=1)
p.multiqueue_setup()
mock_ssh.SSH.from_node().execute.return_value = (0, '2', '')
- mock_result1 = mock.Mock()
- mock_result1.return_value = False
- p._is_irqbalance_disabled = mock_result1
-
- mock_result2 = mock.Mock()
- mock_result2.return_value = "ixgbevf"
- p._get_vnic_driver_name = mock_result2
+ p._is_irqbalance_disabled = mock.Mock(return_value=False)
+ p._get_vnic_driver_name = mock.Mock(return_value="ixgbevf")
p.multiqueue_setup()
mock_ssh.SSH.from_node().execute.return_value = (0, '1', '')
- mock_result1 = mock.Mock()
- mock_result1.return_value = False
- p._is_irqbalance_disabled = mock_result1
-
- mock_result2 = mock.Mock()
- mock_result2.return_value = "ixgbevf"
- p._get_vnic_driver_name = mock_result2
+ p._is_irqbalance_disabled = mock.Mock(return_value=False)
+ p._get_vnic_driver_name = mock.Mock(return_value="ixgbevf")
p.multiqueue_setup()
p.server = mock_ssh.SSH.from_node()
p.client = mock_ssh.SSH.from_node()
- mock_result = mock.Mock()
- mock_result.return_value = "virtio_net"
- p._get_vnic_driver_name = mock_result
-
- mock_result1 = mock.Mock()
- mock_result1.return_value = 1
- p._get_usable_queue_number = mock_result1
-
- mock_result2 = mock.Mock()
- mock_result2.return_value = 4
- p._get_available_queue_number = mock_result2
-
- mock_result3 = mock.Mock()
- mock_result3.return_value = 4
- p._enable_ovs_multiqueue = mock_result3
-
- mock_result4 = mock.Mock()
- p._setup_irqmapping_ovs = mock_result4
-
- mock_iptables_result = mock.Mock()
- mock_iptables_result.return_value = 149300
- p._iptables_get_result = mock_iptables_result
+ p._get_vnic_driver_name = mock.Mock(return_value="virtio_net")
+ p._get_usable_queue_number = mock.Mock(return_value=1)
+ p._get_available_queue_number = mock.Mock(return_value=4)
+ p._enable_ovs_multiqueue = mock.Mock(return_value=4)
+ p._setup_irqmapping_ovs = mock.Mock()
+ p._iptables_get_result = mock.Mock(return_value=149300)
sample_output = '{"packets_per_second": 9753, "errors": 0, \
"packets_sent": 149300, "flows": 110, "ppm": 0}'
p.server = mock_ssh.SSH.from_node()
p.client = mock_ssh.SSH.from_node()
- mock_result1 = mock.Mock()
- mock_result1.return_value = "ixgbevf"
- p._get_vnic_driver_name = mock_result1
-
- mock_result2 = mock.Mock()
- mock_result2.return_value = 2
- p._get_sriov_queue_number = mock_result2
-
- mock_result3 = mock.Mock()
- p._setup_irqmapping_sriov = mock_result3
-
- mock_iptables_result = mock.Mock()
- mock_iptables_result.return_value = 149300
- p._iptables_get_result = mock_iptables_result
+ p._get_vnic_driver_name = mock.Mock(return_value="ixgbevf")
+ p._get_sriov_queue_number = mock.Mock(return_value=2)
+ p._setup_irqmapping_sriov = mock.Mock()
+ p._iptables_get_result = mock.Mock(return_value=149300)
sample_output = '{"packets_per_second": 9753, "errors": 0, \
"packets_sent": 149300, "flows": 110, "ppm": 0}'
-#!/usr/bin/env python
-
##############################################################################
# Copyright (c) 2015 ZTE and others.
#
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# Unittest for yardstick.benchmark.scenarios.networking.pktgen.Pktgen
-
-from __future__ import absolute_import
-import unittest
-
import mock
+import unittest
import yardstick.common.utils as utils
from yardstick.benchmark.scenarios.networking import pktgen_dpdk
+#!/usr/bin/env python
##############################################################################
# Copyright (c) 2017 Nokia and others.
#
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-#!/usr/bin/env python
# Unittest for yardstick.benchmark.scenarios.networking.pktgen.PktgenDPDK
from yardstick.benchmark.scenarios.networking import pktgen_dpdk_throughput
+# pylint: disable=unused-argument
+# Disabled because @mock.patch decorators inject mock arguments bottom-up,
+# so some injected test-method parameters are intentionally unused.
+
+
@mock.patch('yardstick.benchmark.scenarios.networking.pktgen_dpdk_throughput.ssh')
class PktgenDPDKTestCase(unittest.TestCase):
from copy import deepcopy
-from tests.unit import STL_MOCKS
+from yardstick.tests.unit import STL_MOCKS
from yardstick.benchmark.scenarios.networking.vnf_generic import \
SshManager, NetworkServiceTestCase, IncorrectConfig, \
open_relative_file
GenericTrafficGen, GenericVNF
+# pylint: disable=unused-argument
+# Disabled because @mock.patch decorators inject mock arguments bottom-up,
+# so some injected test-method parameters are intentionally unused.
+
+
COMPLETE_TREX_VNFD = {
'vnfd:vnfd-catalog': {
'vnfd': [
ssh_mock.execute = \
mock.Mock(return_value=(0, SYS_CLASS_NET + IP_ADDR_SHOW, ""))
ssh.from_node.return_value = ssh_mock
- for node, node_dict in self.context_cfg["nodes"].items():
+ for _, node_dict in self.context_cfg["nodes"].items():
with SshManager(node_dict) as conn:
self.assertIsNotNone(conn)
self.scenario_cfg["traffic_options"]["flow"] = \
self._get_file_abspath("ipv4_1flow_Packets_vpe.yaml")
result = '152.16.100.2-152.16.100.254'
- self.assertEqual(result, self.s._get_ip_flow_range('152.16.100.2-152.16.100.254'))
+ self.assertEqual(result, self.s._get_ip_flow_range(
+ '152.16.100.2-152.16.100.254'))
def test__get_ip_flow_range(self):
self.scenario_cfg["traffic_options"]["flow"] = \
self.scenario_cfg["options"] = {}
self.scenario_cfg['options'] = {
'flow': {
- 'src_ip': [
- {
- 'tg__1': 'xe0',
- },
- ],
- 'dst_ip': [
- {
- 'tg__1': 'xe1',
- },
- ],
- 'public_ip': ['1.1.1.1'],
+ 'src_ip': [
+ {
+ 'tg__1': 'xe0',
+ },
+ ],
+ 'dst_ip': [
+ {
+ 'tg__1': 'xe1',
+ },
+ ],
+ 'public_ip': ['1.1.1.1'],
},
}
- result = {'flow': {'dst_ip0': '152.16.40.2-152.16.40.254',
- 'src_ip0': '152.16.100.2-152.16.100.254'}}
self.assertEqual({'flow': {}}, self.s._get_traffic_flow())
self.s.map_topology_to_infrastructure()
nodes = self.context_cfg["nodes"]
- self.assertEqual("../../vnf_descriptors/tg_rfc2544_tpl.yaml", nodes['tg__1']['VNF model'])
- self.assertEqual("../../vnf_descriptors/vpe_vnf.yaml", nodes['vnf__1']['VNF model'])
+ self.assertEqual(
+ "../../vnf_descriptors/tg_rfc2544_tpl.yaml", nodes['tg__1']['VNF model'])
+ self.assertEqual("../../vnf_descriptors/vpe_vnf.yaml",
+ nodes['vnf__1']['VNF model'])
def test_map_topology_to_infrastructure_insufficient_nodes(self):
del self.context_cfg['nodes']['vnf__1']
del interface['local_mac']
with mock.patch(
- "yardstick.benchmark.scenarios.networking.vnf_generic.LOG") as mock_log:
+ "yardstick.benchmark.scenarios.networking.vnf_generic.LOG"):
with self.assertRaises(IncorrectConfig) as raised:
self.s._resolve_topology()
self.s.topology["vld"][0]['vnfd-connection-point-ref'][0])
with mock.patch(
- "yardstick.benchmark.scenarios.networking.vnf_generic.LOG") as mock_log:
+ "yardstick.benchmark.scenarios.networking.vnf_generic.LOG"):
with self.assertRaises(IncorrectConfig) as raised:
self.s._resolve_topology()
self.s.topology["vld"][0]['vnfd-connection-point-ref'][:1]
with mock.patch(
- "yardstick.benchmark.scenarios.networking.vnf_generic.LOG") as mock_log:
+ "yardstick.benchmark.scenarios.networking.vnf_generic.LOG"):
with self.assertRaises(IncorrectConfig) as raised:
self.s._resolve_topology()
tgen.name = "tgen__1"
vnf = mock.Mock(autospec=GenericVNF)
vnf.runs_traffic = False
- vnf.instantiate.side_effect = RuntimeError("error during instantiate")
+ vnf.instantiate.side_effect = RuntimeError(
+ "error during instantiate")
vnf.terminate = mock.Mock(return_value=True)
self.s.vnfs = [tgen, vnf]
self.s.traffic_profile = mock.Mock()
def test___get_traffic_imix_exception(self):
with mock.patch.dict(self.scenario_cfg["traffic_options"], {'imix': ''}):
- self.assertEqual({'imix': {'64B': 100}}, self.s._get_traffic_imix())
+ self.assertEqual({'imix': {'64B': 100}},
+ self.s._get_traffic_imix())
def test__fill_traffic_profile(self):
with mock.patch.dict("sys.modules", STL_MOCKS):
def test_teardown_exception(self):
vnf = mock.Mock(autospec=GenericVNF)
- vnf.terminate = mock.Mock(side_effect=RuntimeError("error duing terminate"))
+ vnf.terminate = mock.Mock(
+ side_effect=RuntimeError("error duing terminate"))
vnf.name = str(vnf)
self.s.vnfs = [vnf]
self.s.traffic_profile = mock.Mock()
NetworkServiceTestCase._probe_missing_values(netdevs, network)
assert network['vpci'] == '0000:00:19.0'
+ # TODO: Split this into several tests, for different IOError sub-types
def test_open_relative_path(self):
mock_open = mock.mock_open()
mock_open_result = mock_open()
# test
with mock.patch(module_name, mock_open, create=True):
- self.assertEqual(open_relative_file('foo', 'bar'), mock_open_result)
+ self.assertEqual(open_relative_file(
+ 'foo', 'bar'), mock_open_result)
mock_open_call_count += 1 # one more call expected
self.assertEqual(mock_open.call_count, mock_open_call_count)
raise IOError(errno.ENOENT, 'not found')
mock_open.side_effect = open_effect
- self.assertEqual(open_relative_file('foo', 'bar'), mock_open_result)
+ self.assertEqual(open_relative_file(
+ 'foo', 'bar'), mock_open_result)
mock_open_call_count += 2 # two more calls expected
self.assertEqual(mock_open.call_count, mock_open_call_count)
--- /dev/null
+# Copyright 2017 Nokia
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import subprocess
+import time
+
+import mock
+import unittest
+
+from yardstick.benchmark.scenarios.networking import vsperf_dpdk
+
+
class VsperfDPDKTestCase(unittest.TestCase):
    """Unit tests for the vsperf_dpdk.VsperfDPDK networking scenario.

    setUp() patches the scenario module's ``ssh`` helper and
    ``subprocess.call`` so no real SSH sessions or shell commands are
    executed by any test.
    """

    def setUp(self):
        # Scenario context: a single SSH-reachable host.
        self.ctx = {
            "host": {
                "ip": "10.229.47.137",
                "user": "ubuntu",
                "password": "ubuntu",
            },
        }
        # Scenario arguments: vsperf test selection, traffic-generator
        # ports/MACs, and an SLA on received throughput (frames/s).
        self.args = {
            'task_id': "1234-5678",
            'options': {
                'testname': 'pvp_tput',
                'traffic_type': 'rfc2544_throughput',
                'frame_size': '64',
                'test_params': 'TRAFFICGEN_DURATION=30;',
                'trafficgen_port1': 'ens4',
                'trafficgen_port2': 'ens5',
                'conf_file': 'vsperf-yardstick.conf',
                'setup_script': 'setup_yardstick.sh',
                'moongen_helper_file': '~/moongen.py',
                'moongen_host_ip': '10.5.201.151',
                'moongen_port1_mac': '8c:dc:d4:ae:7c:5c',
                'moongen_port2_mac': '8c:dc:d4:ae:7c:5d',
                'trafficgen_port1_nw': 'test2',
                'trafficgen_port2_nw': 'test3',
            },
            'sla': {
                'metrics': 'throughput_rx_fps',
                'throughput_rx_fps': 500000,
                'action': 'monitor',
            }
        }

        self.scenario = vsperf_dpdk.VsperfDPDK(self.args, self.ctx)

        # Patch out SSH and subprocess.call for the whole test; the
        # patches are stopped via addCleanup even if a test fails.
        self._mock_ssh = mock.patch(
            'yardstick.benchmark.scenarios.networking.vsperf_dpdk.ssh')
        self.mock_ssh = self._mock_ssh.start()
        self._mock_subprocess_call = mock.patch.object(subprocess, 'call')
        self.mock_subprocess_call = self._mock_subprocess_call.start()

        self.addCleanup(self._cleanup)

    def _cleanup(self):
        # Stop the patches started in setUp().
        self._mock_ssh.stop()
        self._mock_subprocess_call.stop()

    def test_setup(self):
        # setup() specific mocks
        # NOTE(review): subprocess.call returns an int and has no
        # ``execute`` attribute; this line only primes the Mock call
        # chain — confirm the intended return value is being mocked.
        self.mock_subprocess_call().execute.return_value = None

        self.scenario.setup()
        self.assertIsNotNone(self.scenario.client)
        self.assertTrue(self.scenario.setup_done)

    def test_teardown(self):
        # setup() specific mocks
        self.mock_subprocess_call().execute.return_value = None

        self.scenario.setup()
        self.assertIsNotNone(self.scenario.client)
        self.assertTrue(self.scenario.setup_done)

        # teardown() must reset the setup flag.
        self.scenario.teardown()
        self.assertFalse(self.scenario.setup_done)

    def test_is_dpdk_setup_no(self):
        # setup() specific mocks
        self.mock_subprocess_call().execute.return_value = None

        self.scenario.setup()
        self.assertIsNotNone(self.scenario.client)
        self.assertTrue(self.scenario.setup_done)

        # is_dpdk_setup() specific mocks: non-empty stdout from the
        # probe command is treated as "DPDK not yet set up".
        self.mock_ssh.SSH.from_node().execute.return_value = (0, 'dummy', '')

        result = self.scenario._is_dpdk_setup()
        self.assertFalse(result)

    def test_is_dpdk_setup_yes(self):
        # setup() specific mocks
        self.mock_subprocess_call().execute.return_value = None

        self.scenario.setup()
        self.assertIsNotNone(self.scenario.client)
        self.assertTrue(self.scenario.setup_done)

        # is_dpdk_setup() specific mocks: empty stdout is treated as
        # "DPDK already set up".
        self.mock_ssh.SSH.from_node().execute.return_value = (0, '', '')

        result = self.scenario._is_dpdk_setup()
        self.assertTrue(result)

    @mock.patch.object(time, 'sleep')
    def test_dpdk_setup_first(self, *args):
        # setup() specific mocks
        self.mock_subprocess_call().execute.return_value = None

        self.scenario.setup()
        self.assertIsNotNone(self.scenario.client)
        self.assertTrue(self.scenario.setup_done)

        # is_dpdk_setup() specific mocks
        self.mock_ssh.SSH.from_node().execute.return_value = (0, 'dummy', '')

        self.scenario.dpdk_setup()
        self.assertFalse(self.scenario._is_dpdk_setup())
        self.assertTrue(self.scenario.dpdk_setup_done)

    @mock.patch.object(time, 'sleep')
    def test_dpdk_setup_next(self, *args):
        # setup() specific mocks
        self.mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
        self.mock_subprocess_call().execute.return_value = None

        self.scenario.setup()
        self.assertIsNotNone(self.scenario.client)
        self.assertTrue(self.scenario.setup_done)

        self.scenario.dpdk_setup()
        self.assertTrue(self.scenario._is_dpdk_setup())
        self.assertTrue(self.scenario.dpdk_setup_done)

    @mock.patch.object(time, 'sleep')
    def test_dpdk_setup_runtime_error(self, *args):

        # setup specific mocks
        self.mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
        self.mock_subprocess_call().execute.return_value = None

        self.scenario.setup()
        self.assertIsNotNone(self.scenario.client)
        # Subsequent SSH commands fail (non-zero status) -> dpdk_setup()
        # is expected to raise RuntimeError.
        self.mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
        self.assertTrue(self.scenario.setup_done)

        self.assertRaises(RuntimeError, self.scenario.dpdk_setup)

    @mock.patch.object(subprocess, 'check_output')
    @mock.patch('time.sleep')
    def test_run_ok(self, *args):
        # setup() specific mocks
        self.mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
        self.mock_subprocess_call().execute.return_value = None

        self.scenario.setup()
        self.assertIsNotNone(self.scenario.client)
        self.assertTrue(self.scenario.setup_done)

        # run() specific mocks: stdout carries a CSV header line and a
        # single result row, which run() must surface in ``result``.
        self.mock_subprocess_call().execute.return_value = None
        self.mock_ssh.SSH.from_node().execute.return_value = (
            0, 'throughput_rx_fps\r\n14797660.000\r\n', '')

        result = {}
        self.scenario.run(result)

        self.assertEqual(result['throughput_rx_fps'], '14797660.000')

    def test_run_failed_vsperf_execution(self):
        # setup() specific mocks
        self.mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
        self.mock_subprocess_call().execute.return_value = None

        self.scenario.setup()
        self.assertIsNotNone(self.scenario.client)
        self.assertTrue(self.scenario.setup_done)

        # vsperf exits non-zero -> run() must raise RuntimeError.
        self.mock_ssh.SSH.from_node().execute.return_value = (1, '', '')

        result = {}
        self.assertRaises(RuntimeError, self.scenario.run, result)

    # NOTE(review): method name typo — "falied" should read "failed";
    # renaming is out of scope for a documentation-only pass.
    def test_run_falied_csv_report(self):
        # setup() specific mocks
        self.mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
        self.mock_subprocess_call().execute.return_value = None

        self.scenario.setup()
        self.assertIsNotNone(self.scenario.client)
        self.assertTrue(self.scenario.setup_done)

        # run() specific mocks: CSV report retrieval fails -> run()
        # must raise RuntimeError.
        self.mock_subprocess_call().execute.return_value = None
        self.mock_ssh.SSH.from_node().execute.return_value = (1, '', '')

        result = {}
        self.assertRaises(RuntimeError, self.scenario.run, result)
+
+
def main():
    # Convenience entry point: discover and run this module's tests.
    unittest.main()


if __name__ == '__main__':
    main()
import mock
-from yardstick.common import utils
from yardstick.benchmark.scenarios.storage import bonnie
mock_ssh.SSH.from_node().execute.return_value = (1, '', 'FOOBAR')
self.assertRaises(RuntimeError, b.run, self.result)
+
def main():
unittest.main()
+
if __name__ == '__main__':
main()
from yardstick.benchmark.scenarios.storage import storperf
+# pylint: disable=unused-argument
+# Disabled because @mock.patch decorators inject mock arguments bottom-up,
+# so some injected test-method parameters are intentionally unused.
+
+
def mocked_requests_config_post(*args, **kwargs):
- class MockResponseConfigPost:
+ class MockResponseConfigPost(object):
def __init__(self, json_data, status_code):
self.content = json_data
def mocked_requests_config_get(*args, **kwargs):
- class MockResponseConfigGet:
+ class MockResponseConfigGet(object):
def __init__(self, json_data, status_code):
self.content = json_data
def mocked_requests_job_get(*args, **kwargs):
- class MockResponseJobGet:
+ class MockResponseJobGet(object):
def __init__(self, json_data, status_code):
self.content = json_data
def mocked_requests_job_post(*args, **kwargs):
- class MockResponseJobPost:
+ class MockResponseJobPost(object):
def __init__(self, json_data, status_code):
self.content = json_data
def mocked_requests_job_delete(*args, **kwargs):
- class MockResponseJobDelete:
+ class MockResponseJobDelete(object):
def __init__(self, json_data, status_code):
self.content = json_data
def mocked_requests_delete(*args, **kwargs):
- class MockResponseDelete:
+ class MockResponseDelete(object):
def __init__(self, json_data, status_code):
self.json_data = json_data
def mocked_requests_delete_failed(*args, **kwargs):
- class MockResponseDeleteFailed:
+ class MockResponseDeleteFailed(object):
def __init__(self, json_data, status_code):
self.json_data = json_data
-#!/usr/bin/env python
-
##############################################################################
# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
#
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# Unittest for yardstick.common.openstack_utils
-
-from __future__ import absolute_import
+from oslo_utils import uuidutils
import unittest
import mock
self.assertEqual(api_version, expected_result)
-def main():
- unittest.main()
+class GetNetworkIdTestCase(unittest.TestCase):
+
+ def test_get_network_id(self):
+ _uuid = uuidutils.generate_uuid()
+ mock_shade_client = mock.Mock()
+ mock_shade_client.list_networks = mock.Mock()
+ mock_shade_client.list_networks.return_value = [{'id': _uuid}]
+
+ output = openstack_utils.get_network_id(mock_shade_client,
+ 'network_name')
+ self.assertEqual(_uuid, output)
+ def test_get_network_id_no_network(self):
+ mock_shade_client = mock.Mock()
+ mock_shade_client.list_networks = mock.Mock()
+ mock_shade_client.list_networks.return_value = None
-if __name__ == '__main__':
- main()
+ output = openstack_utils.get_network_id(mock_shade_client,
+ 'network_name')
+ self.assertEqual(None, output)
with mock.patch.object(tempfile._TemporaryFileWrapper, '__enter__',
return_value=mock_tfile):
self.heatstack.create(template, heat_parameters, True, 100)
- mock_tfile.write.assert_called_once_with(jsonutils.dumps(template))
+ mock_tfile.write.assert_called_once_with(jsonutils.dump_as_bytes(template))
mock_tfile.close.assert_called_once()
self.mock_stack_create.assert_called_once_with(