'agent_flavor': fields.String,
'agent_image': fields.String,
'public_network': fields.String,
+ 'volume_count': fields.Integer,
'volume_size': fields.Integer,
'availability_zone': fields.String,
'username': fields.String,
'public_network': fields.String,
'stack_created': fields.Boolean,
'stack_id': fields.String,
+ 'volume_count': fields.Integer,
'volume_size': fields.Integer,
'availability_zone': fields.String
}
'agent_flavor': storperf.agent_flavor,
'agent_image': storperf.agent_image,
'public_network': storperf.public_network,
+ 'volume_count': storperf.volume_count,
'volume_size': storperf.volume_size,
'stack_created': storperf.is_stack_created,
'availability_zone': storperf.availability_zone,
storperf.agent_image = request.json['agent_image']
if ('public_network' in request.json):
storperf.public_network = request.json['public_network']
+ if ('volume_count' in request.json):
+ storperf.volume_count = request.json['volume_count']
if ('volume_size' in request.json):
storperf.volume_size = request.json['volume_size']
if ('availability_zone' in request.json):
'agent_image': storperf.agent_image,
'availability_zone': storperf.availability_zone,
'public_network': storperf.public_network,
+ 'volume_count': storperf.volume_count,
'volume_size': storperf.volume_size,
'stack_id': storperf.stack_id})
constraints:
- range: { min: 1, max: 1024 }
description: must be between 1 and 1024 GB.
+ volume_count:
+ type: number
+ default: 0
+ constraints:
+ - range: { min: 0, max: 512 }
+ description: must be between 0 and 512 volumes.
agent_count:
type: number
default: 1
availability_zone: {get_param: availability_zone},
storperf_open_security_group: {get_resource: storperf_open_security_group},
key_name: {get_resource: storperf_key_pair},
+ volume_count: {get_param: volume_count},
volume_size: {get_param: volume_size}
}
}
default: storperf
storperf_open_security_group:
type: string
+ volume_count:
+ type: number
+ description: Number of volumes to be created
+ default: 1
+ constraints:
+ - range: { min: 0, max: 1024 }
+ description: must be between 0 and 1024.
volume_size:
type: number
description: Size of the volume to be created.
port_id: { get_resource: storperf_agent_port }
agent_volume:
- type: OS::Cinder::Volume
- properties:
- size: { get_param: volume_size }
-
- agent_volume_att:
- type: OS::Cinder::VolumeAttachment
+ type: OS::Heat::ResourceGroup
properties:
- instance_uuid: { get_resource: storperf_agent }
- volume_id: { get_resource: agent_volume}
+ count: { get_param: volume_count }
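+ # Each group member is a nested storperf-volume.yaml stack holding one volume and its attachment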
+ resource_def: {
+ type: "storperf-volume.yaml",
+ properties: {
+ volume_size: { get_param: volume_size },
+ agent_instance_uuid: { get_resource: storperf_agent }
+ }
+ }
outputs:
storperf_agent_ip:
--- /dev/null
+##############################################################################
+# Copyright (c) 2018 Dell EMC and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+heat_template_version: 2013-05-23
+
+parameters:
+ volume_size:
+ type: number
+ description: Size of the volume to be created.
+ default: 1
+ constraints:
+ - range: { min: 1, max: 1024 }
+ description: must be between 1 and 1024 GB.
+ agent_instance_uuid:
+ type: string
+
+resources:
+ agent_volume:
+ type: OS::Cinder::Volume
+ properties:
+ size: { get_param: volume_size }
+
+ agent_volume_att:
+ type: OS::Cinder::VolumeAttachment
+ properties:
+ instance_uuid: { get_param: agent_instance_uuid }
+ volume_id: { get_resource: agent_volume }
self._agent_flavor = "storperf"
self._availability_zone = None
self._public_network = None
+ self._volume_count = 1
self._volume_size = 1
self._cached_stack_id = None
self._last_snaps_check_time = None
+ @property
+ def volume_count(self):
+ self._get_stack_info()
+ return self._volume_count
+
+ @volume_count.setter
+ def volume_count(self, value):
+ if (self.stack_id is not None):
+ raise ParameterError(
+ "ERROR: Cannot change volume count after stack is created")
+ self._volume_count = value
+
@property
def volume_size(self):
self._get_stack_info()
def _get_stack_info(self):
if self._last_snaps_check_time is not None:
time_since_check = datetime.now() - self._last_snaps_check_time
- if time_since_check.total_seconds() < 30:
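+ # Stack details are cached for up to 60 seconds to avoid repeated Heat lookups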
+ if time_since_check.total_seconds() < 60:
return self._cached_stack_id
self.heat_stack.initialize()
if self.heat_stack.get_stack() is not None:
self._last_snaps_check_time = datetime.now()
- if self._cached_stack_id == self.heat_stack.get_stack().id:
- return self._cached_stack_id
self._cached_stack_id = self.heat_stack.get_stack().id
cinder_cli = cinder_utils.cinder_client(self.os_creds)
glance_cli = glance_utils.glance_client(self.os_creds)
image = glance_utils.get_image_by_id(glance_cli, image_id)
self._agent_image = image.name
- volume_id = server.volume_ids[0]['id']
- volume = cinder_utils.get_volume_by_id(
- cinder_cli, volume_id)
- self._volume_size = volume.size
+ self._volume_count = len(server.volume_ids)
+ if self._volume_count > 0:
+ volume_id = server.volume_ids[0]['id']
+ volume = cinder_utils.get_volume_by_id(
+ cinder_cli, volume_id)
+ self.logger.debug("Volume id %s, size=%s" % (volume.id,
+ volume.size))
+ self._volume_size = volume.size
router_creators = self.heat_stack.get_router_creators()
router1 = router_creators[0]
def create_stack(self):
self.stack_settings.resource_files = [
- 'storperf/resources/hot/storperf-agent.yaml']
+ 'storperf/resources/hot/storperf-agent.yaml',
+ 'storperf/resources/hot/storperf-volume.yaml']
self.stack_settings.env_values = self._make_parameters()
try:
self.heat_stack.create()
- except Exception:
+ except Exception as e:
+ self.logger.error("Stack creation failed")
+ self.logger.exception(e)
heat_cli = heat_utils.heat_client(self.os_creds)
res = heat_utils.get_resources(heat_cli,
self.heat_stack.get_stack().id)
- self.logger.error("Stack creation failed")
reason = ""
failed = False
for resource in res:
thread.join()
self._test_executor.slaves = slaves
+ self._test_executor.volume_count = self.volume_count
params = metadata
params['agent_count'] = self.agent_count
params['public_network'] = self.public_network
+ params['volume_count'] = self.volume_count
params['volume_size'] = self.volume_size
if self.username and self.password:
params['username'] = self.username
heat_parameters = {}
heat_parameters['public_network'] = self.public_network
heat_parameters['agent_count'] = self.agent_count
+ heat_parameters['volume_count'] = self.volume_count
heat_parameters['volume_size'] = self.volume_size
heat_parameters['agent_image'] = self.agent_image
heat_parameters['agent_flavor'] = self.agent_flavor
self.job_db = JobDB()
self._slaves = []
self._terminated = False
+ self._volume_count = 1
self._workload_executors = []
self._workload_thread = None
self._thread_gate = None
self.logger.debug("Set slaves to: " + str(slaves))
self._slaves = slaves
+ @property
+ def volume_count(self):
+ return self._volume_count
+
+ @volume_count.setter
+ def volume_count(self, volume_count):
+ self.logger.debug("Set volume count to: " + str(volume_count))
+ self._volume_count = volume_count
+
@property
def queue_depths(self):
return ','.join(self._queue_depths)
slave_threads = []
for slave in self.slaves:
- slave_workload = copy.copy(current_workload['workload'])
- slave_workload.remote_host = slave
-
- self._workload_executors.append(slave_workload)
-
- t = Thread(target=self.execute_on_node,
- args=(slave_workload,),
- name="%s worker" % slave)
- t.daemon = False
- t.start()
- slave_threads.append(t)
+ volume_number = 0
+ while volume_number < self.volume_count:
+ slave_workload = copy.copy(current_workload['workload'])
+ slave_workload.remote_host = slave
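+ # Shift the device name's final letter by the volume index, e.g. /dev/vdb -> /dev/vdc for the second volume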
+ last_char_of_filename = chr(ord(
+ slave_workload.filename[-1:]) + volume_number)
+ slave_workload.filename = "%s%s" % \
+ (slave_workload.filename[:-1], last_char_of_filename)
+ self.logger.debug("Device to profile: %s" %
+ slave_workload.filename)
+ self._workload_executors.append(slave_workload)
+ t = Thread(target=self.execute_on_node,
+ args=(slave_workload,),
+ name="%s worker" % slave)
+ t.daemon = False
+ t.start()
+ slave_threads.append(t)
+ volume_number += 1
for slave_thread in slave_threads:
self.logger.debug("Waiting on %s" % slave_thread)
workload.filename = self.filename
workload.id = self.job_db.job_id
- if (self.filename is not None):
- workload.filename = self.filename
-
if (workload_name.startswith("_")):
iodepths = [8, ]
blocksizes = [16384, ]
@property
def fullname(self):
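+ # Combine host and device path (e.g. "192-168-0-1--dev-vdb") so each volume reports its own metrics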
+ host_file = str(self.remote_host) + "." + str(self.filename)
+ host_file = host_file.replace(".", "-").replace("/", "-")
return ("%s.%s.queue-depth.%s.block-size.%s.%s"
% (str(self.id),
self.__class__.__name__,
str(self.options['iodepth']),
str(self.options['bs']),
- str(self.remote_host).replace(".", "-")))
+ host_file))
def test_local_name(self):
workload = rr()
self.assertEqual(workload.fullname,
- "None.rr.queue-depth.1.block-size.64k.None",
+ "None.rr.queue-depth.1.block-size.64k.None--dev-vdb",
workload.fullname)
def test_remote_name(self):
workload = rw()
workload.remote_host = "192.168.0.1"
- self.assertEqual(workload.fullname,
- "None.rw.queue-depth.1.block-size.64k.192-168-0-1",
- workload.fullname)
+ self.assertEqual(
+ workload.fullname,
+ "None.rw.queue-depth.1.block-size.64k.192-168-0-1--dev-vdb",
+ workload.fullname)
def test_blocksize(self):
workload = rs()
workload.options["bs"] = "4k"
self.assertEqual(workload.fullname,
- "None.rs.queue-depth.1.block-size.4k.None",
+ "None.rs.queue-depth.1.block-size.4k.None--dev-vdb",
workload.fullname)
def test_queue_depth(self):
workload = wr()
workload.options["iodepth"] = "8"
self.assertEqual(workload.fullname,
- "None.wr.queue-depth.8.block-size.64k.None",
+ "None.wr.queue-depth.8.block-size.64k.None--dev-vdb",
workload.fullname)
def test_id(self):
workload = ws()
workload.id = "workloadid"
- self.assertEqual(workload.fullname,
- "workloadid.ws.queue-depth.1.block-size.64k.None",
- workload.fullname)
+ self.assertEqual(
+ workload.fullname,
+ "workloadid.ws.queue-depth.1.block-size.64k.None--dev-vdb",
+ workload.fullname)
.. http://creativecommons.org/licenses/by/4.0
-This document provides the release notes for Euphrates 1.0 of StorPerf.
+This document provides the release notes for Fraser 2.0 of StorPerf.
.. contents::
:depth: 3
| 2018-04-18 | Fraser 1.0 | Mark Beierl | |
| | | | |
+--------------------+--------------------+--------------------+--------------------+
+| 2018-05-18 | Fraser 2.0 | Mark Beierl | |
+| | | | |
++--------------------+--------------------+--------------------+--------------------+
Important notes
----------------
-This is the release where StorPerf is not delivered as a single container but
-is delivered as a series of networked containers. StorPerf must be run using
-docker-compose.
+StorPerf has added the ability to specify the number of Cinder volumes to
+test per agent VM. The name of the device that each volume is attached to
+is appended to the host IP address in the metrics so that each volume can
+be tracked independently.
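+
+For example (taken from the updated unit tests in this change), a metric that
+was previously reported as ``...192-168-0-1`` now carries the device suffix:
+
+.. code-block:: console
+
+   None.rw.queue-depth.1.block-size.64k.192-168-0-1--dev-vdb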
+
Summary
--------
performance. If desired, it can push results to the OPNFV Test Results DB, or
the embedded Graphite web interface can be used to perform ad hoc queries.
-This release changes to docker-compose framework and adds the StorPerf
-reporting module. It also marks a change from microsecond (:math:`\mu`\s) to
-nano-second (ns) precision for all reported latencies. This is denoted by a change
-from lat.mean to lat_ns.mean for read and write metrics.
+This release allows stack attributes to be changed from the OpenStack CLI.
+Using a command such as
+
+.. code-block:: console
+
+   heat stack-update StorPerfAgentGroup --existing -P "agent_count=6"
+
+will change the existing stack to use 6 agents. Note that StorPerf may take
+up to one minute after the stack update completes to detect the new values.
+Please use a GET of the configurations API to confirm the updated values
+prior to submitting a new test.
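+
+For example, the current values can be checked with (host name is
+illustrative):
+
+.. code-block:: console
+
+   curl -X GET http://StorPerf:5000/api/v1.0/configurations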
+
+The following command changes the number of volumes per agent:
+
+.. code-block:: console
+
+   heat stack-update StorPerfAgentGroup --existing -P "volume_count=2"
+
Release Data
-------------
| **Project** | StorPerf |
| | |
+--------------------------------------+--------------------------------------+
-| **Repo/commit-ID** | storperf/fraser.1.0 |
+| **Repo/commit-ID** | storperf/fraser.2.0 |
| | |
+--------------------------------------+--------------------------------------+
-| **Release designation** | Fraser base release |
+| **Release designation** | Fraser base release |
| | |
+--------------------------------------+--------------------------------------+
-| **Release date** | 2017-10-06 |
+| **Release date** | 2018-05-18 |
| | |
+--------------------------------------+--------------------------------------+
-| **Purpose of the delivery** | OPNFV Fraser release 1.0 |
+| **Purpose of the delivery** | OPNFV Fraser release 2.0 |
| | |
+--------------------------------------+--------------------------------------+
Features additions
-------------------
-* STORPERF-231 - Integration with SNAPS
+* STORPERF-242 - Allow user to change stack parameters outside of StorPerf
+
Bug Fixes
----------
---------
- `StorPerf master image <https://hub.docker.com/r/opnfv/storperf-master/>`_
- (tag: x86_64-fraser.1.0 or aarch64-fraser.1.0)
+ (tag: x86_64-fraser.2.0 or aarch64-fraser.2.0)
- `StorPerf swaggerui <https://hub.docker.com/r/opnfv/storperf-swaggerui/>`_
- (tag: x86_64-fraser.1.0 or aarch64-fraser.1.0)
+ (tag: x86_64-fraser.2.0 or aarch64-fraser.2.0)
- `StorPerf graphite image <https://hub.docker.com/r/opnfv/storperf-graphite/>`_
- (tag: x86_64-fraser.1.0 or aarch64-fraser.1.0)
+ (tag: x86_64-fraser.2.0 or aarch64-fraser.2.0)
- `StorPerf reporting image <https://hub.docker.com/r/opnfv/storperf-reporting/>`_
- (tag: x86_64-fraser.1.0 or aarch64-fraser.1.0)
+ (tag: x86_64-fraser.2.0 or aarch64-fraser.2.0)
- `StorPerf Http-Frontend image <https://hub.docker.com/r/opnfv/storperf-httpfrontend/>`_
- (tag: x86_64-fraser.1.0 or aarch64-fraser.1.0)
+ (tag: x86_64-fraser.2.0 or aarch64-fraser.2.0)
Documentation
--------------
* Cirros target VMs do not always mount Cinder volumes on first boot. Sometimes
a reboot of the VM is required to properly attach the Cinder volume to /dev/vdb
+* A bug in the Linux kernel can prevent Cinder volumes from attaching to VMs
+ using ARM architecture. Specifying the following properties in Glance for
+ the ARM based image will work around this problem. Note: this will cause
+ the device to show up as a SCSI device and therefore will be /dev/sdb instead
+ of /dev/vdb.
+
+.. code-block:: console
+
+   --property hw_disk_bus=scsi --property hw_scsi_model=virtio-scsi
Test Result
===========
-- `OPNFV Test Results DB <http://testresults.opnfv.org/reporting/euphrates/storperf/status-apex.html>`_
+- `OPNFV Test Results DB <http://testresults.opnfv.org/reporting/fraser/storperf/status-apex.html>`_
StorPerf is delivered as a series of Docker containers managed by
docker-compose. There are two possible methods for installation:
-#. Run container on bare metal
-#. Run container in a VM
+#. Run the containers on bare metal
+#. Run the containers in a VM
Requirements:
.. code-block:: console
- 968c0c2d7c0e24f6777c33b37d9b4fd885575155069fb760405ec8214b2eb672 docker-compose.yaml
+ 69856e9788bec36308a25303ec9154ed68562e126788a47d54641d68ad22c8b9 docker-compose.yaml
To run, you must specify two environment variables:
.. code-block:: console
- 00649e02237d27bf0b40d1a66160a68a56c9f5e1ceb78d7858e30715cf4350e3 create-compose.py
+ 327cad2a7b3a3ca37910978005c743799313c2b90709e4a3f142286a06e53f57 create-compose.py
Note: The script will run fine on python3. Install the python future package to avoid errors on python2.
Docker Exec
~~~~~~~~~~~
-If needed, the container can be entered with docker exec. This is not normally
+If needed, any StorPerf container can be entered with docker exec. This is not normally
required.
.. code-block:: console
- docker exec -it storperf-master bash
+ docker exec -it storperf-master /bin/bash
-Pulling StorPerf Container
-==========================
+Pulling StorPerf Containers
+===========================
-Master (Euphrates)
-~~~~~~~~~~~~~~~~~~
+The tags for StorPerf can be found here: https://hub.docker.com/r/opnfv/storperf-master/tags/
-StorPerf has switched to docker-compose in the latest version. The tag for
-pulling the latest master Euphrates container is:
+Master (latest)
+~~~~~~~~~~~~~~~
-.. code-block:: bash
+This tag represents StorPerf at its most current state of development. While
+self-tests have been run, there is no guarantee that all features are
+functional, and bugs may be present.
- docker pull opnfv/storperf-master:latest
- docker pull opnfv/storperf-reporting:latest
- docker pull opnfv/storperf-httpfrontend:latest
- docker pull opnfv/storperf-swaggerui:latest
- docker pull opnfv/storperf-graphite:latest
+Documentation for the latest tag can be found under the latest label at:
-However, by itself, this will no longer provide full functionality. Full
-instructions are provided in the Running StorPerf Container section of this
-document.
+http://docs.opnfv.org/en/latest/submodules/storperf/docs/testing/user/index.html
+For x86_64 based systems, use:
-Danube
-~~~~~~
+.. code-block:: console
-The tag for the latest stable Danube is be:
+ TAG=x86_64-latest ENV_FILE=./admin.rc CARBON_DIR=./carbon/ docker-compose pull
+ TAG=x86_64-latest ENV_FILE=./admin.rc CARBON_DIR=./carbon/ docker-compose up -d
-.. code-block:: bash
+For 64 bit ARM based systems, use:
- docker pull opnfv/storperf:danube.3.0
+.. code-block:: console
-Colorado
-~~~~~~~~
+ TAG=aarch64-latest ENV_FILE=./admin.rc CARBON_DIR=./carbon/ docker-compose pull
+ TAG=aarch64-latest ENV_FILE=./admin.rc CARBON_DIR=./carbon/ docker-compose up -d
-The tag for the latest stable Colorado release is:
-.. code-block:: bash
+Release (stable)
+~~~~~~~~~~~~~~~~
- docker pull opnfv/storperf:colorado.0.1
+This tag represents StorPerf at its most recent stable release. There are
+no known bugs, and any known issues and workarounds are documented in the
+release notes. Issues found here should be reported in JIRA:
-Brahmaputra
-~~~~~~~~~~~
+https://jira.opnfv.org/secure/RapidBoard.jspa?rapidView=3
-The tag for the latest stable Brahmaputra release is:
+For x86_64 based systems, use:
-.. code-block:: bash
+.. code-block:: console
- docker pull opnfv/storperf:brahmaputra.1.2
+ TAG=x86_64-stable ENV_FILE=./admin.rc CARBON_DIR=./carbon/ docker-compose pull
+ TAG=x86_64-stable ENV_FILE=./admin.rc CARBON_DIR=./carbon/ docker-compose up -d
-StorPerf on ARM Processors
-==========================
+For 64 bit ARM based systems, use:
-StorPerf now supports docker images on ARM processors as well. However, at the moment
-there is no upstream image on DockerHub. The user needs to manually build it. Firstly,
-clone StorPerf repository from GitHub
+.. code-block:: console
-.. code-block:: bash
+ TAG=aarch64-stable ENV_FILE=./admin.rc CARBON_DIR=./carbon/ docker-compose pull
+ TAG=aarch64-stable ENV_FILE=./admin.rc CARBON_DIR=./carbon/ docker-compose up -d
+
+
+
+Fraser (opnfv-6.0.0)
+~~~~~~~~~~~~~~~~~~~~
+
+This tag represents the 6th OPNFV release and the 5th StorPerf release. There
+are no known bugs, and any known issues and workarounds are documented in
+the release notes. Documentation can be found under the Fraser label at:
+
+http://docs.opnfv.org/en/stable-fraser/submodules/storperf/docs/testing/user/index.html
+
+Issues found here should be reported against release 6.0.0 in JIRA:
+
+https://jira.opnfv.org/secure/RapidBoard.jspa?rapidView=3
+
+For x86_64 based systems, use:
+
+.. code-block:: console
+
+ TAG=x86_64-opnfv-6.0.0 ENV_FILE=./admin.rc CARBON_DIR=./carbon/ docker-compose pull
+ TAG=x86_64-opnfv-6.0.0 ENV_FILE=./admin.rc CARBON_DIR=./carbon/ docker-compose up -d
+
+For 64 bit ARM based systems, use:
+
+.. code-block:: console
+
+ TAG=aarch64-opnfv-6.0.0 ENV_FILE=./admin.rc CARBON_DIR=./carbon/ docker-compose pull
+ TAG=aarch64-opnfv-6.0.0 ENV_FILE=./admin.rc CARBON_DIR=./carbon/ docker-compose up -d
+
+
+
+Euphrates (opnfv-5.0.0)
+~~~~~~~~~~~~~~~~~~~~~~~
+
+This tag represents the 5th OPNFV release and the 4th StorPerf release. There
+are no known bugs, and any known issues and workarounds are documented in
+the release notes. Documentation can be found under the Euphrates label at:
+
+http://docs.opnfv.org/en/stable-euphrates/submodules/storperf/docs/testing/user/index.html
+
+Issues found here should be reported against release 5.0.0 in JIRA:
+
+https://jira.opnfv.org/secure/RapidBoard.jspa?rapidView=3
+
+For x86_64 based systems, use:
+
+.. code-block:: console
- git clone https://git.opnfv.org/storperf
- cd storperf/docker/
+ TAG=x86_64-opnfv-5.0.0 ENV_FILE=./admin.rc CARBON_DIR=./carbon/ docker-compose pull
+ TAG=x86_64-opnfv-5.0.0 ENV_FILE=./admin.rc CARBON_DIR=./carbon/ docker-compose up -d
-Next, build and setup the docker images
+For 64 bit ARM based systems, use:
.. code-block:: console
- TAG=aarch64 ENV_FILE=./admin.rc CARBON_DIR=./carbon docker-compose -f local-docker-compose.yaml build
- TAG=aarch64 ENV_FILE=./admin.rc CARBON_DIR=./carbon docker-compose -f local-docker-compose.yaml up -d
+ TAG=aarch64-opnfv-5.0.0 ENV_FILE=./admin.rc CARBON_DIR=./carbon/ docker-compose pull
+ TAG=aarch64-opnfv-5.0.0 ENV_FILE=./admin.rc CARBON_DIR=./carbon/ docker-compose up -d
The following pieces of information are required to prepare the environment:
-- The number of VMs/Cinder volumes to create
-- The Glance image that holds the VM operating system to use. StorPerf has
- only been tested with Ubuntu 16.04
-- The OpenStack flavor to use when creating the VMs
-- The name of the public network that agents will use
-- The size, in gigabytes, of the Cinder volumes to create
+- The number of VMs/Cinder volumes to create.
+- The Glance image that holds the VM operating system to use.
+- The OpenStack flavor to use when creating the VMs.
+- The name of the public network that agents will use.
+- The size, in gigabytes, of the Cinder volumes to create.
+- The number of Cinder volumes to attach to each VM.
- The availability zone (optional) in which the VM is to be launched. Defaults to **nova**.
- The username (optional) if we specify a custom image.
- The password (optional) for the above image.
+**Note**: on ARM based platforms a bug in the Linux kernel can prevent VMs
+from properly attaching Cinder volumes. There are two known workarounds:
+
+#. Create the environment with 0 Cinder volumes attached, and after the VMs
+ have finished booting, modify the stack to have 1 or more Cinder volumes.
+ See section on Changing Stack Parameters later in this guide.
+#. Add the following image metadata to Glance. This will cause the Cinder
+ volume to be mounted as a SCSI device, and therefore your target will be
+   /dev/sdb, etc., instead of /dev/vdb. You will need to specify this in your
+ warm up and workload jobs.
+
+.. code-block:: console
+
+   --property hw_disk_bus=scsi --property hw_scsi_model=virtio-scsi
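+
+Assuming the standard python-openstackclient, the properties can be applied
+with a command such as (image name is illustrative):
+
+.. code-block:: console
+
+   openstack image set --property hw_disk_bus=scsi \
+      --property hw_scsi_model=virtio-scsi arm64-ubuntu-16.04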
+
+
The ReST API is a POST to http://StorPerf:5000/api/v1.0/configurations and
takes a JSON payload as follows.
"agent_image": string,
"public_network": string,
"volume_size": int,
+ "volume_count": int,
"availability_zone": string,
"username": string,
"password": string
"test_case": "snia_steady_state"
}
+Changing Stack Parameters
+~~~~~~~~~~~~~~~~~~~~~~~~~
+While StorPerf currently does not support changing the parameters of the
+stack directly, it is possible to change the stack using the OpenStack client
+library. The following parameters can be changed:
+
+- agent_count: to increase or decrease the number of VMs.
+- volume_count: to change the number of Cinder volumes per VM.
+- volume_size: to increase the size of each volume. Note: Cinder cannot shrink volumes.
+
+Increasing the number of agents or volumes, or increasing the size of the
+volumes, requires you to kick off a new _warm_up job to initialize the
+newly allocated volumes.
+
+The following is an example of how to change the stack using the heat client:
+
+.. code-block:: console
+
+   heat stack-update StorPerfAgentGroup --existing -P "volume_count=2"
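+
+If the heat CLI is not available, an equivalent command with a recent
+python-openstackclient would be:
+
+.. code-block:: console
+
+   openstack stack update --existing --parameter volume_count=2 StorPerfAgentGroup
+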
Query Jobs Information