|metric        | Storage performance                                          |
 |              |                                                              |
 +--------------+--------------------------------------------------------------+
-|test purpose  | Storperf integration with yardstick. The purpose of StorPerf |
-|              | is to provide a tool to measure block and object storage     |
-|              | performance in an NFVI. When complemented with a             |
-|              | characterization of typical VF storage performance           |
-|              | requirements, it can provide pass/fail thresholds for test,  |
-|              | staging, and production NFVI environments.                   |
-|              |                                                              |
-|              | The benchmarks developed for block and object storage will   |
-|              | be sufficiently varied to provide a good preview of expected |
-|              | storage performance behavior for any type of VNF workload.   |
+|test purpose  | To evaluate and report on the Cinder volume performance.     |
+|              |                                                              |
+|              | This testcase integrates with OPNFV StorPerf to measure      |
+|              | block performance of the underlying Cinder drivers.  Many    |
+|              | options are supported, and even the root disk (Glance        |
+|              | ephemeral storage) can be profiled.                          |
+|              |                                                              |
+|              | The fundamental concept of the test case is to first fill    |
+|              | the volumes with random data to ensure reported metrics      |
+|              | are indicative of continued usage and not skewed by          |
+|              | transitional performance while the underlying storage        |
+|              | driver allocates blocks.                                     |
+|              | The metrics for filling the volumes with random data         |
+|              | are not reported in the final results.  The test also        |
+|              | ensures the volumes are performing at a consistent level     |
+|              | of performance by measuring metrics every minute, and        |
+|              | comparing the trend of the metrics over the run.  By         |
+|              | evaluating the min and max values, as well as the slope of   |
+|              | the trend, it can make the determination that the metrics    |
+|              | are stable, and not fluctuating beyond industry standard     |
+|              | norms.                                                       |
 |              |                                                              |
 +--------------+--------------------------------------------------------------+
 |configuration | file: opnfv_yardstick_tc074.yaml                             |
 |              | * public_network: "ext-net" - name of public network         |
 |              | * volume_size: 2 - cinder volume size                        |
 |              | * block_sizes: "4096" - data block size                      |
-|              | * queue_depths: "4"                                          |
+|              | * queue_depths: "4" - the number of simultaneous I/Os        |
+|              |   to perform at all times                                    |
 |              | * StorPerf_ip: "192.168.200.2"                               |
 |              | * query_interval: 10 - state query interval                  |
 |              | * timeout: 600 - maximum allowed job time                    |
-|              | performance in an NFVI.                                      |
 |              |                                                              |
 |              | StorPerf is delivered as a Docker container from             |
-|              | https://hub.docker.com/r/opnfv/storperf/tags/.               |
+|              | https://hub.docker.com/r/opnfv/storperf-master/tags/.        |
+|              |                                                              |
+|              | The underlying tool used is FIO, and StorPerf supports       |
+|              | any FIO option in order to tailor the test to the exact      |
+|              | workload needed.                                             |
 |              |                                                              |
 +--------------+--------------------------------------------------------------+
 |references    | Storperf_                                                    |
 |              |      - rr: 100% Read, random access                          |
 |              |      - wr: 100% Write, random access                         |
 |              |      - rw: 70% Read / 30% write, random access               |
-|              | * nossd: Do not perform SSD style preconditioning.           |
-|              | * nowarm:  Do not perform a warmup prior to                  |
-|              |   measurements.                                              |
+|              | * workloads={json maps}                                      |
+|              |   This parameter supersedes the workload and calls the V2.0  |
+|              |   API in StorPerf. It allows for greater control of the      |
+|              |   parameters to be passed to FIO.  For example, running a    |
+|              |   random read/write with a mix of 90% read and 10% write     |
+|              |   would be expressed as follows:                             |
+|              |   {"9010randrw": {"rw":"randrw","rwmixread": "90"}}          |
+|              |   Note: This must be passed in as a string, so don't forget  |
+|              |   to escape or otherwise properly deal with the quotes.      |
+|              |                                                              |
 |              | * report= [job_id]                                           |
 |              |   Query the status of the supplied job_id and report on      |
 |              |   metrics. If a workload is supplied, will report on only    |
 |              |                                                              |
 +--------------+--------------------------------------------------------------+
 |pre-test      | If you do not have an Ubuntu 14.04 image in Glance, you will |
-|conditions    | need to add one. A key pair for launching agents is also     |
-|              | required.                                                    |
+|conditions    | need to add one.                                             |
 |              |                                                              |
 |              | Storperf is required to be installed in the environment.     |
 |              | There are two possible methods for Storperf installation:    |
 |test sequence | description and expected result                              |
 |              |                                                              |
 +--------------+--------------------------------------------------------------+
-|step 1        | The Storperf is installed and Ubuntu 14.04 image is stored   |
-|              | in glance. TC is invoked and logs are produced and stored.   |
+|step 1        | Yardstick calls StorPerf to create the heat stack with the   |
+|              | number of VMs and size of Cinder volumes specified.  The     |
+|              | VMs will be on their own private subnet, and take floating   |
+|              | IP addresses from the specified public network.              |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 2        | Yardstick calls StorPerf to fill all the volumes with        |
+|              | random data.                                                 |
+|              |                                                              |
++--------------+--------------------------------------------------------------+
+|step 3        | Yardstick calls StorPerf to perform the series of tests      |
+|              | specified by the workload, queue depths and block sizes.     |
 |              |                                                              |
-|              | Result: Logs are stored.                                     |
++--------------+--------------------------------------------------------------+
+|step 4        | Yardstick calls StorPerf to delete the stack it created.     |
 |              |                                                              |
 +--------------+--------------------------------------------------------------+
 |test verdict  | None. Storage performance results are fetched and stored.    |
 
 ##############################################################################
 from __future__ import absolute_import
 
-import os
 import logging
+import os
 import time
 
-import requests
 from oslo_serialization import jsonutils
+import requests
 
 from yardstick.benchmark.scenarios import base
 
+
 LOG = logging.getLogger(__name__)
 
 
         wr: 100% Write, random access
         rw: 70% Read / 30% write, random access
 
-    nossd (Optional):
-    Do not perform SSD style preconditioning.
-
-    nowarm (Optional):
-    Do not perform a warmup prior to measurements.
-
     report = [job_id] (Optional):
     Query the status of the supplied job_id and report on metrics.
     If a workload is supplied, will report on only that subset.
 
         setup_query_content = jsonutils.loads(
             setup_query.content)
-        if setup_query_content["stack_created"]:
-            self.setup_done = True
+        if ("stack_created" in setup_query_content and
+                setup_query_content["stack_created"]):
             LOG.debug("stack_created: %s",
                       setup_query_content["stack_created"])
+            return True
+
+        return False
 
     def setup(self):
         """Set the configuration."""
         elif setup_res.status_code == 200:
             LOG.info("stack_id: %s", setup_res_content["stack_id"])
 
-            while not self.setup_done:
-                self._query_setup_state()
-                time.sleep(self.query_interval)
+        while not self._query_setup_state():
+            time.sleep(self.query_interval)
+
+        # We do not want to load the results of the disk initialization,
+        # so it is not added to the results here.
+        self.initialize_disks()
+        self.setup_done = True
 
     def _query_job_state(self, job_id):
         """Query the status of the supplied job_id and report on metrics"""
         if not self.setup_done:
             self.setup()
 
-        metadata = {"build_tag": "latest", "test_case": "opnfv_yardstick_tc074"}
+        metadata = {"build_tag": "latest",
+                    "test_case": "opnfv_yardstick_tc074"}
         metadata_payload_dict = {"pod_name": "NODE_NAME",
                                  "scenario_name": "DEPLOY_SCENARIO",
                                  "version": "YARDSTICK_BRANCH"}
 
         job_args = {"metadata": metadata}
         job_args_payload_list = ["block_sizes", "queue_depths", "deadline",
-                                 "target", "nossd", "nowarm", "workload"]
+                                 "target", "workload", "workloads",
+                                 "agent_count", "steady_state_samples"]
+        job_args["deadline"] = self.options["timeout"]
 
         for job_argument in job_args_payload_list:
             try:
             except KeyError:
                 pass
 
+        api_version = "v1.0"
+
+        if ("workloads" in job_args and
+                job_args["workloads"] is not None and
+                len(job_args["workloads"]) > 0):
+            api_version = "v2.0"
+
         LOG.info("Starting a job with parameters %s", job_args)
-        job_res = requests.post('http://%s:5000/api/v1.0/jobs' % self.target,
+        job_res = requests.post('http://%s:5000/api/%s/jobs' % (self.target,
+                                                                api_version),
                                 json=job_args)
 
         job_res_content = jsonutils.loads(job_res.content)
                 self._query_job_state(job_id)
                 time.sleep(self.query_interval)
 
-            terminate_res = requests.delete('http://%s:5000/api/v1.0/jobs' %
-                                            self.target)
-
-            if terminate_res.status_code != 200:
-                terminate_res_content = jsonutils.loads(
-                    terminate_res.content)
-                raise RuntimeError("Failed to start a job, error message:",
-                                   terminate_res_content["message"])
-
         # TODO: Support using ETA to polls for completion.
         #       Read ETA, next poll in 1/2 ETA time slot.
         #       If ETA is greater than the maximum allowed job time,
 
             result.update(result_res_content)
 
+    def initialize_disks(self):
+        """Fills the target with random data prior to executing workloads"""
+
+        job_args = {}
+        job_args_payload_list = ["target"]
+
+        for job_argument in job_args_payload_list:
+            try:
+                job_args[job_argument] = self.options[job_argument]
+            except KeyError:
+                pass
+
+        LOG.info("Starting initialization with parameters %s", job_args)
+        job_res = requests.post('http://%s:5000/api/v1.0/initializations' %
+                                self.target, json=job_args)
+
+        job_res_content = jsonutils.loads(job_res.content)
+
+        if job_res.status_code != 200:
+            raise RuntimeError(
+                "Failed to start initialization job, error message:",
+                job_res_content["message"])
+        elif job_res.status_code == 200:
+            job_id = job_res_content["job_id"]
+            LOG.info("Started initialization as job id: %s...", job_id)
+
+        while not self.job_completed:
+            self._query_job_state(job_id)
+            time.sleep(self.query_interval)
+
+        self.job_completed = False
+
     def teardown(self):
         """Deletes the agent configuration and the stack"""
-        teardown_res = requests.delete('http://%s:5000/api/v1.0/\
-                                       configurations' % self.target)
+        teardown_res = requests.delete(
+            'http://%s:5000/api/v1.0/configurations' % self.target)
 
         if teardown_res.status_code == 400:
             teardown_res_content = jsonutils.loads(
                 teardown_res.content)
             raise RuntimeError("Failed to reset environment, error message:",
                                teardown_res_content['message'])
 
 
 
 from __future__ import absolute_import
 
+import json
 import unittest
 
 import mock
 from oslo_serialization import jsonutils
+import requests
 
 from yardstick.benchmark.scenarios.storage import storperf
 
 
 # pylint: disable=unused-argument
 # disable this for now because I keep forgetting mock patch arg ordering
-
-
 def mocked_requests_config_post(*args, **kwargs):
     class MockResponseConfigPost(object):
 
 
     return MockResponseConfigPost(
         '{"stack_id": "dac27db1-3502-4300-b301-91c64e6a1622",'
-        '"stack_created": "false"}',
+        '"stack_created": false}',
         200)
 
 
+def mocked_requests_config_post_fail(*args, **kwargs):
+    class MockResponseConfigPost(object):
+
+        def __init__(self, json_data, status_code):
+            self.content = json_data
+            self.status_code = status_code
+
+    return MockResponseConfigPost(
+        '{"message": "ERROR: Parameter \'public_network\' is invalid: ' +
+        'Error validating value \'foo\': Unable to find network with ' +
+        'name or id \'foo\'"}',
+        400)
+
+
 def mocked_requests_config_get(*args, **kwargs):
     class MockResponseConfigGet(object):
 
 
     return MockResponseConfigGet(
         '{"stack_id": "dac27db1-3502-4300-b301-91c64e6a1622",'
-        '"stack_created": "true"}',
+        '"stack_created": true}',
         200)
 
 
+def mocked_requests_config_get_not_created(*args, **kwargs):
+    class MockResponseConfigGet(object):
+
+        def __init__(self, json_data, status_code):
+            self.content = json_data
+            self.status_code = status_code
+
+    return MockResponseConfigGet(
+        '{"stack_id": "",'
+        '"stack_created": false}',
+        200)
+
+
+def mocked_requests_config_get_no_payload(*args, **kwargs):
+    class MockResponseConfigGet(object):
+
+        def __init__(self, json_data, status_code):
+            self.content = json_data
+            self.status_code = status_code
+
+    return MockResponseConfigGet(
+        '{}',
+        200)
+
+
+def mocked_requests_initialize_post_fail(*args, **kwargs):
+    class MockResponseJobPost(object):
+
+        def __init__(self, json_data, status_code):
+            self.content = json_data
+            self.status_code = status_code
+
+    return MockResponseJobPost(
+        '{"message": "ERROR: Stack StorPerfAgentGroup does not exist"}',
+        400)
+
+
 def mocked_requests_job_get(*args, **kwargs):
     class MockResponseJobGet(object):
 
                                  "d46bfb8c-36f4-4a40-813b-c4b4a437f728"}', 200)
 
 
+def mocked_requests_job_post_fail(*args, **kwargs):
+    class MockResponseJobPost(object):
+
+        def __init__(self, json_data, status_code):
+            self.content = json_data
+            self.status_code = status_code
+
+    return MockResponseJobPost(
+        '{"message": "ERROR: Stack StorPerfAgentGroup does not exist"}',
+        400)
+
+
 def mocked_requests_job_delete(*args, **kwargs):
     class MockResponseJobDelete(object):
 
             self.json_data = json_data
+            self.content = json_data
             self.status_code = status_code
 
-    if args[0] == "http://172.16.0.137:5000/api/v1.0/configurations":
-        return MockResponseDeleteFailed('{"message": "Teardown failed"}', 400)
-
-    return MockResponseDeleteFailed('{}', 404)
+    return MockResponseDeleteFailed('{"message": "Teardown failed"}', 400)
 
 
 class StorPerfTestCase(unittest.TestCase):
 
         self.result = {}
 
-    @mock.patch('yardstick.benchmark.scenarios.storage.storperf.requests.post',
-                side_effect=mocked_requests_config_post)
-    @mock.patch('yardstick.benchmark.scenarios.storage.storperf.requests.get',
-                side_effect=mocked_requests_config_get)
-    def test_successful_setup(self, mock_post, mock_get):
+    @mock.patch.object(requests, 'post')
+    @mock.patch.object(requests, 'get')
+    def test_setup(self, mock_get, mock_post):
+        mock_post.side_effect = [mocked_requests_config_post(),
+                                 mocked_requests_job_post()]
+        mock_get.side_effect = [mocked_requests_config_get(),
+                                mocked_requests_job_get()]
+
         options = {
             "agent_count": 8,
             "public_network": 'ext-net',
 
         self.assertTrue(s.setup_done)
 
-    @mock.patch('yardstick.benchmark.scenarios.storage.storperf.requests.post',
-                side_effect=mocked_requests_job_post)
-    @mock.patch('yardstick.benchmark.scenarios.storage.storperf.requests.get',
-                side_effect=mocked_requests_job_get)
-    @mock.patch(
-        'yardstick.benchmark.scenarios.storage.storperf.requests.delete',
-        side_effect=mocked_requests_job_delete)
-    def test_successful_run(self, mock_post, mock_get, mock_delete):
+    @mock.patch.object(requests, 'get')
+    def test_query_setup_state_unsuccessful(self, mock_get):
+        mock_get.side_effect = mocked_requests_config_get_not_created
+        args = {
+            "options": {}
+        }
+        s = storperf.StorPerf(args, self.ctx)
+        result = s._query_setup_state()
+        self.assertFalse(result)
+
+    @mock.patch.object(requests, 'get')
+    def test_query_setup_state_no_payload(self, mock_get):
+        mock_get.side_effect = mocked_requests_config_get_no_payload
+        args = {
+            "options": {}
+        }
+        s = storperf.StorPerf(args, self.ctx)
+        result = s._query_setup_state()
+        self.assertFalse(result)
+
+    @mock.patch.object(requests, 'post')
+    @mock.patch.object(requests, 'get')
+    def test_setup_config_post_failed(self, mock_get, mock_post):
+        mock_post.side_effect = mocked_requests_config_post_fail
+
+        args = {
+            "options": {
+                "public_network": "foo"
+            }
+        }
+
+        s = storperf.StorPerf(args, self.ctx)
+
+        self.assertRaises(RuntimeError, s.setup)
+
+    @mock.patch.object(requests, 'get')
+    @mock.patch.object(requests, 'post')
+    def test_run_v1_successful(self, mock_post, mock_get):
+        mock_post.side_effect = mocked_requests_job_post
+        mock_get.side_effect = mocked_requests_job_get
+
         options = {
             "agent_count": 8,
             "public_network": 'ext-net',
             "query_interval": 0,
             "timeout": 60
         }
+        expected_post = {
+            'metadata': {
+                'build_tag': 'latest',
+                'test_case': 'opnfv_yardstick_tc074'
+            },
+            'deadline': 60,
+            'block_sizes': 4096,
+            'queue_depths': 4,
+            "workload": "rs",
+            'agent_count': 8
+        }
+
+        args = {
+            "options": options
+        }
+
+        s = storperf.StorPerf(args, self.ctx)
+        s.setup_done = True
+
+        sample_output = '{"Status": "Completed",\
+         "_ssd_preconditioning.queue-depth.8.block-size.16384.duration": 6}'
+
+        expected_result = jsonutils.loads(sample_output)
+
+        s.run(self.result)
+
+        mock_post.assert_called_once_with(
+            'http://192.168.23.2:5000/api/v1.0/jobs',
+            json=jsonutils.loads(json.dumps(expected_post)))
+
+        self.assertEqual(self.result, expected_result)
+
+    @mock.patch.object(requests, 'get')
+    @mock.patch.object(requests, 'post')
+    def test_run_v2_successful(self, mock_post, mock_get):
+        mock_post.side_effect = mocked_requests_job_post
+        mock_get.side_effect = mocked_requests_job_get
+
+        options = {
+            "agent_count": 8,
+            "public_network": 'ext-net',
+            "volume_size": 10,
+            "block_sizes": 4096,
+            "queue_depths": 4,
+            "workloads": {
+                "read_sequential": {
+                    "rw": "rs"
+                }
+            },
+            "StorPerf_ip": "192.168.23.2",
+            "query_interval": 0,
+            "timeout": 60
+        }
+        expected_post = {
+            'metadata': {
+                'build_tag': 'latest',
+                'test_case': 'opnfv_yardstick_tc074'
+            },
+            'deadline': 60,
+            'block_sizes': 4096,
+            'queue_depths': 4,
+            'workloads': {
+                'read_sequential': {
+                    'rw': 'rs'
+                }
+            },
+            'agent_count': 8
+        }
 
         args = {
             "options": options
         expected_result = jsonutils.loads(sample_output)
 
         s.run(self.result)
+        mock_post.assert_called_once_with(
+            'http://192.168.23.2:5000/api/v2.0/jobs',
+            json=expected_post)
 
         self.assertEqual(self.result, expected_result)
 
-    @mock.patch(
-        'yardstick.benchmark.scenarios.storage.storperf.requests.delete',
-        side_effect=mocked_requests_delete)
-    def test_successful_teardown(self, mock_delete):
+    @mock.patch('time.sleep')
+    @mock.patch.object(requests, 'get')
+    @mock.patch.object(requests, 'post')
+    def test_run_failed(self, mock_post, mock_get, _):
+        mock_post.side_effect = mocked_requests_job_post_fail
+        mock_get.side_effect = mocked_requests_job_get
+
+        options = {
+            "agent_count": 8,
+            "public_network": 'ext-net',
+            "volume_size": 10,
+            "block_sizes": 4096,
+            "queue_depths": 4,
+            "workloads": {
+                "read_sequential": {
+                    "rw": "rs"
+                }
+            },
+            "StorPerf_ip": "192.168.23.2",
+            "query_interval": 0,
+            "timeout": 60
+        }
+        expected_post = {
+            'metadata': {
+                'build_tag': 'latest',
+                'test_case': 'opnfv_yardstick_tc074'
+            },
+            'deadline': 60,
+            'block_sizes': 4096,
+            'queue_depths': 4,
+            'workloads': {
+                'read_sequential': {
+                    'rw': 'rs'
+                }
+            },
+            'agent_count': 8
+        }
+
+        args = {
+            "options": options
+        }
+
+        s = storperf.StorPerf(args, self.ctx)
+        s.setup_done = True
+
+        self.assertRaises(RuntimeError, s.run, self.ctx)
+        mock_post.assert_called_once_with(
+            'http://192.168.23.2:5000/api/v2.0/jobs',
+            json=expected_post)
+
+    @mock.patch('time.sleep')
+    @mock.patch.object(requests, 'get')
+    @mock.patch.object(requests, 'post')
+    @mock.patch.object(storperf.StorPerf, 'setup')
+    def test_run_calls_setup(self, mock_setup, mock_post, mock_get, _):
+        mock_post.side_effect = mocked_requests_job_post
+        mock_get.side_effect = mocked_requests_job_get
+
+        args = {
+            "options": {
+                'timeout': 60,
+            }
+        }
+
+        s = storperf.StorPerf(args, self.ctx)
+
+        s.run(self.result)
+
+        mock_setup.assert_called_once()
+
+    @mock.patch('time.sleep')
+    @mock.patch.object(requests, 'get')
+    @mock.patch.object(requests, 'post')
+    def test_initialize_disks(self, mock_post, mock_get, _):
+        mock_post.side_effect = mocked_requests_job_post
+        mock_get.side_effect = mocked_requests_job_get
+
+        args = {
+            "options": {
+                "StorPerf_ip": "192.168.23.2"
+            }
+        }
+
+        s = storperf.StorPerf(args, self.ctx)
+
+        s.initialize_disks()
+
+        mock_post.assert_called_once_with(
+            'http://192.168.23.2:5000/api/v1.0/initializations',
+            json={})
+
+    @mock.patch('time.sleep')
+    @mock.patch.object(requests, 'get')
+    @mock.patch.object(requests, 'post')
+    def test_initialize_disks_post_failed(self, mock_post, mock_get, _):
+        mock_post.side_effect = mocked_requests_initialize_post_fail
+        mock_get.side_effect = mocked_requests_job_get
+
+        args = {
+            "options": {
+                "StorPerf_ip": "192.168.23.2"
+            }
+        }
+
+        s = storperf.StorPerf(args, self.ctx)
+
+        self.assertRaises(RuntimeError, s.initialize_disks)
+        mock_post.assert_called_once_with(
+            'http://192.168.23.2:5000/api/v1.0/initializations',
+            json={})
+
+    @mock.patch.object(requests, 'delete')
+    def test_teardown(self, mock_delete):
+        mock_delete.side_effect = mocked_requests_job_delete
         options = {
             "agent_count": 8,
             "public_network": 'ext-net',
         s.teardown()
 
         self.assertFalse(s.setup_done)
+        mock_delete.assert_called_once_with(
+            'http://192.168.23.2:5000/api/v1.0/configurations')
 
-    @mock.patch(
-        'yardstick.benchmark.scenarios.storage.storperf.requests.delete',
-        side_effect=mocked_requests_delete_failed)
-    def test_failed_teardown(self, mock_delete):
+    @mock.patch.object(requests, 'delete')
+    def test_teardown_request_delete_failed(self, mock_delete):
+        mock_delete.side_effect = mocked_requests_delete_failed
         options = {
             "agent_count": 8,
             "public_network": 'ext-net',
 
         s = storperf.StorPerf(args, self.ctx)
 
-        self.assertRaises(AssertionError, s.teardown(), self.result)
+        self.assertRaises(RuntimeError, s.teardown)
+        mock_delete.assert_called_once_with(
+            'http://192.168.23.2:5000/api/v1.0/configurations')