Parameterize Rally scenarios
author: Juha Kosonen <juha.kosonen@nokia.com>
Fri, 18 Dec 2015 15:47:53 +0000 (15:47 +0000)
committer: Juha Kosonen <juha.kosonen@nokia.com>
Fri, 18 Dec 2015 15:47:53 +0000 (15:47 +0000)
Applied argument manipulation approach from rally/certification.

JIRA: FUNCTEST-109

Change-Id: Ia6f08f1a7e7566c883cf7c616d32b7e0ca26b1f9
Signed-off-by: Juha Kosonen <juha.kosonen@nokia.com>
27 files changed:
testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py [new file with mode: 0755]
testcases/VIM/OpenStack/CI/rally_cert/macro/macro.yaml [new file with mode: 0644]
testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-authenticate.yaml [new file with mode: 0644]
testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-cinder.yaml [new file with mode: 0644]
testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-glance.yaml [new file with mode: 0644]
testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-heat.yaml [new file with mode: 0644]
testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-keystone.yaml [new file with mode: 0644]
testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-neutron.yaml [new file with mode: 0644]
testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-nova.yaml [new file with mode: 0644]
testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-quotas.yaml [new file with mode: 0644]
testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-requests.yaml [new file with mode: 0644]
testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-smoke.yaml [new file with mode: 0644]
testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-vm.yaml [new file with mode: 0644]
testcases/VIM/OpenStack/CI/rally_cert/scenario/support/instance_dd_test.sh [new file with mode: 0644]
testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/autoscaling_policy.yaml.template [new file with mode: 0644]
testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/default.yaml.template [new file with mode: 0644]
testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/random_strings.yaml.template [new file with mode: 0644]
testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/resource_group.yaml.template [new file with mode: 0644]
testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/server_with_ports.yaml.template [new file with mode: 0644]
testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/server_with_volume.yaml.template [new file with mode: 0644]
testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_autoscaling_policy_inplace.yaml.template [new file with mode: 0644]
testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_random_strings_add.yaml.template [new file with mode: 0644]
testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_random_strings_delete.yaml.template [new file with mode: 0644]
testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_random_strings_replace.yaml.template [new file with mode: 0644]
testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_resource_group_increase.yaml.template [new file with mode: 0644]
testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_resource_group_reduce.yaml.template [new file with mode: 0644]
testcases/VIM/OpenStack/CI/rally_cert/task.yaml [new file with mode: 0644]

diff --git a/testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py b/testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py
new file mode 100755 (executable)
index 0000000..c6e5505
--- /dev/null
@@ -0,0 +1,301 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2015 Orange
+# guyrodrigue.koffi@orange.com
+# morgan.richomme@orange.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# 0.1 (05/2015) initial commit
+# 0.2 (28/09/2015) extract Tempest, format json result, add ceilometer suite
+# 0.3 (19/10/2015) remove Tempest from run_rally
+# and push result into test DB
+#
+
+import re
+import json
+import os
+import argparse
+import logging
+import yaml
+import requests
+import sys
+import novaclient.v2.client as novaclient
+
""" tests configuration """
# Scenario names accepted on the command line; 'all' fans out to the
# individual scenarios (see main()).
tests = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
         'neutron', 'nova', 'quotas', 'requests', 'vm', 'all']
parser = argparse.ArgumentParser()
parser.add_argument("repo_path", help="Path to the repository")
# NOTE(review): the help text only interpolates d[0]..d[7] but 'tests'
# holds 11 entries, so 'requests', 'vm' and 'all' never appear in the
# usage message -- confirm and extend the format indices.
parser.add_argument("test_name",
                    help="Module name to be tested"
                         "Possible values are : "
                         "[ {d[0]} | {d[1]} | {d[2]} | {d[3]} | {d[4]} | "
                         "{d[5]} | {d[6]} | {d[7]}] "
                         "The 'all' value "
                         "performs all the possible tests scenarios"
                         .format(d=tests))

parser.add_argument("-d", "--debug", help="Debug mode",  action="store_true")
parser.add_argument("-r", "--report",
                    help="Create json result file",
                    action="store_true")

args = parser.parse_args()

# Make the repository's helper module importable before importing it.
sys.path.append(args.repo_path + "testcases/")
import functest_utils

""" logging configuration """
logger = logging.getLogger("run_rally")
logger.setLevel(logging.DEBUG)

# Console handler verbosity follows the --debug flag.
ch = logging.StreamHandler()
if args.debug:
    ch.setLevel(logging.DEBUG)
else:
    ch.setLevel(logging.INFO)

formatter = logging.Formatter("%(asctime)s - %(name)s - "
                              "%(levelname)s - %(message)s")
ch.setFormatter(formatter)
logger.addHandler(ch)

with open(args.repo_path+"testcases/config_functest.yaml") as f:
    functest_yaml = yaml.safe_load(f)
# NOTE(review): redundant -- the 'with' block above already closed f.
f.close()

HOME = os.environ['HOME']+"/"
REPO_PATH = args.repo_path
####todo:
#SCENARIOS_DIR = REPO_PATH + functest_yaml.get("general"). \
#    get("directories").get("dir_rally_scn")
SCENARIOS_DIR = REPO_PATH + "testcases/VIM/OpenStack/CI/rally_cert/"
###
TEMPLATE_DIR = SCENARIOS_DIR + "scenario/templates"
SUPPORT_DIR = SCENARIOS_DIR + "scenario/support"
###todo: hardcoded for now -- presumably these should move to
### config_functest.yaml alongside the other settings; confirm.
FLAVOR_NAME = "m1.tiny"
USERS_AMOUNT = 2
TENANTS_AMOUNT = 3
CONTROLLERS_AMOUNT = 2
###
RESULTS_DIR = functest_yaml.get("general").get("directories"). \
    get("dir_rally_res")
TEST_DB = functest_yaml.get("results").get("test_db_url")
FLOATING_NETWORK = functest_yaml.get("general"). \
    get("openstack").get("neutron_public_net_name")

GLANCE_IMAGE_NAME = functest_yaml.get("general"). \
    get("openstack").get("image_name")
GLANCE_IMAGE_FILENAME = functest_yaml.get("general"). \
    get("openstack").get("image_file_name")
GLANCE_IMAGE_FORMAT = functest_yaml.get("general"). \
    get("openstack").get("image_disk_format")
GLANCE_IMAGE_PATH = functest_yaml.get("general"). \
    get("directories").get("dir_functest_data") + "/" + GLANCE_IMAGE_FILENAME
GLANCE_IMAGE_LOCATION = "http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img"
+
+
def push_results_to_db(payload):
    """POST the rally result payload to the functest results database.

    :param payload: parsed JSON results to embed in the 'details' field
    """
    installer = functest_utils.get_installer_type(logger)
    git_version = functest_utils.get_git_branch(args.repo_path)
    pod_name = functest_utils.get_pod_name(logger)
    # TODO pod_name hardcoded, info shall come from Jenkins
    params = {
        "project_name": "functest",
        "case_name": "Rally",
        "pod_name": pod_name,
        "installer": installer,
        "version": git_version,
        "details": payload,
    }

    response = requests.post(TEST_DB + "/results",
                             data=json.dumps(params),
                             headers={'Content-Type': 'application/json'})
    logger.debug(response)
+
+
def get_task_id(cmd_raw):
    """Extract the rally task id from 'rally task start' output.

    :param cmd_raw: raw stdout captured from the rally command
    :return: the task id as a string, or None when no start line is found
    """
    start_line = re.compile('^Task +(.*): started$')
    for raw_line in cmd_raw.splitlines(True):
        found = start_line.match(raw_line.strip())
        if found is not None:
            return found.group(1)
    return None
+
+
def create_glance_image(path, name, disk_format):
    """Register a public glance image from a local file.

    :param path: absolute path of the image file
    :param name: name to give the glance image
    :param disk_format: disk format of the file (e.g. qcow2, raw)
    :return: True (command failures are only logged by the helper)
    """
    command = ("glance image-create --name {}  --visibility public "
               "--disk-format {} --container-format bare "
               "--file {}").format(name, disk_format, path)
    functest_utils.execute_command(command, logger)
    return True
+
+
def task_succeed(json_raw):
    """Parse the JSON emitted by 'rally task results' and decide success.

    :param json_raw: raw JSON string produced by the rally CLI
    :return: True when the first report entry exists and none of its
             results recorded an error; False otherwise (including
             malformed JSON or an empty report, which previously crashed
             with ValueError/IndexError instead of failing gracefully)
    """
    try:
        rally_report = json.loads(json_raw)[0]
    except (ValueError, IndexError):
        # Malformed rally output or an empty result list: count as failure
        # rather than aborting the whole run with a traceback.
        return False

    if rally_report is None:
        return False

    results = rally_report.get('result')
    if results is None:
        return False

    # A single iteration with a non-empty error list fails the task.
    return all(len(result.get('error')) == 0 for result in results)
+
+
def build_task_args(test_file_name):
    """Assemble the task-args mapping passed to rally via --task-args.

    :param test_file_name: scenario name used to fill 'service_list'
    :return: dict of template variables consumed by task.yaml
    """
    return {
        'service_list': [test_file_name],
        'smoke': False,
        'image_name': GLANCE_IMAGE_NAME,
        'flavor_name': FLAVOR_NAME,
        'glance_image_location': GLANCE_IMAGE_LOCATION,
        'floating_network': FLOATING_NETWORK,
        'tmpl_dir': TEMPLATE_DIR,
        'sup_dir': SUPPORT_DIR,
        'users_amount': USERS_AMOUNT,
        'tenants_amount': TENANTS_AMOUNT,
        'controllers_amount': CONTROLLERS_AMOUNT,
    }
+
+
def run_task(test_name):
    """Launch rally for one scenario and collect its reports.

    Runs 'rally task start' for the given scenario, writes the HTML and
    JSON reports under RESULTS_DIR, optionally pushes the JSON payload to
    the test DB (--report), and prints Test OK/KO from the parsed result.

    :param test_name: name of the rally scenario (e.g. 'nova')
    :return: None (exits the process on fatal errors)
    """
    logger.info('starting {} test ...'.format(test_name))

    task_file = '{}task.yaml'.format(SCENARIOS_DIR)
    if not os.path.exists(task_file):
        logger.error("Task file '%s' does not exist." % task_file)
        exit(-1)

    test_file_name = '{}opnfv-{}.yaml'.format(SCENARIOS_DIR + "scenario/", test_name)
    if not os.path.exists(test_file_name):
        logger.error("The scenario '%s' does not exist." % test_file_name)
        exit(-1)

    logger.debug('Scenario fetched from : {}'.format(test_file_name))

    cmd_line = "rally task start --abort-on-sla-failure " + \
               "--task {} ".format(task_file) + \
               "--task-args \"{}\" ".format(build_task_args(test_name))
    logger.debug('running command line : {}'.format(cmd_line))
    cmd = os.popen(cmd_line)
    task_id = get_task_id(cmd.read())
    logger.debug('task_id : {}'.format(task_id))

    if task_id is None:
        logger.error("failed to retrieve task_id")
        exit(-1)

    # check for result directory and create it otherwise
    if not os.path.exists(RESULTS_DIR):
        # BUG FIX: the original message had no {} placeholder, so the
        # directory name was silently dropped from the log line.
        logger.debug('{} does not exist, we create it'.format(RESULTS_DIR))
        os.makedirs(RESULTS_DIR)

    # write html report file
    report_file_name = '{}opnfv-{}.html'.format(RESULTS_DIR, test_name)
    cmd_line = "rally task report {} --out {}".format(task_id,
                                                      report_file_name)
    logger.debug('running command line : {}'.format(cmd_line))
    os.popen(cmd_line)

    # get and save rally operation JSON result
    cmd_line = "rally task results %s" % task_id
    logger.debug('running command line : {}'.format(cmd_line))
    json_results = os.popen(cmd_line).read()
    with open('{}opnfv-{}.json'.format(RESULTS_DIR, test_name), 'w') as f:
        logger.debug('saving json file')
        f.write(json_results)

    # Push results in payload of testcase
    if args.report:
        # Parse the in-memory string instead of re-reading the file we
        # just wrote (the original reopened it for no reason).
        logger.debug("Push result into DB")
        push_results_to_db(json.loads(json_results))

    # parse JSON operation result
    if task_succeed(json_results):
        print('Test OK')
    else:
        print('Test KO')
+
+
def delete_glance_image(name):
    """Delete the first glance image whose listing line matches *name*.

    :param name: image name pattern grepped from 'glance image-list'
    :return: True (command failures are only logged by the helper)
    """
    template = ("glance image-delete $(glance image-list | grep %s "
                "| awk '{print $2}' | head -1)")
    functest_utils.execute_command(template % name, logger)
    return True
+
+
def cleanup(nova):
    """Remove resources created for the rally run (currently the image).

    :param nova: nova client handle (currently unused here)
    :return: True
    """
    logger.info("Cleaning up...")
    logger.debug("Deleting image...")
    delete_glance_image(GLANCE_IMAGE_NAME)
    return True
+
+
def main():
    """Validate the scenario argument, prepare the glance image, run the
    requested rally scenario(s) and clean up afterwards."""
    # configure script
    if args.test_name not in tests:
        logger.error('argument not valid')
        exit(-1)

    creds_nova = functest_utils.get_credentials("nova")
    nova_client = novaclient.Client(**creds_nova)

    logger.debug("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME,
                                                       GLANCE_IMAGE_PATH))
    create_glance_image(GLANCE_IMAGE_PATH, GLANCE_IMAGE_NAME,
                        GLANCE_IMAGE_FORMAT)

    # Check if the given image exists
    try:
        nova_client.images.find(name=GLANCE_IMAGE_NAME)
        logger.info("Glance image found '%s'" % GLANCE_IMAGE_NAME)
    except Exception:
        # BUG FIX: the bare 'except:' also swallowed SystemExit and
        # KeyboardInterrupt; catch Exception instead.
        logger.error("ERROR: Glance image '%s' not found." % GLANCE_IMAGE_NAME)
        logger.info("Available images are: ")
        exit(-1)

    if args.test_name == "all":
        # 'all' is the meta-entry itself; 'heat' and 'vm' are excluded
        # from the full run for now ('smoke' kept for safety although it
        # is not in the tests list).
        skipped = ('all', 'heat', 'smoke', 'vm')
        for test_name in tests:
            if test_name not in skipped:
                print(test_name)
                run_task(test_name)
    else:
        print(args.test_name)
        run_task(args.test_name)

    cleanup(nova_client)

if __name__ == '__main__':
    main()
diff --git a/testcases/VIM/OpenStack/CI/rally_cert/macro/macro.yaml b/testcases/VIM/OpenStack/CI/rally_cert/macro/macro.yaml
new file mode 100644 (file)
index 0000000..e6542f3
--- /dev/null
@@ -0,0 +1,95 @@
{#
  Shared Jinja2 macros for the OPNFV rally certification scenarios.
  The scenario files import these to parameterize contexts, runners,
  quotas and SLAs. Comment blocks render to nothing, so they do not
  affect the generated YAML.
#}
{# Emit a 'users' context, or {} when reusing existing users with no extra body. #}
{%- macro user_context(tenants,users_per_tenant, use_existing_users) -%}
{%- if use_existing_users and caller is not defined -%} {}
{%- else %}
  {%- if not use_existing_users %}
        users:
          tenants: {{ tenants }}
          users_per_tenant: {{ users_per_tenant }}
  {%- endif %}
  {%- if caller is defined %}
    {{ caller() }}
  {%- endif %}
{%- endif %}
{%- endmacro %}

{# Emit flavor/image/size args; each part is skipped when its value is none. #}
{%- macro vm_params(image=none, flavor=none, size=none) %}
{%- if flavor is not none %}
        flavor:
          name: {{ flavor }}
{%- endif %}
{%- if image is not none %}
        image:
          name: {{ image }}
{%- endif %}
{%- if size is not none %}
        size: {{ size }}
{%- endif %}
{%- endmacro %}

{# Cinder quota block with all limits removed (-1 = unlimited). #}
{%- macro unlimited_volumes() %}
          cinder:
            gigabytes: -1
            snapshots: -1
            volumes: -1
{%- endmacro %}

{# Constant runner; smoke mode pins concurrency and times to 1. #}
{%- macro constant_runner(concurrency=1, times=1, is_smoke=True) %}
        type: "constant"
  {%- if is_smoke %}
        concurrency: 1
        times: 1
  {%- else %}
        concurrency: {{ concurrency }}
        times: {{ times }}
  {%- endif %}
{%- endmacro %}

{# Requests-per-second runner; smoke mode pins rps and times to 1. #}
{%- macro rps_runner(rps=1, times=1, is_smoke=True) %}
        type: rps
  {%- if is_smoke %}
        rps: 1
        times: 1
  {%- else %}
        rps: {{ rps }}
        times: {{ times }}
  {%- endif %}
{%- endmacro %}

{# SLA: any failed iteration fails the task. #}
{%- macro no_failures_sla() %}
        failure_rate:
          max: 0
{%- endmacro %}

{# Pre-created volumes context. #}
{%- macro volumes(size=1, volumes_per_tenant=1) %}
        volumes:
          size: {{ size }}
          volumes_per_tenant: {{ volumes_per_tenant }}
{%- endmacro %}

{# Nova quota block with all limits removed; key_pairs only on request. #}
{%- macro unlimited_nova(keypairs=false) %}
          nova:
            cores: -1
            floating_ips: -1
            instances: -1
            {%- if keypairs %}
            key_pairs: -1
            {%- endif %}
            ram: -1
            security_group_rules: -1
            security_groups: -1
{%- endmacro %}

{# Neutron quota block, emitted only when neutron is in the service list. #}
{%- macro unlimited_neutron() %}
{% if "neutron" in service_list %}
          neutron:
            network: -1
            port: -1
            subnet: -1
{% endif %}
{%- endmacro %}

{# Standard glance image arguments. #}
{%- macro glance_args(location, container="bare", type="qcow2") %}
        container_format: {{ container }}
        disk_format: {{ type }}
        image_location: {{ location }}
{%- endmacro %}
diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-authenticate.yaml b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-authenticate.yaml
new file mode 100644 (file)
index 0000000..8d7f0e7
--- /dev/null
@@ -0,0 +1,63 @@
+  Authenticate.keystone:
+    -
+      context:
+        {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  Authenticate.validate_cinder:
+    -
+      args:
+        repetitions: 2
+      context:
+        {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  Authenticate.validate_glance:
+    -
+      args:
+        repetitions: 2
+      context:
+        {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  Authenticate.validate_heat:
+    -
+      args:
+        repetitions: 2
+      context:
+        {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  Authenticate.validate_neutron:
+    -
+      args:
+        repetitions: 2
+      context:
+        {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  Authenticate.validate_nova:
+    -
+      args:
+        repetitions: 2
+      context:
+        {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-cinder.yaml b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-cinder.yaml
new file mode 100644 (file)
index 0000000..3c64e26
--- /dev/null
@@ -0,0 +1,250 @@
+  CinderVolumes.create_and_attach_volume:
+    -
+      args:
+        {{ vm_params(image_name,flavor_name,1) }}
+      context:
+        {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+        quotas:
+          {{ unlimited_volumes() }}
+        {% endcall %}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  CinderVolumes.create_and_delete_snapshot:
+    -
+      args:
+        force: false
+      context:
+        {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+        quotas:
+          {{ unlimited_volumes() }}
+        {{ volumes() }}
+        {% endcall %}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  CinderVolumes.create_and_delete_volume:
+    -
+      args:
+        size:
+          max: 1
+          min: 1
+      context:
+        {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+        quotas:
+          {{ unlimited_volumes() }}
+        {% endcall %}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+    -
+      args:
+        {{ vm_params(image_name,none,1) }}
+      context:
+        {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+        quotas:
+          {{ unlimited_volumes() }}
+        {% endcall %}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+    -
+      args:
+        size: 1
+      context:
+        {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+        quotas:
+          {{ unlimited_volumes() }}
+        {% endcall %}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  CinderVolumes.create_and_extend_volume:
+    -
+      args:
+        new_size: 2
+        size: 1
+      context:
+        {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+        quotas:
+          {{ unlimited_volumes() }}
+        {% endcall %}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  CinderVolumes.create_and_list_snapshots:
+    -
+      args:
+        detailed: true
+        force: false
+      context:
+        {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+        quotas:
+          {{ unlimited_volumes() }}
+        {{ volumes() }}
+        {% endcall %}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  CinderVolumes.create_and_list_volume:
+    -
+      args:
+        detailed: true
+        {{ vm_params(image_name,none,1) }}
+      context:
+        {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+        quotas:
+          {{ unlimited_volumes() }}
+        {% endcall %}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+    -
+      args:
+        detailed: true
+        size: 1
+      context:
+        {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+        quotas:
+          {{ unlimited_volumes() }}
+        {% endcall %}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  CinderVolumes.create_and_upload_volume_to_image:
+    -
+      args:
+        container_format: "bare"
+        disk_format: "raw"
+        do_delete: true
+        force: false
+        size: 1
+      context:
+        {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+        quotas:
+          {{ unlimited_volumes() }}
+        {% endcall %}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  CinderVolumes.create_from_volume_and_delete_volume:
+    -
+      args:
+        size: 1
+      context:
+        {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+        quotas:
+          {{ unlimited_volumes() }}
+        {{ volumes() }}
+        {% endcall %}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  CinderVolumes.create_nested_snapshots_and_attach_volume:
+    -
+      args:
+        nested_level:
+          max: 1
+          min: 1
+        size:
+          max: 1
+          min: 1
+      context:
+        {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+        quotas:
+          {{ unlimited_volumes() }}
+        servers:
+          {{ vm_params(image_name,flavor_name,none)|indent(2,true) }}
+          servers_per_tenant: 1
+        {% endcall %}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  CinderVolumes.create_snapshot_and_attach_volume:
+    -
+      args:
+        volume_type: false
+        size:
+          min: 1
+          max: 5
+      context:
+        {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+        servers:
+          {{ vm_params(image_name,flavor_name,none)|indent(2,true) }}
+          servers_per_tenant: 2
+        {% endcall %}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
    -
      args:
        volume_type: true
        size:
          min: 1
          max: 5
+      context:
+        {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+        servers:
+          {{ vm_params(image_name,flavor_name,none)|indent(2,true) }}
+          servers_per_tenant: 2
+        {% endcall %}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  CinderVolumes.create_volume:
+    -
+      args:
+        size: 1
+      context:
+        {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+    -
+      args:
+        size:
+          min: 1
+          max: 5
+      context:
+        {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  CinderVolumes.list_volumes:
+    -
+      args:
+        detailed: True
+      context:
+        {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+        volumes:
+          size: 1
+          volumes_per_tenant: 4
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-glance.yaml b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-glance.yaml
new file mode 100644 (file)
index 0000000..c9cf9a0
--- /dev/null
@@ -0,0 +1,44 @@
+  GlanceImages.create_and_delete_image:
+    -
+      args:
+        {{ glance_args(location=glance_image_location) }}
+      context:
+        {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  GlanceImages.create_and_list_image:
+    -
+      args:
+        {{ glance_args(location=glance_image_location) }}
+      context:
+        {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  GlanceImages.list_images:
+    -
+      context:
+        {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  GlanceImages.create_image_and_boot_instances:
+    -
+      args:
+        {{ glance_args(location=glance_image_location) }}
+        flavor:
+            name: "{{flavor_name}}"
+        number_instances: 2
+      context:
+        {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-heat.yaml b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-heat.yaml
new file mode 100644 (file)
index 0000000..3689cc4
--- /dev/null
@@ -0,0 +1,132 @@
+  HeatStacks.create_and_delete_stack:
+    -
+      args:
+        template_path: "{{ tmpl_dir }}/default.yaml.template"
+      context:
+        {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+    -
+      args:
+        template_path: "{{ tmpl_dir }}/server_with_ports.yaml.template"
+      context:
+        {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+    -
+      args:
+        template_path: "{{ tmpl_dir }}/server_with_volume.yaml.template"
+      context:
+        {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  HeatStacks.create_and_list_stack:
+    -
+      args:
+        template_path: "{{ tmpl_dir }}/default.yaml.template"
+      context:
+        {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  HeatStacks.create_check_delete_stack:
+    -
+      args:
+        template_path: "{{ tmpl_dir }}/random_strings.yaml.template"
+      context:
+        {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  HeatStacks.create_suspend_resume_delete_stack:
+    -
+      args:
+        template_path: "{{ tmpl_dir }}/random_strings.yaml.template"
+      context:
+        {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  HeatStacks.create_update_delete_stack:
+    -
+      args:
+        template_path: "{{ tmpl_dir }}/random_strings.yaml.template"
+        updated_template_path: "{{ tmpl_dir }}/updated_random_strings_add.yaml.template"
+      context:
+        {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+    -
+      args:
+        template_path: "{{ tmpl_dir }}/random_strings.yaml.template"
+        updated_template_path: "{{ tmpl_dir }}/updated_random_strings_delete.yaml.template"
+      context:
+        {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+    -
+      args:
+        template_path: "{{ tmpl_dir }}/resource_group.yaml.template"
+        updated_template_path: "{{ tmpl_dir }}/updated_resource_group_increase.yaml.template"
+      context:
+        {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+    -
+      args:
+        template_path: "{{ tmpl_dir }}/autoscaling_policy.yaml.template"
+        updated_template_path: "{{ tmpl_dir }}/updated_autoscaling_policy_inplace.yaml.template"
+      context:
+        {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+    -
+      args:
+        template_path: "{{ tmpl_dir }}/resource_group.yaml.template"
+        updated_template_path: "{{ tmpl_dir }}/updated_resource_group_reduce.yaml.template"
+      context:
+        {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+    -
+      args:
+        template_path: "{{ tmpl_dir }}/random_strings.yaml.template"
+        updated_template_path: "{{ tmpl_dir }}/updated_random_strings_replace.yaml.template"
+      context:
+        {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  HeatStacks.list_stacks_and_resources:
+    -
+      context:
+        {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
\ No newline at end of file
diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-keystone.yaml b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-keystone.yaml
new file mode 100644 (file)
index 0000000..a6a637f
--- /dev/null
@@ -0,0 +1,103 @@
+  KeystoneBasic.add_and_remove_user_role:
+    -
+      context:
+        {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  KeystoneBasic.create_add_and_list_user_roles:
+    -
+      context:
+        {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  KeystoneBasic.create_and_list_tenants:
+    -
+      args:
+        name_length: 10
+      context:
+        {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  KeystoneBasic.create_and_delete_role:
+    -
+      context:
+        {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  KeystoneBasic.create_and_delete_service:
+    -
+      context:
+        {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  KeystoneBasic.get_entities:
+    -
+      context:
+        {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  KeystoneBasic.create_update_and_delete_tenant:
+    -
+      args:
+        name_length: 10
+      context:
+        {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  KeystoneBasic.create_user:
+    -
+      args:
+        name_length: 10
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  KeystoneBasic.create_tenant:
+    -
+      args:
+        name_length: 10
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  KeystoneBasic.create_and_list_users:
+    -
+      args:
+        name_length: 10
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  KeystoneBasic.create_tenant_with_users:
+    -
+      args:
+        name_length: 10
+        users_per_tenant: 10
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-neutron.yaml b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-neutron.yaml
new file mode 100644 (file)
index 0000000..4f4a633
--- /dev/null
@@ -0,0 +1,245 @@
+  NeutronNetworks.create_and_delete_networks:
+    -
+      args:
+        network_create_args: {}
+      context:
+        {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+        quotas:
+          neutron:
+            network: -1
+        {% endcall %}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  NeutronNetworks.create_and_delete_ports:
+    -
+      args:
+        network_create_args: {}
+        port_create_args: {}
+        ports_per_network: 1
+      context:
+        {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+        quotas:
+          neutron:
+            network: -1
+            port: -1
+        {% endcall %}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  NeutronNetworks.create_and_delete_routers:
+    -
+      args:
+        network_create_args: {}
+        router_create_args: {}
+        subnet_cidr_start: "1.1.0.0/30"
+        subnet_create_args: {}
+        subnets_per_network: 1
+      context:
+        {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+        quotas:
+          neutron:
+            network: -1
+            subnet: -1
+            port: -1
+            router: -1
+        {% endcall %}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  NeutronNetworks.create_and_delete_subnets:
+    -
+      args:
+        network_create_args: {}
+        subnet_cidr_start: "1.1.0.0/30"
+        subnet_create_args: {}
+        subnets_per_network: 1
+      context:
+        {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+        quotas:
+          neutron:
+            network: -1
+            subnet: -1
+        {% endcall %}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  NeutronNetworks.create_and_list_networks:
+    -
+      args:
+        network_create_args: {}
+      context:
+        {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+        quotas:
+          neutron:
+            network: -1
+        {% endcall %}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  NeutronNetworks.create_and_list_ports:
+    -
+      args:
+        network_create_args: {}
+        port_create_args: {}
+        ports_per_network: 1
+      context:
+        {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+        quotas:
+          neutron:
+            network: -1
+            port: -1
+        {% endcall %}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  NeutronNetworks.create_and_list_routers:
+    -
+      args:
+        network_create_args: {}
+        router_create_args: {}
+        subnet_cidr_start: "1.1.0.0/30"
+        subnet_create_args: {}
+        subnets_per_network: 1
+      context:
+        {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+        quotas:
+          neutron:
+            network: -1
+            subnet: -1
+            router: -1
+        {% endcall %}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  NeutronNetworks.create_and_list_subnets:
+    -
+      args:
+        network_create_args: {}
+        subnet_cidr_start: "1.1.0.0/30"
+        subnet_create_args: {}
+        subnets_per_network: 1
+      context:
+        {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+        quotas:
+          neutron:
+            network: -1
+            subnet: -1
+        {% endcall %}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  NeutronNetworks.create_and_update_networks:
+    -
+      args:
+        network_create_args: {}
+        network_update_args:
+          admin_state_up: false
+          name: "_updated"
+      context:
+        {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+        quotas:
+          neutron:
+            network: -1
+        {% endcall %}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  NeutronNetworks.create_and_update_ports:
+    -
+      args:
+        network_create_args: {}
+        port_create_args: {}
+        port_update_args:
+          admin_state_up: false
+          device_id: "dummy_id"
+          device_owner: "dummy_owner"
+          name: "_port_updated"
+        ports_per_network: 1
+      context:
+        {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+        quotas:
+          neutron:
+            network: -1
+            port: -1
+        {% endcall %}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  NeutronNetworks.create_and_update_routers:
+    -
+      args:
+        network_create_args: {}
+        router_create_args: {}
+        router_update_args:
+          admin_state_up: false
+          name: "_router_updated"
+        subnet_cidr_start: "1.1.0.0/30"
+        subnet_create_args: {}
+        subnets_per_network: 1
+      context:
+        {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+        quotas:
+          neutron:
+            network: -1
+            subnet: -1
+            port: -1
+            router: -1
+        {% endcall %}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  NeutronNetworks.create_and_update_subnets:
+    -
+      args:
+        network_create_args: {}
+        subnet_cidr_start: "1.4.0.0/16"
+        subnet_create_args: {}
+        subnet_update_args:
+          enable_dhcp: false
+          name: "_subnet_updated"
+        subnets_per_network: 1
+      context:
+        {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+        quotas:
+          neutron:
+            network: -1
+            subnet: -1
+        {% endcall %}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  Quotas.neutron_update:
+    -
+      args:
+        max_quota: 1024
+      context:
+        {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
\ No newline at end of file
diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-nova.yaml b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-nova.yaml
new file mode 100644 (file)
index 0000000..76a3375
--- /dev/null
@@ -0,0 +1,324 @@
+  NovaKeypair.boot_and_delete_server_with_keypair:
+    -
+      args:
+        {{ vm_params(image_name, flavor_name) }}
+      context:
+        {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+        network:
+          networks_per_tenant: 1
+          start_cidr: "100.1.0.0/25"
+        quotas:
+          {{ unlimited_neutron() }}
+          {{ unlimited_nova(keypairs=true) }}
+        {% endcall %}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  NovaKeypair.create_and_delete_keypair:
+    -
+      context:
+        {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+        quotas:
+          {{ unlimited_nova(keypairs=true) }}
+        {% endcall %}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  NovaKeypair.create_and_list_keypairs:
+    -
+      context:
+        {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+        quotas:
+          {{ unlimited_nova(keypairs=true) }}
+        {% endcall %}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  NovaServers.boot_and_bounce_server:
+    -
+      args:
+        actions:
+          -
+            hard_reboot: 1
+          -
+            soft_reboot: 1
+          -
+            stop_start: 1
+          -
+            rescue_unrescue: 1
+        {{ vm_params(image_name, flavor_name) }}
+      context:
+        {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+        network:
+          networks_per_tenant: 1
+          start_cidr: "100.1.0.0/25"
+        quotas:
+          {{ unlimited_neutron() }}
+          {{ unlimited_nova() }}
+        {% endcall %}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  NovaServers.boot_and_delete_server:
+    -
+      args:
+        {{ vm_params(image_name, flavor_name) }}
+      context:
+        {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+        network:
+          networks_per_tenant: 1
+          start_cidr: "100.1.0.0/25"
+        quotas:
+          {{ unlimited_neutron() }}
+          {{ unlimited_nova() }}
+        {% endcall %}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  NovaServers.boot_and_list_server:
+    -
+      args:
+        detailed: true
+        {{ vm_params(image_name, flavor_name) }}
+      context:
+        {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+        network:
+          networks_per_tenant: 1
+          start_cidr: "100.1.0.0/25"
+        quotas:
+          {{ unlimited_neutron() }}
+          {{ unlimited_nova() }}
+        {% endcall %}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  NovaServers.boot_and_rebuild_server:
+    -
+      args:
+        {{ vm_params(flavor=flavor_name) }}
+        from_image:
+          name: {{ image_name }}
+        to_image:
+          name: {{ image_name }}
+      context:
+        {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+        network:
+          networks_per_tenant: 1
+          start_cidr: "100.1.0.0/25"
+        quotas:
+          {{ unlimited_neutron() }}
+          {{ unlimited_nova() }}
+        {% endcall %}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  NovaServers.boot_server_from_volume_and_delete:
+    -
+      args:
+        {{ vm_params(image_name, flavor_name) }}
+        volume_size: 5
+      context:
+        {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+        network:
+          networks_per_tenant: 1
+          start_cidr: "100.1.0.0/25"
+        quotas:
+          {{ unlimited_volumes() }}
+          {{ unlimited_neutron() }}
+          {{ unlimited_nova() }}
+        {% endcall %}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  NovaServers.pause_and_unpause_server:
+    -
+      args:
+        {{ vm_params(image_name, flavor_name) }}
+        force_delete: false
+      context:
+        {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+        network:
+          networks_per_tenant: 1
+          start_cidr: "100.1.0.0/25"
+        quotas:
+          {{ unlimited_neutron() }}
+          {{ unlimited_nova() }}
+        {% endcall %}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  NovaServers.snapshot_server:
+    -
+      args:
+        {{ vm_params(image_name, flavor_name) }}
+      context:
+        {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+        network:
+          networks_per_tenant: 1
+          start_cidr: "100.1.0.0/25"
+        quotas:
+          {{ unlimited_neutron() }}
+          {{ unlimited_nova() }}
+        {% endcall %}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  NovaSecGroup.boot_and_delete_server_with_secgroups:
+    -
+      args:
+        {{ vm_params(image_name, flavor_name) }}
+        security_group_count: 10
+        rules_per_security_group: 10
+      context:
+        {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+        network:
+          start_cidr: "100.1.0.0/25"
+        {% endcall %}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  NovaServers.boot_and_live_migrate_server:
+    - args:
+        {{ vm_params(image_name, flavor_name) }}
+        block_migration: false
+      context:
+        {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  NovaServers.boot_and_migrate_server:
+    - args:
+        {{ vm_params(image_name, flavor_name) }}
+      context:
+        {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  NovaServers.boot_server_from_volume:
+    -
+      args:
+        {{ vm_params(image_name, flavor_name) }}
+        volume_size: 10
+      context:
+        {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  NovaServers.boot_server:
+    -
+      args:
+        {{ vm_params(image_name, flavor_name) }}
+      context:
+        {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  NovaServers.boot_server_attach_created_volume_and_live_migrate:
+    -
+      args:
+        {{ vm_params(image_name, flavor_name) }}
+        size: 10
+        block_migration: false
+      context:
+        {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  NovaServers.boot_server_from_volume_and_live_migrate:
+    - args:
+        {{ vm_params(image_name, flavor_name) }}
+        block_migration: false
+        volume_size: 10
+        force_delete: false
+      context:
+        {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  NovaSecGroup.create_and_delete_secgroups:
+    -
+      args:
+        security_group_count: 10
+        rules_per_security_group: 10
+      context:
+        {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  NovaSecGroup.create_and_list_secgroups:
+    -
+      args:
+        security_group_count: 10
+        rules_per_security_group: 10
+      context:
+        {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  NovaServers.list_servers:
+    -
+      args:
+        detailed: True
+      context:
+        {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+        servers:
+          {{ vm_params(image_name,flavor_name,none)|indent(2,true) }}
+          servers_per_tenant: 2
+        {% endcall %}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  NovaServers.resize_server:
+    -
+      args:
+        {{ vm_params(image_name, flavor_name) }}
+        to_flavor:
+          name: "m1.small"
+        confirm: true
+        force_delete: false
+      context:
+        {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-quotas.yaml b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-quotas.yaml
new file mode 100644 (file)
index 0000000..66fd203
--- /dev/null
@@ -0,0 +1,54 @@
+  Quotas.cinder_update_and_delete:
+    -
+      args:
+        max_quota: 1024
+      context:
+        {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  Quotas.cinder_update:
+    -
+      args:
+        max_quota: 1024
+      context:
+        {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  Quotas.neutron_update:
+    -
+      args:
+        max_quota: 1024
+      context:
+        {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  Quotas.nova_update_and_delete:
+    -
+      args:
+        max_quota: 1024
+      context:
+        {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  Quotas.nova_update:
+    -
+      args:
+        max_quota: 1024
+      context:
+        {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount,times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-requests.yaml b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-requests.yaml
new file mode 100644 (file)
index 0000000..b7d2033
--- /dev/null
@@ -0,0 +1,28 @@
+  HttpRequests.check_random_request:
+    -
+      args:
+        requests:
+          -
+            url: "http://www.example.com"
+            method: "GET"
+            status_code: 200
+          -
+            url: "http://www.openstack.org"
+            method: "GET"
+            status_code: 200
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+
+  HttpRequests.check_request:
+    -
+      args:
+        url: "http://www.example.com"
+        method: "GET"
+        status_code: 200
+        allow_redirects: False
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-smoke.yaml b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-smoke.yaml
new file mode 100644 (file)
index 0000000..f102edb
--- /dev/null
@@ -0,0 +1,268 @@
+  TempestScenario.list_of_tests:
+  -
+    args:
+      tempest_conf: /etc/tempest/tempest.conf
+      test_names:
+       - tempest.api.compute.flavors.test_flavors.FlavorsV2TestJSON.test_get_flavor
+       - tempest.api.compute.flavors.test_flavors.FlavorsV2TestJSON.test_list_flavors
+       - tempest.api.compute.flavors.test_flavors.FlavorsV2TestJSON.test_list_flavors_with_detail
+       - tempest.api.compute.images.test_images_oneserver.ImagesOneServerTestJSON.test_create_delete_image
+       - tempest.api.compute.images.test_list_images.ListImagesTestJSON.test_get_image
+       - tempest.api.compute.images.test_list_images.ListImagesTestJSON.test_list_images
+       - tempest.api.compute.images.test_list_images.ListImagesTestJSON.test_list_images_with_detail
+       - tempest.api.compute.security_groups.test_security_group_rules.SecurityGroupRulesTestJSON.test_security_group_rules_create
+       - tempest.api.compute.security_groups.test_security_group_rules.SecurityGroupRulesTestJSON.test_security_group_rules_create_with_optional_cidr
+       - tempest.api.compute.security_groups.test_security_group_rules.SecurityGroupRulesTestJSON.test_security_group_rules_create_with_optional_group_id
+       - tempest.api.compute.security_groups.test_security_group_rules.SecurityGroupRulesTestJSON.test_security_group_rules_delete_when_peer_group_deleted
+       - tempest.api.compute.security_groups.test_security_group_rules.SecurityGroupRulesTestJSON.test_security_group_rules_list
+       - tempest.api.compute.security_groups.test_security_groups.SecurityGroupsTestJSON.test_security_group_create_get_delete
+       - tempest.api.compute.security_groups.test_security_groups.SecurityGroupsTestJSON.test_security_groups_create_list_delete
+       - tempest.api.compute.security_groups.test_security_groups.SecurityGroupsTestJSON.test_server_security_groups
+       - tempest.api.compute.security_groups.test_security_groups.SecurityGroupsTestJSON.test_update_security_groups
+       - tempest.api.compute.servers.test_attach_interfaces.AttachInterfacesTestJSON.test_add_remove_fixed_ip
+       - tempest.api.compute.servers.test_attach_interfaces.AttachInterfacesTestJSON.test_create_list_show_delete_interfaces
+       - tempest.api.compute.servers.test_create_server.ServersTestJSON.test_list_servers
+       - tempest.api.compute.servers.test_create_server.ServersTestJSON.test_list_servers_with_detail
+       - tempest.api.compute.servers.test_create_server.ServersTestJSON.test_verify_server_details
+       - tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_list_servers
+       - tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_list_servers_with_detail
+       - tempest.api.compute.servers.test_create_server.ServersTestManualDisk.test_verify_server_details
+       - tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_reboot_server_hard
+       - tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_reboot_server_soft
+       - tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_rebuild_server
+       - tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_resize_server_confirm
+       - tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON.test_resize_server_confirm_from_stopped
+       - tempest.api.compute.servers.test_server_addresses.ServerAddressesTestJSON.test_list_server_addresses
+       - tempest.api.compute.servers.test_server_addresses.ServerAddressesTestJSON.test_list_server_addresses_by_network
+       - tempest.api.compute.servers.test_server_rescue.ServerRescueTestJSON.test_rescue_unrescue_instance
+       - tempest.api.compute.test_quotas.QuotasTestJSON.test_compare_tenant_quotas_with_default_quotas
+       - tempest.api.compute.test_quotas.QuotasTestJSON.test_get_default_quotas
+       - tempest.api.compute.test_quotas.QuotasTestJSON.test_get_quotas
+       - tempest.api.compute.volumes.test_volumes_get.VolumesGetTestJSON.test_volume_create_get_delete
+       - tempest.api.data_processing.test_cluster_templates.ClusterTemplateTest.test_cluster_template_create
+       - tempest.api.data_processing.test_cluster_templates.ClusterTemplateTest.test_cluster_template_delete
+       - tempest.api.data_processing.test_cluster_templates.ClusterTemplateTest.test_cluster_template_get
+       - tempest.api.data_processing.test_cluster_templates.ClusterTemplateTest.test_cluster_template_list
+       - tempest.api.data_processing.test_data_sources.DataSourceTest.test_external_hdfs_data_source_create
+       - tempest.api.data_processing.test_data_sources.DataSourceTest.test_external_hdfs_data_source_delete
+       - tempest.api.data_processing.test_data_sources.DataSourceTest.test_external_hdfs_data_source_get
+       - tempest.api.data_processing.test_data_sources.DataSourceTest.test_external_hdfs_data_source_list
+       - tempest.api.data_processing.test_data_sources.DataSourceTest.test_local_hdfs_data_source_create
+       - tempest.api.data_processing.test_data_sources.DataSourceTest.test_local_hdfs_data_source_delete
+       - tempest.api.data_processing.test_data_sources.DataSourceTest.test_local_hdfs_data_source_get
+       - tempest.api.data_processing.test_data_sources.DataSourceTest.test_local_hdfs_data_source_list
+       - tempest.api.data_processing.test_data_sources.DataSourceTest.test_swift_data_source_create
+       - tempest.api.data_processing.test_data_sources.DataSourceTest.test_swift_data_source_delete
+       - tempest.api.data_processing.test_data_sources.DataSourceTest.test_swift_data_source_get
+       - tempest.api.data_processing.test_data_sources.DataSourceTest.test_swift_data_source_list
+       - tempest.api.data_processing.test_job_binaries.JobBinaryTest.test_internal_db_job_binary_create
+       - tempest.api.data_processing.test_job_binaries.JobBinaryTest.test_internal_db_job_binary_delete
+       - tempest.api.data_processing.test_job_binaries.JobBinaryTest.test_internal_db_job_binary_get
+       - tempest.api.data_processing.test_job_binaries.JobBinaryTest.test_internal_db_job_binary_list
+       - tempest.api.data_processing.test_job_binaries.JobBinaryTest.test_job_binary_get_data
+       - tempest.api.data_processing.test_job_binaries.JobBinaryTest.test_swift_job_binary_create
+       - tempest.api.data_processing.test_job_binaries.JobBinaryTest.test_swift_job_binary_delete
+       - tempest.api.data_processing.test_job_binaries.JobBinaryTest.test_swift_job_binary_get
+       - tempest.api.data_processing.test_job_binaries.JobBinaryTest.test_swift_job_binary_list
+       - tempest.api.data_processing.test_job_binary_internals.JobBinaryInternalTest.test_job_binary_internal_create
+       - tempest.api.data_processing.test_job_binary_internals.JobBinaryInternalTest.test_job_binary_internal_delete
+       - tempest.api.data_processing.test_job_binary_internals.JobBinaryInternalTest.test_job_binary_internal_get
+       - tempest.api.data_processing.test_job_binary_internals.JobBinaryInternalTest.test_job_binary_internal_get_data
+       - tempest.api.data_processing.test_job_binary_internals.JobBinaryInternalTest.test_job_binary_internal_list
+       - tempest.api.data_processing.test_jobs.JobTest.test_job_create
+       - tempest.api.data_processing.test_jobs.JobTest.test_job_delete
+       - tempest.api.data_processing.test_jobs.JobTest.test_job_get
+       - tempest.api.data_processing.test_jobs.JobTest.test_job_list
+       - tempest.api.data_processing.test_node_group_templates.NodeGroupTemplateTest.test_node_group_template_create
+       - tempest.api.data_processing.test_node_group_templates.NodeGroupTemplateTest.test_node_group_template_delete
+       - tempest.api.data_processing.test_node_group_templates.NodeGroupTemplateTest.test_node_group_template_get
+       - tempest.api.data_processing.test_node_group_templates.NodeGroupTemplateTest.test_node_group_template_list
+       - tempest.api.data_processing.test_plugins.PluginsTest.test_plugin_get
+       - tempest.api.data_processing.test_plugins.PluginsTest.test_plugin_list
+       - tempest.api.database.flavors.test_flavors.DatabaseFlavorsTest.test_compare_db_flavors_with_os
+       - tempest.api.database.flavors.test_flavors.DatabaseFlavorsTest.test_get_db_flavor
+       - tempest.api.database.flavors.test_flavors.DatabaseFlavorsTest.test_list_db_flavors
+       - tempest.api.database.limits.test_limits.DatabaseLimitsTest.test_absolute_limits
+       - tempest.api.database.versions.test_versions.DatabaseVersionsTest.test_list_db_versions
+       - tempest.api.identity.admin.v2.test_services.ServicesTestJSON.test_list_services
+       - tempest.api.identity.admin.v2.test_users.UsersTestJSON.test_create_user
+       - tempest.api.identity.admin.v3.test_credentials.CredentialsTestJSON.test_credentials_create_get_update_delete
+       - tempest.api.identity.admin.v3.test_domains.DomainsTestJSON.test_create_update_delete_domain
+       - tempest.api.identity.admin.v3.test_endpoints.EndPointsTestJSON.test_update_endpoint
+       - tempest.api.identity.admin.v3.test_groups.GroupsV3TestJSON.test_group_users_add_list_delete
+       - tempest.api.identity.admin.v3.test_policies.PoliciesTestJSON.test_create_update_delete_policy
+       - tempest.api.identity.admin.v3.test_regions.RegionsTestJSON.test_create_region_with_specific_id
+       - tempest.api.identity.admin.v3.test_roles.RolesV3TestJSON.test_role_create_update_get_list
+       - tempest.api.identity.admin.v3.test_services.ServicesTestJSON.test_create_update_get_service
+       - tempest.api.identity.admin.v3.test_trusts.TrustsV3TestJSON.test_get_trusts_all
+       - tempest.api.messaging.test_claims.TestClaims.test_post_claim
+       - tempest.api.messaging.test_claims.TestClaims.test_query_claim
+       - tempest.api.messaging.test_claims.TestClaims.test_release_claim
+       - tempest.api.messaging.test_claims.TestClaims.test_update_claim
+       - tempest.api.messaging.test_messages.TestMessages.test_delete_multiple_messages
+       - tempest.api.messaging.test_messages.TestMessages.test_delete_single_message
+       - tempest.api.messaging.test_messages.TestMessages.test_get_message
+       - tempest.api.messaging.test_messages.TestMessages.test_get_multiple_messages
+       - tempest.api.messaging.test_messages.TestMessages.test_list_messages
+       - tempest.api.messaging.test_messages.TestMessages.test_post_messages
+       - tempest.api.messaging.test_queues.TestManageQueue.test_check_queue_existence
+       - tempest.api.messaging.test_queues.TestManageQueue.test_check_queue_head
+       - tempest.api.messaging.test_queues.TestManageQueue.test_get_queue_stats
+       - tempest.api.messaging.test_queues.TestManageQueue.test_list_queues
+       - tempest.api.messaging.test_queues.TestManageQueue.test_set_and_get_queue_metadata
+       - tempest.api.messaging.test_queues.TestQueues.test_create_delete_queue
+       - tempest.api.network.test_extensions.ExtensionsTestJSON.test_list_show_extensions
+       - tempest.api.network.test_floating_ips.FloatingIPTestJSON.test_create_floating_ip_specifying_a_fixed_ip_address
+       - tempest.api.network.test_floating_ips.FloatingIPTestJSON.test_create_list_show_update_delete_floating_ip
+       - tempest.api.network.test_networks.BulkNetworkOpsIpV6TestJSON.test_bulk_create_delete_network
+       - tempest.api.network.test_networks.BulkNetworkOpsIpV6TestJSON.test_bulk_create_delete_port
+       - tempest.api.network.test_networks.BulkNetworkOpsIpV6TestJSON.test_bulk_create_delete_subnet
+       - tempest.api.network.test_networks.BulkNetworkOpsTestJSON.test_bulk_create_delete_network
+       - tempest.api.network.test_networks.BulkNetworkOpsTestJSON.test_bulk_create_delete_port
+       - tempest.api.network.test_networks.BulkNetworkOpsTestJSON.test_bulk_create_delete_subnet
+       - tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_create_update_delete_network_subnet
+       - tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_external_network_visibility
+       - tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_list_networks
+       - tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_list_subnets
+       - tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_show_network
+       - tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_show_subnet
+       - tempest.api.network.test_networks.NetworksIpV6TestJSON.test_create_update_delete_network_subnet
+       - tempest.api.network.test_networks.NetworksIpV6TestJSON.test_external_network_visibility
+       - tempest.api.network.test_networks.NetworksIpV6TestJSON.test_list_networks
+       - tempest.api.network.test_networks.NetworksIpV6TestJSON.test_list_subnets
+       - tempest.api.network.test_networks.NetworksIpV6TestJSON.test_show_network
+       - tempest.api.network.test_networks.NetworksIpV6TestJSON.test_show_subnet
+       - tempest.api.network.test_networks.NetworksTestJSON.test_create_update_delete_network_subnet
+       - tempest.api.network.test_networks.NetworksTestJSON.test_external_network_visibility
+       - tempest.api.network.test_networks.NetworksTestJSON.test_list_networks
+       - tempest.api.network.test_networks.NetworksTestJSON.test_list_subnets
+       - tempest.api.network.test_networks.NetworksTestJSON.test_show_network
+       - tempest.api.network.test_networks.NetworksTestJSON.test_show_subnet
+       - tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_port_in_allowed_allocation_pools
+       - tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_port_with_no_securitygroups
+       - tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_update_delete_port
+       - tempest.api.network.test_ports.PortsIpV6TestJSON.test_list_ports
+       - tempest.api.network.test_ports.PortsIpV6TestJSON.test_show_port
+       - tempest.api.network.test_ports.PortsTestJSON.test_create_port_in_allowed_allocation_pools
+       - tempest.api.network.test_ports.PortsTestJSON.test_create_port_with_no_securitygroups
+       - tempest.api.network.test_ports.PortsTestJSON.test_create_update_delete_port
+       - tempest.api.network.test_ports.PortsTestJSON.test_list_ports
+       - tempest.api.network.test_ports.PortsTestJSON.test_show_port
+       - tempest.api.network.test_routers.RoutersIpV6Test.test_add_multiple_router_interfaces
+       - tempest.api.network.test_routers.RoutersIpV6Test.test_add_remove_router_interface_with_port_id
+       - tempest.api.network.test_routers.RoutersIpV6Test.test_add_remove_router_interface_with_subnet_id
+       - tempest.api.network.test_routers.RoutersIpV6Test.test_create_show_list_update_delete_router
+       - tempest.api.network.test_routers.RoutersTest.test_add_multiple_router_interfaces
+       - tempest.api.network.test_routers.RoutersTest.test_add_remove_router_interface_with_port_id
+       - tempest.api.network.test_routers.RoutersTest.test_add_remove_router_interface_with_subnet_id
+       - tempest.api.network.test_routers.RoutersTest.test_create_show_list_update_delete_router
+       - tempest.api.network.test_security_groups.SecGroupIPv6Test.test_create_list_update_show_delete_security_group
+       - tempest.api.network.test_security_groups.SecGroupIPv6Test.test_create_show_delete_security_group_rule
+       - tempest.api.network.test_security_groups.SecGroupIPv6Test.test_list_security_groups
+       - tempest.api.network.test_security_groups.SecGroupTest.test_create_list_update_show_delete_security_group
+       - tempest.api.network.test_security_groups.SecGroupTest.test_create_show_delete_security_group_rule
+       - tempest.api.network.test_security_groups.SecGroupTest.test_list_security_groups
+       - tempest.api.object_storage.test_account_quotas.AccountQuotasTest.test_admin_modify_quota
+       - tempest.api.object_storage.test_account_quotas.AccountQuotasTest.test_upload_valid_object
+       - tempest.api.object_storage.test_account_services.AccountTest.test_list_account_metadata
+       - tempest.api.object_storage.test_account_services.AccountTest.test_list_containers
+       - tempest.api.object_storage.test_account_services.AccountTest.test_list_containers_with_end_marker
+       - tempest.api.object_storage.test_account_services.AccountTest.test_list_containers_with_format_json
+       - tempest.api.object_storage.test_account_services.AccountTest.test_list_containers_with_format_xml
+       - tempest.api.object_storage.test_account_services.AccountTest.test_list_containers_with_limit
+       - tempest.api.object_storage.test_account_services.AccountTest.test_list_containers_with_limit_and_end_marker
+       - tempest.api.object_storage.test_account_services.AccountTest.test_list_containers_with_limit_and_marker
+       - tempest.api.object_storage.test_account_services.AccountTest.test_list_containers_with_limit_and_marker_and_end_marker
+       - tempest.api.object_storage.test_account_services.AccountTest.test_list_containers_with_marker
+       - tempest.api.object_storage.test_account_services.AccountTest.test_list_containers_with_marker_and_end_marker
+       - tempest.api.object_storage.test_account_services.AccountTest.test_list_extensions
+       - tempest.api.object_storage.test_account_services.AccountTest.test_list_no_account_metadata
+       - tempest.api.object_storage.test_account_services.AccountTest.test_list_no_containers
+       - tempest.api.object_storage.test_account_services.AccountTest.test_update_account_metadata_with_create_and_delete_metadata
+       - tempest.api.object_storage.test_account_services.AccountTest.test_update_account_metadata_with_create_matadata_key
+       - tempest.api.object_storage.test_account_services.AccountTest.test_update_account_metadata_with_create_metadata
+       - tempest.api.object_storage.test_account_services.AccountTest.test_update_account_metadata_with_delete_matadata
+       - tempest.api.object_storage.test_account_services.AccountTest.test_update_account_metadata_with_delete_matadata_key
+       - tempest.api.object_storage.test_container_acl.ObjectTestACLs.test_read_object_with_rights
+       - tempest.api.object_storage.test_container_acl.ObjectTestACLs.test_write_object_with_rights
+       - tempest.api.object_storage.test_container_quotas.ContainerQuotasTest.test_upload_large_object
+       - tempest.api.object_storage.test_container_quotas.ContainerQuotasTest.test_upload_too_many_objects
+       - tempest.api.object_storage.test_container_quotas.ContainerQuotasTest.test_upload_valid_object
+       - tempest.api.object_storage.test_container_services.ContainerTest.test_create_container
+       - tempest.api.object_storage.test_container_services.ContainerTest.test_create_container_overwrite
+       - tempest.api.object_storage.test_container_services.ContainerTest.test_create_container_with_metadata_key
+       - tempest.api.object_storage.test_container_services.ContainerTest.test_create_container_with_metadata_value
+       - tempest.api.object_storage.test_container_services.ContainerTest.test_create_container_with_remove_metadata_key
+       - tempest.api.object_storage.test_container_services.ContainerTest.test_create_container_with_remove_metadata_value
+       - tempest.api.object_storage.test_container_services.ContainerTest.test_delete_container
+       - tempest.api.object_storage.test_container_services.ContainerTest.test_list_container_contents
+       - tempest.api.object_storage.test_container_services.ContainerTest.test_list_container_contents_with_delimiter
+       - tempest.api.object_storage.test_container_services.ContainerTest.test_list_container_contents_with_end_marker
+       - tempest.api.object_storage.test_container_services.ContainerTest.test_list_container_contents_with_format_json
+       - tempest.api.object_storage.test_container_services.ContainerTest.test_list_container_contents_with_format_xml
+       - tempest.api.object_storage.test_container_services.ContainerTest.test_list_container_contents_with_limit
+       - tempest.api.object_storage.test_container_services.ContainerTest.test_list_container_contents_with_marker
+       - tempest.api.object_storage.test_container_services.ContainerTest.test_list_container_contents_with_no_object
+       - tempest.api.object_storage.test_container_services.ContainerTest.test_list_container_contents_with_path
+       - tempest.api.object_storage.test_container_services.ContainerTest.test_list_container_contents_with_prefix
+       - tempest.api.object_storage.test_container_services.ContainerTest.test_list_container_metadata
+       - tempest.api.object_storage.test_container_services.ContainerTest.test_list_no_container_metadata
+       - tempest.api.object_storage.test_container_services.ContainerTest.test_update_container_metadata_with_create_and_delete_matadata
+       - tempest.api.object_storage.test_container_services.ContainerTest.test_update_container_metadata_with_create_matadata_key
+       - tempest.api.object_storage.test_container_services.ContainerTest.test_update_container_metadata_with_create_metadata
+       - tempest.api.object_storage.test_container_services.ContainerTest.test_update_container_metadata_with_delete_metadata
+       - tempest.api.object_storage.test_container_services.ContainerTest.test_update_container_metadata_with_delete_metadata_key
+       - tempest.api.object_storage.test_object_services.ObjectTest.test_copy_object_2d_way
+       - tempest.api.object_storage.test_object_services.ObjectTest.test_copy_object_across_containers
+       - tempest.api.object_storage.test_object_services.ObjectTest.test_copy_object_in_same_container
+       - tempest.api.object_storage.test_object_services.ObjectTest.test_copy_object_to_itself
+       - tempest.api.object_storage.test_object_services.ObjectTest.test_copy_object_with_x_fresh_metadata
+       - tempest.api.object_storage.test_object_services.ObjectTest.test_copy_object_with_x_object_meta
+       - tempest.api.object_storage.test_object_services.ObjectTest.test_copy_object_with_x_object_metakey
+       - tempest.api.object_storage.test_object_services.ObjectTest.test_get_object
+       - tempest.api.object_storage.test_object_services.ObjectTest.test_get_object_with_if_match
+       - tempest.api.object_storage.test_object_services.ObjectTest.test_get_object_with_if_modified_since
+       - tempest.api.object_storage.test_object_services.ObjectTest.test_get_object_with_if_unmodified_since
+       - tempest.api.object_storage.test_object_services.ObjectTest.test_get_object_with_metadata
+       - tempest.api.object_storage.test_object_services.ObjectTest.test_get_object_with_range
+       - tempest.api.object_storage.test_object_services.ObjectTest.test_get_object_with_x_newest
+       - tempest.api.object_storage.test_object_services.ObjectTest.test_get_object_with_x_object_manifest
+       - tempest.api.object_storage.test_object_services.ObjectTest.test_list_no_object_metadata
+       - tempest.api.object_storage.test_object_services.ObjectTest.test_list_object_metadata
+       - tempest.api.object_storage.test_object_services.ObjectTest.test_list_object_metadata_with_x_object_manifest
+       - tempest.api.object_storage.test_object_services.ObjectTest.test_update_object_metadata
+       - tempest.api.object_storage.test_object_services.ObjectTest.test_update_object_metadata_with_create_and_remove_metadata
+       - tempest.api.object_storage.test_object_services.ObjectTest.test_update_object_metadata_with_x_object_manifest
+       - tempest.api.object_storage.test_object_services.ObjectTest.test_update_object_metadata_with_x_remove_object_metakey
+       - tempest.api.object_storage.test_object_services.PublicObjectTest.test_access_public_container_object_without_using_creds
+       - tempest.api.object_storage.test_object_services.PublicObjectTest.test_access_public_object_with_another_user_creds
+       - tempest.api.object_storage.test_object_version.ContainerTest.test_versioned_container
+       - tempest.api.orchestration.stacks.test_resource_types.ResourceTypesTest.test_resource_type_list
+       - tempest.api.orchestration.stacks.test_resource_types.ResourceTypesTest.test_resource_type_show
+       - tempest.api.orchestration.stacks.test_resource_types.ResourceTypesTest.test_resource_type_template
+       - tempest.api.orchestration.stacks.test_soft_conf.TestSoftwareConfig.test_get_deployment_list
+       - tempest.api.orchestration.stacks.test_soft_conf.TestSoftwareConfig.test_get_deployment_metadata
+       - tempest.api.orchestration.stacks.test_soft_conf.TestSoftwareConfig.test_get_software_config
+       - tempest.api.orchestration.stacks.test_soft_conf.TestSoftwareConfig.test_software_deployment_create_validate
+       - tempest.api.orchestration.stacks.test_soft_conf.TestSoftwareConfig.test_software_deployment_update_no_metadata_change
+       - tempest.api.orchestration.stacks.test_soft_conf.TestSoftwareConfig.test_software_deployment_update_with_metadata_change
+       - tempest.api.orchestration.stacks.test_stacks.StacksTestJSON.test_stack_crud_no_resources
+       - tempest.api.orchestration.stacks.test_stacks.StacksTestJSON.test_stack_list_responds
+       - tempest.api.telemetry.test_telemetry_notification_api.TelemetryNotificationAPITestJSON.test_check_glance_v1_notifications
+       - tempest.api.telemetry.test_telemetry_notification_api.TelemetryNotificationAPITestJSON.test_check_glance_v2_notifications
+       - tempest.api.volume.test_volumes_actions.VolumesV1ActionsTest.test_attach_detach_volume_to_instance
+       - tempest.api.volume.test_volumes_actions.VolumesV2ActionsTest.test_attach_detach_volume_to_instance
+       - tempest.api.volume.test_volumes_get.VolumesV1GetTest.test_volume_create_get_update_delete
+       - tempest.api.volume.test_volumes_get.VolumesV1GetTest.test_volume_create_get_update_delete_from_image
+       - tempest.api.volume.test_volumes_get.VolumesV2GetTest.test_volume_create_get_update_delete
+       - tempest.api.volume.test_volumes_get.VolumesV2GetTest.test_volume_create_get_update_delete_from_image
+       - tempest.api.volume.test_volumes_list.VolumesV1ListTestJSON.test_volume_list
+       - tempest.api.volume.test_volumes_list.VolumesV2ListTestJSON.test_volume_list
+    runner:
+      concurrency: 1
+      times: 1
+      type: serial
+    sla:
+      failure_rate:
+        max: 0
+
diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-vm.yaml b/testcases/VIM/OpenStack/CI/rally_cert/scenario/opnfv-vm.yaml
new file mode 100644 (file)
index 0000000..ae3230f
--- /dev/null
@@ -0,0 +1,35 @@
+  VMTasks.boot_runcommand_delete:
+    -
+      args:
+        {{ vm_params(image_name, flavor_name) }}
+        floating_network: {{ floating_network }}
+        force_delete: false
+        interpreter: /bin/sh
+        script: {{ sup_dir }}/instance_dd_test.sh
+        username: admin
+      context:
+        {% call user_context(tenants_amount, users_amount, use_existing_users) %}
+        network: {}
+        {% endcall %}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
+    -
+      args:
+        {{ vm_params(image_name, flavor_name) }}
+        fixed_network: private
+        floating_network: {{ floating_network }}
+        force_delete: false
+        interpreter: /bin/sh
+        script: {{ sup_dir }}/instance_dd_test.sh
+        use_floatingip: true
+        username: admin
+        volume_args:
+          size: 2
+      context:
+        {{ user_context(tenants_amount, users_amount, use_existing_users) }}
+      runner:
+        {{ constant_runner(concurrency=2*controllers_amount, times=10*controllers_amount, is_smoke=smoke) }}
+      sla:
+        {{ no_failures_sla() }}
diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/support/instance_dd_test.sh b/testcases/VIM/OpenStack/CI/rally_cert/scenario/support/instance_dd_test.sh
new file mode 100644 (file)
index 0000000..e3bf234
--- /dev/null
@@ -0,0 +1,13 @@
+#!/bin/sh
+time_seconds(){ (time -p $1 ) 2>&1 |awk '/real/{print $2}'; }
+file=/tmp/test.img
+c=${1:-$SIZE}
+c=${c:-1000} #default is 1GB
+write_seq=$(time_seconds "dd if=/dev/zero of=$file bs=1M count=$c")
+read_seq=$(time_seconds "dd if=$file of=/dev/null bs=1M count=$c")
+[ -f $file ] && rm $file
+
+echo "{
+    \"write_seq_${c}m\": $write_seq,
+    \"read_seq_${c}m\": $read_seq
+    }"
diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/autoscaling_policy.yaml.template b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/autoscaling_policy.yaml.template
new file mode 100644 (file)
index 0000000..a22487e
--- /dev/null
@@ -0,0 +1,17 @@
+heat_template_version: 2013-05-23
+
+resources:
+  test_group:
+    type: OS::Heat::AutoScalingGroup
+    properties:
+      desired_capacity: 0
+      max_size: 0
+      min_size: 0
+      resource:
+        type: OS::Heat::RandomString
+  test_policy:
+    type: OS::Heat::ScalingPolicy
+    properties:
+      adjustment_type: change_in_capacity
+      auto_scaling_group_id: { get_resource: test_group }
+      scaling_adjustment: 1
\ No newline at end of file
diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/default.yaml.template b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/default.yaml.template
new file mode 100644 (file)
index 0000000..eb4f2f2
--- /dev/null
@@ -0,0 +1 @@
+heat_template_version: 2014-10-16
\ No newline at end of file
diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/random_strings.yaml.template b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/random_strings.yaml.template
new file mode 100644 (file)
index 0000000..2dd676c
--- /dev/null
@@ -0,0 +1,13 @@
+heat_template_version: 2014-10-16
+
+description: Test template for rally create-update-delete scenario
+
+resources:
+  test_string_one:
+    type: OS::Heat::RandomString
+    properties:
+      length: 20
+  test_string_two:
+    type: OS::Heat::RandomString
+    properties:
+      length: 20
\ No newline at end of file
diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/resource_group.yaml.template b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/resource_group.yaml.template
new file mode 100644 (file)
index 0000000..b3f505f
--- /dev/null
@@ -0,0 +1,13 @@
+heat_template_version: 2014-10-16
+
+description: Test template for rally create-update-delete scenario
+
+resources:
+  test_group:
+    type: OS::Heat::ResourceGroup
+    properties:
+      count: 2
+      resource_def:
+        type: OS::Heat::RandomString
+        properties:
+          length: 20
\ No newline at end of file
diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/server_with_ports.yaml.template b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/server_with_ports.yaml.template
new file mode 100644 (file)
index 0000000..909f45d
--- /dev/null
@@ -0,0 +1,64 @@
+heat_template_version: 2013-05-23
+
+parameters:
+  # set all correct defaults for parameters before launch test
+  public_net:
+    type: string
+    default: public
+  image:
+    type: string
+    default: cirros-0.3.4-x86_64-uec
+  flavor:
+    type: string
+    default: m1.tiny
+  cidr:
+    type: string
+    default: 11.11.11.0/24
+
+resources:
+  server:
+    type: OS::Nova::Server
+    properties:
+      image: {get_param: image}
+      flavor: {get_param: flavor}
+      networks:
+        - port: { get_resource: server_port }
+
+  router:
+    type: OS::Neutron::Router
+    properties:
+      external_gateway_info:
+        network: {get_param: public_net}
+
+  router_interface:
+    type: OS::Neutron::RouterInterface
+    properties:
+      router_id: { get_resource: router }
+      subnet_id: { get_resource: private_subnet }
+
+  private_net:
+    type: OS::Neutron::Net
+
+  private_subnet:
+    type: OS::Neutron::Subnet
+    properties:
+      network: { get_resource: private_net }
+      cidr: {get_param: cidr}
+
+  port_security_group:
+    type: OS::Neutron::SecurityGroup
+    properties:
+      name: default_port_security_group
+      description: >
+        Default security group assigned to the port. The neutron default group is
+        not used because neutron creates several groups with the same name=default
+        and nova cannot choose which one it should use.
+
+  server_port:
+    type: OS::Neutron::Port
+    properties:
+      network: {get_resource: private_net}
+      fixed_ips:
+        - subnet: { get_resource: private_subnet }
+      security_groups:
+        - { get_resource: port_security_group }
diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/server_with_volume.yaml.template b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/server_with_volume.yaml.template
new file mode 100644 (file)
index 0000000..23c8827
--- /dev/null
@@ -0,0 +1,39 @@
+heat_template_version: 2013-05-23
+
+parameters:
+  # set all correct defaults for parameters before launching the test
+  image:
+    type: string
+    default: cirros-0.3.4-x86_64-uec
+  flavor:
+    type: string
+    default: m1.tiny
+  availability_zone:
+    type: string
+    description: The Availability Zone in which to launch the instance.
+    default: nova
+  volume_size:
+    type: number
+    description: Size of the volume to be created.
+    default: 1
+    constraints:
+      - range: { min: 1, max: 1024 }
+        description: must be between 1 and 1024 GB.
+
+resources:
+  server:
+    type: OS::Nova::Server
+    properties:
+      image: {get_param: image}
+      flavor: {get_param: flavor}
+  cinder_volume:
+    type: OS::Cinder::Volume
+    properties:
+      size: { get_param: volume_size }
+      availability_zone: { get_param: availability_zone }
+  volume_attachment:
+    type: OS::Cinder::VolumeAttachment
+    properties:
+      volume_id: { get_resource: cinder_volume }
+      instance_uuid: { get_resource: server}
+      mountpoint: /dev/vdc
diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_autoscaling_policy_inplace.yaml.template b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_autoscaling_policy_inplace.yaml.template
new file mode 100644 (file)
index 0000000..cf34879
--- /dev/null
@@ -0,0 +1,23 @@
+heat_template_version: 2013-05-23
+
+description: >
+  Test template for create-update-delete-stack scenario in rally.
+  The template updates resource parameters without resource re-creation (replacement)
+  in the stack defined by autoscaling_policy.yaml.template. It allows to measure
+  performance of "pure" resource update operation only.
+
+resources:
+  test_group:
+    type: OS::Heat::AutoScalingGroup
+    properties:
+      desired_capacity: 0
+      max_size: 0
+      min_size: 0
+      resource:
+        type: OS::Heat::RandomString
+  test_policy:
+    type: OS::Heat::ScalingPolicy
+    properties:
+      adjustment_type: change_in_capacity
+      auto_scaling_group_id: { get_resource: test_group }
+      scaling_adjustment: -1
\ No newline at end of file
diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_random_strings_add.yaml.template b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_random_strings_add.yaml.template
new file mode 100644 (file)
index 0000000..e06d42e
--- /dev/null
@@ -0,0 +1,19 @@
+heat_template_version: 2014-10-16
+
+description: >
+  Test template for create-update-delete-stack scenario in rally.
+  The template updates the stack defined by random_strings.yaml.template with additional resource.
+
+resources:
+  test_string_one:
+    type: OS::Heat::RandomString
+    properties:
+      length: 20
+  test_string_two:
+    type: OS::Heat::RandomString
+    properties:
+      length: 20
+  test_string_three:
+    type: OS::Heat::RandomString
+    properties:
+      length: 20
\ No newline at end of file
diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_random_strings_delete.yaml.template b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_random_strings_delete.yaml.template
new file mode 100644 (file)
index 0000000..d02593e
--- /dev/null
@@ -0,0 +1,11 @@
+heat_template_version: 2014-10-16
+
+description: >
+  Test template for create-update-delete-stack scenario in rally.
+  The template deletes one resource from the stack defined by random_strings.yaml.template.
+
+resources:
+  test_string_one:
+    type: OS::Heat::RandomString
+    properties:
+      length: 20
\ No newline at end of file
diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_random_strings_replace.yaml.template b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_random_strings_replace.yaml.template
new file mode 100644 (file)
index 0000000..46d8bff
--- /dev/null
@@ -0,0 +1,19 @@
+heat_template_version: 2014-10-16
+
+description: >
+  Test template for create-update-delete-stack scenario in rally.
+  The template deletes one resource from the stack defined by
+  random_strings.yaml.template and re-creates it with the updated parameters
+  (so-called update-replace). That happens because some parameters cannot be
+  changed without resource re-creation. The template allows to measure performance
+  of update-replace operation.
+
+resources:
+  test_string_one:
+    type: OS::Heat::RandomString
+    properties:
+      length: 20
+  test_string_two:
+    type: OS::Heat::RandomString
+    properties:
+      length: 40
\ No newline at end of file
diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_resource_group_increase.yaml.template b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_resource_group_increase.yaml.template
new file mode 100644 (file)
index 0000000..891074e
--- /dev/null
@@ -0,0 +1,16 @@
+heat_template_version: 2014-10-16
+
+description: >
+  Test template for create-update-delete-stack scenario in rally.
+  The template updates one resource from the stack defined by resource_group.yaml.template
+  and adds children resources to that resource.
+
+resources:
+  test_group:
+    type: OS::Heat::ResourceGroup
+    properties:
+      count: 3
+      resource_def:
+        type: OS::Heat::RandomString
+        properties:
+          length: 20
\ No newline at end of file
diff --git a/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_resource_group_reduce.yaml.template b/testcases/VIM/OpenStack/CI/rally_cert/scenario/templates/updated_resource_group_reduce.yaml.template
new file mode 100644 (file)
index 0000000..b4d1d17
--- /dev/null
@@ -0,0 +1,16 @@
+heat_template_version: 2014-10-16
+
+description: >
+  Test template for create-update-delete-stack scenario in rally.
+  The template updates one resource from the stack defined by resource_group.yaml.template
+  and deletes children resources from that resource.
+
+resources:
+  test_group:
+    type: OS::Heat::ResourceGroup
+    properties:
+      count: 1
+      resource_def:
+        type: OS::Heat::RandomString
+        properties:
+          length: 20
\ No newline at end of file
diff --git a/testcases/VIM/OpenStack/CI/rally_cert/task.yaml b/testcases/VIM/OpenStack/CI/rally_cert/task.yaml
new file mode 100644 (file)
index 0000000..299421a
--- /dev/null
@@ -0,0 +1,59 @@
+{%- set glance_image_location = glance_image_location|default("http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img") %}
+{%- set image_name = image_name|default("functest-img-rally") %}
+{%- set flavor_name = flavor_name|default("m1.tiny") %}
+{%- set use_existing_users = use_existing_users|default(false) %}
+{%- set service_list = service_list|default(["authenticate", "cinder", "keystone", "nova", "glance", "neutron", "quotas", "requests", "heat", "vm"]) %}
+{%- set smoke = smoke|default(true) %}
+{%- set floating_network = floating_network|default("net04_ext") %}
+{%- set controllers_amount = controllers_amount|default(1) %}
+{%- if smoke %}
+{%- set users_amount = 1 %}
+{%- set tenants_amount = 1 %}
+{%- else %}
+{%- set users_amount = users_amount|default(1) %}
+{%- set tenants_amount = tenants_amount|default(1) %}
+{%- endif %}
+
+{%- from "macro/macro.yaml" import user_context, vm_params, unlimited_volumes, constant_runner, rps_runner, no_failures_sla -%}
+{%- from "macro/macro.yaml" import volumes, unlimited_nova, unlimited_neutron, glance_args -%}
+
+---
+{% if "authenticate" in service_list %}
+{%- include "scenario/opnfv-authenticate.yaml"-%}
+{% endif %}
+
+{% if "cinder" in service_list %}
+{%- include "scenario/opnfv-cinder.yaml"-%}
+{% endif %}
+
+{% if "keystone" in service_list %}
+{%- include "scenario/opnfv-keystone.yaml"-%}
+{% endif %}
+
+{% if "nova" in service_list %}
+{%- include "scenario/opnfv-nova.yaml"-%}
+{% endif %}
+
+{% if "glance" in service_list %}
+{%- include "scenario/opnfv-glance.yaml"-%}
+{% endif %}
+
+{% if "neutron" in service_list %}
+{%- include "scenario/opnfv-neutron.yaml"-%}
+{% endif %}
+
+{% if "quotas" in service_list %}
+{%- include "scenario/opnfv-quotas.yaml"-%}
+{% endif %}
+
+{% if "requests" in service_list %}
+{%- include "scenario/opnfv-requests.yaml"-%}
+{% endif %}
+
+{% if "heat" in service_list %}
+{%- include "scenario/opnfv-heat.yaml"-%}
+{% endif %}
+
+{% if "vm" in service_list %}
+{%- include "scenario/opnfv-vm.yaml"-%}
+{% endif %}