Rally live migration case selection
[functest-xtesting.git] / testcases / VIM / OpenStack / CI / libraries / run_rally-cert.py
index 1c27b02..5f5cd62 100755 (executable)
 # 0.3 (19/10/2015) remove Tempest from run_rally
 # and push result into test DB
 #
-
-import re
-import json
-import os
 import argparse
+import iniparse
+import json
 import logging
-import yaml
+import os
+import re
 import requests
 import subprocess
 import sys
+import time
+import yaml
+
 from novaclient import client as novaclient
 from glanceclient import client as glanceclient
 from keystoneclient.v2_0 import client as keystoneclient
 from neutronclient.v2_0 import client as neutronclient
+from cinderclient import client as cinderclient
 
 """ tests configuration """
 tests = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
@@ -52,6 +55,9 @@ parser.add_argument("-s", "--smoke",
 parser.add_argument("-v", "--verbose",
                     help="Print verbose info about the progress",
                     action="store_true")
+parser.add_argument("-n", "--noclean",
+                    help="Don't clean the created resources for this test.",
+                    action="store_true")
 
 args = parser.parse_args()
 
@@ -77,7 +83,7 @@ formatter = logging.Formatter("%(asctime)s - %(name)s - "
 ch.setFormatter(formatter)
 logger.addHandler(ch)
 
-REPO_PATH=os.environ['repos_dir']+'/functest/'
+REPO_PATH = os.environ['repos_dir']+'/functest/'
 if not os.path.exists(REPO_PATH):
     logger.error("Functest repository directory not found '%s'" % REPO_PATH)
     exit(-1)
@@ -89,8 +95,8 @@ with open("/home/opnfv/functest/conf/config_functest.yaml") as f:
 f.close()
 
 HOME = os.environ['HOME']+"/"
-####todo:
-#SCENARIOS_DIR = REPO_PATH + functest_yaml.get("general"). \
+### todo:
+# SCENARIOS_DIR = REPO_PATH + functest_yaml.get("general"). \
 #    get("directories").get("dir_rally_scn")
 SCENARIOS_DIR = REPO_PATH + "testcases/VIM/OpenStack/CI/rally_cert/"
 ###
@@ -100,15 +106,15 @@ SUPPORT_DIR = SCENARIOS_DIR + "scenario/support"
 FLAVOR_NAME = "m1.tiny"
 USERS_AMOUNT = 2
 TENANTS_AMOUNT = 3
-CONTROLLERS_AMOUNT = 2
+ITERATIONS_AMOUNT = 10
+CONCURRENCY = 4
+
 ###
 RESULTS_DIR = functest_yaml.get("general").get("directories"). \
     get("dir_rally_res")
+TEMPEST_CONF_FILE = functest_yaml.get("general").get("directories"). \
+    get("dir_results") + '/tempest/tempest.conf'
 TEST_DB = functest_yaml.get("results").get("test_db_url")
-FLOATING_NETWORK = functest_yaml.get("general"). \
-    get("openstack").get("neutron_public_net_name")
-FLOATING_SUBNET_CIDR = functest_yaml.get("general"). \
-    get("openstack").get("neutron_public_subnet_cidr")
 PRIVATE_NETWORK = functest_yaml.get("general"). \
     get("openstack").get("neutron_private_net_name")
 
@@ -121,17 +127,22 @@ GLANCE_IMAGE_FORMAT = functest_yaml.get("general"). \
 GLANCE_IMAGE_PATH = functest_yaml.get("general"). \
     get("directories").get("dir_functest_data") + "/" + GLANCE_IMAGE_FILENAME
 
+CINDER_VOLUME_TYPE_NAME = "volume_test"
+
+
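+# per-scenario statistics filled in by get_output() and reported by main()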
+SUMMARY = []
 
-def push_results_to_db(payload):
+
+def push_results_to_db(case, payload):
 
     url = TEST_DB + "/results"
     installer = functest_utils.get_installer_type(logger)
-    git_version = functest_utils.get_git_branch(REPO_PATH)
+    scenario = functest_utils.get_scenario(logger)
     pod_name = functest_utils.get_pod_name(logger)
     # TODO pod_name hardcoded, info shall come from Jenkins
-    params = {"project_name": "functest", "case_name": "Rally",
+    params = {"project_name": "functest", "case_name": case,
               "pod_name": pod_name, "installer": installer,
-              "version": git_version, "details": payload}
+              "version": scenario, "details": payload}
 
     headers = {'Content-Type': 'application/json'}
     r = requests.post(url, data=json.dumps(params), headers=headers)
@@ -160,53 +171,128 @@ def task_succeed(json_raw):
     :return: Bool
     """
     rally_report = json.loads(json_raw)
-    rally_report = rally_report[0]
-    if rally_report is None:
-        return False
-    if rally_report.get('result') is None:
-        return False
-
-    for result in rally_report.get('result'):
-        if len(result.get('error')) > 0:
+    for report in rally_report:
+        if report is None or report.get('result') is None:
             return False
 
+        for result in report.get('result'):
+            if result is None or len(result.get('error')) > 0:
+                return False
+
     return True
 
 
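+# Tell whether the deployment supports live migration by reading the
+# [compute-feature-enabled] section of tempest.conf; default to False when
+# the file or the option is missing.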
+def live_migration_supported():
+    config = iniparse.ConfigParser()
+    if config.read(TEMPEST_CONF_FILE) and \
+       config.has_section('compute-feature-enabled') and \
+       config.has_option('compute-feature-enabled', 'live_migration'):
+        return config.getboolean('compute-feature-enabled', 'live_migration')
+
+    return False
+
+
 def build_task_args(test_file_name):
     task_args = {'service_list': [test_file_name]}
     task_args['smoke'] = args.smoke
     task_args['image_name'] = GLANCE_IMAGE_NAME
     task_args['flavor_name'] = FLAVOR_NAME
     task_args['glance_image_location'] = GLANCE_IMAGE_PATH
-    task_args['floating_network'] = FLOATING_NETWORK
-    task_args['floating_subnet_cidr'] = FLOATING_SUBNET_CIDR
-    task_args['netid'] = functest_utils.get_network_id(client_dict['neutron'],
-                                    PRIVATE_NETWORK).encode('ascii', 'ignore')
     task_args['tmpl_dir'] = TEMPLATE_DIR
     task_args['sup_dir'] = SUPPORT_DIR
     task_args['users_amount'] = USERS_AMOUNT
     task_args['tenants_amount'] = TENANTS_AMOUNT
-    task_args['controllers_amount'] = CONTROLLERS_AMOUNT
+    task_args['iterations'] = ITERATIONS_AMOUNT
+    task_args['concurrency'] = CONCURRENCY
+
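+    # use the external network, when available, as the floating network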
+    ext_net = functest_utils.get_external_net(client_dict['neutron'])
+    if ext_net:
+        task_args['floating_network'] = str(ext_net)
+    else:
+        task_args['floating_network'] = ''
+
+    net_id = functest_utils.get_network_id(client_dict['neutron'],
+                                           PRIVATE_NETWORK)
+    task_args['netid'] = str(net_id)
+    task_args['live_migration'] = live_migration_supported()
 
     return task_args
 
 
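+# Stream the rally CLI output: forward relevant lines to the log and collect
+# each scenario's duration, test count and success rate for the summary report.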
+def get_output(proc, test_name):
+    global SUMMARY
+    result = ""
+    nb_tests = 0
+    overall_duration = 0.0
+    success = 0.0
+    nb_totals = 0
+
+    while proc.poll() is None:
+        line = proc.stdout.readline()
+        if args.verbose:
+            result += line
+        else:
+            if "Load duration" in line or \
+               "started" in line or \
+               "finished" in line or \
+               " Preparing" in line or \
+               "+-" in line or \
+               "|" in line:
+                result += line
+            elif "test scenario" in line:
+                result += "\n" + line
+            elif "Full duration" in line:
+                result += line + "\n\n"
+
+        # parse output for summary report
+        if "| " in line and \
+           "| action" not in line and \
+           "| Starting" not in line and \
+           "| Completed" not in line and \
+           "| ITER" not in line and \
+           "|   " not in line and \
+           "| total" not in line:
+            nb_tests += 1
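+        # the "total" row carries the success percentage ("Success" column)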
+        elif "| total" in line:
+            percentage = ((line.split('|')[8]).strip(' ')).strip('%')
+            success += float(percentage)
+            nb_totals += 1
+        elif "Full duration" in line:
+            overall_duration += float(line.split(': ')[1])
+
+    overall_duration="{:10.2f}".format(overall_duration)
+    if nb_totals == 0:
+        success_avg = 0
+    else:
+        success_avg = "{:0.2f}".format(success / nb_totals)
+
+    scenario_summary = {'test_name': test_name,
+                        'overall_duration': overall_duration,
+                        'nb_tests': nb_tests,
+                        'success': success_avg}
+    SUMMARY.append(scenario_summary)
+
+    logger.info("\n" + result)
+
+    return result
+
+
 def run_task(test_name):
     #
     # the "main" function of the script who launch rally for a task
     # :param test_name: name for the rally test
     # :return: void
     #
-
-    logger.info('starting {} test ...'.format(test_name))
+    global SUMMARY
+    logger.info('Starting test scenario "{}" ...'.format(test_name))
 
     task_file = '{}task.yaml'.format(SCENARIOS_DIR)
     if not os.path.exists(task_file):
         logger.error("Task file '%s' does not exist." % task_file)
         exit(-1)
 
-    test_file_name = '{}opnfv-{}.yaml'.format(SCENARIOS_DIR + "scenario/", test_name)
+    test_file_name = '{}opnfv-{}.yaml'.format(SCENARIOS_DIR + "scenario/",
+                                              test_name)
     if not os.path.exists(test_file_name):
         logger.error("The scenario '%s' does not exist." % test_file_name)
         exit(-1)
@@ -218,23 +304,19 @@ def run_task(test_name):
                "--task-args \"{}\" ".format(build_task_args(test_name))
     logger.debug('running command line : {}'.format(cmd_line))
 
-    p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE, stderr=RALLY_STDERR, shell=True)
-    result = ""
-    while p.poll() is None:
-        l = p.stdout.readline()
-        print l.replace('\n', '')
-        result += l
-
-    task_id = get_task_id(result)
+    p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
+                         stderr=RALLY_STDERR, shell=True)
+    output = get_output(p, test_name)
+    task_id = get_task_id(output)
     logger.debug('task_id : {}'.format(task_id))
 
     if task_id is None:
-        logger.error("failed to retrieve task_id")
+        logger.error("Failed to retrieve task_id.")
         exit(-1)
 
     # check for result directory and create it otherwise
     if not os.path.exists(RESULTS_DIR):
-        logger.debug('does not exists, we create it'.format(RESULTS_DIR))
+        logger.debug('{} does not exist, we create it.'.format(RESULTS_DIR))
         os.makedirs(RESULTS_DIR)
 
     # write html report file
@@ -261,41 +343,64 @@ def run_task(test_name):
     # Push results in payload of testcase
     if args.report:
         logger.debug("Push result into DB")
-        push_results_to_db(json_data)
+        push_results_to_db("Rally_details", json_data)
 
     """ parse JSON operation result """
     if task_succeed(json_results):
-        print 'Test OK'
+        logger.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
     else:
-        print 'Test KO'
+        logger.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")
 
 
 def main():
+    global SUMMARY
     # configure script
     if not (args.test_name in tests):
         logger.error('argument not valid')
         exit(-1)
 
+    SUMMARY = []
     creds_nova = functest_utils.get_credentials("nova")
-    nova_client = novaclient.Client('2',**creds_nova)
+    nova_client = novaclient.Client('2', **creds_nova)
     creds_neutron = functest_utils.get_credentials("neutron")
     neutron_client = neutronclient.Client(**creds_neutron)
     creds_keystone = functest_utils.get_credentials("keystone")
     keystone_client = keystoneclient.Client(**creds_keystone)
     glance_endpoint = keystone_client.service_catalog.url_for(service_type='image',
-                                                   endpoint_type='publicURL')
+                                                              endpoint_type='publicURL')
     glance_client = glanceclient.Client(1, glance_endpoint,
                                         token=keystone_client.auth_token)
+    creds_cinder = functest_utils.get_credentials("cinder")
+    cinder_client = cinderclient.Client('2', creds_cinder['username'],
+                                        creds_cinder['api_key'],
+                                        creds_cinder['project_id'],
+                                        creds_cinder['auth_url'],
+                                        service_type="volume")
 
     client_dict['neutron'] = neutron_client
 
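+    # the cinder scenarios need at least one public volume type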
+    volume_types = functest_utils.list_volume_types(cinder_client,
+                                                    private=False)
+    if not volume_types:
+        volume_type = functest_utils.create_volume_type(cinder_client,
+                                                        CINDER_VOLUME_TYPE_NAME)
+        if not volume_type:
+            logger.error("Failed to create volume type...")
+            exit(-1)
+        else:
+            logger.debug("Volume type '%s' created succesfully..." \
+                         % CINDER_VOLUME_TYPE_NAME)
+    else:
+        logger.debug("Using existing volume type(s)...")
+
     image_id = functest_utils.get_image_id(glance_client, GLANCE_IMAGE_NAME)
 
     if image_id == '':
-        logger.debug("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME, \
+        logger.debug("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME,
                                                            GLANCE_IMAGE_PATH))
-        image_id = functest_utils.create_glance_image(glance_client,\
-                                                GLANCE_IMAGE_NAME,GLANCE_IMAGE_PATH)
+        image_id = functest_utils.create_glance_image(glance_client,
+                                                      GLANCE_IMAGE_NAME,
+                                                      GLANCE_IMAGE_PATH)
         if not image_id:
             logger.error("Failed to create the Glance image...")
             exit(-1)
@@ -304,22 +409,85 @@ def main():
                          % (GLANCE_IMAGE_NAME, image_id))
     else:
         logger.debug("Using existing image '%s' with ID '%s'..." \
-                     % (GLANCE_IMAGE_NAME,image_id))
+                     % (GLANCE_IMAGE_NAME, image_id))
 
     if args.test_name == "all":
         for test_name in tests:
             if not (test_name == 'all' or
                     test_name == 'vm'):
-                print(test_name)
                 run_task(test_name)
     else:
-        print(args.test_name)
+        logger.debug("Test name: " + args.test_name)
         run_task(args.test_name)
 
+    report = "\n"\
+             "                                                              \n"\
+             "                     Rally Summary Report\n"\
+             "+===================+============+===============+===========+\n"\
+             "| Module            | Duration   | nb. Test Run  | Success   |\n"\
+             "+===================+============+===============+===========+\n"
+    payload = []
+
+    # for each scenario we add a row to the summary table
+    total_duration = 0.0
+    total_nb_tests = 0
+    total_success = 0.0
+    for s in SUMMARY:
+        name = "{0:<17}".format(s['test_name'])
+        duration = float(s['overall_duration'])
+        total_duration += duration
+        duration = time.strftime("%M:%S", time.gmtime(duration))
+        duration = "{0:<10}".format(duration)
+        nb_tests = "{0:<13}".format(s['nb_tests'])
+        total_nb_tests += int(s['nb_tests'])
+        success = "{0:<10}".format(str(s['success'])+'%')
+        total_success += float(s['success'])
+        report += ""\
+        "| " + name + " | " + duration + " | " + nb_tests + " | " + success + "|\n"\
+        "+-------------------+------------+---------------+-----------+\n"
+        payload.append({'module': name,
+                        'details': {'duration': s['overall_duration'],
+                                    'nb tests': s['nb_tests'],
+                                    'success': s['success']}})
+
+    total_duration_str = time.strftime("%H:%M:%S", time.gmtime(total_duration))
+    total_duration_str2 = "{0:<10}".format(total_duration_str)
+    total_nb_tests_str = "{0:<13}".format(total_nb_tests)
+    total_success = "{:0.2f}".format(total_success / len(SUMMARY))
+    total_success_str = "{0:<10}".format(str(total_success)+'%')
+    report += "+===================+============+===============+===========+\n"
+    report += "| TOTAL:            | " + total_duration_str2 + " | " + \
+            total_nb_tests_str + " | " + total_success_str + "|\n"
+    report += "+===================+============+===============+===========+\n"
+
+    logger.info("\n"+report)
+    payload.append({'summary': {'duration': total_duration,
+                               'nb tests': total_nb_tests,
+                               'nb success': total_success}})
+
+    # Generate json results for DB
+    #json_results = {"timestart": time_start, "duration": total_duration,
+    #                "tests": int(total_nb_tests), "success": int(total_success)}
+    #logger.info("Results: "+str(json_results))
+
+    if args.report:
+        logger.debug("Pushing Rally summary into DB...")
+        push_results_to_db("Rally", payload)
+
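+    # with --noclean, keep the image and volume type created above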
+    if args.noclean:
+        exit(0)
+
     logger.debug("Deleting image '%s' with ID '%s'..." \
                          % (GLANCE_IMAGE_NAME, image_id))
     if not functest_utils.delete_glance_image(nova_client, image_id):
         logger.error("Error deleting the glance image")
 
+    if not volume_types:
+        logger.debug("Deleting volume type '%s'..." \
+                             % CINDER_VOLUME_TYPE_NAME)
+        if not functest_utils.delete_volume_type(cinder_client, volume_type):
+            logger.error("Error in deleting volume type...")
+
+
 if __name__ == '__main__':
     main()