3 # Copyright (c) 2015 Orange
4 # guyrodrigue.koffi@orange.com
5 # morgan.richomme@orange.com
6 # All rights reserved. This program and the accompanying materials
7 # are made available under the terms of the Apache License, Version 2.0
8 # which accompanies this distribution, and is available at
9 # http://www.apache.org/licenses/LICENSE-2.0
11 import re, json, os, urllib2, argparse, logging, yaml
""" load functest configuration to locate the rally scenario and result dirs """
with open('../functest.yaml') as f:
    functest_yaml = yaml.safe_load(f)

HOME = os.environ['HOME'] + "/"
SCENARIOS_DIR = HOME + functest_yaml.get("general").get("directories").get("dir_rally_scn")
RESULTS_DIR = HOME + functest_yaml.get("general").get("directories").get("dir_rally_res")

""" tests configuration """
tests = ['authenticate', 'glance', 'cinder', 'heat', 'keystone', 'neutron', 'nova', 'quotas', 'requests', 'tempest', 'vm', 'all', 'smoke']
parser = argparse.ArgumentParser()
parser.add_argument("test_name", help="The name of the test you want to perform with rally. "
                                      "Possible values are : "
                                      "[ {d[0]} | {d[1]} | {d[2]} | {d[3]} | {d[4]} | {d[5]} | {d[6]} "
                                      "| {d[7]} | {d[8]} | {d[9]} | {d[10]} | {d[11]} | {d[12]}]. The 'all' value performs all the tests scenarios "
                                      "except 'tempest'".format(d=tests))
parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
parser.add_argument("test_mode", help="Tempest test mode", nargs='?', default="smoke")
args = parser.parse_args()
test_mode = args.test_mode

# test_mode only makes sense for the tempest suite; reject it elsewhere
if not args.test_name == "tempest":
    if not args.test_mode == "smoke":
        parser.error("test_mode is only used with tempest")

""" logging configuration """
logger = logging.getLogger('run_rally')
logger.setLevel(logging.DEBUG)

ch = logging.StreamHandler()
# console verbosity follows the -d/--debug flag; without it, the first
# setLevel(DEBUG) call was dead code immediately overridden by INFO
if args.debug:
    ch.setLevel(logging.DEBUG)
else:
    ch.setLevel(logging.INFO)

formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
# without addHandler the configured handler/formatter would never be used
logger.addHandler(ch)
def get_tempest_id(cmd_raw):
    """
    get task id from command rally result

    :param cmd_raw: raw stdout of the 'rally verify start' command
    :return: task_id as string, or None when no 'Verification UUID' line is found
    """
    taskid_re = re.compile('^Verification UUID: (.*)$')
    for line in cmd_raw.splitlines(True):
        match = taskid_re.match(line)
        if match:
            return match.group(1)
    # command output did not contain a verification UUID
    return None
def get_task_id(cmd_raw):
    """
    get task id from command rally result

    :param cmd_raw: raw stdout of the 'rally task start' command
    :return: task_id as string, or None when no 'Task ...: started' line is found
    """
    taskid_re = re.compile('^Task +(.*): started$')
    for line in cmd_raw.splitlines(True):
        match = taskid_re.match(line)
        if match:
            return match.group(1)
    # command output did not contain a task id
    return None
def task_succeed(json_raw):
    """
    Parse JSON from rally JSON results

    :param json_raw: JSON string produced by 'rally task results'
    :return: True when the first report entry exists, has a 'result' list,
             and no scenario iteration reported an error; False otherwise
    """
    rally_report = json.loads(json_raw)
    # rally emits a list of reports; only the first entry is inspected
    rally_report = rally_report[0]
    if rally_report is None:
        return False
    if rally_report.get('result') is None:
        return False
    for result in rally_report.get('result'):
        # any non-empty error list means at least one iteration failed
        if len(result.get('error')) > 0:
            return False
    return True
def run_tempest(test_mode):
    """
    the function dedicated to Tempest (functional tests for OpenStack)

    :param test_mode: Tempest mode smoke (default), full, ..
    :return: void
    """
    logger.info('starting {} Tempest ...'.format(test_mode))

    # timestamp used to build a unique report file name
    cmd = os.popen("date '+%d%m%Y_%H%M'")
    test_date = cmd.read().rstrip()

    cmd_line = "rally verify start {}".format(test_mode)
    logger.debug('running command line : {}'.format(cmd_line))
    cmd = os.popen(cmd_line)
    task_id = get_tempest_id(cmd.read())
    logger.debug('task_id : {}'.format(task_id))

    if task_id is None:
        logger.error("failed to retrieve task_id")
        exit(-1)

    """ check for result directory and create it otherwise """
    if not os.path.exists(RESULTS_DIR):
        # original message had no placeholder for the formatted value
        logger.debug('{} does not exists, we create it'.format(RESULTS_DIR))
        os.makedirs(RESULTS_DIR)

    """ write log report file """
    report_file_name = '{}opnfv-tempest-{}.log'.format(RESULTS_DIR, test_date)
    cmd_line = "rally verify detailed {} > {} ".format(task_id, report_file_name)
    logger.debug('running command line : {}'.format(cmd_line))
    os.popen(cmd_line)
def run_task(test_name):
    """
    the "main" function of the script which launches rally for a task

    :param test_name: name for the rally test
    :return: void
    """
    logger.info('starting {} test ...'.format(test_name))

    # timestamp used to build unique report file names
    cmd = os.popen("date '+%d%m%Y_%H%M'")
    test_date = cmd.read().rstrip()

    """ check directory for scenarios test files or retrieve from git otherwise"""
    proceed_test = True
    test_file_name = '{}opnfv-{}.json'.format(SCENARIOS_DIR, test_name)
    if not os.path.exists(test_file_name):
        logger.debug('{} does not exists'.format(test_file_name))
        proceed_test = retrieve_test_cases_file(test_name, SCENARIOS_DIR)

    """ we do the test only if we have a scenario test file """
    if proceed_test:
        logger.debug('Scenario fetched from : {}'.format(test_file_name))
        cmd_line = "rally task start --abort-on-sla-failure %s" % test_file_name
        logger.debug('running command line : {}'.format(cmd_line))
        cmd = os.popen(cmd_line)
        task_id = get_task_id(cmd.read())
        logger.debug('task_id : {}'.format(task_id))

        if task_id is None:
            logger.error("failed to retrieve task_id")
            exit(-1)

        """ check for result directory and create it otherwise """
        if not os.path.exists(RESULTS_DIR):
            # original message had no placeholder for the formatted value
            logger.debug('{} does not exists, we create it'.format(RESULTS_DIR))
            os.makedirs(RESULTS_DIR)

        """ write html report file """
        report_file_name = '{}opnfv-{}-{}.html'.format(RESULTS_DIR, test_name, test_date)
        cmd_line = "rally task report %s --out %s" % (task_id, report_file_name)
        logger.debug('running command line : {}'.format(cmd_line))
        os.popen(cmd_line)

        """ get and save rally operation JSON result """
        cmd_line = "rally task results %s" % task_id
        logger.debug('running command line : {}'.format(cmd_line))
        cmd = os.popen(cmd_line)
        json_results = cmd.read()
        with open('{}opnfv-{}-{}.json'.format(RESULTS_DIR, test_name, test_date), 'w') as f:
            logger.debug('saving json file')
            f.write(json_results)
        logger.debug('saving json file2')

        """ parse JSON operation result """
        if task_succeed(json_results):
            logger.info('{} test succeeded'.format(test_name))
        else:
            logger.error('{} test failed'.format(test_name))
    else:
        logger.error('{} test failed, unable to fetch a scenario test file'.format(test_name))
def retrieve_test_cases_file(test_name, tests_path):
    """
    Retrieve from github the sample test files

    :param test_name: rally test name (used to build the file name)
    :param tests_path: local directory where the scenario file is stored
    :return: Boolean that indicates the retrieval status
    """
    """ do not add the "/" at the end """
    url_base = "https://git.opnfv.org/cgit/functest/plain/testcases/VIM/OpenStack/CI/suites"

    test_file_name = 'opnfv-{}.json'.format(test_name)
    logger.info('fetching {}/{} ...'.format(url_base, test_file_name))

    try:
        response = urllib2.urlopen('{}/{}'.format(url_base, test_file_name))
    except (urllib2.HTTPError, urllib2.URLError):
        # best-effort fetch: signal failure to the caller instead of crashing
        logger.error('unable to fetch {}'.format(test_file_name))
        return False
    file_raw = response.read()

    """ check if the test path exist otherwise we create it """
    if not os.path.exists(tests_path):
        os.makedirs(tests_path)

    with open('{}/{}'.format(tests_path, test_file_name), 'w') as f:
        f.write(file_raw)
    return True
227 """ configure script """
228 if not (args.test_name in tests):
229 logger.error('argument not valid')
232 if args.test_name == "all":
233 for test_name in tests:
234 if not (test_name == 'all' or test_name == 'tempest'):
238 print(args.test_name)
239 if args.test_name == 'tempest':
242 run_task(args.test_name)
244 if __name__ == '__main__':