Merge "Fix success status parsing of rally results"
[functest.git] / testcases / VIM / OpenStack / CI / libraries / run_rally-cert.py
#!/usr/bin/env python
#
# Copyright (c) 2015 Orange
# guyrodrigue.koffi@orange.com
# morgan.richomme@orange.com
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
#
# 0.1 (05/2015) initial commit
# 0.2 (28/09/2015) extract Tempest, format json result, add ceilometer suite
# 0.3 (19/10/2015) remove Tempest from run_rally
#                  and push result into test DB
#

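# This script drives the Rally certification scenarios against an OpenStack
# deployment: it prepares the required Glance image and Cinder volume type,
# runs the selected scenario suite with "rally task start", stores the HTML
# and JSON reports under the configured results directory and, with the
# --report option, pushes the JSON payload to the functest test DB.
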
import re
import json
import os
import argparse
import logging
import yaml
import requests
import subprocess
import sys
from novaclient import client as novaclient
from glanceclient import client as glanceclient
from keystoneclient.v2_0 import client as keystoneclient
from neutronclient.v2_0 import client as neutronclient
from cinderclient import client as cinderclient

32 """ tests configuration """
33 tests = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
34          'neutron', 'nova', 'quotas', 'requests', 'vm', 'all']
35 parser = argparse.ArgumentParser()
36 parser.add_argument("test_name",
37                     help="Module name to be tested. "
38                          "Possible values are : "
39                          "[ {d[0]} | {d[1]} | {d[2]} | {d[3]} | {d[4]} | "
40                          "{d[5]} | {d[6]} | {d[7]} | {d[8]} | {d[9]} | "
41                          "{d[10]} ] "
42                          "The 'all' value "
43                          "performs all possible test scenarios"
44                          .format(d=tests))
45
46 parser.add_argument("-d", "--debug", help="Debug mode",  action="store_true")
47 parser.add_argument("-r", "--report",
48                     help="Create json result file",
49                     action="store_true")
50 parser.add_argument("-s", "--smoke",
51                     help="Smoke test mode",
52                     action="store_true")
53 parser.add_argument("-v", "--verbose",
54                     help="Print verbose info about the progress",
55                     action="store_true")
56
57 args = parser.parse_args()
58
59 client_dict = {}
60
if args.verbose:
    RALLY_STDERR = subprocess.STDOUT
else:
    RALLY_STDERR = open(os.devnull, 'w')

66 """ logging configuration """
67 logger = logging.getLogger("run_rally")
68 logger.setLevel(logging.DEBUG)
69
70 ch = logging.StreamHandler()
71 if args.debug:
72     ch.setLevel(logging.DEBUG)
73 else:
74     ch.setLevel(logging.INFO)
75
76 formatter = logging.Formatter("%(asctime)s - %(name)s - "
77                               "%(levelname)s - %(message)s")
78 ch.setFormatter(formatter)
79 logger.addHandler(ch)
80
REPO_PATH = os.environ['repos_dir'] + '/functest/'
if not os.path.exists(REPO_PATH):
    logger.error("Functest repository directory not found '%s'" % REPO_PATH)
    exit(-1)
sys.path.append(REPO_PATH + "testcases/")
import functest_utils

with open("/home/opnfv/functest/conf/config_functest.yaml") as f:
    functest_yaml = yaml.safe_load(f)

HOME = os.environ['HOME'] + "/"
####todo:
#SCENARIOS_DIR = REPO_PATH + functest_yaml.get("general"). \
#    get("directories").get("dir_rally_scn")
SCENARIOS_DIR = REPO_PATH + "testcases/VIM/OpenStack/CI/rally_cert/"
###
TEMPLATE_DIR = SCENARIOS_DIR + "scenario/templates"
SUPPORT_DIR = SCENARIOS_DIR + "scenario/support"
###todo:
FLAVOR_NAME = "m1.tiny"
USERS_AMOUNT = 2
TENANTS_AMOUNT = 3
CONTROLLERS_AMOUNT = 2
###
RESULTS_DIR = functest_yaml.get("general").get("directories"). \
    get("dir_rally_res")
TEST_DB = functest_yaml.get("results").get("test_db_url")
FLOATING_NETWORK = functest_yaml.get("general"). \
    get("openstack").get("neutron_public_net_name")
PRIVATE_NETWORK = functest_yaml.get("general"). \
    get("openstack").get("neutron_private_net_name")

GLANCE_IMAGE_NAME = functest_yaml.get("general"). \
    get("openstack").get("image_name")
GLANCE_IMAGE_FILENAME = functest_yaml.get("general"). \
    get("openstack").get("image_file_name")
GLANCE_IMAGE_FORMAT = functest_yaml.get("general"). \
    get("openstack").get("image_disk_format")
GLANCE_IMAGE_PATH = functest_yaml.get("general"). \
    get("directories").get("dir_functest_data") + "/" + GLANCE_IMAGE_FILENAME

CINDER_VOLUME_TYPE_NAME = "volume_test"


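# Post the Rally JSON payload to the functest results collector (TEST_DB),
# tagging it with the installer, scenario and POD name gathered from the CI
# environment through functest_utils.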
def push_results_to_db(payload):

    url = TEST_DB + "/results"
    installer = functest_utils.get_installer_type(logger)
    scenario = functest_utils.get_scenario(logger)
    pod_name = functest_utils.get_pod_name(logger)
    # TODO pod_name hardcoded, info shall come from Jenkins
    params = {"project_name": "functest", "case_name": "Rally",
              "pod_name": pod_name, "installer": installer,
              "version": scenario, "details": payload}

    headers = {'Content-Type': 'application/json'}
    r = requests.post(url, data=json.dumps(params), headers=headers)
    logger.debug(r)


def get_task_id(cmd_raw):
    """
    get the task id from the 'rally task start' console output
    :param cmd_raw: raw output of the rally command
    :return: task_id as string
    """
    taskid_re = re.compile('^Task +(.*): started$')
    for line in cmd_raw.splitlines(True):
        line = line.strip()
        match = taskid_re.match(line)
        if match:
            return match.group(1)
    return None


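# The JSON document returned by "rally task results" is a list of scenario
# reports; each report carries a 'result' list (one entry per iteration),
# and each iteration holds an 'error' list that is empty on success. A task
# therefore succeeds only if every iteration of every scenario finished
# without errors.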
def task_succeed(json_raw):
    """
    Parse JSON from rally JSON results
    :param json_raw:
    :return: Bool
    """
    rally_report = json.loads(json_raw)
    for report in rally_report:
        if report is None or report.get('result') is None:
            return False

        for result in report.get('result'):
            if result is None or len(result.get('error')) > 0:
                return False

    return True


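# Build the dictionary passed to "rally task start --task-args": it selects
# the scenario services to run and injects the deployment-specific values
# (image, flavor, networks, template/support directories, amounts).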
def build_task_args(test_file_name):
    task_args = {'service_list': [test_file_name]}
    task_args['smoke'] = args.smoke
    task_args['image_name'] = GLANCE_IMAGE_NAME
    task_args['flavor_name'] = FLAVOR_NAME
    task_args['glance_image_location'] = GLANCE_IMAGE_PATH
    task_args['floating_network'] = FLOATING_NETWORK
    task_args['netid'] = functest_utils.get_network_id(
        client_dict['neutron'], PRIVATE_NETWORK).encode('ascii', 'ignore')
    task_args['tmpl_dir'] = TEMPLATE_DIR
    task_args['sup_dir'] = SUPPORT_DIR
    task_args['users_amount'] = USERS_AMOUNT
    task_args['tenants_amount'] = TENANTS_AMOUNT
    task_args['controllers_amount'] = CONTROLLERS_AMOUNT

    return task_args


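# Stream the Rally process output: in verbose mode every line is echoed as
# it arrives, otherwise only the summary lines (durations, scenario start/end
# markers and result tables) are kept and logged once the task completes.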
def get_output(proc):
    result = ""
    if args.verbose:
        while proc.poll() is None:
            line = proc.stdout.readline()
            print line.replace('\n', '')
            result += line
    else:
        while proc.poll() is None:
            line = proc.stdout.readline()
            if "Load duration" in line or \
               "started" in line or \
               "finished" in line or \
               " Preparing" in line or \
               "+-" in line or \
               "|" in line:
                result += line
            elif "test scenario" in line:
                result += "\n" + line
            elif "Full duration" in line:
                result += line + "\n\n"
        logger.info("\n" + result)

    return result


def run_task(test_name):
    #
    # the "main" function of the script: it launches Rally for one task
    # :param test_name: name of the rally test suite to run
    # :return: void
    #

    logger.info('Starting test scenario "{}" ...'.format(test_name))

    task_file = '{}task.yaml'.format(SCENARIOS_DIR)
    if not os.path.exists(task_file):
        logger.error("Task file '%s' does not exist." % task_file)
        exit(-1)

    test_file_name = '{}opnfv-{}.yaml'.format(SCENARIOS_DIR + "scenario/",
                                              test_name)
    if not os.path.exists(test_file_name):
        logger.error("The scenario '%s' does not exist." % test_file_name)
        exit(-1)

    logger.debug('Scenario fetched from : {}'.format(test_file_name))

    cmd_line = "rally task start --abort-on-sla-failure " + \
               "--task {} ".format(task_file) + \
               "--task-args \"{}\" ".format(build_task_args(test_name))
    logger.debug('running command line : {}'.format(cmd_line))

    p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                         stderr=RALLY_STDERR, shell=True)
    output = get_output(p)
    task_id = get_task_id(output)
    logger.debug('task_id : {}'.format(task_id))

    if task_id is None:
        logger.error("failed to retrieve task_id")
        exit(-1)

    # check for the result directory and create it if it does not exist
    if not os.path.exists(RESULTS_DIR):
        logger.debug("Directory '{}' does not exist, "
                     "creating it...".format(RESULTS_DIR))
        os.makedirs(RESULTS_DIR)

    # write html report file
    report_file_name = '{}opnfv-{}.html'.format(RESULTS_DIR, test_name)
    cmd_line = "rally task report {} --out {}".format(task_id,
                                                      report_file_name)

    logger.debug('running command line : {}'.format(cmd_line))
    os.popen(cmd_line)

    # get and save rally operation JSON result
    cmd_line = "rally task results %s" % task_id
    logger.debug('running command line : {}'.format(cmd_line))
    cmd = os.popen(cmd_line)
    json_results = cmd.read()
    with open('{}opnfv-{}.json'.format(RESULTS_DIR, test_name), 'w') as f:
        logger.debug('saving json file')
        f.write(json_results)

    with open('{}opnfv-{}.json'
              .format(RESULTS_DIR, test_name)) as json_file:
        json_data = json.load(json_file)

    # push the results as the payload of the test case
    if args.report:
        logger.debug("Push result into DB")
        push_results_to_db(json_data)

    """ parse JSON operation result """
    if task_succeed(json_results):
        logger.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
    else:
        logger.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")


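# main() validates the requested test name, creates the OpenStack clients,
# makes sure a public volume type and the functest Glance image exist, runs
# the selected Rally task(s) and finally cleans up the resources it created.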
def main():
    # configure script
    if args.test_name not in tests:
        logger.error("Invalid test name '%s'" % args.test_name)
        exit(-1)

    creds_nova = functest_utils.get_credentials("nova")
    nova_client = novaclient.Client('2', **creds_nova)
    creds_neutron = functest_utils.get_credentials("neutron")
    neutron_client = neutronclient.Client(**creds_neutron)
    creds_keystone = functest_utils.get_credentials("keystone")
    keystone_client = keystoneclient.Client(**creds_keystone)
    glance_endpoint = keystone_client.service_catalog.url_for(
        service_type='image', endpoint_type='publicURL')
    glance_client = glanceclient.Client(1, glance_endpoint,
                                        token=keystone_client.auth_token)
    creds_cinder = functest_utils.get_credentials("cinder")
    cinder_client = cinderclient.Client('2', creds_cinder['username'],
                                        creds_cinder['api_key'],
                                        creds_cinder['project_id'],
                                        creds_cinder['auth_url'],
                                        service_type="volume")

    client_dict['neutron'] = neutron_client

    volume_types = functest_utils.list_volume_types(cinder_client,
                                                    private=False)
    if not volume_types:
        volume_type = functest_utils.create_volume_type(
            cinder_client, CINDER_VOLUME_TYPE_NAME)
        if not volume_type:
            logger.error("Failed to create volume type...")
            exit(-1)
        else:
            logger.debug("Volume type '%s' created successfully..."
                         % CINDER_VOLUME_TYPE_NAME)
    else:
        logger.debug("Using existing volume type(s)...")

    image_id = functest_utils.get_image_id(glance_client, GLANCE_IMAGE_NAME)

    if image_id == '':
        logger.debug("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME,
                                                           GLANCE_IMAGE_PATH))
        image_id = functest_utils.create_glance_image(glance_client,
                                                      GLANCE_IMAGE_NAME,
                                                      GLANCE_IMAGE_PATH)
        if not image_id:
            logger.error("Failed to create the Glance image...")
            exit(-1)
        else:
            logger.debug("Image '%s' with ID '%s' created successfully."
                         % (GLANCE_IMAGE_NAME, image_id))
    else:
        logger.debug("Using existing image '%s' with ID '%s'..."
                     % (GLANCE_IMAGE_NAME, image_id))

    if args.test_name == "all":
        for test_name in tests:
            if not (test_name == 'all' or
                    test_name == 'vm'):
                run_task(test_name)
    else:
        print(args.test_name)
        run_task(args.test_name)

    logger.debug("Deleting image '%s' with ID '%s'..."
                 % (GLANCE_IMAGE_NAME, image_id))
    if not functest_utils.delete_glance_image(nova_client, image_id):
        logger.error("Error deleting the Glance image")

    if not volume_types:
        logger.debug("Deleting volume type '%s'..."
                     % CINDER_VOLUME_TYPE_NAME)
        if not functest_utils.delete_volume_type(cinder_client, volume_type):
            logger.error("Error deleting the volume type...")


if __name__ == '__main__':
    main()