Reduce Rally output
[functest-xtesting.git] / testcases / VIM / OpenStack / CI / libraries / run_rally-cert.py
1 #!/usr/bin/env python
2 #
3 # Copyright (c) 2015 Orange
4 # guyrodrigue.koffi@orange.com
5 # morgan.richomme@orange.com
6 # All rights reserved. This program and the accompanying materials
7 # are made available under the terms of the Apache License, Version 2.0
8 # which accompanies this distribution, and is available at
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # 0.1 (05/2015) initial commit
12 # 0.2 (28/09/2015) extract Tempest, format json result, add ceilometer suite
13 # 0.3 (19/10/2015) remove Tempest from run_rally
14 # and push result into test DB
15 #
16
17 import re
18 import json
19 import os
20 import argparse
21 import logging
22 import yaml
23 import requests
24 import subprocess
25 import sys
26 from novaclient import client as novaclient
27 from glanceclient import client as glanceclient
28 from keystoneclient.v2_0 import client as keystoneclient
29 from neutronclient.v2_0 import client as neutronclient
30
""" tests configuration """
# Rally scenario modules that can be requested on the command line; the
# special value 'all' runs every module except 'vm' (see main()).
tests = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
         'neutron', 'nova', 'quotas', 'requests', 'vm', 'all']
parser = argparse.ArgumentParser()
parser.add_argument("test_name",
                    help="Module name to be tested. "
                         "Possible values are : "
                         "[ {d[0]} | {d[1]} | {d[2]} | {d[3]} | {d[4]} | "
                         "{d[5]} | {d[6]} | {d[7]} | {d[8]} | {d[9]} | "
                         "{d[10]} ] "
                         "The 'all' value "
                         "performs all possible test scenarios"
                         .format(d=tests))

parser.add_argument("-d", "--debug", help="Debug mode",  action="store_true")
parser.add_argument("-r", "--report",
                    help="Create json result file",
                    action="store_true")
parser.add_argument("-s", "--smoke",
                    help="Smoke test mode",
                    action="store_true")
parser.add_argument("-v", "--verbose",
                    help="Print verbose info about the progress",
                    action="store_true")

# Parsed CLI options; read throughout the module (args.smoke, args.report, ...).
args = parser.parse_args()
57
# Filled in main() with the OpenStack clients used by build_task_args()
# (currently only the 'neutron' key).
client_dict = {}

# In verbose mode rally's stderr is merged into its stdout pipe; otherwise it
# is discarded into /dev/null.
if args.verbose:
    RALLY_STDERR = subprocess.STDOUT
else:
    RALLY_STDERR = open(os.devnull, 'w')

""" logging configuration """
logger = logging.getLogger("run_rally")
logger.setLevel(logging.DEBUG)

# Console handler: DEBUG level only when --debug was given, INFO otherwise.
ch = logging.StreamHandler()
if args.debug:
    ch.setLevel(logging.DEBUG)
else:
    ch.setLevel(logging.INFO)

formatter = logging.Formatter("%(asctime)s - %(name)s - "
                              "%(levelname)s - %(message)s")
ch.setFormatter(formatter)
logger.addHandler(ch)
79
# Locate the functest repository and make its helper module importable.
# 'repos_dir' is assumed to be exported by the CI job environment — a missing
# variable raises KeyError here.
REPO_PATH=os.environ['repos_dir']+'/functest/'
if not os.path.exists(REPO_PATH):
    logger.error("Functest repository directory not found '%s'" % REPO_PATH)
    exit(-1)
sys.path.append(REPO_PATH + "testcases/")
import functest_utils
86
# Load the functest global configuration; the path is fixed inside the
# functest container.  (The original code called f.close() after the 'with'
# block, which is redundant — the context manager already closes the file.)
with open("/home/opnfv/functest/conf/config_functest.yaml") as f:
    functest_yaml = yaml.safe_load(f)

HOME = os.environ['HOME'] + "/"
# TODO: read the scenarios directory from config_functest.yaml instead of
# hard-coding it:
# SCENARIOS_DIR = REPO_PATH + functest_yaml.get("general"). \
#     get("directories").get("dir_rally_scn")
SCENARIOS_DIR = REPO_PATH + "testcases/VIM/OpenStack/CI/rally_cert/"
TEMPLATE_DIR = SCENARIOS_DIR + "scenario/templates"
SUPPORT_DIR = SCENARIOS_DIR + "scenario/support"
# TODO: these deployment parameters should come from the configuration too.
FLAVOR_NAME = "m1.tiny"
USERS_AMOUNT = 2
TENANTS_AMOUNT = 3
CONTROLLERS_AMOUNT = 2

# Directories / endpoints taken from the functest configuration.
RESULTS_DIR = functest_yaml.get("general").get("directories"). \
    get("dir_rally_res")
TEST_DB = functest_yaml.get("results").get("test_db_url")
FLOATING_NETWORK = functest_yaml.get("general"). \
    get("openstack").get("neutron_public_net_name")
PRIVATE_NETWORK = functest_yaml.get("general"). \
    get("openstack").get("neutron_private_net_name")

# Glance guest image parameters used to create/reuse the test image.
GLANCE_IMAGE_NAME = functest_yaml.get("general"). \
    get("openstack").get("image_name")
GLANCE_IMAGE_FILENAME = functest_yaml.get("general"). \
    get("openstack").get("image_file_name")
GLANCE_IMAGE_FORMAT = functest_yaml.get("general"). \
    get("openstack").get("image_disk_format")
GLANCE_IMAGE_PATH = functest_yaml.get("general"). \
    get("directories").get("dir_functest_data") + "/" + GLANCE_IMAGE_FILENAME
122
def push_results_to_db(payload):
    """POST the rally results to the functest test database.

    :param payload: parsed rally JSON output, stored under 'details'
    :return: None (the HTTP response object is only logged)
    """
    url = TEST_DB + "/results"
    installer = functest_utils.get_installer_type(logger)
    scenario = functest_utils.get_scenario(logger)
    pod_name = functest_utils.get_pod_name(logger)
    # TODO pod_name hardcoded, info shall come from Jenkins
    params = {
        "project_name": "functest",
        "case_name": "Rally",
        "pod_name": pod_name,
        "installer": installer,
        "version": scenario,
        "details": payload,
    }
    response = requests.post(url,
                             data=json.dumps(params),
                             headers={'Content-Type': 'application/json'})
    logger.debug(response)
137
138
def get_task_id(cmd_raw):
    """Extract the rally task id from 'rally task start' console output.

    :param cmd_raw: raw multi-line output of the rally command
    :return: the task id as a string, or None when no
             'Task <id>: started' line is present
    """
    pattern = re.compile('^Task +(.*): started$')
    for raw_line in cmd_raw.splitlines():
        found = pattern.match(raw_line.strip())
        if found is not None:
            return found.group(1)
    return None
152
153
def task_succeed(json_raw):
    """Parse rally JSON results and report whether the task succeeded.

    A task is successful when the report contains a 'result' list and no
    iteration in it recorded an error.

    :param json_raw: output of 'rally task results' as a JSON string
    :return: True on success, False otherwise
    """
    rally_report = json.loads(json_raw)
    # BUG FIX: guard against an empty ('[]') or null top-level report; the
    # original code crashed with IndexError/TypeError on rally_report[0].
    if not rally_report:
        return False
    rally_report = rally_report[0]
    if rally_report is None:
        return False
    if rally_report.get('result') is None:
        return False

    for result in rally_report.get('result'):
        if len(result.get('error')) > 0:
            return False

    return True
172
173
def build_task_args(test_file_name):
    """Assemble the --task-args dict handed to rally for one scenario.

    :param test_file_name: name of the scenario module (e.g. 'nova')
    :return: dict of template arguments consumed by task.yaml
    """
    net_id = functest_utils.get_network_id(
        client_dict['neutron'], PRIVATE_NETWORK).encode('ascii', 'ignore')
    return {
        'service_list': [test_file_name],
        'smoke': args.smoke,
        'image_name': GLANCE_IMAGE_NAME,
        'flavor_name': FLAVOR_NAME,
        'glance_image_location': GLANCE_IMAGE_PATH,
        'floating_network': FLOATING_NETWORK,
        'netid': net_id,
        'tmpl_dir': TEMPLATE_DIR,
        'sup_dir': SUPPORT_DIR,
        'users_amount': USERS_AMOUNT,
        'tenants_amount': TENANTS_AMOUNT,
        'controllers_amount': CONTROLLERS_AMOUNT,
    }
190
191
def run_task(test_name):
    """Launch rally for one scenario module and collect its results.

    Runs 'rally task start' for the given scenario, filters the console
    output down to a compact summary, writes the HTML and JSON reports
    under RESULTS_DIR, optionally pushes the JSON payload to the test DB,
    and logs whether the task succeeded.

    :param test_name: name of the rally scenario module to run
    :return: None; exits the process on fatal errors
    """
    logger.info('starting {} test ...'.format(test_name))

    task_file = '{}task.yaml'.format(SCENARIOS_DIR)
    if not os.path.exists(task_file):
        logger.error("Task file '%s' does not exist." % task_file)
        exit(-1)

    test_file_name = '{}opnfv-{}.yaml'.format(SCENARIOS_DIR + "scenario/",
                                              test_name)
    if not os.path.exists(test_file_name):
        logger.error("The scenario '%s' does not exist." % test_file_name)
        exit(-1)

    logger.debug('Scenario fetched from : {}'.format(test_file_name))

    cmd_line = "rally task start --abort-on-sla-failure " + \
               "--task {} ".format(task_file) + \
               "--task-args \"{}\" ".format(build_task_args(test_name))
    logger.debug('running command line : {}'.format(cmd_line))

    p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                         stderr=RALLY_STDERR, shell=True)
    # Filter rally's verbose console output down to the interesting lines
    # (progress markers and result tables) while the task runs.
    # NOTE(review): lines emitted between the last readline and process exit
    # may be missed by this poll/readline loop — pre-existing behavior.
    result = ""
    while p.poll() is None:
        line = p.stdout.readline()
        if "Load duration" in line or \
           "started" in line or \
           "finished" in line or \
           " Preparing" in line or \
           "+-" in line or \
           "|" in line:
            result += line
        elif "test scenario" in line:
            result += "\n" + line
        elif "Full duration" in line:
            result += line + "\n\n"

    logger.info("\n" + result)
    task_id = get_task_id(result)
    logger.debug('task_id : {}'.format(task_id))

    if task_id is None:
        logger.error("failed to retrieve task_id")
        exit(-1)

    # check for result directory and create it otherwise
    if not os.path.exists(RESULTS_DIR):
        # BUG FIX: the original format string had no '{}' placeholder, so the
        # directory name was never logged.
        logger.debug('{} does not exist, we create it'.format(RESULTS_DIR))
        os.makedirs(RESULTS_DIR)

    # write html report file
    report_file_name = '{}opnfv-{}.html'.format(RESULTS_DIR, test_name)
    cmd_line = "rally task report {} --out {}".format(task_id,
                                                      report_file_name)
    logger.debug('running command line : {}'.format(cmd_line))
    os.popen(cmd_line)

    # get and save rally operation JSON result
    cmd_line = "rally task results %s" % task_id
    logger.debug('running command line : {}'.format(cmd_line))
    cmd = os.popen(cmd_line)
    json_results = cmd.read()
    json_file_name = '{}opnfv-{}.json'.format(RESULTS_DIR, test_name)
    with open(json_file_name, 'w') as f:
        logger.debug('saving json file')
        f.write(json_results)

    with open(json_file_name) as json_file:
        json_data = json.load(json_file)

    # Push results in payload of testcase
    if args.report:
        logger.debug("Push result into DB")
        push_results_to_db(json_data)

    # parse JSON operation result
    if task_succeed(json_results):
        logger.info("Test OK.")
    else:
        logger.info("Test Failed.")
280
281
def main():
    """Validate the CLI argument, build the OpenStack clients, ensure the
    Glance guest image exists, run the requested rally scenario(s), then
    delete the image."""
    if args.test_name not in tests:
        logger.error('argument not valid')
        exit(-1)

    creds_nova = functest_utils.get_credentials("nova")
    nova_client = novaclient.Client('2', **creds_nova)
    creds_neutron = functest_utils.get_credentials("neutron")
    neutron_client = neutronclient.Client(**creds_neutron)
    creds_keystone = functest_utils.get_credentials("keystone")
    keystone_client = keystoneclient.Client(**creds_keystone)
    glance_endpoint = keystone_client.service_catalog.url_for(
        service_type='image', endpoint_type='publicURL')
    glance_client = glanceclient.Client(1, glance_endpoint,
                                        token=keystone_client.auth_token)

    # Expose the neutron client to build_task_args().
    client_dict['neutron'] = neutron_client

    image_id = functest_utils.get_image_id(glance_client, GLANCE_IMAGE_NAME)

    if image_id == '':
        # No matching image yet: upload it from the local functest data dir.
        logger.debug("Creating image '%s' from '%s'..."
                     % (GLANCE_IMAGE_NAME, GLANCE_IMAGE_PATH))
        image_id = functest_utils.create_glance_image(
            glance_client, GLANCE_IMAGE_NAME, GLANCE_IMAGE_PATH)
        if not image_id:
            logger.error("Failed to create the Glance image...")
            exit(-1)
        logger.debug("Image '%s' with ID '%s' created succesfully ."
                     % (GLANCE_IMAGE_NAME, image_id))
    else:
        logger.debug("Using existing image '%s' with ID '%s'..."
                     % (GLANCE_IMAGE_NAME, image_id))

    if args.test_name == "all":
        for test_name in tests:
            if test_name not in ('all', 'vm'):
                run_task(test_name)
    else:
        print(args.test_name)
        run_task(args.test_name)

    logger.debug("Deleting image '%s' with ID '%s'..."
                 % (GLANCE_IMAGE_NAME, image_id))
    if not functest_utils.delete_glance_image(nova_client, image_id):
        logger.error("Error deleting the glance image")
# Script entry point.
if __name__ == '__main__':
    main()