Put scenario in version field on the Test DB (rather than git indication)
[functest.git] / testcases / VIM / OpenStack / CI / libraries / run_rally-cert.py
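As a rough illustration of the change (a sketch with placeholder values, not taken from this file), the record POSTed to the Test DB now carries the deployment scenario in its "version" field instead of a git-based version string:

    # minimal sketch of the payload built in push_results_to_db() below;
    # the pod, installer and scenario values are hypothetical examples
    params = {"project_name": "functest", "case_name": "Rally",
              "pod_name": "opnfv-jump-2",            # functest_utils.get_pod_name()
              "installer": "fuel",                   # functest_utils.get_installer_type()
              "version": "os-odl_l2-nofeature-ha",   # functest_utils.get_scenario()
              "details": {}}                         # raw Rally JSON results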
#!/usr/bin/env python
#
# Copyright (c) 2015 Orange
# guyrodrigue.koffi@orange.com
# morgan.richomme@orange.com
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
#
# 0.1 (05/2015) initial commit
# 0.2 (28/09/2015) extract Tempest, format json result, add ceilometer suite
# 0.3 (19/10/2015) remove Tempest from run_rally
# and push result into test DB
#

import re
import json
import os
import argparse
import logging
import yaml
import requests
import subprocess
import sys
from novaclient import client as novaclient
from glanceclient import client as glanceclient
from keystoneclient.v2_0 import client as keystoneclient
from neutronclient.v2_0 import client as neutronclient

""" tests configuration """
tests = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
         'neutron', 'nova', 'quotas', 'requests', 'vm', 'all']
parser = argparse.ArgumentParser()
parser.add_argument("test_name",
                    help="Module name to be tested. "
                         "Possible values are: "
                         "[ {d[0]} | {d[1]} | {d[2]} | {d[3]} | {d[4]} | "
                         "{d[5]} | {d[6]} | {d[7]} | {d[8]} | {d[9]} | "
                         "{d[10]} ] "
                         "The 'all' value "
                         "performs all possible test scenarios."
                         .format(d=tests))

parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
parser.add_argument("-r", "--report",
                    help="Create json result file",
                    action="store_true")
parser.add_argument("-s", "--smoke",
                    help="Smoke test mode",
                    action="store_true")
parser.add_argument("-v", "--verbose",
                    help="Print verbose info about the progress",
                    action="store_true")

args = parser.parse_args()

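# Example invocation (illustrative values, not from this file):
#   python run_rally-cert.py authenticate --smoke --report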
client_dict = {}

if args.verbose:
    RALLY_STDERR = subprocess.STDOUT
else:
    RALLY_STDERR = open(os.devnull, 'w')

""" logging configuration """
logger = logging.getLogger("run_rally")
logger.setLevel(logging.DEBUG)

ch = logging.StreamHandler()
if args.debug:
    ch.setLevel(logging.DEBUG)
else:
    ch.setLevel(logging.INFO)

formatter = logging.Formatter("%(asctime)s - %(name)s - "
                              "%(levelname)s - %(message)s")
ch.setFormatter(formatter)
logger.addHandler(ch)

REPO_PATH = os.environ['repos_dir'] + '/functest/'
if not os.path.exists(REPO_PATH):
    logger.error("Functest repository directory not found '%s'" % REPO_PATH)
    exit(-1)
sys.path.append(REPO_PATH + "testcases/")
import functest_utils

with open("/home/opnfv/functest/conf/config_functest.yaml") as f:
    functest_yaml = yaml.safe_load(f)

HOME = os.environ['HOME'] + "/"
####todo:
#SCENARIOS_DIR = REPO_PATH + functest_yaml.get("general"). \
#    get("directories").get("dir_rally_scn")
SCENARIOS_DIR = REPO_PATH + "testcases/VIM/OpenStack/CI/rally_cert/"
###
TEMPLATE_DIR = SCENARIOS_DIR + "scenario/templates"
SUPPORT_DIR = SCENARIOS_DIR + "scenario/support"
###todo:
FLAVOR_NAME = "m1.tiny"
USERS_AMOUNT = 2
TENANTS_AMOUNT = 3
CONTROLLERS_AMOUNT = 2
###
RESULTS_DIR = functest_yaml.get("general").get("directories"). \
    get("dir_rally_res")
TEST_DB = functest_yaml.get("results").get("test_db_url")
FLOATING_NETWORK = functest_yaml.get("general"). \
    get("openstack").get("neutron_public_net_name")
FLOATING_SUBNET_CIDR = functest_yaml.get("general"). \
    get("openstack").get("neutron_public_subnet_cidr")
PRIVATE_NETWORK = functest_yaml.get("general"). \
    get("openstack").get("neutron_private_net_name")

GLANCE_IMAGE_NAME = functest_yaml.get("general"). \
    get("openstack").get("image_name")
GLANCE_IMAGE_FILENAME = functest_yaml.get("general"). \
    get("openstack").get("image_file_name")
GLANCE_IMAGE_FORMAT = functest_yaml.get("general"). \
    get("openstack").get("image_disk_format")
GLANCE_IMAGE_PATH = functest_yaml.get("general"). \
    get("directories").get("dir_functest_data") + "/" + GLANCE_IMAGE_FILENAME


def push_results_to_db(payload):

    url = TEST_DB + "/results"
    installer = functest_utils.get_installer_type(logger)
    scenario = functest_utils.get_scenario(logger)
    pod_name = functest_utils.get_pod_name(logger)
    # TODO pod_name hardcoded, info shall come from Jenkins
    # the "version" field carries the deployment scenario
    # (rather than the git indication)
    params = {"project_name": "functest", "case_name": "Rally",
              "pod_name": pod_name, "installer": installer,
              "version": scenario, "details": payload}

    headers = {'Content-Type': 'application/json'}
    r = requests.post(url, data=json.dumps(params), headers=headers)
    logger.debug(r)


def get_task_id(cmd_raw):
    """
    Get the task id from the 'rally task start' command output
    :param cmd_raw: raw output of the rally command
    :return: task_id as string
    """
    taskid_re = re.compile('^Task +(.*): started$')
    for line in cmd_raw.splitlines(True):
        line = line.strip()
        match = taskid_re.match(line)
        if match:
            return match.group(1)
    return None


def task_succeed(json_raw):
    """
    Check whether a Rally run succeeded, based on its JSON results
    :param json_raw: raw JSON results of the Rally task
    :return: Bool
    """
    rally_report = json.loads(json_raw)
    rally_report = rally_report[0]
    if rally_report is None:
        return False
    if rally_report.get('result') is None:
        return False

    for result in rally_report.get('result'):
        if len(result.get('error')) > 0:
            return False

    return True


def build_task_args(test_file_name):
    task_args = {'service_list': [test_file_name]}
    task_args['smoke'] = args.smoke
    task_args['image_name'] = GLANCE_IMAGE_NAME
    task_args['flavor_name'] = FLAVOR_NAME
    task_args['glance_image_location'] = GLANCE_IMAGE_PATH
    task_args['floating_network'] = FLOATING_NETWORK
    task_args['floating_subnet_cidr'] = FLOATING_SUBNET_CIDR
    task_args['netid'] = functest_utils.get_network_id(
        client_dict['neutron'], PRIVATE_NETWORK).encode('ascii', 'ignore')
    task_args['tmpl_dir'] = TEMPLATE_DIR
    task_args['sup_dir'] = SUPPORT_DIR
    task_args['users_amount'] = USERS_AMOUNT
    task_args['tenants_amount'] = TENANTS_AMOUNT
    task_args['controllers_amount'] = CONTROLLERS_AMOUNT

    return task_args


def run_task(test_name):
    #
    # the "main" function of the script, which launches Rally for a task
    # :param test_name: name of the Rally test
    # :return: void
    #

    logger.info('starting {} test ...'.format(test_name))

    task_file = '{}task.yaml'.format(SCENARIOS_DIR)
    if not os.path.exists(task_file):
        logger.error("Task file '%s' does not exist." % task_file)
        exit(-1)

    test_file_name = '{}opnfv-{}.yaml'.format(SCENARIOS_DIR + "scenario/",
                                              test_name)
    if not os.path.exists(test_file_name):
        logger.error("The scenario '%s' does not exist." % test_file_name)
        exit(-1)

    logger.debug('Scenario fetched from: {}'.format(test_file_name))

    cmd_line = "rally task start --abort-on-sla-failure " + \
               "--task {} ".format(task_file) + \
               "--task-args \"{}\" ".format(build_task_args(test_name))
    logger.debug('running command line: {}'.format(cmd_line))

    p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                         stderr=RALLY_STDERR, shell=True)
    result = ""
    while p.poll() is None:
        line = p.stdout.readline()
        print line.replace('\n', '')
        result += line

    task_id = get_task_id(result)
    logger.debug('task_id: {}'.format(task_id))

    if task_id is None:
        logger.error("failed to retrieve task_id")
        exit(-1)

    # check for result directory and create it otherwise
    if not os.path.exists(RESULTS_DIR):
        logger.debug("{} does not exist, we create it".format(RESULTS_DIR))
        os.makedirs(RESULTS_DIR)

    # write html report file
    report_file_name = '{}opnfv-{}.html'.format(RESULTS_DIR, test_name)
    cmd_line = "rally task report {} --out {}".format(task_id,
                                                      report_file_name)

    logger.debug('running command line: {}'.format(cmd_line))
    os.popen(cmd_line)

    # get and save rally operation JSON result
    cmd_line = "rally task results %s" % task_id
    logger.debug('running command line: {}'.format(cmd_line))
    cmd = os.popen(cmd_line)
    json_results = cmd.read()
    with open('{}opnfv-{}.json'.format(RESULTS_DIR, test_name), 'w') as f:
        logger.debug('saving json file')
        f.write(json_results)

    with open('{}opnfv-{}.json'
              .format(RESULTS_DIR, test_name)) as json_file:
        json_data = json.load(json_file)

    # Push results in payload of testcase
    if args.report:
        logger.debug("Push result into DB")
        push_results_to_db(json_data)

    """ parse JSON operation result """
    if task_succeed(json_results):
        print 'Test OK'
    else:
        print 'Test KO'


def main():
    # configure script
    if args.test_name not in tests:
        logger.error('argument not valid')
        exit(-1)

    creds_nova = functest_utils.get_credentials("nova")
    nova_client = novaclient.Client('2', **creds_nova)
    creds_neutron = functest_utils.get_credentials("neutron")
    neutron_client = neutronclient.Client(**creds_neutron)
    creds_keystone = functest_utils.get_credentials("keystone")
    keystone_client = keystoneclient.Client(**creds_keystone)
    glance_endpoint = keystone_client.service_catalog.url_for(
        service_type='image', endpoint_type='publicURL')
    glance_client = glanceclient.Client(1, glance_endpoint,
                                        token=keystone_client.auth_token)

    client_dict['neutron'] = neutron_client

    image_id = functest_utils.get_image_id(glance_client, GLANCE_IMAGE_NAME)

    if image_id == '':
        logger.debug("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME,
                                                           GLANCE_IMAGE_PATH))
        image_id = functest_utils.create_glance_image(glance_client,
                                                      GLANCE_IMAGE_NAME,
                                                      GLANCE_IMAGE_PATH)
        if not image_id:
            logger.error("Failed to create the Glance image...")
            exit(-1)
        else:
            logger.debug("Image '%s' with ID '%s' created successfully." %
                         (GLANCE_IMAGE_NAME, image_id))
    else:
        logger.debug("Using existing image '%s' with ID '%s'..." %
                     (GLANCE_IMAGE_NAME, image_id))

    if args.test_name == "all":
        for test_name in tests:
            if not (test_name == 'all' or
                    test_name == 'vm'):
                print(test_name)
                run_task(test_name)
    else:
        print(args.test_name)
        run_task(args.test_name)

    logger.debug("Deleting image '%s' with ID '%s'..." %
                 (GLANCE_IMAGE_NAME, image_id))
    if not functest_utils.delete_glance_image(nova_client, image_id):
        logger.error("Error deleting the glance image")


if __name__ == '__main__':
    main()