d77cd408348ce2ca384cfee9b29341fd3c899c72
[functest.git] / testcases / VIM / OpenStack / CI / libraries / run_rally-cert.py
1 #!/usr/bin/env python
2 #
3 # Copyright (c) 2015 Orange
4 # guyrodrigue.koffi@orange.com
5 # morgan.richomme@orange.com
6 # All rights reserved. This program and the accompanying materials
7 # are made available under the terms of the Apache License, Version 2.0
8 # which accompanies this distribution, and is available at
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # 0.1 (05/2015) initial commit
12 # 0.2 (28/09/2015) extract Tempest, format json result, add ceilometer suite
13 # 0.3 (19/10/2015) remove Tempest from run_rally
14 # and push result into test DB
15 #
16
17 import re
18 import json
19 import os
20 import argparse
21 import logging
22 import yaml
23 import requests
24 import subprocess
25 import sys
26 from novaclient import client as novaclient
27 from glanceclient import client as glanceclient
28 from keystoneclient.v2_0 import client as keystoneclient
29 from neutronclient.v2_0 import client as neutronclient
30 from cinderclient import client as cinderclient
31
32 """ tests configuration """
33 tests = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
34          'neutron', 'nova', 'quotas', 'requests', 'vm', 'all']
35 parser = argparse.ArgumentParser()
36 parser.add_argument("test_name",
37                     help="Module name to be tested. "
38                          "Possible values are : "
39                          "[ {d[0]} | {d[1]} | {d[2]} | {d[3]} | {d[4]} | "
40                          "{d[5]} | {d[6]} | {d[7]} | {d[8]} | {d[9]} | "
41                          "{d[10]} ] "
42                          "The 'all' value "
43                          "performs all possible test scenarios"
44                          .format(d=tests))
45
46 parser.add_argument("-d", "--debug", help="Debug mode",  action="store_true")
47 parser.add_argument("-r", "--report",
48                     help="Create json result file",
49                     action="store_true")
50 parser.add_argument("-s", "--smoke",
51                     help="Smoke test mode",
52                     action="store_true")
53 parser.add_argument("-v", "--verbose",
54                     help="Print verbose info about the progress",
55                     action="store_true")
56
57 args = parser.parse_args()
58
# Shared OpenStack client registry, filled in by main() and read by
# build_task_args().
client_dict = {}

# Show rally's stderr on the console only when --verbose is given;
# otherwise silently discard it.
RALLY_STDERR = subprocess.STDOUT if args.verbose else open(os.devnull, 'w')

""" logging configuration """
logger = logging.getLogger("run_rally")
logger.setLevel(logging.DEBUG)

ch = logging.StreamHandler()
# The console handler's threshold follows the --debug flag.
ch.setLevel(logging.DEBUG if args.debug else logging.INFO)

formatter = logging.Formatter("%(asctime)s - %(name)s - "
                              "%(levelname)s - %(message)s")
ch.setFormatter(formatter)
logger.addHandler(ch)
80
# Root of the functest repository; every scenario path derives from it.
REPO_PATH = os.environ['repos_dir'] + '/functest/'
if not os.path.exists(REPO_PATH):
    logger.error("Functest repository directory not found '%s'" % REPO_PATH)
    exit(-1)
sys.path.append(REPO_PATH + "testcases/")
import functest_utils

# The 'with' statement closes the file itself; the previous explicit
# f.close() after the block was redundant and has been removed.
with open("/home/opnfv/functest/conf/config_functest.yaml") as f:
    functest_yaml = yaml.safe_load(f)

HOME = os.environ['HOME'] + "/"
# TODO: read the scenarios directory from config_functest.yaml instead of
# hardcoding it here:
# SCENARIOS_DIR = REPO_PATH + functest_yaml.get("general"). \
#     get("directories").get("dir_rally_scn")
SCENARIOS_DIR = REPO_PATH + "testcases/VIM/OpenStack/CI/rally_cert/"
TEMPLATE_DIR = SCENARIOS_DIR + "scenario/templates"
SUPPORT_DIR = SCENARIOS_DIR + "scenario/support"
# TODO: these deployment parameters should also come from configuration.
FLAVOR_NAME = "m1.tiny"
USERS_AMOUNT = 2
TENANTS_AMOUNT = 3
CONTROLLERS_AMOUNT = 2

RESULTS_DIR = functest_yaml.get("general").get("directories"). \
    get("dir_rally_res")
TEST_DB = functest_yaml.get("results").get("test_db_url")
FLOATING_NETWORK = functest_yaml.get("general"). \
    get("openstack").get("neutron_public_net_name")
PRIVATE_NETWORK = functest_yaml.get("general"). \
    get("openstack").get("neutron_private_net_name")

GLANCE_IMAGE_NAME = functest_yaml.get("general"). \
    get("openstack").get("image_name")
GLANCE_IMAGE_FILENAME = functest_yaml.get("general"). \
    get("openstack").get("image_file_name")
GLANCE_IMAGE_FORMAT = functest_yaml.get("general"). \
    get("openstack").get("image_disk_format")
GLANCE_IMAGE_PATH = functest_yaml.get("general"). \
    get("directories").get("dir_functest_data") + "/" + GLANCE_IMAGE_FILENAME

CINDER_VOLUME_TYPE_NAME = "volume_test"
124
125
def push_results_to_db(payload):
    """Post a rally results payload to the functest results database.

    :param payload: parsed JSON results of a rally run
    :return: None (the HTTP response object is only logged)
    """
    installer = functest_utils.get_installer_type(logger)
    scenario = functest_utils.get_scenario(logger)
    pod_name = functest_utils.get_pod_name(logger)
    # TODO pod_name hardcoded, info shall come from Jenkins
    params = {
        "project_name": "functest",
        "case_name": "Rally",
        "pod_name": pod_name,
        "installer": installer,
        "version": scenario,
        "details": payload,
    }

    url = TEST_DB + "/results"
    r = requests.post(url, data=json.dumps(params),
                      headers={'Content-Type': 'application/json'})
    logger.debug(r)
140
141
def get_task_id(cmd_raw):
    """Extract the rally task id from the output of 'rally task start'.

    :param cmd_raw: raw text captured from the rally command
    :return: task id as a string, or None when no id line is present
    """
    pattern = re.compile('^Task +(.*): started$')
    for raw_line in cmd_raw.splitlines():
        found = pattern.match(raw_line.strip())
        if found is not None:
            return found.group(1)
    return None
155
156
def task_succeed(json_raw):
    """Decide whether a rally JSON result reports overall success.

    :param json_raw: raw JSON string as produced by 'rally task results'
    :return: True when every scenario iteration finished without errors
    """
    report = json.loads(json_raw)[0]
    if report is None:
        return False
    iterations = report.get('result')
    if iterations is None:
        return False
    # Success means no iteration carries a non-empty 'error' list.
    return all(len(iteration.get('error')) == 0 for iteration in iterations)
175
176
def build_task_args(test_file_name):
    """Assemble the task-args dictionary passed to 'rally task start'.

    :param test_file_name: name of the scenario/service to exercise
    :return: dict of template arguments for the rally task file
    """
    # presumably get_network_id returns a unicode name that rally needs
    # as a plain ascii string — TODO confirm against functest_utils
    net_id = functest_utils.get_network_id(
        client_dict['neutron'], PRIVATE_NETWORK).encode('ascii', 'ignore')
    return {
        'service_list': [test_file_name],
        'smoke': args.smoke,
        'image_name': GLANCE_IMAGE_NAME,
        'flavor_name': FLAVOR_NAME,
        'glance_image_location': GLANCE_IMAGE_PATH,
        'floating_network': FLOATING_NETWORK,
        'netid': net_id,
        'tmpl_dir': TEMPLATE_DIR,
        'sup_dir': SUPPORT_DIR,
        'users_amount': USERS_AMOUNT,
        'tenants_amount': TENANTS_AMOUNT,
        'controllers_amount': CONTROLLERS_AMOUNT,
    }
193
194
def run_task(test_name):
    """Launch rally for one scenario and store its reports.

    Runs 'rally task start' for the given scenario, streams a filtered view
    of its progress to the log, then writes the HTML and JSON reports under
    RESULTS_DIR and, when --report is set, pushes the JSON payload to the
    test database.

    :param test_name: name of the rally scenario module to run
    :return: None (the process exits on fatal errors)
    """
    logger.info('starting {} test ...'.format(test_name))

    task_file = '{}task.yaml'.format(SCENARIOS_DIR)
    if not os.path.exists(task_file):
        logger.error("Task file '%s' does not exist." % task_file)
        exit(-1)

    test_file_name = '{}opnfv-{}.yaml'.format(SCENARIOS_DIR + "scenario/",
                                              test_name)
    if not os.path.exists(test_file_name):
        logger.error("The scenario '%s' does not exist." % test_file_name)
        exit(-1)

    logger.debug('Scenario fetched from : {}'.format(test_file_name))

    cmd_line = "rally task start --abort-on-sla-failure " + \
               "--task {} ".format(task_file) + \
               "--task-args \"{}\" ".format(build_task_args(test_name))
    logger.debug('running command line : {}'.format(cmd_line))

    p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                         stderr=RALLY_STDERR, shell=True)
    result = ""
    # Keep only rally's progress/result lines; drop the rest of its output.
    while p.poll() is None:
        line = p.stdout.readline()
        if "Load duration" in line or \
            "started" in line or \
            "finished" in line or \
            " Preparing" in line or \
            "+-" in line or \
            "|" in line:
            result += line
        elif "test scenario" in line:
            result += "\n" + line
        elif "Full duration" in line:
            result += line + "\n\n"

    logger.info("\n" + result)
    task_id = get_task_id(result)
    logger.debug('task_id : {}'.format(task_id))

    if task_id is None:
        logger.error("failed to retrieve task_id")
        exit(-1)

    # Check for the result directory and create it otherwise.
    if not os.path.exists(RESULTS_DIR):
        # Fixed: the original message had no '{}' placeholder, so the
        # directory name was never interpolated.
        logger.debug('{} does not exist, we create it'.format(RESULTS_DIR))
        os.makedirs(RESULTS_DIR)

    # Write the HTML report file.
    report_file_name = '{}opnfv-{}.html'.format(RESULTS_DIR, test_name)
    cmd_line = "rally task report {} --out {}".format(task_id,
                                                      report_file_name)

    logger.debug('running command line : {}'.format(cmd_line))
    os.popen(cmd_line)

    # Get and save the rally operation JSON result.
    cmd_line = "rally task results %s" % task_id
    logger.debug('running command line : {}'.format(cmd_line))
    cmd = os.popen(cmd_line)
    json_results = cmd.read()
    # Build the JSON report path once instead of repeating the format string.
    json_file_name = '{}opnfv-{}.json'.format(RESULTS_DIR, test_name)
    with open(json_file_name, 'w') as f:
        logger.debug('saving json file')
        f.write(json_results)

    with open(json_file_name) as json_file:
        json_data = json.load(json_file)

    # Push results in payload of testcase
    if args.report:
        logger.debug("Push result into DB")
        push_results_to_db(json_data)

    """ parse JSON operation result """
    if task_succeed(json_results):
        logger.info('{} test OK.'.format(test_name) + "\n")
    else:
        logger.info('{} test Failed.'.format(test_name) + "\n")
283
284
def main():
    """Validate the requested test name, set up OpenStack clients and test
    resources (volume type, Glance image), run the rally scenario(s), then
    clean up the resources this script created."""
    # configure script
    if not (args.test_name in tests):
        logger.error('argument not valid')
        exit(-1)

    # Build one authenticated client per OpenStack service; credentials
    # come from functest_utils (environment-based).
    creds_nova = functest_utils.get_credentials("nova")
    nova_client = novaclient.Client('2',**creds_nova)
    creds_neutron = functest_utils.get_credentials("neutron")
    neutron_client = neutronclient.Client(**creds_neutron)
    creds_keystone = functest_utils.get_credentials("keystone")
    keystone_client = keystoneclient.Client(**creds_keystone)
    # Glance has no direct credential helper here: its endpoint and token
    # are resolved through the keystone service catalog.
    glance_endpoint = keystone_client.service_catalog.url_for(service_type='image',
                                                   endpoint_type='publicURL')
    glance_client = glanceclient.Client(1, glance_endpoint,
                                        token=keystone_client.auth_token)
    creds_cinder = functest_utils.get_credentials("cinder")
    cinder_client = cinderclient.Client('2',creds_cinder['username'],
                                        creds_cinder['api_key'],
                                        creds_cinder['project_id'],
                                        creds_cinder['auth_url'],
                                        service_type="volume")

    # Publish the neutron client for build_task_args() (network id lookup).
    client_dict['neutron'] = neutron_client

    # Ensure at least one public volume type exists; create a test one only
    # when none is found, and remember that fact for cleanup below.
    volume_types = functest_utils.list_volume_types(cinder_client, private=False)
    if not volume_types:
        volume_type = functest_utils.create_volume_type(cinder_client, \
                                                        CINDER_VOLUME_TYPE_NAME)
        if not volume_type:
            logger.error("Failed to create volume type...")
            exit(-1)
        else:
            logger.debug("Volume type '%s' created succesfully..." \
                         % CINDER_VOLUME_TYPE_NAME)
    else:
        logger.debug("Using existing volume type(s)...")

    # Reuse the configured Glance image when present; otherwise upload it.
    # assumes get_image_id returns '' (not None) when the image is missing
    # — TODO confirm against functest_utils
    image_id = functest_utils.get_image_id(glance_client, GLANCE_IMAGE_NAME)

    if image_id == '':
        logger.debug("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME, \
                                                           GLANCE_IMAGE_PATH))
        image_id = functest_utils.create_glance_image(glance_client,\
                                                GLANCE_IMAGE_NAME,GLANCE_IMAGE_PATH)
        if not image_id:
            logger.error("Failed to create the Glance image...")
            exit(-1)
        else:
            logger.debug("Image '%s' with ID '%s' created succesfully ." \
                         % (GLANCE_IMAGE_NAME, image_id))
    else:
        logger.debug("Using existing image '%s' with ID '%s'..." \
                     % (GLANCE_IMAGE_NAME,image_id))

    # 'all' runs every module except the meta entry itself and 'vm'.
    if args.test_name == "all":
        for test_name in tests:
            if not (test_name == 'all' or
                    test_name == 'vm'):
                run_task(test_name)
    else:
        print(args.test_name)
        run_task(args.test_name)

    # Cleanup: the image is always deleted, even when it pre-existed.
    # NOTE(review): delete_glance_image is given the nova client, not the
    # glance client — looks intentional per functest_utils' API, but verify.
    logger.debug("Deleting image '%s' with ID '%s'..." \
                         % (GLANCE_IMAGE_NAME, image_id))
    if not functest_utils.delete_glance_image(nova_client, image_id):
        logger.error("Error deleting the glance image")

    # Only delete the volume type if this run created it (no public types
    # were listed at startup).
    if not volume_types:
        logger.debug("Deleting volume type '%s'..." \
                             % CINDER_VOLUME_TYPE_NAME)
        if not functest_utils.delete_volume_type(cinder_client, volume_type):
            logger.error("Error in deleting volume type...")
360
# Script entry point: run the scenario(s) requested on the command line.
if __name__ == '__main__':
    main()