Merge "Add script to generate the OpenStack defaults"
[functest.git] / testcases / VIM / OpenStack / CI / libraries / run_rally-cert.py
1 #!/usr/bin/env python
2 #
3 # Copyright (c) 2015 Orange
4 # guyrodrigue.koffi@orange.com
5 # morgan.richomme@orange.com
6 # All rights reserved. This program and the accompanying materials
7 # are made available under the terms of the Apache License, Version 2.0
8 # which accompanies this distribution, and is available at
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # 0.1 (05/2015) initial commit
12 # 0.2 (28/09/2015) extract Tempest, format json result, add ceilometer suite
13 # 0.3 (19/10/2015) remove Tempest from run_rally
14 # and push result into test DB
15 #
16
17 import re
18 import json
19 import os
20 import argparse
21 import logging
22 import yaml
23 import requests
24 import subprocess
25 import sys
26 from novaclient import client as novaclient
27 from glanceclient import client as glanceclient
28 from keystoneclient.v2_0 import client as keystoneclient
29 from neutronclient.v2_0 import client as neutronclient
30 from cinderclient import client as cinderclient
31
""" tests configuration """
# Rally scenario modules this script can launch; 'all' expands to every
# module except 'vm' (see the loop in main()).
tests = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
         'neutron', 'nova', 'quotas', 'requests', 'vm', 'all']
# command line: one mandatory scenario name plus optional behavior flags
parser = argparse.ArgumentParser()
parser.add_argument("test_name",
                    help="Module name to be tested. "
                         "Possible values are : "
                         "[ {d[0]} | {d[1]} | {d[2]} | {d[3]} | {d[4]} | "
                         "{d[5]} | {d[6]} | {d[7]} | {d[8]} | {d[9]} | "
                         "{d[10]} ] "
                         "The 'all' value "
                         "performs all possible test scenarios"
                         .format(d=tests))

parser.add_argument("-d", "--debug", help="Debug mode",  action="store_true")
parser.add_argument("-r", "--report",
                    help="Create json result file",
                    action="store_true")
parser.add_argument("-s", "--smoke",
                    help="Smoke test mode",
                    action="store_true")
parser.add_argument("-v", "--verbose",
                    help="Print verbose info about the progress",
                    action="store_true")

# parsed once at import time; the rest of the script reads flags from here
args = parser.parse_args()
58
# shared OpenStack clients, filled in by main() (currently only 'neutron',
# which build_task_args() reads)
client_dict = {}

# in verbose mode rally's stderr is merged into the captured stdout;
# otherwise it is discarded entirely
if args.verbose:
    RALLY_STDERR = subprocess.STDOUT
else:
    RALLY_STDERR = open(os.devnull, 'w')

""" logging configuration """
logger = logging.getLogger("run_rally")
logger.setLevel(logging.DEBUG)

# console handler; -d/--debug raises its level from INFO to DEBUG
ch = logging.StreamHandler()
if args.debug:
    ch.setLevel(logging.DEBUG)
else:
    ch.setLevel(logging.INFO)

formatter = logging.Formatter("%(asctime)s - %(name)s - "
                              "%(levelname)s - %(message)s")
ch.setFormatter(formatter)
logger.addHandler(ch)
# Locate the functest repository from the environment; abort early since
# every path below depends on it.
REPO_PATH = os.environ['repos_dir'] + '/functest/'
if not os.path.exists(REPO_PATH):
    logger.error("Functest repository directory not found '%s'" % REPO_PATH)
    exit(-1)
sys.path.append(REPO_PATH + "testcases/")
import functest_utils

# Load the global functest configuration. The 'with' statement closes the
# file on exit (the original also called f.close() redundantly afterwards).
with open("/home/opnfv/functest/conf/config_functest.yaml") as f:
    functest_yaml = yaml.safe_load(f)

HOME = os.environ['HOME'] + "/"
####todo:
#SCENARIOS_DIR = REPO_PATH + functest_yaml.get("general"). \
#    get("directories").get("dir_rally_scn")
SCENARIOS_DIR = REPO_PATH + "testcases/VIM/OpenStack/CI/rally_cert/"
###
TEMPLATE_DIR = SCENARIOS_DIR + "scenario/templates"
SUPPORT_DIR = SCENARIOS_DIR + "scenario/support"
###todo: hard-coded test-bed parameters; should come from configuration
FLAVOR_NAME = "m1.tiny"
USERS_AMOUNT = 2
TENANTS_AMOUNT = 3
CONTROLLERS_AMOUNT = 2
###
# Remaining constants are read from the functest YAML configuration.
RESULTS_DIR = functest_yaml.get("general").get("directories"). \
    get("dir_rally_res")
TEST_DB = functest_yaml.get("results").get("test_db_url")
FLOATING_NETWORK = functest_yaml.get("general"). \
    get("openstack").get("neutron_public_net_name")
PRIVATE_NETWORK = functest_yaml.get("general"). \
    get("openstack").get("neutron_private_net_name")

GLANCE_IMAGE_NAME = functest_yaml.get("general"). \
    get("openstack").get("image_name")
GLANCE_IMAGE_FILENAME = functest_yaml.get("general"). \
    get("openstack").get("image_file_name")
GLANCE_IMAGE_FORMAT = functest_yaml.get("general"). \
    get("openstack").get("image_disk_format")
GLANCE_IMAGE_PATH = functest_yaml.get("general"). \
    get("directories").get("dir_functest_data") + "/" + GLANCE_IMAGE_FILENAME

# name used when this script has to create a volume type itself (see main)
CINDER_VOLUME_TYPE_NAME = "volume_test"
125
def push_results_to_db(payload):
    """
    Post the rally results *payload* to the test result collection API.

    :param payload: JSON-serializable rally results (task details)
    :return: None (the HTTP response is only logged)
    """
    # TODO pod_name hardcoded, info shall come from Jenkins
    body = {"project_name": "functest", "case_name": "Rally",
            "pod_name": functest_utils.get_pod_name(logger),
            "installer": functest_utils.get_installer_type(logger),
            "version": functest_utils.get_scenario(logger),
            "details": payload}

    response = requests.post(TEST_DB + "/results",
                             data=json.dumps(body),
                             headers={'Content-Type': 'application/json'})
    logger.debug(response)
140
141
def get_task_id(cmd_raw):
    """
    Extract the rally task id from the 'rally task start' console output.

    :param cmd_raw: raw stdout captured from the rally command
    :return: task id as a string, or None when no id line is found
    """
    pattern = re.compile('^Task +(.*): started$')
    for raw_line in cmd_raw.splitlines(True):
        found = pattern.match(raw_line.strip())
        if found:
            return found.group(1)
    return None
155
156
def task_succeed(json_raw):
    """
    Parse the JSON produced by 'rally task results' and decide success.

    :param json_raw: JSON string (a list with one report dict per subtask)
    :return: True when every iteration ran without error, False otherwise
             (including empty or malformed reports)
    """
    rally_report = json.loads(json_raw)
    # BUG FIX: an empty list previously raised IndexError on [0]
    if not rally_report:
        return False
    rally_report = rally_report[0]
    if rally_report is None:
        return False
    if rally_report.get('result') is None:
        return False

    for result in rally_report.get('result'):
        # each iteration carries an 'error' list; any entry means failure.
        # BUG FIX: len(None) previously raised TypeError when the key
        # was missing; a missing/empty list now counts as "no error".
        if result.get('error'):
            return False

    return True
176
def build_task_args(test_file_name):
    """
    Assemble the --task-args dictionary for one rally scenario module.

    :param test_file_name: scenario module name (e.g. 'nova')
    :return: dict of arguments interpolated into the rally task file
    """
    # resolve the private network id through the shared neutron client
    net_id = functest_utils.get_network_id(
        client_dict['neutron'], PRIVATE_NETWORK).encode('ascii', 'ignore')
    return {
        'service_list': [test_file_name],
        'smoke': args.smoke,
        'image_name': GLANCE_IMAGE_NAME,
        'flavor_name': FLAVOR_NAME,
        'glance_image_location': GLANCE_IMAGE_PATH,
        'floating_network': FLOATING_NETWORK,
        'netid': net_id,
        'tmpl_dir': TEMPLATE_DIR,
        'sup_dir': SUPPORT_DIR,
        'users_amount': USERS_AMOUNT,
        'tenants_amount': TENANTS_AMOUNT,
        'controllers_amount': CONTROLLERS_AMOUNT,
    }
193
194
def get_output(proc):
    """
    Collect the stdout of the running rally subprocess.

    :param proc: subprocess.Popen object created with stdout=PIPE
    :return: captured (and, in quiet mode, filtered) output as one string
    """
    # NOTE(review): lines still buffered when poll() first returns non-None
    # may be dropped -- confirm trailing output does not matter here.
    result = ""
    if args.verbose:
        # verbose mode: echo every line to the console and keep it all
        while proc.poll() is None:
            line = proc.stdout.readline()
            print line.replace('\n', '')
            result += line
    else:
        # quiet mode: keep only rally's progress/summary/table lines
        while proc.poll() is None:
            line = proc.stdout.readline()
            if "Load duration" in line or \
               "started" in line or \
               "finished" in line or \
               " Preparing" in line or \
               "+-" in line or \
               "|" in line:
                result += line
            elif "test scenario" in line:
                # blank line before each scenario header for readability
                result += "\n" + line
            elif "Full duration" in line:
                result += line + "\n\n"
        logger.info("\n" + result)

    return result
220
def run_task(test_name):
    """
    Launch rally for one scenario module and collect its results.

    Runs 'rally task start' for the given module, stores the HTML and JSON
    reports under RESULTS_DIR, optionally pushes the JSON payload to the
    test DB (-r/--report), and logs whether the scenario succeeded.

    :param test_name: name of the rally scenario module to run
    :return: None (exits the process on fatal errors)
    """
    logger.info('Starting test scenario "{}" ...'.format(test_name))

    task_file = '{}task.yaml'.format(SCENARIOS_DIR)
    if not os.path.exists(task_file):
        logger.error("Task file '%s' does not exist." % task_file)
        exit(-1)

    test_file_name = '{}opnfv-{}.yaml'.format(SCENARIOS_DIR + "scenario/",
                                              test_name)
    if not os.path.exists(test_file_name):
        logger.error("The scenario '%s' does not exist." % test_file_name)
        exit(-1)

    logger.debug('Scenario fetched from : {}'.format(test_file_name))

    cmd_line = "rally task start --abort-on-sla-failure " + \
               "--task {} ".format(task_file) + \
               "--task-args \"{}\" ".format(build_task_args(test_name))
    logger.debug('running command line : {}'.format(cmd_line))

    p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                         stderr=RALLY_STDERR, shell=True)
    output = get_output(p)
    task_id = get_task_id(output)
    logger.debug('task_id : {}'.format(task_id))

    if task_id is None:
        logger.error("failed to retrieve task_id")
        exit(-1)

    # check for result directory and create it otherwise
    if not os.path.exists(RESULTS_DIR):
        # BUG FIX: the original message had no '{}' placeholder, so the
        # directory name was never interpolated into the log line.
        logger.debug('{} does not exist, we create it'.format(RESULTS_DIR))
        os.makedirs(RESULTS_DIR)

    # write html report file
    report_file_name = '{}opnfv-{}.html'.format(RESULTS_DIR, test_name)
    cmd_line = "rally task report {} --out {}".format(task_id,
                                                      report_file_name)
    logger.debug('running command line : {}'.format(cmd_line))
    os.popen(cmd_line)

    # get and save rally operation JSON result
    cmd_line = "rally task results %s" % task_id
    logger.debug('running command line : {}'.format(cmd_line))
    json_results = os.popen(cmd_line).read()
    json_file_name = '{}opnfv-{}.json'.format(RESULTS_DIR, test_name)
    with open(json_file_name, 'w') as f:
        logger.debug('saving json file')
        f.write(json_results)

    with open(json_file_name) as json_file:
        json_data = json.load(json_file)

    # Push results in payload of testcase
    if args.report:
        logger.debug("Push result into DB")
        push_results_to_db(json_data)

    # parse JSON operation result
    if task_succeed(json_results):
        logger.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
    else:
        logger.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")
293
def main():
    """Validate the CLI argument, prepare the OpenStack resources the
    scenarios need (volume type, Glance image), run the requested rally
    scenario(s) and clean up what was created."""
    # configure script: reject any name not in the supported list
    if not (args.test_name in tests):
        logger.error('argument not valid')
        exit(-1)

    # build the OpenStack clients used below
    creds_nova = functest_utils.get_credentials("nova")
    nova_client = novaclient.Client('2',**creds_nova)
    creds_neutron = functest_utils.get_credentials("neutron")
    neutron_client = neutronclient.Client(**creds_neutron)
    creds_keystone = functest_utils.get_credentials("keystone")
    keystone_client = keystoneclient.Client(**creds_keystone)
    glance_endpoint = keystone_client.service_catalog.url_for(service_type='image',
                                                   endpoint_type='publicURL')
    glance_client = glanceclient.Client(1, glance_endpoint,
                                        token=keystone_client.auth_token)
    creds_cinder = functest_utils.get_credentials("cinder")
    cinder_client = cinderclient.Client('2',creds_cinder['username'],
                                        creds_cinder['api_key'],
                                        creds_cinder['project_id'],
                                        creds_cinder['auth_url'],
                                        service_type="volume")

    # build_task_args() reads the neutron client from this shared dict
    client_dict['neutron'] = neutron_client

    # ensure at least one public volume type exists for the cinder tests
    volume_types = functest_utils.list_volume_types(cinder_client, private=False)
    if not volume_types:
        volume_type = functest_utils.create_volume_type(cinder_client, \
                                                        CINDER_VOLUME_TYPE_NAME)
        if not volume_type:
            logger.error("Failed to create volume type...")
            exit(-1)
        else:
            logger.debug("Volume type '%s' created succesfully..." \
                         % CINDER_VOLUME_TYPE_NAME)
    else:
        logger.debug("Using existing volume type(s)...")

    # reuse the configured Glance image when present, create it otherwise
    image_id = functest_utils.get_image_id(glance_client, GLANCE_IMAGE_NAME)

    if image_id == '':
        logger.debug("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME, \
                                                           GLANCE_IMAGE_PATH))
        image_id = functest_utils.create_glance_image(glance_client,\
                                                GLANCE_IMAGE_NAME,GLANCE_IMAGE_PATH)
        if not image_id:
            logger.error("Failed to create the Glance image...")
            exit(-1)
        else:
            logger.debug("Image '%s' with ID '%s' created succesfully ." \
                         % (GLANCE_IMAGE_NAME, image_id))
    else:
        logger.debug("Using existing image '%s' with ID '%s'..." \
                     % (GLANCE_IMAGE_NAME,image_id))

    # 'all' expands to every module except the meta entry itself and 'vm'
    if args.test_name == "all":
        for test_name in tests:
            if not (test_name == 'all' or
                    test_name == 'vm'):
                run_task(test_name)
    else:
        print(args.test_name)
        run_task(args.test_name)

    # NOTE(review): the image is deleted even when a pre-existing one was
    # reused, and the deletion goes through the *nova* client -- confirm
    # both are intended (functest_utils.delete_glance_image not visible).
    logger.debug("Deleting image '%s' with ID '%s'..." \
                         % (GLANCE_IMAGE_NAME, image_id))
    if not functest_utils.delete_glance_image(nova_client, image_id):
        logger.error("Error deleting the glance image")

    # only delete the volume type when this script created it above
    if not volume_types:
        logger.debug("Deleting volume type '%s'..." \
                             % CINDER_VOLUME_TYPE_NAME)
        if not functest_utils.delete_volume_type(cinder_client, volume_type):
            logger.error("Error in deleting volume type...")


# script entry point
if __name__ == '__main__':
    main()