Reduce the number of iterations to ten in rally scenarios
[functest.git] / testcases / VIM / OpenStack / CI / libraries / run_rally-cert.py
1 #!/usr/bin/env python
2 #
3 # Copyright (c) 2015 Orange
4 # guyrodrigue.koffi@orange.com
5 # morgan.richomme@orange.com
6 # All rights reserved. This program and the accompanying materials
7 # are made available under the terms of the Apache License, Version 2.0
8 # which accompanies this distribution, and is available at
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # 0.1 (05/2015) initial commit
12 # 0.2 (28/09/2015) extract Tempest, format json result, add ceilometer suite
13 # 0.3 (19/10/2015) remove Tempest from run_rally
14 # and push result into test DB
15 #
16
17 import re
18 import json
19 import os
20 import argparse
21 import logging
22 import yaml
23 import requests
24 import subprocess
25 import sys
26 from novaclient import client as novaclient
27 from glanceclient import client as glanceclient
28 from keystoneclient.v2_0 import client as keystoneclient
29 from neutronclient.v2_0 import client as neutronclient
30 from cinderclient import client as cinderclient
31
32 """ tests configuration """
33 tests = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
34          'neutron', 'nova', 'quotas', 'requests', 'vm', 'all']
35 parser = argparse.ArgumentParser()
36 parser.add_argument("test_name",
37                     help="Module name to be tested. "
38                          "Possible values are : "
39                          "[ {d[0]} | {d[1]} | {d[2]} | {d[3]} | {d[4]} | "
40                          "{d[5]} | {d[6]} | {d[7]} | {d[8]} | {d[9]} | "
41                          "{d[10]} ] "
42                          "The 'all' value "
43                          "performs all possible test scenarios"
44                          .format(d=tests))
45
46 parser.add_argument("-d", "--debug", help="Debug mode",  action="store_true")
47 parser.add_argument("-r", "--report",
48                     help="Create json result file",
49                     action="store_true")
50 parser.add_argument("-s", "--smoke",
51                     help="Smoke test mode",
52                     action="store_true")
53 parser.add_argument("-v", "--verbose",
54                     help="Print verbose info about the progress",
55                     action="store_true")
56
57 args = parser.parse_args()
58
# Shared OpenStack clients; main() stores the neutron client here so that
# build_task_args() can resolve network ids.
client_dict = {}

# In verbose mode rally's stderr is merged into its stdout stream;
# otherwise it is discarded.
if args.verbose:
    RALLY_STDERR = subprocess.STDOUT
else:
    RALLY_STDERR = open(os.devnull, 'w')

""" logging configuration """
logger = logging.getLogger("run_rally")
logger.setLevel(logging.DEBUG)

# Console handler; -d/--debug lifts its threshold from INFO to DEBUG.
ch = logging.StreamHandler()
if args.debug:
    ch.setLevel(logging.DEBUG)
else:
    ch.setLevel(logging.INFO)

formatter = logging.Formatter("%(asctime)s - %(name)s - "
                              "%(levelname)s - %(message)s")
ch.setFormatter(formatter)
logger.addHandler(ch)
80
# Locate the functest repository; helper modules and the rally scenario
# files live under it, so abort early when it is missing.
REPO_PATH = os.environ['repos_dir'] + '/functest/'
if not os.path.exists(REPO_PATH):
    logger.error("Functest repository directory not found '%s'" % REPO_PATH)
    exit(-1)
sys.path.append(REPO_PATH + "testcases/")
import functest_utils

# Load the global functest configuration (networks, image names, result
# directories ...). The 'with' block closes the file itself, so the
# redundant explicit f.close() that followed it has been removed.
with open("/home/opnfv/functest/conf/config_functest.yaml") as f:
    functest_yaml = yaml.safe_load(f)
91
HOME = os.environ['HOME']+"/"
####todo:
#SCENARIOS_DIR = REPO_PATH + functest_yaml.get("general"). \
#    get("directories").get("dir_rally_scn")
SCENARIOS_DIR = REPO_PATH + "testcases/VIM/OpenStack/CI/rally_cert/"
###
TEMPLATE_DIR = SCENARIOS_DIR + "scenario/templates"
SUPPORT_DIR = SCENARIOS_DIR + "scenario/support"
###todo:
# Rally load parameters shared by every scenario (see build_task_args()).
FLAVOR_NAME = "m1.tiny"
USERS_AMOUNT = 2
TENANTS_AMOUNT = 3
ITERATIONS_AMOUNT = 10
CONCURRENCY = 4

###
# Result/report locations and OpenStack resource names, all read from the
# functest configuration file loaded above.
RESULTS_DIR = functest_yaml.get("general").get("directories"). \
    get("dir_rally_res")
TEST_DB = functest_yaml.get("results").get("test_db_url")
FLOATING_NETWORK = functest_yaml.get("general"). \
    get("openstack").get("neutron_public_net_name")
PRIVATE_NETWORK = functest_yaml.get("general"). \
    get("openstack").get("neutron_private_net_name")

GLANCE_IMAGE_NAME = functest_yaml.get("general"). \
    get("openstack").get("image_name")
GLANCE_IMAGE_FILENAME = functest_yaml.get("general"). \
    get("openstack").get("image_file_name")
GLANCE_IMAGE_FORMAT = functest_yaml.get("general"). \
    get("openstack").get("image_disk_format")
GLANCE_IMAGE_PATH = functest_yaml.get("general"). \
    get("directories").get("dir_functest_data") + "/" + GLANCE_IMAGE_FILENAME

# Name of the temporary cinder volume type created for the cinder suite.
CINDER_VOLUME_TYPE_NAME = "volume_test"
126
127
def push_results_to_db(payload):
    """
    Post a rally result payload to the functest results database.

    :param payload: JSON-serializable rally results (the 'details' field)
    :return: void (the HTTP response is only logged)
    """
    url = TEST_DB + "/results"
    # Deployment metadata gathered from the environment via functest_utils.
    installer = functest_utils.get_installer_type(logger)
    scenario = functest_utils.get_scenario(logger)
    pod_name = functest_utils.get_pod_name(logger)
    # TODO pod_name hardcoded, info shall come from Jenkins
    params = {"project_name": "functest",
              "case_name": "Rally",
              "pod_name": pod_name,
              "installer": installer,
              "version": scenario,
              "details": payload}

    headers = {'Content-Type': 'application/json'}
    response = requests.post(url, data=json.dumps(params), headers=headers)
    logger.debug(response)
142
143
def get_task_id(cmd_raw):
    """
    Extract the rally task id from the 'rally task start' console output.

    :param cmd_raw: raw stdout captured from the rally command
    :return: the task id as a string, or None when no id line is found
    """
    pattern = re.compile('^Task +(.*): started$')
    for raw_line in cmd_raw.splitlines(True):
        found = pattern.match(raw_line.strip())
        if found:
            return found.group(1)
    return None
157
158
def task_succeed(json_raw):
    """
    Decide whether a rally run succeeded from its JSON results.

    :param json_raw: JSON string produced by 'rally task results'
    :return: True when every iteration of every report ran without error
    """
    for report in json.loads(json_raw):
        if report is None:
            return False
        iterations = report.get('result')
        if iterations is None:
            return False
        for iteration in iterations:
            if iteration is None or len(iteration.get('error')) > 0:
                return False
    return True
175
176
def build_task_args(test_file_name):
    """
    Assemble the --task-args dict handed to 'rally task start'.

    :param test_file_name: rally service/scenario name (e.g. 'nova')
    :return: dict of template arguments for the rally task file
    """
    # Resolve the private network id, stripped to plain ascii for rally.
    net_id = functest_utils.get_network_id(
        client_dict['neutron'], PRIVATE_NETWORK).encode('ascii', 'ignore')
    return {'service_list': [test_file_name],
            'smoke': args.smoke,
            'image_name': GLANCE_IMAGE_NAME,
            'flavor_name': FLAVOR_NAME,
            'glance_image_location': GLANCE_IMAGE_PATH,
            'floating_network': FLOATING_NETWORK,
            'netid': net_id,
            'tmpl_dir': TEMPLATE_DIR,
            'sup_dir': SUPPORT_DIR,
            'users_amount': USERS_AMOUNT,
            'tenants_amount': TENANTS_AMOUNT,
            'iterations': ITERATIONS_AMOUNT,
            'concurrency': CONCURRENCY}
194
195
def get_output(proc):
    # Read the rally subprocess stdout line by line until the process
    # exits. In verbose mode every line is echoed and kept; otherwise only
    # the summary-relevant lines (durations, progress markers and the
    # ASCII result tables) are collected and logged once at the end.
    # NOTE(review): output still buffered after the process exits may be
    # dropped, since reading stops as soon as poll() is not None — confirm
    # whether trailing lines matter for task-id extraction.
    result = ""
    if args.verbose:
        while proc.poll() is None:
            line = proc.stdout.readline()
            print line.replace('\n', '')
            result += line
    else:
        while proc.poll() is None:
            line = proc.stdout.readline()
            # Keep rally's timing lines, start/finish markers and tables.
            if "Load duration" in line or \
               "started" in line or \
               "finished" in line or \
               " Preparing" in line or \
               "+-" in line or \
               "|" in line:
                result += line
            elif "test scenario" in line:
                result += "\n" + line
            elif "Full duration" in line:
                result += line + "\n\n"
        logger.info("\n" + result)

    return result
220
221
def run_task(test_name):
    """
    Launch rally for one scenario and store the html and json reports.

    :param test_name: name of the rally test scenario (e.g. 'nova')
    :return: void (exits the process on fatal errors)
    """
    logger.info('Starting test scenario "{}" ...'.format(test_name))

    task_file = '{}task.yaml'.format(SCENARIOS_DIR)
    if not os.path.exists(task_file):
        logger.error("Task file '%s' does not exist." % task_file)
        exit(-1)

    test_file_name = '{}opnfv-{}.yaml'.format(SCENARIOS_DIR + "scenario/",
                                              test_name)
    if not os.path.exists(test_file_name):
        logger.error("The scenario '%s' does not exist." % test_file_name)
        exit(-1)

    logger.debug('Scenario fetched from : {}'.format(test_file_name))

    cmd_line = "rally task start --abort-on-sla-failure " + \
               "--task {} ".format(task_file) + \
               "--task-args \"{}\" ".format(build_task_args(test_name))
    logger.debug('running command line : {}'.format(cmd_line))

    p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                         stderr=RALLY_STDERR, shell=True)
    output = get_output(p)
    task_id = get_task_id(output)
    logger.debug('task_id : {}'.format(task_id))

    if task_id is None:
        logger.error("failed to retrieve task_id")
        exit(-1)

    # check for result directory and create it otherwise
    if not os.path.exists(RESULTS_DIR):
        # Fixed: the original message had no '{}' placeholder, so the
        # directory path was silently dropped from the log line.
        logger.debug('{} does not exist, we create it'.format(RESULTS_DIR))
        os.makedirs(RESULTS_DIR)

    # write html report file
    report_file_name = '{}opnfv-{}.html'.format(RESULTS_DIR, test_name)
    cmd_line = "rally task report {} --out {}".format(task_id,
                                                      report_file_name)
    logger.debug('running command line : {}'.format(cmd_line))
    os.popen(cmd_line)

    # get and save rally operation JSON result
    cmd_line = "rally task results %s" % task_id
    logger.debug('running command line : {}'.format(cmd_line))
    cmd = os.popen(cmd_line)
    json_results = cmd.read()
    json_file_name = '{}opnfv-{}.json'.format(RESULTS_DIR, test_name)
    with open(json_file_name, 'w') as f:
        logger.debug('saving json file')
        f.write(json_results)

    with open(json_file_name) as json_file:
        json_data = json.load(json_file)

    # Push results in payload of testcase
    if args.report:
        logger.debug("Push result into DB")
        push_results_to_db(json_data)

    # parse JSON operation result
    if task_succeed(json_results):
        logger.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
    else:
        logger.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")
293
294
def main():
    # Validate the requested scenario name before doing any OpenStack work.
    if not (args.test_name in tests):
        logger.error('argument not valid')
        exit(-1)

    # Build one authenticated client per OpenStack service involved.
    creds_nova = functest_utils.get_credentials("nova")
    nova_client = novaclient.Client('2',**creds_nova)
    creds_neutron = functest_utils.get_credentials("neutron")
    neutron_client = neutronclient.Client(**creds_neutron)
    creds_keystone = functest_utils.get_credentials("keystone")
    keystone_client = keystoneclient.Client(**creds_keystone)
    glance_endpoint = keystone_client.service_catalog.url_for(service_type='image',
                                                   endpoint_type='publicURL')
    glance_client = glanceclient.Client(1, glance_endpoint,
                                        token=keystone_client.auth_token)
    creds_cinder = functest_utils.get_credentials("cinder")
    cinder_client = cinderclient.Client('2',creds_cinder['username'],
                                        creds_cinder['api_key'],
                                        creds_cinder['project_id'],
                                        creds_cinder['auth_url'],
                                        service_type="volume")

    # build_task_args() later reads the neutron client from this dict.
    client_dict['neutron'] = neutron_client

    # Ensure a volume type exists for the cinder scenarios; 'volume_types'
    # also remembers whether one pre-existed so cleanup below only removes
    # what this run created.
    volume_types = functest_utils.list_volume_types(cinder_client, private=False)
    if not volume_types:
        volume_type = functest_utils.create_volume_type(cinder_client, \
                                                        CINDER_VOLUME_TYPE_NAME)
        if not volume_type:
            logger.error("Failed to create volume type...")
            exit(-1)
        else:
            logger.debug("Volume type '%s' created succesfully..." \
                         % CINDER_VOLUME_TYPE_NAME)
    else:
        logger.debug("Using existing volume type(s)...")

    # Ensure the glance image used by the scenarios exists, uploading it
    # from the local functest data directory when missing.
    image_id = functest_utils.get_image_id(glance_client, GLANCE_IMAGE_NAME)

    if image_id == '':
        logger.debug("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME, \
                                                           GLANCE_IMAGE_PATH))
        image_id = functest_utils.create_glance_image(glance_client,\
                                                GLANCE_IMAGE_NAME,GLANCE_IMAGE_PATH)
        if not image_id:
            logger.error("Failed to create the Glance image...")
            exit(-1)
        else:
            logger.debug("Image '%s' with ID '%s' created succesfully ." \
                         % (GLANCE_IMAGE_NAME, image_id))
    else:
        logger.debug("Using existing image '%s' with ID '%s'..." \
                     % (GLANCE_IMAGE_NAME,image_id))

    # Run either every suite (except the meta entries 'all' and 'vm') or
    # the single suite requested on the command line.
    if args.test_name == "all":
        for test_name in tests:
            if not (test_name == 'all' or
                    test_name == 'vm'):
                run_task(test_name)
    else:
        print(args.test_name)
        run_task(args.test_name)

    # Cleanup the image created/used by the run.
    # NOTE(review): the image is deleted through the *nova* client —
    # confirm functest_utils.delete_glance_image expects a nova client.
    logger.debug("Deleting image '%s' with ID '%s'..." \
                         % (GLANCE_IMAGE_NAME, image_id))
    if not functest_utils.delete_glance_image(nova_client, image_id):
        logger.error("Error deleting the glance image")

    # Only delete the volume type when this run created it.
    if not volume_types:
        logger.debug("Deleting volume type '%s'..." \
                             % CINDER_VOLUME_TYPE_NAME)
        if not functest_utils.delete_volume_type(cinder_client, volume_type):
            logger.error("Error in deleting volume type...")
369
370
# Script entry point (arguments were already parsed at import time).
if __name__ == '__main__':
    main()