1 # -*- coding: utf-8 -*-
3 # Licensed under the Apache License, Version 2.0 (the "License"); you may
4 # not use this file except in compliance with the License. You may obtain
5 # a copy of the License at
7 # http://www.apache.org/licenses/LICENSE-2.0
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12 # License for the specific language governing permissions and limitations
15 """Executing test of plugins"""
18 from keystoneclient.v3 import client
26 from opnfv.deployment import factory
# Service names as registered in the Keystone service catalog.
GNOCCHI_NAME = 'gnocchi'
AODH_NAME = 'aodh'  # restored: referenced by AodhClient._auth_server()

# SSH key handling: the installer's root key is copied to the local opnfv
# user so the suite can reach the overcloud nodes (see get_ssh_keys()).
ID_RSA_SRC = '/root/.ssh/id_rsa'
ID_RSA_DST_DIR = '/home/opnfv/.ssh'
ID_RSA_DST = ID_RSA_DST_DIR + '/id_rsa'
# NOTE(review): check_output returns the command output including the
# trailing newline (and bytes on Python 3) — confirm downstream consumers
# (factory.Factory.get_handler, ConfigServer) tolerate that.
APEX_IP = subprocess.check_output("echo $INSTALLER_IP", shell=True)
APEX_USER = 'root'  # restored: referenced by mcelog_uninstall()/get_ssh_keys()
APEX_USER_STACK = 'stack'
APEX_PKEY = '/root/.ssh/id_rsa'
class KeystoneException(Exception):
    """Keystone exception class.

    Keyword arguments:
    message -- error message
    exc -- original exception that caused this one (optional)
    response -- received HTTP response, if any (optional)
    """
    def __init__(self, message, exc=None, response=None):
        if exc:
            # Append the root cause so a single log line tells the story.
            message += "\nReason: %s" % exc
        super(KeystoneException, self).__init__(message)
        # Keep originals around for callers that need details.
        self.exception = exc
        self.response = response
class InvalidResponse(KeystoneException):
    """Exception raised when a service returns an unusable response.

    Keyword arguments:
    exc -- original exception (e.g. HTTPError or JSON decode error)
    response -- the offending response object
    """
    def __init__(self, exc, response):
        super(InvalidResponse, self).__init__("Invalid response", exc, response)
class GnocchiClient(object):
    # Gnocchi Client to authenticate and request meters
    def __init__(self):
        # Populated lazily by _auth_server() / _request_meters().
        self._auth_token = None
        self._gnocchi_url = None
        self._meter_list = None

    def auth_token(self):
        """Authenticate against Keystone and return the auth token."""
        self._auth_server()
        return self._auth_token

    def get_gnocchi_url(self):
        """Return the internal Gnocchi endpoint URL (None until authenticated)."""
        return self._gnocchi_url

    def get_gnocchi_metrics(self, criteria=None):
        # Subject to change if metric gathering is different for gnocchi
        self._request_meters(criteria)
        return self._meter_list
89 def _auth_server(self):
90 # Request token in authentication server
91 logger.debug('Connecting to the auth server {}'.format(
92 os.environ['OS_AUTH_URL']))
93 keystone = client.Client(username=os.environ['OS_USERNAME'],
94 password=os.environ['OS_PASSWORD'],
95 tenant_name=os.environ['OS_USERNAME'],
96 auth_url=os.environ['OS_AUTH_URL'])
97 self._auth_token = keystone.auth_token
98 for service in keystone.service_catalog.get_data():
99 if service['name'] == GNOCCHI_NAME:
100 for service_type in service['endpoints']:
101 if service_type['interface'] == 'internal':
102 self._gnocchi_url = service_type['url']
104 if self._gnocchi_url is None:
105 logger.warning('Gnocchi is not registered in service catalog')
107 def _request_meters(self, criteria):
108 """Request meter list values from ceilometer
111 criteria -- criteria for ceilometer meter list
114 url = self._gnocchi_url + ('/v2/metric?limit=400')
116 url = self._gnocchi_url \
117 + ('/v3/metric/%s?q.field=metric&limit=400' % criteria)
118 headers = {'X-Auth-Token': self._auth_token}
119 resp = requests.get(url, headers=headers)
121 resp.raise_for_status()
122 self._meter_list = resp.json()
123 except (KeyError, ValueError, requests.exceptions.HTTPError) as err:
124 raise InvalidResponse(err, resp)
class AodhClient(object):
    # Aodh client to authenticate and request meters
    # (comment previously said "Gnocchi Client" — copy/paste leftover)
    def __init__(self):
        # Populated lazily by _auth_server() / _request_meters().
        self._auth_token = None
        self._aodh_url = None
        self._meter_list = None

    def auth_token(self):
        """Authenticate against Keystone and return the auth token."""
        self._auth_server()
        return self._auth_token

    def get_aodh_url(self):
        """Return the internal Aodh endpoint URL (None until authenticated).

        BUG FIX: previously returned self._gnocchi_url, an attribute this
        class never initializes; return the Aodh endpoint instead.
        """
        return self._aodh_url

    def get_aodh_metrics(self, criteria=None):
        """Return the meter list, optionally filtered by criteria."""
        self._request_meters(criteria)
        return self._meter_list
148 def _auth_server(self):
149 # Request token in authentication server
150 logger.debug('Connecting to the AODH auth server {}'.format(
151 os.environ['OS_AUTH_URL']))
152 keystone = client.Client(username=os.environ['OS_USERNAME'],
153 password=os.environ['OS_PASSWORD'],
154 tenant_name=os.environ['OS_USERNAME'],
155 auth_url=os.environ['OS_AUTH_URL'])
156 self._auth_token = keystone.auth_token
157 for service in keystone.service_catalog.get_data():
158 if service['name'] == AODH_NAME:
159 for service_type in service['endpoints']:
160 if service_type['interface'] == 'internal':
161 self._gnocchi_url = service_type['url']
163 if self._aodh_url is None:
164 logger.warning('Aodh is not registered in service catalog')
class SNMPClient(object):
    """Client to request SNMP meters"""
    def __init__(self, conf, compute_node):
        """
        Keyword arguments:
        conf -- ConfigServer instance
        compute_node -- Compute node object
        """
        self.conf = conf
        self.compute_node = compute_node

    def get_snmp_metrics(self, compute_node, mib_file, mib_strings):
        """Get SNMP metrics from the given compute node.

        Keyword arguments:
        compute_node -- compute node to query
        mib_file -- MIB file to use, or None for a generic IF-MIB walk
        mib_strings -- MIB object names to walk when mib_file is given

        Return the snmpwalk output; a dict keyed by MIB string when a
        mib_file is provided.
        """
        snmp_output = {}
        if mib_file is None:
            cmd = "snmpwalk -v 2c -c public localhost IF-MIB::interfaces"
            ip = compute_node.get_ip()
            snmp_output = self.conf.execute_command(cmd, ip)
        else:
            for mib_string in mib_strings:
                snmp_output[mib_string] = self.conf.execute_command(
                    "snmpwalk -v2c -m {} -c public localhost {}".format(
                        mib_file, mib_string), compute_node.get_ip())
        return snmp_output
class CSVClient(object):
    """Client to request CSV meters"""
    def __init__(self, conf):
        """
        Keyword arguments:
        conf -- ConfigServer instance
        """
        self.conf = conf

    def get_csv_metrics(
            self, compute_node, plugin_subdirectories, meter_categories):
        """Get CSV metrics of plugin categories from the compute node.

        Keyword arguments:
        compute_node -- compute node instance
        plugin_subdirectories -- list of subdirectories of plug-in
        meter_categories -- categories which will be tested

        Return list of (subdir, category, old_value, new_value) tuples.
        """
        stdout = self.conf.execute_command(
            "date '+%Y-%m-%d'", compute_node.get_ip())
        date = stdout[0].strip()
        metrics = []
        for plugin_subdir in plugin_subdirectories:
            for meter_category in meter_categories:
                # collectd CSV files are named <category>-YYYY-MM-DD under
                # the node's hostname directory; read today's last 2 rows.
                stdout = self.conf.execute_command(
                    "tail -2 /var/lib/collectd/csv/"
                    + "{0}.jf.intel.com/{1}/{2}-{3}".format(
                        compute_node.get_name(), plugin_subdir,
                        meter_category, date),
                    compute_node.get_ip())
                # Storing last two values
                values = stdout
                if len(values) < 2:
                    logger.error(
                        'Getting last two CSV entries of meter category '
                        + '{0} in {1} subdir failed'.format(
                            meter_category, plugin_subdir))
                else:
                    # Each row starts with an epoch timestamp; keep the
                    # integer part before the decimal point.
                    old_value = int(values[0][0:values[0].index('.')])
                    new_value = int(values[1][0:values[1].index('.')])
                    metrics.append((
                        plugin_subdir, meter_category, old_value, new_value))
        return metrics
def get_csv_categories_for_ipmi(conf, compute_node):
    """Return CSV meter categories produced by the ipmi plug-in today.

    Keyword arguments:
    conf -- ConfigServer instance
    compute_node -- compute node instance

    Return list of categories.
    """
    stdout = conf.execute_command(
        "date '+%Y-%m-%d'", compute_node.get_ip())
    date = stdout[0].strip()
    categories = conf.execute_command(
        "ls /var/lib/collectd/csv/{0}.jf.intel.com/ipmi | grep {1}".format(
            compute_node.get_name(), date), compute_node.get_ip())
    # Strip the trailing '-YYYY-MM-DD' file-name suffix; derive its length
    # from the actual date string instead of the magic constant 11.
    return [category.strip()[:-(len(date) + 1)] for category in categories]
def _process_result(compute_node, test, result, results_list):
    """Print test result and append it to results list.

    Keyword arguments:
    compute_node -- compute node ID
    test -- testcase name
    result -- boolean test result
    results_list -- results list
    """
    if result:
        logger.info(
            'Compute node {0} test case {1} PASSED.'.format(
                compute_node, test))
    else:
        logger.error(
            'Compute node {0} test case {1} FAILED.'.format(
                compute_node, test))
    results_list.append((compute_node, test, result))
def _print_label(label):
    """Print label on the screen

    Renders a 70-character '=' banner with the label centered (at least
    three '=' on each side).

    Keyword arguments:
    label -- label string
    """
    label = label.strip()
    length = 70
    if label != '':
        label = ' ' + label + ' '
    length_label = len(label)
    # Floor division: identical result on Python 2 ints and avoids
    # "'=' * float" TypeError under Python 3.
    length1 = (length - length_label) // 2
    length2 = length - length_label - length1
    length1 = max(3, length1)
    length2 = max(3, length2)
    logger.info(('=' * length1) + label + ('=' * length2))
def _print_plugin_label(plugin, node_name):
    """Print a banner announcing the plug-in test on the given node.

    Keyword arguments:
    plugin -- plug-in name
    node_name -- node name
    """
    banner = 'Node {0}: Plug-in {1} Test case execution'.format(
        node_name, plugin)
    _print_label(banner)
304 def _print_final_result_of_plugin(
305 plugin, compute_ids, results, out_plugins, out_plugin):
306 """Print final results of plug-in.
309 plugin -- plug-in name
310 compute_ids -- list of compute node IDs
311 results -- results list
312 out_plugins -- list of out plug-ins
313 out_plugin -- used out plug-in
316 for id in compute_ids:
317 if out_plugins[id] == out_plugin:
318 if (id, plugin, True) in results:
319 print_line += ' PASS |'
320 elif (id, plugin, False) in results \
321 and out_plugins[id] == out_plugin:
322 print_line += ' FAIL |'
324 print_line += ' NOT EX |'
325 elif out_plugin == 'Gnocchi':
326 print_line += ' NOT EX |'
328 print_line += ' NOT EX |'
# Builds and logs an ASCII summary table: one column per compute node, one
# 'OUT:' row per out plug-in and one 'IN:' row per tested plug-in.
# NOTE(review): this block is garbled in the source — several lines are
# missing (docstring close, the 'logger.info(' openers before the lines
# numbered 351/357/393/399/402, header text around 352). Code is left
# byte-identical; comments only.
332 def print_overall_summary(compute_ids, tested_plugins, results, out_plugins):
333 """Print overall summary table.
# Keyword arguments (docstring body; header lines missing from source):
336 compute_ids -- list of compute IDs
337 tested_plugins -- list of plug-ins
338 results -- results list
339 out_plugins -- list of used out plug-ins
# Column headers are positional Node-<i> labels, not actual node IDs —
# the commented-out alternative below used the real IDs.
341 compute_node_names = ['Node-{}'.format(i) for i in range(
343 # compute_node_names = ['Node-{}'.format(id) for id in compute_ids]
344 all_computes_in_line = ''
345 for compute in compute_node_names:
346 all_computes_in_line += '| ' + compute + (' ' * (7 - len(compute)))
347 line_of_nodes = '| Test ' + all_computes_in_line + '|'
348 logger.info('=' * 70)
349 logger.info('+' + ('-' * ((9 * len(compute_node_names))+16)) + '+')
# NOTE(review): '/' on ints is Python-2 floor division here; under
# Python 3 it yields a float and "' ' * float" raises TypeError.
351 '|' + ' ' * ((9*len(compute_node_names))/2)
354 9*len(compute_node_names) - (9*len(compute_node_names))/2)
357 '+' + ('-' * 16) + '+' + (('-' * 8) + '+') * len(compute_node_names))
358 logger.info(line_of_nodes)
360 '+' + ('-' * 16) + '+' + (('-' * 8) + '+') * len(compute_node_names))
# Gnocchi is always reported; other out plug-ins only if some node used them.
361 out_plugins_print = ['Gnocchi']
362 if 'SNMP' in out_plugins.values():
363 out_plugins_print.append('SNMP')
364 if 'AODH' in out_plugins.values():
365 out_plugins_print.append('AODH')
366 if 'CSV' in out_plugins.values():
367 out_plugins_print.append('CSV')
368 for out_plugin in out_plugins_print:
369 output_plugins_line = ''
# NOTE(review): loop variable 'id' shadows the builtin.
370 for id in compute_ids:
371 out_plugin_result = 'FAIL'
372 if out_plugin == 'Gnocchi':
373 out_plugin_result = \
374 'PASS' if out_plugins[id] == out_plugin else 'FAIL'
# NOTE(review): inside "if out_plugins[id] == out_plugin" the ternary
# condition below is always true — 'FAIL' branches are dead code.
375 if out_plugin == 'AODH':
376 if out_plugins[id] == out_plugin:
377 out_plugin_result = \
378 'PASS' if out_plugins[id] == out_plugin else 'FAIL'
379 if out_plugin == 'SNMP':
380 if out_plugins[id] == out_plugin:
381 out_plugin_result = \
382 'PASS' if out_plugins[id] == out_plugin else 'FAIL'
383 if out_plugin == 'CSV':
384 if out_plugins[id] == out_plugin:
385 out_plugin_result = \
387 plugin for comp_id, plugin, res in results
388 if comp_id == id and res] else 'FAIL'
390 out_plugin_result = 'SKIP'
391 output_plugins_line += '| ' + out_plugin_result + ' '
393 '| OUT:{}'.format(out_plugin) + (' ' * (11 - len(out_plugin)))
394 + output_plugins_line + '|')
# One 'IN:' row per plug-in, cells built by _print_final_result_of_plugin.
395 for plugin in sorted(tested_plugins.values()):
396 line_plugin = _print_final_result_of_plugin(
397 plugin, compute_ids, results, out_plugins, out_plugin)
399 '| IN:{}'.format(plugin) + (' ' * (11-len(plugin)))
402 '+' + ('-' * 16) + '+'
403 + (('-' * 8) + '+') * len(compute_node_names))
404 logger.info('=' * 70)
# Body of _exec_testcase (its 'def' line is missing from this garbled
# source): runs one plug-in's testcase on one compute node, dispatching to
# Gnocchi / AODH / SNMP / CSV verification depending on out_plugins.
# Code left byte-identical; comments only.
408 test_labels, name, gnocchi_running, aodh_running, snmp_running,
409 controllers, compute_node, conf, results, error_plugins, out_plugins):
410 """Execute the testcase.
413 test_labels -- dictionary of plug-in IDs and their display names
414 name -- plug-in ID, key of test_labels dictionary
# NOTE(review): docstring is stale — it documents 'ceilometer_running',
# which is not in the parameter list (gnocchi/aodh/snmp flags are).
415 ceilometer_running -- boolean indicating whether Ceilometer is running
416 compute_node -- compute node ID
417 conf -- ConfigServer instance
418 results -- results list
419 error_plugins -- list of tuples with plug-in errors
420 (plugin, error_description, is_critical):
421 plugin -- plug-in ID, key of test_labels dictionary
422 error_decription -- description of the error
423 is_critical -- boolean value indicating whether error is critical
# Discover which of the configured OVS interfaces/bridges actually exist.
425 ovs_interfaces = conf.get_ovs_interfaces(compute_node)
426 ovs_configured_interfaces = conf.get_plugin_config_values(
427 compute_node, 'ovs_events', 'Interfaces')
428 ovs_configured_bridges = conf.get_plugin_config_values(
429 compute_node, 'ovs_stats', 'Bridges')
430 ovs_existing_configured_int = [
431 interface for interface in ovs_interfaces
432 if interface in ovs_configured_interfaces]
433 ovs_existing_configured_bridges = [
434 bridge for bridge in ovs_interfaces
435 if bridge in ovs_configured_bridges]
# Per-plug-in prerequisite checks: (passed_bool, description) tuples.
436 plugin_prerequisites = {
438 conf.is_libpqos_on_node(compute_node),
439 'libpqos must be installed.')],
441 conf.is_installed(compute_node, 'mcelog'),
442 'mcelog must be installed.')],
444 len(ovs_existing_configured_int) > 0 or len(ovs_interfaces) > 0,
445 'Interfaces must be configured.')],
447 len(ovs_existing_configured_bridges) > 0,
448 'Bridges must be configured.')]}
# Metric-name criteria used when verifying through Gnocchi / Aodh.
449 gnocchi_criteria_lists = {
450 'hugepages': ['hugepages'],
451 'mcelog': ['mcelog'],
452 'ovs_events': ['interface-ovs-system'],
453 'ovs_stats': ['ovs_stats-br0.br0']}
454 aodh_criteria_lists = {
455 'mcelog': ['mcelog.errors'],
456 'ovs_events': ['ovs_events.gauge']}
# SNMP: MIB definition files and the OIDs to walk per plug-in.
458 'intel_rdt': '/usr/share/snmp/mibs/Intel-Rdt.txt',
459 'hugepages': '/usr/share/snmp/mibs/Intel-Hugepages.txt',
460 'mcelog': '/usr/share/snmp/mibs/Intel-Mcelog.txt'}
463 'INTEL-RDT-MIB::rdtLlc.1',
464 'INTEL-RDT-MIB::rdtIpc.1',
465 'INTEL-RDT-MIB::rdtMbmRemote.1',
466 'INTEL-RDT-MIB::rdtMbmLocal.1'],
468 'INTEL-HUGEPAGES-MIB::hugepagesPageFree'],
470 'INTEL-MCELOG-MIB::memoryCorrectedErrors.1',
471 'INTEL-MCELOG-MIB::memoryCorrectedErrors.2']}
# Pseudo-random hugepage count so each run observably changes the metric.
472 nr_hugepages = int(time.time()) % 10000
# Commands executed on the node to provoke metric changes.
475 'hugepages': 'echo {} > /sys/kernel/'.format(nr_hugepages)
476 + 'mm/hugepages/hugepages-2048kB/nr_hugepages',
477 'mcelog': '/root/mce-inject_df < /root/corrected'}
# CSV: expected collectd subdirectories per plug-in.
480 'intel_rdt-{}'.format(core)
481 for core in conf.get_plugin_config_values(
482 compute_node, 'intel_rdt', 'Cores')],
484 'hugepages-mm-2048Kb', 'hugepages-node0-2048Kb',
485 'hugepages-node1-2048Kb', 'hugepages-mm-1048576Kb',
486 'hugepages-node0-1048576Kb', 'hugepages-node1-1048576Kb'],
489 'mcelog-SOCKET_0_CHANNEL_0_DIMM_any',
490 'mcelog-SOCKET_0_CHANNEL_any_DIMM_any'],
492 'ovs_stats-{0}.{0}'.format(interface)
493 for interface in ovs_existing_configured_bridges],
495 'ovs_events-{}'.format(interface)
497 ovs_existing_configured_int
498 if len(ovs_existing_configured_int) > 0 else ovs_interfaces)]}
499 csv_meter_categories_ipmi = get_csv_categories_for_ipmi(conf, compute_node)
# CSV: expected meter categories per plug-in.
500 csv_meter_categories = {
502 'bytes-llc', 'ipc', 'memory_bandwidth-local',
503 'memory_bandwidth-remote'],
504 'hugepages': ['vmpage_number-free', 'vmpage_number-used'],
505 'ipmi': csv_meter_categories_ipmi,
507 'errors-corrected_memory_errors',
508 'errors-uncorrected_memory_errors',
509 'errors-corrected_memory_errors_in_24h',
510 'errors-uncorrected_memory_errors_in_24h'],
512 'if_collisions', 'if_dropped', 'if_errors', 'if_packets',
513 'if_rx_errors-crc', 'if_rx_errors-frame', 'if_rx_errors-over',
514 'if_rx_octets', 'if_tx_octets'],
515 'ovs_events': ['gauge-link_status']}
518 test_labels[name] if name in test_labels else name,
519 compute_node.get_name())
# Critical errors abort the testcase and record a FAIL.
520 plugin_critical_errors = [
521 error for plugin, error, critical in error_plugins
522 if plugin == name and critical]
523 if plugin_critical_errors:
# NOTE(review): '.format(name)' is a no-op — the string has no placeholder.
524 logger.error('Following critical errors occurred:'.format(name))
525 for error in plugin_critical_errors:
526 logger.error(' * ' + error)
528 compute_node.get_id(), test_labels[name], False, results)
# Non-critical errors are logged; test still runs but reports failure.
531 error for plugin, error, critical in error_plugins
532 if plugin == name and not critical]
534 logger.warning('Following non-critical errors occured:')
535 for error in plugin_errors:
536 logger.warning(' * ' + error)
537 failed_prerequisites = []
538 if name in plugin_prerequisites:
539 failed_prerequisites = [
540 prerequisite_name for prerequisite_passed,
541 prerequisite_name in plugin_prerequisites[name]
542 if not prerequisite_passed]
543 if failed_prerequisites:
545 '{} test will not be executed, '.format(name)
546 + 'following prerequisites failed:')
547 for prerequisite in failed_prerequisites:
548 logger.error(' * {}'.format(prerequisite))
# Dispatch on the out plug-in chosen for this node.
551 plugin_interval = conf.get_plugin_interval(compute_node, name)
552 res = conf.test_plugins_with_gnocchi(
553 compute_node.get_id(), plugin_interval, logger,
554 criteria_list=gnocchi_criteria_lists[name])
556 res = conf.test_plugins_with_aodh(
557 compute_node.get_id(), plugin_interval,
# NOTE(review): 'creteria_list' is misspelled — confirm against the
# test_plugins_with_aodh signature in config_server before renaming.
558 logger, creteria_list=aodh_criteria_lists[name])
561 name in snmp_mib_files and name in snmp_mib_strings \
562 and tests.test_snmp_sends_data(
564 conf.get_plugin_interval(compute_node, name), logger,
565 SNMPClient(conf, compute_node), snmp_mib_files[name],
566 snmp_mib_strings[name], snmp_in_commands[name], conf)
568 res = tests.test_csv_handles_plugin_data(
569 compute_node, conf.get_plugin_interval(compute_node, name),
570 name, csv_subdirs[name], csv_meter_categories[name],
571 logger, CSVClient(conf))
# Non-critical errors downgrade a passing result to failure.
572 if res and plugin_errors:
574 'Test works, but will be reported as failure,'
575 + 'because of non-critical errors.')
578 compute_node.get_id(), test_labels[name], res, results)
def get_results_for_ovs_events(
        plugin_labels, plugin_name, gnocchi_running,
        compute_node, conf, results, error_plugins):
    """Testing OVS Events with python plugin

    Enables the ovs_events plug-in on the compute node and records the
    outcome in the shared results list via _process_result().
    """
    plugin_label = 'OVS events'
    res = conf.enable_ovs_events(
        compute_node, plugin_label, error_plugins, create_backup=False)
    _process_result(
        compute_node.get_id(), plugin_label, res, results)
    logger.info("Results for OVS Events = {}" .format(results))
def mcelog_install():
    """Install mcelog on compute nodes."""
    _print_label('Enabling mcelog on compute nodes')
    handler = factory.Factory.get_handler('apex',
                                          APEX_IP,
                                          APEX_USER_STACK,
                                          APEX_PKEY)
    nodes = handler.get_nodes()
    for node in nodes:
        if node.is_compute():
            centos_release = node.run_cmd('uname -r')
            # mce-inject only works on this specific CentOS kernel build.
            if '3.10.0-514.26.2.el7.x86_64' not in centos_release:
                logger.info(
                    'Mcelog will not be enabled '
                    + 'on node-{0}, '.format(node.get_dict()['id'])
                    # BUG FIX: was '({1})' with a single format argument,
                    # which raises IndexError when this branch runs.
                    + 'unsupported CentOS release found ({0}).'.format(
                        centos_release))
            else:
                logger.info(
                    'Checking if mcelog is enabled'
                    + ' on node-{}...'.format(node.get_dict()['id']))
                res = node.run_cmd('ls')
                # BUG FIX: was "'mce-inject_ea' and 'corrected' in res" —
                # the first literal is always truthy, so only 'corrected'
                # was actually checked.
                if 'mce-inject_ea' in res and 'corrected' in res:
                    logger.info(
                        'Mcelog seems to be already installed '
                        + 'on node-{}.'.format(node.get_dict()['id']))
                    node.run_cmd('modprobe mce-inject_ea')
                    node.run_cmd('mce-inject_ea < corrected')
                else:
                    logger.info(
                        'Mcelog will be enabled on node-{}...'.format(
                            node.get_dict()['id']))
                    # Ship the injector binary and a sample corrected-error
                    # record, then inject it once to prime the plug-in.
                    node.put_file(
                        '/usr/local/lib/python2.7/dist-packages/baro_tests/'
                        + 'mce-inject_ea', 'mce-inject_ea')
                    node.run_cmd('chmod a+x mce-inject_ea')
                    node.run_cmd('echo "CPU 0 BANK 0" > corrected')
                    node.run_cmd(
                        'echo "STATUS 0xcc00008000010090" >>'
                        + ' corrected')
                    node.run_cmd(
                        'echo "ADDR 0x0010FFFFFFF" >> corrected')
                    node.run_cmd('modprobe mce-inject')
                    node.run_cmd('mce-inject_ea < corrected')
    logger.info('Mcelog is installed on all compute nodes')
def mcelog_uninstall():
    """Uninstall mcelog from compute nodes."""
    # NOTE(review): APEX_USER (root) is used here, while mcelog_install()
    # connects as the stack user — confirm both are intended.
    handler = factory.Factory.get_handler(
        'apex', APEX_IP, APEX_USER, APEX_PKEY)
    nodes = handler.get_nodes()
    for node in nodes:
        if node.is_compute():
            output = node.run_cmd('ls')
            # Remove the injector artifacts dropped by mcelog_install().
            if 'mce-inject_ea' in output:
                node.run_cmd('rm mce-inject_ea')
            if 'corrected' in output:
                node.run_cmd('rm corrected')
            node.run_cmd('systemctl restart mcelog')
    logger.info('Mcelog is deleted from all compute nodes')
def get_ssh_keys():
    """Ensure the installer's RSA key is available locally.

    Fetches ID_RSA_SRC from the installer node into ID_RSA_DST unless it
    is already present.
    """
    if not os.path.isdir(ID_RSA_DST_DIR):
        os.makedirs(ID_RSA_DST_DIR)
    if not os.path.isfile(ID_RSA_DST):
        logger.info(
            "RSA key file {} doesn't exist".format(ID_RSA_DST)
            + ", it will be downloaded from installer node.")
        handler = factory.Factory.get_handler(
            'apex', APEX_IP, APEX_USER, APEX_PKEY)
        apex = handler.get_installer_node()
        apex.get_file(ID_RSA_SRC, ID_RSA_DST)
    else:
        logger.info("RSA key file {} exists.".format(ID_RSA_DST))
def _check_logger():
    """Check whether there is global logger available and if not, define one."""
    # NOTE(review): relies on a module-level `logger` name; the assignment
    # calls logger.Logger(...) on an imported logger module (import sampled
    # out of this view) — confirm which module provides it.
    if 'logger' not in globals():
        global logger
        logger = logger.Logger("barometercollectd").getLogger()
# Entry point: discovers controller/compute nodes, determines which out
# plug-in (Gnocchi/AODH/SNMP/CSV) each compute node can use, enables and
# exercises the collectd plug-ins, then prints the summary table.
# NOTE(review): heavily garbled — many lines (else/elif openers, sleep_time
# assignment, dict keys, return statements) are missing. Code left
# byte-identical; comments only.
679 def main(bt_logger=None):
680 """Check each compute node sends gnocchi metrics.
683 bt_logger -- logger instance
# Quiet down chatty third-party loggers.
685 logging.getLogger("paramiko").setLevel(logging.WARNING)
686 logging.getLogger("stevedore").setLevel(logging.WARNING)
687 logging.getLogger("opnfv.deployment.manager").setLevel(logging.WARNING)
688 if bt_logger is None:
693 _print_label("Starting barometer tests suite")
695 conf = config_server.ConfigServer(APEX_IP, APEX_USER, logger)
696 controllers = conf.get_controllers()
697 if len(controllers) == 0:
698 logger.error('No controller nodes found!')
700 computes = conf.get_computes()
701 if len(computes) == 0:
702 logger.error('No compute nodes found!')
706 'Display of Control and Compute nodes available in the set up')
707 logger.info('controllers: {}'.format([('{0}: {1} ({2})'.format(
708 node.get_id(), node.get_name(),
709 node.get_ip())) for node in controllers]))
710 logger.info('computes: {}'.format([('{0}: {1} ({2})'.format(
711 node.get_id(), node.get_name(), node.get_ip()))
712 for node in computes]))
# Probe every controller; a service counts as running only if it is
# reachable via its client AND reported running by the controller.
715 gnocchi_running_on_con = False
716 aodh_running_on_con = False
718 _print_label('Testing Gnocchi, AODH and SNMP on controller nodes')
720 for controller in controllers:
721 gnocchi_client = GnocchiClient()
722 gnocchi_client.auth_token()
724 gnocchi_running_on_con and conf.is_gnocchi_running(controller))
725 aodh_client = AodhClient()
726 aodh_client.auth_token()
728 aodh_running_on_con and conf.is_aodh_running(controller))
730 logger.info("Gnocchi is running on controller.")
732 logger.error("Gnocchi is not running on controller.")
733 logger.info("AODH is running on controller.")
# NOTE(review): "Controller" capitalization is inconsistent with the
# sibling messages, and the Gnocchi error repeats in several branches.
735 logger.error("Gnocchi is not running on Controller")
736 logger.error("AODH is not running on controller.")
737 logger.info("SNMP is running on controller.")
739 logger.error("Gnocchi is not running on Controller")
740 logger.error("AODH is not running on controller.")
741 logger.error("SNMP is not running on controller.")
742 logger.info("CSV will be enabled on compute nodes.")
745 compute_node_names = []
# Display names for the plug-ins under test and for the out plug-ins.
748 'intel_rdt': 'Intel RDT',
749 'hugepages': 'Hugepages',
752 'ovs_stats': 'OVS stats',
753 'ovs_events': 'OVS events'}
755 'gnocchi': 'Gnocchi',
# Per-node flow: pick the out plug-in, enable test plug-ins, restart
# collectd, run testcases, restore the original config.
759 for compute_node in computes:
760 node_id = compute_node.get_id()
761 node_name = compute_node.get_name()
762 out_plugins[node_id] = 'CSV'
763 compute_ids.append(node_id)
764 compute_node_names.append(node_name)
765 plugins_to_enable = []
766 _print_label('NODE {}: Test Gnocchi Plug-in'.format(node_name))
767 logger.info('Checking if gnocchi plug-in is included in compute nodes.')
768 if not conf.check_gnocchi_plugin_included(compute_node):
769 logger.error('Gnocchi plug-in is not included.')
771 'Testcases on node {} will not be executed'.format(node_name))
773 collectd_restarted, collectd_warnings = \
774 conf.restart_collectd(compute_node)
777 'Sleeping for {} seconds after collectd restart...'.format(
779 time.sleep(sleep_time)
780 if not collectd_restarted:
781 for warning in collectd_warnings:
782 logger.warning(warning)
784 'Restart of collectd on node {} failed'.format(node_name))
786 'Testcases on node {} will not be executed'.format(
789 for warning in collectd_warnings:
790 logger.warning(warning)
793 out_plugins[node_id] = 'Gnocchi'
794 logger.info("Gnocchi is active and collecting data")
796 out_plugins[node_id] = 'AODH'
# NOTE(review): typo in log message — "withh" (runtime string; cannot be
# corrected in a comments-only pass).
797 logger.info("AODH withh be tested")
798 _print_label('Node {}: Test AODH' .format(node_name))
799 logger.info("Checking if AODH is running")
800 logger.info("AODH is running")
802 out_plugins[node_id] = 'SNMP'
803 logger.info("SNMP will be tested.")
804 _print_label('NODE {}: Test SNMP'.format(node_id))
805 logger.info("Checking if SNMP is running.")
806 logger.info("SNMP is running.")
# Fallback: none of the telemetry services run — verify via CSV files.
808 plugins_to_enable.append('csv')
809 out_plugins[node_id] = 'CSV'
810 logger.error("Gnocchi, AODH, SNMP are not running")
812 "CSV will be enabled for verification "
813 + "of test plugins.")
814 if plugins_to_enable:
816 'NODE {}: Enabling Test Plug-in '.format(node_name)
817 + 'and Test case execution')
819 if plugins_to_enable and not conf.enable_plugins(
820 compute_node, plugins_to_enable, error_plugins,
821 create_backup=False):
823 'Failed to test plugins on node {}.'.format(node_id))
825 'Testcases on node {} will not be executed'.format(
# Second collectd restart after enabling the test plug-ins.
828 if plugins_to_enable:
829 collectd_restarted, collectd_warnings = \
830 conf.restart_collectd(compute_node)
833 'Sleeping for {} seconds'.format(sleep_time)
834 + ' after collectd restart...')
835 time.sleep(sleep_time)
836 if plugins_to_enable and not collectd_restarted:
837 for warning in collectd_warnings:
838 logger.warning(warning)
840 'Restart of collectd on node {} failed'.format(
843 'Testcases on node {}'.format(node_id)
844 + ' will not be executed.')
846 if collectd_warnings:
847 for warning in collectd_warnings:
848 logger.warning(warning)
# Run every plug-in testcase on this node (sorted for stable ordering).
850 for plugin_name in sorted(plugin_labels.keys()):
852 plugin_labels, plugin_name, gnocchi_running,
853 aodh_running, snmp_running, controllers,
854 compute_node, conf, results, error_plugins,
855 out_plugins[node_id])
857 _print_label('NODE {}: Restoring config file'.format(node_name))
858 conf.restore_config(compute_node)
860 print_overall_summary(compute_ids, plugin_labels, results, out_plugins)
# Fail if any testcase failed or some were skipped entirely.
862 if ((len([res for res in results if not res[2]]) > 0)
863 or (len(results) < len(computes) * len(plugin_labels))):
864 logger.error('Some tests have failed or have not been executed')
869 if __name__ == '__main__':