1 # -*- coding: utf-8 -*-
3 # Licensed under the Apache License, Version 2.0 (the "License"); you may
4 # not use this file except in compliance with the License. You may obtain
5 # a copy of the License at
7 # http://www.apache.org/licenses/LICENSE-2.0
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12 # License for the specific language governing permissions and limitations
15 """Executing test of plugins"""
import logging
import os
import time

from keystoneclient.v3 import client

from opnfv.deployment import factory
# Names and paths used throughout the test suite.
GNOCCHI_NAME = 'gnocchi'
AODH_NAME = 'aodh'
ID_RSA_SRC = '/root/.ssh/id_rsa'
ID_RSA_DST_DIR = '/root/.ssh'
ID_RSA_DST = ID_RSA_DST_DIR + '/id_rsa'
# Read the installer IP straight from the environment. The previous
# subprocess.check_output("echo $INSTALLER_IP", shell=True) spawned a shell
# for nothing and returned bytes with a trailing newline under Python 3.
APEX_IP = os.environ.get('INSTALLER_IP', '').strip()
APEX_USER = 'root'
APEX_USER_STACK = 'stack'
APEX_PKEY = '/root/.ssh/id_rsa'
class KeystoneException(Exception):
    """Keystone exception class"""
    def __init__(self, message, exc=None, response=None):
        """Initialize the exception.

        Keyword arguments:
        message -- error message
        exc -- original exception, appended to the message when given
        response -- offending HTTP response, kept for later inspection
        """
        # Only append a reason when one was actually supplied; the original
        # appended "Reason: None" unconditionally.
        if exc:
            message += "\nReason: %s" % exc
        super(KeystoneException, self).__init__(message)
        self.message = message
        self.response = response
class InvalidResponse(KeystoneException):
    """Raised when a telemetry endpoint returns an unusable response."""

    def __init__(self, exc, response):
        """Wrap the original error and response under a fixed message.

        Keyword arguments:
        exc -- original exception
        response -- offending HTTP response
        """
        super(InvalidResponse, self).__init__("Invalid response", exc, response)
def get_apex_nodes():
    """Return the list of nodes known to the Apex installer handler.

    The def line, handler arguments and return statement were truncated in
    the source; reconstructed from the call sites (e.g. CSVClient) and the
    other factory.Factory.get_handler('apex', ...) calls in this file.
    """
    handler = factory.Factory.get_handler('apex',
                                          APEX_IP,
                                          APEX_USER_STACK,
                                          APEX_PKEY)
    nodes = handler.get_nodes()
    return nodes
class GnocchiClient(object):
    # Gnocchi Client to authenticate and request meters
    def __init__(self):
        self._auth_token = None
        self._gnocchi_url = None
        self._meter_list = None

    def auth_token(self):
        """Authenticate against keystone and return the auth token."""
        self._auth_server()
        return self._auth_token

    def get_gnocchi_url(self):
        """Return the Gnocchi internal endpoint URL (None before auth)."""
        return self._gnocchi_url

    def get_gnocchi_metrics(self, criteria=None):
        """Request and return the Gnocchi meter list.

        Keyword arguments:
        criteria -- criteria for the meter list query (optional)
        """
        # Subject to change if metric gathering is different for gnocchi
        self._request_meters(criteria)
        return self._meter_list

    def _auth_server(self):
        # Request token in authentication server
        logger.debug('Connecting to the auth server {}'.format(
            os.environ['OS_AUTH_URL']))
        keystone = client.Client(username=os.environ['OS_USERNAME'],
                                 password=os.environ['OS_PASSWORD'],
                                 tenant_name=os.environ['OS_USERNAME'],
                                 auth_url=os.environ['OS_AUTH_URL'])
        self._auth_token = keystone.auth_token
        # Pick the internal Gnocchi endpoint out of the service catalog.
        for service in keystone.service_catalog.get_data():
            if service['name'] == GNOCCHI_NAME:
                for service_type in service['endpoints']:
                    if service_type['interface'] == 'internal':
                        self._gnocchi_url = service_type['url']

        if self._gnocchi_url is None:
            logger.warning('Gnocchi is not registered in service catalog')

    def _request_meters(self, criteria):
        """Request meter list values from ceilometer

        Keyword arguments:
        criteria -- criteria for ceilometer meter list
        """
        if criteria is None:
            url = self._gnocchi_url + ('/v2/metric?limit=400')
        else:
            url = self._gnocchi_url \
                + ('/v3/metric/%s?q.field=metric&limit=400' % criteria)
        headers = {'X-Auth-Token': self._auth_token}
        resp = requests.get(url, headers=headers)
        try:
            resp.raise_for_status()
            self._meter_list = resp.json()
        except (KeyError, ValueError, requests.exceptions.HTTPError) as err:
            raise InvalidResponse(err, resp)
class AodhClient(object):
    # Aodh client to authenticate and request alarm meters
    def __init__(self):
        self._auth_token = None
        self._aodh_url = None
        self._meter_list = None

    def auth_token(self):
        """Authenticate against keystone and return the auth token."""
        self._auth_server()
        return self._auth_token

    def get_aodh_url(self):
        """Return the Aodh internal endpoint URL (None before auth)."""
        # Bug fix: this returned self._gnocchi_url (a copy-paste leftover
        # from GnocchiClient), which this class never meant to own.
        return self._aodh_url

    def get_aodh_metrics(self, criteria=None):
        """Request and return the Aodh meter list.

        Keyword arguments:
        criteria -- criteria for the meter list query (optional)
        """
        # NOTE(review): _request_meters is not defined on this class in the
        # visible source — confirm it is inherited/added elsewhere.
        self._request_meters(criteria)
        return self._meter_list

    def _auth_server(self):
        # Request token in authentication server
        logger.debug('Connecting to the AODH auth server {}'.format(
            os.environ['OS_AUTH_URL']))
        keystone = client.Client(username=os.environ['OS_USERNAME'],
                                 password=os.environ['OS_PASSWORD'],
                                 tenant_name=os.environ['OS_USERNAME'],
                                 auth_url=os.environ['OS_AUTH_URL'])
        self._auth_token = keystone.auth_token
        for service in keystone.service_catalog.get_data():
            if service['name'] == AODH_NAME:
                for service_type in service['endpoints']:
                    if service_type['interface'] == 'internal':
                        # Bug fix: the endpoint was stored into
                        # self._gnocchi_url, so the None-check below always
                        # fired and get_aodh_url() never returned it.
                        self._aodh_url = service_type['url']

        if self._aodh_url is None:
            logger.warning('Aodh is not registered in service catalog')
class SNMPClient(object):
    """Client to request SNMP meters"""
    def __init__(self, conf, compute_node):
        """
        Keyword arguments:
        conf -- ConfigServer instance
        compute_node -- Compute node object
        """
        self.conf = conf
        self.compute_node = compute_node

    def get_snmp_metrics(self, compute_node, mib_file, mib_strings):
        """Run snmpwalk on the compute node and collect its output.

        Keyword arguments:
        compute_node -- Compute node object to query
        mib_file -- path of the MIB file, or None for a plain IF-MIB walk
        mib_strings -- list of MIB strings to walk when mib_file is given

        Return the raw command output (mapping of MIB string to output when
        a MIB file is supplied).
        """
        snmp_output = {}
        if mib_file is None:
            cmd = "snmpwalk -v 2c -c public localhost IF-MIB::interfaces"
            ip = compute_node.get_ip()
            snmp_output = self.conf.execute_command(cmd, ip)
        else:
            for mib_string in mib_strings:
                snmp_output[mib_string] = self.conf.execute_command(
                    "snmpwalk -v2c -m {} -c public localhost {}".format(
                        mib_file, mib_string), compute_node.get_ip())
        return snmp_output
class CSVClient(object):
    """Client to request CSV meters"""
    def __init__(self, conf):
        """
        Keyword arguments:
        conf -- ConfigServer instance
        """
        self.conf = conf

    def get_csv_metrics(
            self, compute_node, plugin_subdirectories, meter_categories):
        """Get last two CSV metric values for each category/subdirectory.

        Keyword arguments:
        compute_node -- compute node instance
        plugin_subdirectories -- list of subdirectories of plug-in
        meter_categories -- categories which will be tested

        Return list of metrics as tuples
        (plugin_subdir, meter_category, old_value, new_value).
        """
        compute_name = compute_node.get_name()
        nodes = get_apex_nodes()
        metrics = []
        for node in nodes:
            if compute_name == node.get_dict()['name']:
                date = node.run_cmd("date '+%Y-%m-%d'")
                hostname = node.run_cmd('hostname -A')
                hostname = hostname.split()[0]
                for plugin_subdir in plugin_subdirectories:
                    for meter_category in meter_categories:
                        stdout1 = node.run_cmd(
                            "tail -2 /var/lib/collectd/csv/"
                            + "{0}/{1}/{2}-{3}".format(
                                hostname, plugin_subdir,
                                meter_category, date))
                        stdout2 = node.run_cmd(
                            "tail -1 /var/lib/collectd/csv/"
                            + "{0}.jf.intel.com/{1}/{2}-{3}".format(
                                compute_node.get_name(), plugin_subdir,
                                meter_category, date))
                        # Storing last two values
                        values = stdout1
                        if values is None:
                            logger.error(
                                'Getting last two CSV entries of meter category'
                                + ' {0} in {1} subdir failed'.format(
                                    meter_category, plugin_subdir))
                        else:
                            # CSV lines are "timestamp,value,..."; the first
                            # field of each tail line is compared by callers.
                            values = values.split(',')
                            old_value = float(values[0])
                            stdout2 = stdout2.split(',')
                            new_value = float(stdout2[0])
                            metrics.append((
                                plugin_subdir, meter_category, old_value,
                                new_value))
        return metrics
def get_csv_categories_for_ipmi(conf, compute_node):
    """List today's IPMI CSV meter categories on a compute node.

    Keyword arguments:
    conf -- ConfigServer instance
    compute_node -- compute node instance

    Return list of categories.
    """
    node_ip = compute_node.get_ip()
    date_output = conf.execute_command("date '+%Y-%m-%d'", node_ip)
    today = date_output[0].strip()
    listing = conf.execute_command(
        "ls /var/lib/collectd/csv/{0}.jf.intel.com/ipmi | grep {1}".format(
            compute_node.get_name(), today), node_ip)
    # Drop the trailing '-YYYY-MM-DD' (11 characters) from each file name.
    return [entry.strip()[:-11] for entry in listing]
def _process_result(compute_node, out_plugin, test, result, results_list):
    """Print test result and append it to results list.

    Keyword arguments:
    compute_node -- compute node ID
    out_plugin -- out plug-in name
    test -- testcase name
    result -- boolean test result
    results_list -- results list
    """
    if result:
        logger.info(
            'Test case {0} PASSED with {1}.'.format(
                test, out_plugin))
    else:
        logger.error(
            'Test case {0} FAILED with {1}.'.format(
                test, out_plugin))
    results_list.append((compute_node, out_plugin, test, result))
def _print_label(label):
    """Print label on the screen

    Keyword arguments:
    label -- label string
    """
    label = label.strip()
    # Total width matches the 70-character separators used elsewhere.
    length = 70
    if label != '':
        label = ' ' + label + ' '
    length_label = len(label)
    # Integer division so the padding math works under Python 2 and 3.
    length1 = (length - length_label) // 2
    length2 = length - length_label - length1
    length1 = max(3, length1)
    length2 = max(3, length2)
    logger.info(('=' * length1) + label + ('=' * length2))
def _print_plugin_label(plugin, node_name):
    """Print plug-in label.

    Keyword arguments:
    plugin -- plug-in name
    node_name -- node name
    """
    _print_label(
        'Node {0}: Plug-in {1} Test case execution'.format(node_name, plugin))
325 def _print_final_result_of_plugin(
326 plugin, compute_ids, results, out_plugins, out_plugin):
327 """Print final results of plug-in.
330 plugin -- plug-in name
331 compute_ids -- list of compute node IDs
332 results -- results list
333 out_plugins -- list of out plug-ins
334 out_plugin -- used out plug-in
337 for id in compute_ids:
338 if out_plugin == 'Gnocchi':
339 if (id, out_plugin, plugin, True) in results:
340 print_line += ' PASS |'
341 elif (id, out_plugin, plugin, False) in results:
342 print_line += ' FAIL |'
344 print_line += ' NOT EX |'
345 elif out_plugin == 'AODH':
346 if (id, out_plugin, plugin, True) in results:
347 print_line += ' PASS |'
348 elif (id, out_plugin, plugin, False) in results:
349 print_line += ' FAIL |'
351 print_line += ' NOT EX |'
352 elif out_plugin == 'CSV':
353 if (id, out_plugin, plugin, True) in results:
354 print_line += ' PASS |'
355 elif (id, out_plugin, plugin, False) in results:
356 print_line += ' FAIL |'
358 print_line += ' NOT EX |'
360 print_line += ' SKIP |'
def print_overall_summary(
        compute_ids, tested_plugins, aodh_plugins, results, out_plugins):
    """Print overall summary table.

    Keyword arguments:
    compute_ids -- list of compute IDs
    tested_plugins -- dict of plug-in IDs to display names
    aodh_plugins -- dict of AODH plug-in IDs to display names
    results -- results list
    out_plugins -- dict of node ID to list of used out plug-ins
    """
    compute_node_names = ['Node-{}'.format(i) for i in range(
        len(compute_ids))]
    all_computes_in_line = ''
    for compute in compute_node_names:
        all_computes_in_line += '| ' + compute + (' ' * (7 - len(compute)))
    line_of_nodes = '| Test ' + all_computes_in_line + '|'
    logger.info('=' * 70)
    logger.info('+' + ('-' * ((9 * len(compute_node_names))+16)) + '+')
    # '//' keeps the centering math integral under Python 2 and 3.
    logger.info(
        '|' + ' ' * ((9*len(compute_node_names))//2)
        + ' OVERALL SUMMARY'
        + ' ' * (
            9*len(compute_node_names) - (9*len(compute_node_names))//2)
        + '|')
    logger.info(
        '+' + ('-' * 16) + '+' + (('-' * 8) + '+') * len(compute_node_names))
    logger.info(line_of_nodes)
    logger.info(
        '+' + ('-' * 16) + '+' + (('-' * 8) + '+') * len(compute_node_names))
    # Collect the distinct out plug-ins used on any node, in a fixed order.
    out_plugins_print = []
    out_plugins_print1 = []
    for key in out_plugins.keys():
        if 'Gnocchi' in out_plugins[key]:
            out_plugins_print1.append('Gnocchi')
        if 'AODH' in out_plugins[key]:
            out_plugins_print1.append('AODH')
        if 'SNMP' in out_plugins[key]:
            out_plugins_print1.append('SNMP')
        if 'CSV' in out_plugins[key]:
            out_plugins_print1.append('CSV')
    for i in out_plugins_print1:
        if i not in out_plugins_print:
            out_plugins_print.append(i)
    for out_plugin in out_plugins_print:
        output_plugins_line = ''
        for id in compute_ids:
            out_plugin_result = '----'
            if out_plugin == 'Gnocchi':
                out_plugin_result = \
                    'PASS' if 'Gnocchi' in out_plugins_print else 'FAIL'
            if out_plugin == 'AODH':
                out_plugin_result = \
                    'PASS' if out_plugin in out_plugins_print else 'FAIL'
            if out_plugin == 'SNMP':
                out_plugin_result = \
                    'PASS' if [
                        plugin for comp_id, out_pl, plugin, res in results
                        if comp_id == id and res] else 'FAIL'
            if out_plugin == 'CSV':
                out_plugin_result = \
                    'PASS' if [
                        plugin for comp_id, out_pl, plugin, res in results
                        if comp_id == id and res] else 'FAIL'
            else:
                out_plugin_result = 'FAIL'
            output_plugins_line += '| ' + out_plugin_result + ' '
        logger.info(
            '| OUT:{}'.format(out_plugin) + (' ' * (11 - len(out_plugin)))
            + output_plugins_line + '|')

        if out_plugin == 'AODH':
            for plugin in sorted(aodh_plugins.values()):
                line_plugin = _print_final_result_of_plugin(
                    plugin, compute_ids, results, out_plugins, out_plugin)
                logger.info(
                    '| IN:{}'.format(plugin) + (' ' * (11-len(plugin)))
                    + '|' + line_plugin)
        else:
            for plugin in sorted(tested_plugins.values()):
                line_plugin = _print_final_result_of_plugin(
                    plugin, compute_ids, results, out_plugins, out_plugin)
                logger.info(
                    '| IN:{}'.format(plugin) + (' ' * (11-len(plugin)))
                    + '|' + line_plugin)
        logger.info(
            '+' + ('-' * 16) + '+'
            + (('-' * 8) + '+') * len(compute_node_names))
    logger.info('=' * 70)
def _exec_testcase(
        test_labels, name, out_plugin, controllers, compute_node,
        conf, results, error_plugins, out_plugins):
    """Execute the testcase.

    Keyword arguments:
    test_labels -- dictionary of plug-in IDs and their display names
    name -- plug-in ID, key of test_labels dictionary
    out_plugin -- out plug-in to verify the metrics with
    controllers -- list of controller nodes
    compute_node -- compute node ID
    conf -- ConfigServer instance
    results -- results list
    error_plugins -- list of tuples with plug-in errors
        (plugin, error_description, is_critical):
        plugin -- plug-in ID, key of test_labels dictionary
        error_description -- description of the error
        is_critical -- boolean value indicating whether error is critical
    out_plugins -- list of used out plug-ins
    """
    ovs_interfaces = conf.get_ovs_interfaces(compute_node)
    ovs_configured_interfaces = conf.get_plugin_config_values(
        compute_node, 'ovs_events', 'Interfaces')
    ovs_configured_bridges = conf.get_plugin_config_values(
        compute_node, 'ovs_stats', 'Bridges')
    ovs_existing_configured_int = [
        interface for interface in ovs_interfaces
        if interface in ovs_configured_interfaces]
    ovs_existing_configured_bridges = [
        bridge for bridge in ovs_interfaces
        if bridge in ovs_configured_bridges]
    # Per-plug-in (passed, description) prerequisite checks.
    plugin_prerequisites = {
        'intel_rdt': [(
            conf.is_libpqos_on_node(compute_node),
            'libpqos must be installed.')],
        'mcelog': [(
            conf.is_mcelog_installed(compute_node, 'mcelog'),
            'mcelog must be installed.')],
        'ovs_events': [(
            len(ovs_existing_configured_int) > 0 or len(ovs_interfaces) > 0,
            'Interfaces must be configured.')],
        'ovs_stats': [(
            len(ovs_existing_configured_bridges) > 0,
            'Bridges must be configured.')]}
    gnocchi_criteria_lists = {
        'hugepages': 'hugepages',
        'intel_rdt': 'rdt',
        'mcelog': 'mcelog',
        'ovs_events': 'interface-ovs-system',
        'ovs_stats': 'ovs_stats-br0.br0'}
    aodh_criteria_lists = {
        'mcelog': 'mcelog',
        'ovs_events': 'ovs_events'}
    snmp_mib_files = {
        'intel_rdt': '/usr/share/snmp/mibs/Intel-Rdt.txt',
        'hugepages': '/usr/share/snmp/mibs/Intel-Hugepages.txt',
        'mcelog': '/usr/share/snmp/mibs/Intel-Mcelog.txt'}
    snmp_mib_strings = {
        'intel_rdt': [
            'INTEL-RDT-MIB::rdtLlc.1',
            'INTEL-RDT-MIB::rdtIpc.1',
            'INTEL-RDT-MIB::rdtMbmRemote.1',
            'INTEL-RDT-MIB::rdtMbmLocal.1'],
        'hugepages': [
            'INTEL-HUGEPAGES-MIB::hugepagesPageFree'],
        'mcelog': [
            'INTEL-MCELOG-MIB::memoryCorrectedErrors.1',
            'INTEL-MCELOG-MIB::memoryCorrectedErrors.2']}
    # Vary hugepage count per run so SNMP actually observes a change.
    nr_hugepages = int(time.time()) % 10000
    snmp_in_commands = {
        'intel_rdt': None,
        'hugepages': 'echo {} > /sys/kernel/'.format(nr_hugepages)
        + 'mm/hugepages/hugepages-2048kB/nr_hugepages',
        'mcelog': '/root/mce-inject_df < /root/corrected'}
    csv_subdirs = {
        'intel_rdt': [
            'intel_rdt-{}'.format(core)
            for core in conf.get_plugin_config_values(
                compute_node, 'intel_rdt', 'Cores')],
        'hugepages': [
            'hugepages-mm-2048Kb', 'hugepages-node0-2048Kb',
            'hugepages-node1-2048Kb'],
        'mcelog': [
            'mcelog-SOCKET_0_CHANNEL_0_DIMM_any',
            'mcelog-SOCKET_0_CHANNEL_any_DIMM_any'],
        'ovs_stats': [
            'ovs_stats-br0.br0'],
        'ovs_events': [
            'ovs_events-br0']}
    # csv_meter_categories_ipmi = get_csv_categories_for_ipmi(conf,
    #                                                         compute_node)
    csv_meter_categories = {
        'intel_rdt': [
            'bytes-llc', 'ipc', 'memory_bandwidth-local',
            'memory_bandwidth-remote'],
        'hugepages': ['vmpage_number-free', 'vmpage_number-used'],
        # 'ipmi': csv_meter_categories_ipmi,
        'mcelog': [
            'errors-corrected_memory_errors',
            'errors-uncorrected_memory_errors'],
        'ovs_stats': [
            'if_dropped', 'if_errors', 'if_packets'],
        'ovs_events': ['gauge-link_status']}

    _print_plugin_label(
        test_labels[name] if name in test_labels else name,
        compute_node.get_name())
    plugin_critical_errors = [
        error for plugin, error, critical in error_plugins
        if plugin == name and critical]
    if plugin_critical_errors:
        logger.error('Following critical errors occurred:'.format(name))
        for error in plugin_critical_errors:
            logger.error(' * ' + error)
        _process_result(
            compute_node.get_id(), out_plugin, test_labels[name], False,
            results)
    else:
        plugin_errors = [
            error for plugin, error, critical in error_plugins
            if plugin == name and not critical]
        if plugin_errors:
            logger.warning('Following non-critical errors occured:')
            for error in plugin_errors:
                logger.warning(' * ' + error)
        failed_prerequisites = []
        if name in plugin_prerequisites:
            failed_prerequisites = [
                prerequisite_name for prerequisite_passed,
                prerequisite_name in plugin_prerequisites[name]
                if not prerequisite_passed]
        if failed_prerequisites:
            logger.error(
                '{} test will not be executed, '.format(name)
                + 'following prerequisites failed:')
            for prerequisite in failed_prerequisites:
                logger.error(' * {}'.format(prerequisite))
        else:
            plugin_interval = conf.get_plugin_interval(compute_node, name)
            res = False
            if out_plugin == 'Gnocchi':
                res = conf.test_plugins_with_gnocchi(
                    compute_node.get_name(), plugin_interval,
                    logger, criteria_list=gnocchi_criteria_lists[name])
            if out_plugin == 'AODH':
                res = conf.test_plugins_with_aodh(
                    compute_node.get_name(), plugin_interval,
                    logger, criteria_list=aodh_criteria_lists[name])
            if out_plugin == 'SNMP':
                res = \
                    name in snmp_mib_files and name in snmp_mib_strings \
                    and tests.test_snmp_sends_data(
                        compute_node,
                        plugin_interval, logger,
                        SNMPClient(conf, compute_node), snmp_mib_files[name],
                        snmp_mib_strings[name], snmp_in_commands[name], conf)
            if out_plugin == 'CSV':
                res = tests.test_csv_handles_plugin_data(
                    compute_node, conf.get_plugin_interval(compute_node, name),
                    name, csv_subdirs[name], csv_meter_categories[name],
                    logger, CSVClient(conf))

            if res and plugin_errors:
                logger.warning(
                    'Test works, but will be reported as failure,'
                    + 'because of non-critical errors.')
                res = False
            _process_result(
                compute_node.get_id(), out_plugin, test_labels[name],
                res, results)
def get_results_for_ovs_events(
        plugin_labels, plugin_name, gnocchi_running,
        compute_node, conf, results, error_plugins):
    """ Testing OVS Events with python plugin
    """
    plugin_label = 'OVS events'
    res = conf.enable_ovs_events(
        compute_node, plugin_label, error_plugins, create_backup=False)
    # NOTE(review): _process_result takes 5 positional arguments
    # (compute_node, out_plugin, test, result, results_list); this call
    # passes 4 — confirm whether plugin_label should be both out_plugin
    # and test here.
    _process_result(
        compute_node.get_id(), plugin_label, res, results)
    logger.info("Results for OVS Events = {}" .format(results))
def create_ovs_bridge():
    """Create OVS bridges on compute nodes."""
    handler = factory.Factory.get_handler('apex',
                                          APEX_IP,
                                          APEX_USER_STACK,
                                          APEX_PKEY)
    nodes = handler.get_nodes()
    for node in nodes:
        if node.is_compute():
            node.run_cmd('sudo ovs-vsctl add-br br0')
            node.run_cmd('sudo ovs-vsctl set-manager ptcp:6640')
    logger.info('OVS Bridges created on compute nodes')
def mcelog_install():
    """Install mcelog on compute nodes."""
    _print_label('Enabling mcelog on compute nodes')
    handler = factory.Factory.get_handler('apex',
                                          APEX_IP,
                                          APEX_USER_STACK,
                                          APEX_PKEY)
    nodes = handler.get_nodes()
    for node in nodes:
        if node.is_compute():
            centos_release = node.run_cmd('uname -r')
            if '3.10.0-514.26.2.el7.x86_64' not in centos_release:
                # Bug fix: '{1}'.format(centos_release) raised IndexError —
                # a single positional argument is index {0}.
                logger.info(
                    'Mcelog will not be enabled '
                    + 'on node-{0}, '.format(node.get_dict()['name'])
                    + 'unsupported CentOS release found ({0}).'.format(
                        centos_release))
            else:
                logger.info(
                    'Checking if mcelog is enabled'
                    + ' on node-{}...'.format(node.get_dict()['name']))
                res = node.run_cmd('ls')
                # Bug fix: "'mce-inject_ea' and 'corrected' in res" only
                # tested the second operand; both files must be present.
                if 'mce-inject_ea' in res and 'corrected' in res:
                    logger.info(
                        'Mcelog seems to be already installed '
                        + 'on node-{}.'.format(node.get_dict()['name']))
                    node.run_cmd('sudo modprobe mce-inject')
                    node.run_cmd('sudo ./mce-inject_ea < corrected')
                else:
                    logger.info(
                        'Mcelog will be enabled on node-{}...'.format(
                            node.get_dict()['id']))
                    node.put_file(
                        '/usr/local/lib/python2.7/dist-packages/baro_tests/'
                        + 'mce-inject_ea', 'mce-inject_ea')
                    node.run_cmd('chmod a+x mce-inject_ea')
                    node.run_cmd('echo "CPU 0 BANK 0" > corrected')
                    node.run_cmd(
                        'echo "STATUS 0xcc00008000010090" >>'
                        + ' corrected')
                    node.run_cmd(
                        'echo "ADDR 0x0010FFFFFFF" >> corrected')
                    node.run_cmd('sudo modprobe mce-inject')
                    node.run_cmd('sudo ./mce-inject_ea < corrected')
    logger.info('Mcelog is installed on all compute nodes')
698 """Uninstall mcelog from compute nodes."""
699 handler = factory.Factory.get_handler(
700 'apex', APEX_IP, APEX_USER, APEX_PKEY)
701 nodes = handler.get_nodes()
703 if node.is_compute():
704 output = node.run_cmd('ls')
705 if 'mce-inject_ea' in output:
706 node.run_cmd('rm mce-inject_ea')
707 if 'corrected' in output:
708 node.run_cmd('rm corrected')
709 node.run_cmd('sudo systemctl restart mcelog')
710 logger.info('Mcelog is deleted from all compute nodes')
def get_ssh_keys():
    """Ensure the installer's RSA key is available locally.

    Downloads the key from the installer node when it is not already
    present at ID_RSA_DST.
    """
    if not os.path.isdir(ID_RSA_DST_DIR):
        os.makedirs(ID_RSA_DST_DIR)
    if not os.path.isfile(ID_RSA_DST):
        logger.info(
            "RSA key file {} doesn't exist".format(ID_RSA_DST)
            + ", it will be downloaded from installer node.")
        handler = factory.Factory.get_handler(
            'apex', APEX_IP, APEX_USER, APEX_PKEY)
        apex = handler.get_installer_node()
        apex.get_file(ID_RSA_SRC, ID_RSA_DST)
    else:
        logger.info("RSA key file {} exists.".format(ID_RSA_DST))
729 """Check whether there is global logger available and if not, define one."""
730 if 'logger' not in globals():
732 logger = logger.Logger("barometercollectd").getLogger()
def main(bt_logger=None):
    """Check each compute node sends gnocchi metrics.

    Keyword arguments:
    bt_logger -- logger instance

    Return 0 on overall success, 1 otherwise.
    """
    logging.getLogger("paramiko").setLevel(logging.WARNING)
    logging.getLogger("stevedore").setLevel(logging.WARNING)
    logging.getLogger("opnfv.deployment.manager").setLevel(logging.WARNING)
    if bt_logger is None:
        _check_logger()
    else:
        global logger
        logger = bt_logger
    _print_label("Starting barometer tests suite")
    get_ssh_keys()
    conf = config_server.ConfigServer(APEX_IP, APEX_USER, logger)
    controllers = conf.get_controllers()
    if len(controllers) == 0:
        logger.error('No controller nodes found!')
        return 1
    computes = conf.get_computes()
    if len(computes) == 0:
        logger.error('No compute nodes found!')
        return 1

    _print_label(
        'Display of Control and Compute nodes available in the set up')
    logger.info('controllers: {}'.format([('{0}: {1}'.format(
        node.get_name(), node.get_ip())) for node in controllers]))
    logger.info('computes: {}'.format([('{0}: {1}'.format(
        node.get_name(), node.get_ip())) for node in computes]))

    mcelog_install()
    create_ovs_bridge()
    gnocchi_running_on_con = False
    aodh_running_on_con = False
    snmp_running = False
    _print_label('Testing Gnocchi, AODH and SNMP on nodes')

    for controller in controllers:
        # NOTE(review): 'and' with a False start keeps this False for every
        # controller — looks like a deliberate disable, confirm.
        gnocchi_running_on_con = (
            gnocchi_running_on_con and conf.is_gnocchi_running(controller))
        aodh_running_on_con = (
            aodh_running_on_con or conf.is_aodh_running(controller))
    gnocchi_running = gnocchi_running_on_con
    aodh_running = aodh_running_on_con

    compute_ids = []
    compute_node_names = []
    results = []
    plugin_labels = {
        'intel_rdt': 'Intel RDT',
        'hugepages': 'Hugepages',
        'mcelog': 'Mcelog',
        'ovs_stats': 'OVS stats',
        'ovs_events': 'OVS events'}
    aodh_plugin_labels = {
        'mcelog': 'Mcelog',
        'ovs_events': 'OVS events'}
    out_plugins = {}
    out_plugins_to_test = []
    for compute_node in computes:
        node_id = compute_node.get_id()
        node_name = compute_node.get_name()
        out_plugins[node_id] = []
        compute_ids.append(node_id)
        compute_node_names.append(node_name)
        plugins_to_enable = []
        error_plugins = []
        gnocchi_running = (
            gnocchi_running or conf.check_gnocchi_plugin_included(
                compute_node))
        aodh_running = (
            aodh_running and conf.check_aodh_plugin_included(compute_node))
        if gnocchi_running:
            out_plugins[node_id].append("Gnocchi")
        if aodh_running:
            out_plugins[node_id].append("AODH")
        if snmp_running:
            out_plugins_to_test.append("SNMP")
        # NOTE(review): membership test uses lowercase 'gnocchi' while the
        # list stores 'Gnocchi', so CSV is always enabled — confirm intent.
        if 'gnocchi' not in out_plugins[node_id]:
            logger.info("CSV will be enabled for verification")
            plugins_to_enable.append('csv')
            out_plugins[node_id].append("CSV")
        if plugins_to_enable:
            _print_label(
                'NODE {}: Enabling Test Plug-in '.format(node_name)
                + 'and Test case execution')
        if plugins_to_enable and not conf.enable_plugins(
                compute_node, plugins_to_enable, error_plugins,
                create_backup=False):
            logger.error(
                'Failed to test plugins on node {}.'.format(node_id))
            logger.info(
                'Testcases on node {} will not be executed'.format(
                    node_id))
        else:
            # Defaults cover the no-restart path so the checks below never
            # hit an unbound local.
            collectd_restarted, collectd_warnings = True, []
            if plugins_to_enable:
                collectd_restarted, collectd_warnings = \
                    conf.restart_collectd(compute_node)
                sleep_time = 30
                logger.info(
                    'Sleeping for {} seconds'.format(sleep_time)
                    + ' after collectd restart...')
                time.sleep(sleep_time)
            if plugins_to_enable and not collectd_restarted:
                for warning in collectd_warnings:
                    logger.warning(warning)
                logger.error(
                    'Restart of collectd on node {} failed'.format(
                        node_id))
                logger.info(
                    'Testcases on node {}'.format(node_id)
                    + ' will not be executed.')
            else:
                if collectd_warnings:
                    for warning in collectd_warnings:
                        logger.warning(warning)

                for i in out_plugins[node_id]:
                    if i == 'AODH':
                        for plugin_name in sorted(aodh_plugin_labels.keys()):
                            _exec_testcase(
                                aodh_plugin_labels, plugin_name, i,
                                controllers, compute_node, conf, results,
                                error_plugins, out_plugins[node_id])
                    else:
                        for plugin_name in sorted(plugin_labels.keys()):
                            _exec_testcase(
                                plugin_labels, plugin_name, i,
                                controllers, compute_node, conf, results,
                                error_plugins, out_plugins[node_id])

    print_overall_summary(
        compute_ids, plugin_labels, aodh_plugin_labels, results, out_plugins)

    # Bug fix: results holds 4-tuples (node, out_plugin, test, result), so
    # the pass/fail flag is res[3]; res[2] is the testcase name and is
    # always truthy, which made this check a no-op.
    if ((len([res for res in results if not res[3]]) > 0)
            or (len(results) < len(computes) * len(plugin_labels))):
        logger.error('Some tests have failed or have not been executed')
        return 1
    return 0
880 if __name__ == '__main__':