1 # -*- coding: utf-8 -*-
3 # Licensed under the Apache License, Version 2.0 (the "License"); you may
4 # not use this file except in compliance with the License. You may obtain
5 # a copy of the License at
7 # http://www.apache.org/licenses/LICENSE-2.0
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12 # License for the specific language governing permissions and limitations
15 """Executing test of plugins"""
18 from keystoneclient.v3 import client
26 from opnfv.deployment import factory
# Name of the Gnocchi service as registered in the Keystone service catalog.
GNOCCHI_NAME = 'gnocchi'
# RSA private key: copied from the installer node (src) to the local
# ~/.ssh directory (dst) so overcloud nodes can be reached over SSH.
ID_RSA_SRC = '/root/.ssh/id_rsa'
ID_RSA_DST_DIR = '/root/.ssh'
ID_RSA_DST = ID_RSA_DST_DIR + '/id_rsa'
# IP of the Apex (TripleO) installer node, taken from the environment.
# NOTE(review): the captured value keeps the trailing newline emitted by
# echo, and on Python 3 check_output returns bytes -- confirm callers cope.
APEX_IP = subprocess.check_output("echo $INSTALLER_IP", shell=True)
APEX_USER_STACK = 'stack'
APEX_PKEY = '/root/.ssh/id_rsa'
class KeystoneException(Exception):
    """Keystone exception class"""
    def __init__(self, message, exc=None, response=None):
        """
        Keyword arguments:
        message -- error message
        exc -- original exception, appended to the message as its reason
        response -- HTTP response that caused the error (may be None)
        """
        if exc:
            # Surface the root cause directly in the rendered message.
            message += "\nReason: %s" % exc
        super(KeystoneException, self).__init__(message)
        # Keep the original exception and response for callers to inspect.
        self.exception = exc
        self.response = response
class InvalidResponse(KeystoneException):
    """Invalid Keystone exception class"""
    def __init__(self, exc, response):
        """
        Keyword arguments:
        exc -- original exception
        response -- offending HTTP response object
        """
        message = "Invalid response"
        super(InvalidResponse, self).__init__(message, exc, response)
# NOTE(review): this looks like the body of a node-discovery helper (the
# enclosing `def` and the remaining get_handler() arguments are not visible
# in this view) -- it builds an Apex deployment handler and fetches the
# overcloud node list; confirm the full signature upstream.
handler = factory.Factory.get_handler('apex',
nodes = handler.get_nodes()
class GnocchiClient(object):
    # Gnocchi Client to authenticate and request meters
    def __init__(self):
        self._auth_token = None
        self._gnocchi_url = None
        self._meter_list = None

    def auth_token(self):
        """Authenticate against Keystone and return the auth token."""
        self._auth_server()
        return self._auth_token

    def get_gnocchi_url(self):
        """Return the Gnocchi endpoint URL discovered during auth."""
        return self._gnocchi_url

    def get_gnocchi_metrics(self, criteria=None):
        # Subject to change if metric gathering is different for gnocchi
        self._request_meters(criteria)
        return self._meter_list

    def _auth_server(self):
        # Request token in authentication server
        logger.debug('Connecting to the auth server {}'.format(
            os.environ['OS_AUTH_URL']))
        # NOTE(review): tenant_name is filled from OS_USERNAME, not
        # OS_TENANT_NAME -- confirm this is intentional for this deployment.
        keystone = client.Client(username=os.environ['OS_USERNAME'],
                                 password=os.environ['OS_PASSWORD'],
                                 tenant_name=os.environ['OS_USERNAME'],
                                 auth_url=os.environ['OS_AUTH_URL'])
        self._auth_token = keystone.auth_token
        # Scan the service catalog for Gnocchi's internal endpoint.
        for service in keystone.service_catalog.get_data():
            if service['name'] == GNOCCHI_NAME:
                for service_type in service['endpoints']:
                    if service_type['interface'] == 'internal':
                        self._gnocchi_url = service_type['url']

        if self._gnocchi_url is None:
            logger.warning('Gnocchi is not registered in service catalog')

    def _request_meters(self, criteria):
        """Request meter list values from ceilometer

        Keyword arguments:
        criteria -- criteria for ceilometer meter list
        """
        if criteria is None:
            url = self._gnocchi_url + ('/v2/metric?limit=400')
        else:
            url = self._gnocchi_url \
                + ('/v3/metric/%s?q.field=metric&limit=400' % criteria)
        headers = {'X-Auth-Token': self._auth_token}
        resp = requests.get(url, headers=headers)
        try:
            resp.raise_for_status()
            self._meter_list = resp.json()
        except (KeyError, ValueError, requests.exceptions.HTTPError) as err:
            raise InvalidResponse(err, resp)
class AodhClient(object):
    # Aodh client to authenticate and request meters
    def __init__(self):
        self._auth_token = None
        self._aodh_url = None
        self._meter_list = None

    def auth_token(self):
        """Authenticate against Keystone and return the auth token."""
        self._auth_server()
        return self._auth_token

    def get_aodh_url(self):
        """Return the Aodh endpoint URL discovered during auth."""
        # Bug fix: previously returned self._gnocchi_url, an attribute this
        # class never defines.
        return self._aodh_url

    def get_aodh_metrics(self, criteria=None):
        # Subject to change if metric gathering is different for gnocchi
        # NOTE(review): relies on a _request_meters() helper that is not
        # defined on this class in the visible source -- confirm it exists.
        self._request_meters(criteria)
        return self._meter_list

    def _auth_server(self):
        # Request token in authentication server
        logger.debug('Connecting to the AODH auth server {}'.format(
            os.environ['OS_AUTH_URL']))
        keystone = client.Client(username=os.environ['OS_USERNAME'],
                                 password=os.environ['OS_PASSWORD'],
                                 tenant_name=os.environ['OS_USERNAME'],
                                 auth_url=os.environ['OS_AUTH_URL'])
        self._auth_token = keystone.auth_token
        # Scan the service catalog for Aodh's internal endpoint.
        for service in keystone.service_catalog.get_data():
            if service['name'] == AODH_NAME:
                for service_type in service['endpoints']:
                    if service_type['interface'] == 'internal':
                        # Bug fix: store into _aodh_url (was _gnocchi_url),
                        # otherwise the check below could never succeed.
                        self._aodh_url = service_type['url']

        if self._aodh_url is None:
            logger.warning('Aodh is not registered in service catalog')
class SNMPClient(object):
    """Client to request SNMP meters"""
    def __init__(self, conf, compute_node):
        """
        Keyword arguments:
        conf -- ConfigServer instance
        compute_node -- Compute node object
        """
        self.conf = conf
        self.compute_node = compute_node

    def get_snmp_metrics(self, compute_node, mib_file, mib_strings):
        """Get SNMP metrics from the given compute node.

        Keyword arguments:
        compute_node -- node to query (its IP is used for the remote call)
        mib_file -- MIB definition file, or None for the default IF-MIB walk
        mib_strings -- MIB object names to walk when mib_file is given

        Return the snmpwalk output: raw command output for the default
        walk, or a dict keyed by MIB string otherwise.
        """
        snmp_output = {}
        if mib_file is None:
            # No MIB given: fall back to a plain interface-table walk.
            cmd = "snmpwalk -v 2c -c public localhost IF-MIB::interfaces"
            ip = compute_node.get_ip()
            snmp_output = self.conf.execute_command(cmd, ip)
        else:
            for mib_string in mib_strings:
                snmp_output[mib_string] = self.conf.execute_command(
                    "snmpwalk -v2c -m {} -c public localhost {}".format(
                        mib_file, mib_string), compute_node.get_ip())
        return snmp_output
class CSVClient(object):
    """Client to request CSV meters"""
    def __init__(self, conf):
        """
        Keyword arguments:
        conf -- ConfigServer instance
        """
        # NOTE(review): the body of __init__ (presumably `self.conf = conf`)
        # is not visible in this view.

    def get_csv_metrics(
            self, compute_node, plugin_subdirectories, meter_categories):
        """Get CSV metrics from collectd's CSV output on the compute node.

        Keyword arguments:
        compute_node -- compute node instance
        plugin_subdirectories -- list of subdirectories of plug-in
        meter_categories -- categories which will be tested

        Return list of metrics.
        """
        compute_name = compute_node.get_name()
        nodes = get_apex_nodes()
        # NOTE(review): the `for` header iterating `nodes` (binding `node`)
        # and the assignment of `date` are not visible in this view.
        if compute_name == node.get_dict()['name']:
            for plugin_subdir in plugin_subdirectories:
                for meter_category in meter_categories:
                    # Last two CSV samples for this plug-in/category.
                    stdout1 = node.run_cmd(
                        "tail -2 /var/lib/collectd/csv/"
                        + "{0}.jf.intel.com/{1}/{2}-{3}".format(
                            compute_node.get_name(), plugin_subdir,
                            meter_category, date))
                    # Newest CSV sample only.
                    stdout2 = node.run_cmd(
                        "tail -1 /var/lib/collectd/csv/"
                        + "{0}.jf.intel.com/{1}/{2}-{3}".format(
                            compute_node.get_name(), plugin_subdir,
                            meter_category, date))
                    # Storing last two values
                    # NOTE(review): the surrounding error handling (values
                    # assignment, logger call opener) is only partially
                    # visible; fragments kept verbatim below.
                    'Getting last two CSV entries of meter category'
                    + ' {0} in {1} subdir failed'.format(
                        meter_category, plugin_subdir))
                    # Each CSV row is 'epoch,value'; field 0 is parsed here.
                    values = values.split(',')
                    old_value = float(values[0])
                    stdout2 = stdout2.split(',')
                    new_value = float(stdout2[0])
                    # NOTE(review): tail of a metrics.append(...) call whose
                    # opener is missing from this view.
                    plugin_subdir, meter_category, old_value,
def get_csv_categories_for_ipmi(conf, compute_node):
    """Get CSV meter categories for the ipmi plug-in.

    Keyword arguments:
    conf -- ConfigServer instance
    compute_node -- compute node instance

    Return list of categories.
    """
    stdout = conf.execute_command(
        "date '+%Y-%m-%d'", compute_node.get_ip())
    date = stdout[0].strip()
    listing_cmd = (
        "ls /var/lib/collectd/csv/{0}.jf.intel.com/ipmi | grep {1}".format(
            compute_node.get_name(), date))
    categories = conf.execute_command(listing_cmd, compute_node.get_ip())
    # Each entry ends with a '-YYYY-MM-DD' suffix (11 chars); strip it.
    return [entry.strip()[:-11] for entry in categories]
def _process_result(compute_node, test, result, results_list):
    """Print test result and append it to results list.

    Keyword arguments:
    compute_node -- compute node ID
    test -- testcase name
    result -- boolean test result
    results_list -- results list
    """
    if result:
        logger.info(
            'Compute node {0} test case {1} PASSED.'.format(
                compute_node, test))
    else:
        logger.error(
            'Compute node {0} test case {1} FAILED.'.format(
                compute_node, test))
    results_list.append((compute_node, test, result))
def _print_label(label):
    """Print label on the screen

    Keyword arguments:
    label -- label string
    """
    label = label.strip()
    length = 70
    if label != '':
        # Pad the label with one space on each side before centering it.
        label = ' ' + label + ' '
    length_label = len(label)
    # Floor division keeps the result an int on Python 3 as well; a plain
    # '/' would yield a float and break the '=' * n padding below.
    length1 = (length - length_label) // 2
    length2 = length - length_label - length1
    length1 = max(3, length1)
    length2 = max(3, length2)
    logger.info(('=' * length1) + label + ('=' * length2))
def _print_plugin_label(plugin, node_name):
    """Print plug-in label.

    Keyword arguments:
    plugin -- plug-in name
    node_name -- node name
    """
    _print_label(
        'Node {0}: Plug-in {1} Test case execution'.format(node_name, plugin))
def _print_final_result_of_plugin(
        plugin, compute_ids, results, out_plugins, out_plugin):
    """Print final results of plug-in.

    Keyword arguments:
    plugin -- plug-in name
    compute_ids -- list of compute node IDs
    results -- results list of (node_id, plugin, passed) tuples
    out_plugins -- mapping of node ID to the out plug-in used on it
    out_plugin -- used out plug-in

    Return one table row with a PASS/FAIL/NOT EX cell per compute node.
    """
    print_line = ''
    # Renamed loop variable (was `id`, shadowing the builtin); the redundant
    # re-check of out_plugins[id] in the FAIL branch and the duplicated
    # Gnocchi/else branches (identical bodies) are collapsed -- behavior
    # is unchanged.
    for node_id in compute_ids:
        if out_plugins[node_id] == out_plugin:
            if (node_id, plugin, True) in results:
                print_line += ' PASS |'
            elif (node_id, plugin, False) in results:
                print_line += ' FAIL |'
            else:
                print_line += ' NOT EX |'
        else:
            # Node was verified through a different out plug-in, so this
            # test was not executed there.
            print_line += ' NOT EX |'
    return print_line
def print_overall_summary(compute_ids, tested_plugins, results, out_plugins):
    """Print overall summary table.

    Keyword arguments:
    compute_ids -- list of compute IDs
    tested_plugins -- list of plug-ins
    results -- results list
    out_plugins -- list of used out plug-ins
    """
    # NOTE(review): this function is only partially visible -- several
    # continuation lines and logger.info( call openers are missing below;
    # the surviving fragments are kept verbatim and annotated.
    compute_node_names = ['Node-{}'.format(i) for i in range(
    # compute_node_names = ['Node-{}'.format(id) for id in compute_ids]
    all_computes_in_line = ''
    for compute in compute_node_names:
        all_computes_in_line += '| ' + compute + (' ' * (7 - len(compute)))
    line_of_nodes = '| Test ' + all_computes_in_line + '|'
    logger.info('=' * 70)
    logger.info('+' + ('-' * ((9 * len(compute_node_names))+16)) + '+')
    # NOTE(review): fragments of a centered title line; '/' is Python 2
    # integer division here -- under Python 3 it yields a float and would
    # break the string repetition.
        '|' + ' ' * ((9*len(compute_node_names))/2)
        9*len(compute_node_names) - (9*len(compute_node_names))/2)
        '+' + ('-' * 16) + '+' + (('-' * 8) + '+') * len(compute_node_names))
    logger.info(line_of_nodes)
        '+' + ('-' * 16) + '+' + (('-' * 8) + '+') * len(compute_node_names))
    # Gnocchi is always listed; others only if actually used on some node.
    out_plugins_print = ['Gnocchi']
    if 'SNMP' in out_plugins.values():
        out_plugins_print.append('SNMP')
    if 'AODH' in out_plugins.values():
        out_plugins_print.append('AODH')
    if 'CSV' in out_plugins.values():
        out_plugins_print.append('CSV')
    for out_plugin in out_plugins_print:
        output_plugins_line = ''
        for id in compute_ids:
            out_plugin_result = 'FAIL'
            if out_plugin == 'Gnocchi':
                out_plugin_result = \
                    'PASS' if out_plugins[id] == out_plugin else 'FAIL'
            if out_plugin == 'AODH':
                if out_plugins[id] == out_plugin:
                    out_plugin_result = \
                        'PASS' if out_plugins[id] == out_plugin else 'FAIL'
            if out_plugin == 'SNMP':
                if out_plugins[id] == out_plugin:
                    out_plugin_result = \
                        'PASS' if out_plugins[id] == out_plugin else 'FAIL'
            if out_plugin == 'CSV':
                if out_plugins[id] == out_plugin:
                    # NOTE(review): the head of this conditional expression
                    # is missing; only the list-comprehension tail survives.
                    out_plugin_result = \
                        plugin for comp_id, plugin, res in results
                        if comp_id == id and res] else 'FAIL'
            # NOTE(review): an `else:` header appears to be missing before
            # the SKIP assignment below.
            out_plugin_result = 'SKIP'
            output_plugins_line += '| ' + out_plugin_result + ' '
        # NOTE(review): logger.info( opener missing before this fragment.
            '| OUT:{}'.format(out_plugin) + (' ' * (11 - len(out_plugin)))
            + output_plugins_line + '|')
        for plugin in sorted(tested_plugins.values()):
            line_plugin = _print_final_result_of_plugin(
                plugin, compute_ids, results, out_plugins, out_plugin)
            # NOTE(review): logger.info( opener missing before this fragment.
                '| IN:{}'.format(plugin) + (' ' * (11-len(plugin)))
        # NOTE(review): logger.info( opener missing before this fragment.
            '+' + ('-' * 16) + '+'
            + (('-' * 8) + '+') * len(compute_node_names))
    logger.info('=' * 70)
# NOTE(review): the `def _exec_testcase(` header is missing from this view;
# the lines below are its parameter list, docstring and (gappy) body.
        test_labels, name, gnocchi_running, aodh_running, snmp_running,
        controllers, compute_node, conf, results, error_plugins, out_plugins):
    """Execute the testcase.

    Keyword arguments:
    test_labels -- dictionary of plug-in IDs and their display names
    name -- plug-in ID, key of test_labels dictionary
    ceilometer_running -- boolean indicating whether Ceilometer is running
    compute_node -- compute node ID
    conf -- ConfigServer instance
    results -- results list
    error_plugins -- list of tuples with plug-in errors
        (plugin, error_description, is_critical):
        plugin -- plug-in ID, key of test_labels dictionary
        error_decription -- description of the error
        is_critical -- boolean value indicating whether error is critical
    """
    # Interfaces/bridges both present on the node and configured for the
    # OVS collectd plug-ins.
    ovs_interfaces = conf.get_ovs_interfaces(compute_node)
    ovs_configured_interfaces = conf.get_plugin_config_values(
        compute_node, 'ovs_events', 'Interfaces')
    ovs_configured_bridges = conf.get_plugin_config_values(
        compute_node, 'ovs_stats', 'Bridges')
    ovs_existing_configured_int = [
        interface for interface in ovs_interfaces
        if interface in ovs_configured_interfaces]
    ovs_existing_configured_bridges = [
        bridge for bridge in ovs_interfaces
        if bridge in ovs_configured_bridges]
    # Per-plug-in (passed, description) prerequisite checks.
    # NOTE(review): the dictionary keys for these entries are missing from
    # this view.
    plugin_prerequisites = {
            conf.is_libpqos_on_node(compute_node),
            'libpqos must be installed.')],
            conf.is_installed(compute_node, 'mcelog'),
            'mcelog must be installed.')],
            len(ovs_existing_configured_int) > 0 or len(ovs_interfaces) > 0,
            'Interfaces must be configured.')],
            len(ovs_existing_configured_bridges) > 0,
            'Bridges must be configured.')]}
    gnocchi_criteria_lists = {
        'hugepages': ['hugepages'],
        'mcelog': ['mcelog'],
        'ovs_events': ['interface-ovs-system'],
        'ovs_stats': ['ovs_stats-br0.br0']}
    aodh_criteria_lists = {
        'mcelog': ['mcelog.errors'],
        'ovs_events': ['ovs_events.gauge']}
    # NOTE(review): opener of the snmp_mib_files dict is missing here.
        'intel_rdt': '/usr/share/snmp/mibs/Intel-Rdt.txt',
        'hugepages': '/usr/share/snmp/mibs/Intel-Hugepages.txt',
        'mcelog': '/usr/share/snmp/mibs/Intel-Mcelog.txt'}
    # NOTE(review): opener of the snmp_mib_strings dict is missing here.
        'INTEL-RDT-MIB::rdtLlc.1',
        'INTEL-RDT-MIB::rdtIpc.1',
        'INTEL-RDT-MIB::rdtMbmRemote.1',
        'INTEL-RDT-MIB::rdtMbmLocal.1'],
        'INTEL-HUGEPAGES-MIB::hugepagesPageFree'],
        'INTEL-MCELOG-MIB::memoryCorrectedErrors.1',
        'INTEL-MCELOG-MIB::memoryCorrectedErrors.2']}
    # Pseudo-random hugepage count so repeated runs produce a visible change.
    nr_hugepages = int(time.time()) % 10000
    # NOTE(review): opener of the snmp_in_commands dict is missing here.
        'hugepages': 'echo {} > /sys/kernel/'.format(nr_hugepages)
        + 'mm/hugepages/hugepages-2048kB/nr_hugepages',
        'mcelog': '/root/mce-inject_df < /root/corrected'}
    # NOTE(review): opener of the csv_subdirs dict is missing here.
        'intel_rdt-{}'.format(core)
        for core in conf.get_plugin_config_values(
            compute_node, 'intel_rdt', 'Cores')],
        'hugepages-mm-2048Kb', 'hugepages-node0-2048Kb',
        'hugepages-node1-2048Kb', 'hugepages-mm-1048576Kb',
        'hugepages-node0-1048576Kb', 'hugepages-node1-1048576Kb'],
        'mcelog-SOCKET_0_CHANNEL_0_DIMM_any',
        'mcelog-SOCKET_0_CHANNEL_any_DIMM_any'],
        'ovs_stats-br0.br0'],
    csv_meter_categories_ipmi = get_csv_categories_for_ipmi(conf, compute_node)
    csv_meter_categories = {
        # NOTE(review): the key for this first entry (presumably
        # 'intel_rdt') is missing from this view.
        'bytes-llc', 'ipc', 'memory_bandwidth-local',
        'memory_bandwidth-remote'],
        'hugepages': ['vmpage_number-free', 'vmpage_number-used'],
        'ipmi': csv_meter_categories_ipmi,
        # NOTE(review): the key for these entries (presumably 'mcelog')
        # is missing from this view.
        'errors-corrected_memory_errors',
        'errors-uncorrected_memory_errors',
        'errors-corrected_memory_errors_in_24h',
        'errors-uncorrected_memory_errors_in_24h'],
        'if_collisions', 'if_dropped', 'if_errors', 'if_packets',
        'if_rx_errors-crc', 'if_rx_errors-frame', 'if_rx_errors-over',
        'if_rx_octets', 'if_tx_octets'],
        'ovs_events': ['gauge-link_status']}
    # NOTE(review): presumably the arguments of a _print_plugin_label(
    # call whose opener is missing.
        test_labels[name] if name in test_labels else name,
        compute_node.get_name())
    plugin_critical_errors = [
        error for plugin, error, critical in error_plugins
        if plugin == name and critical]
    if plugin_critical_errors:
        # NOTE(review): .format(name) is a no-op -- the message contains no
        # placeholder.
        logger.error('Following critical errors occurred:'.format(name))
        for error in plugin_critical_errors:
            logger.error(' * ' + error)
        # NOTE(review): _process_result( opener missing before this fragment.
            compute_node.get_id(), test_labels[name], False, results)
    # NOTE(review): plugin_errors = [ opener missing before this fragment.
        error for plugin, error, critical in error_plugins
        if plugin == name and not critical]
        logger.warning('Following non-critical errors occured:')
        for error in plugin_errors:
            logger.warning(' * ' + error)
    failed_prerequisites = []
    if name in plugin_prerequisites:
        failed_prerequisites = [
            prerequisite_name for prerequisite_passed,
            prerequisite_name in plugin_prerequisites[name]
            if not prerequisite_passed]
    if failed_prerequisites:
        # NOTE(review): logger.error( opener missing before this fragment.
            '{} test will not be executed, '.format(name)
            + 'following prerequisites failed:')
        for prerequisite in failed_prerequisites:
            logger.error(' * {}'.format(prerequisite))
    # Dispatch to the active out plug-in; the if/elif headers selecting on
    # out_plugins are only partially visible below.
    plugin_interval = conf.get_plugin_interval(compute_node, name)
    res = conf.test_plugins_with_gnocchi(
        compute_node.get_id(), plugin_interval, logger,
        criteria_list=gnocchi_criteria_lists[name])
    res = conf.test_plugins_with_aodh(
        compute_node.get_id(), plugin_interval,
        # NOTE(review): 'creteria_list' looks like a typo for
        # 'criteria_list' -- confirm against ConfigServer's signature.
        logger, creteria_list=aodh_criteria_lists[name])
    # NOTE(review): fragment of the SNMP branch; the `res =` opener is
    # missing before this continuation.
        name in snmp_mib_files and name in snmp_mib_strings \
        and tests.test_snmp_sends_data(
            conf.get_plugin_interval(compute_node, name), logger,
            SNMPClient(conf, compute_node), snmp_mib_files[name],
            snmp_mib_strings[name], snmp_in_commands[name], conf)
    res = tests.test_csv_handles_plugin_data(
        compute_node, conf.get_plugin_interval(compute_node, name),
        name, csv_subdirs[name], csv_meter_categories[name],
        logger, CSVClient(conf))
    if res and plugin_errors:
        # NOTE(review): logger.warning( opener missing before this fragment.
            'Test works, but will be reported as failure,'
            + 'because of non-critical errors.')
    # NOTE(review): _process_result( opener missing before this fragment.
        compute_node.get_id(), test_labels[name], res, results)
def get_results_for_ovs_events(
        plugin_labels, plugin_name, gnocchi_running,
        compute_node, conf, results, error_plugins):
    """Testing OVS Events with python plugin

    Keyword arguments:
    plugin_labels -- dictionary of plug-in IDs and their display names
    plugin_name -- plug-in ID
    gnocchi_running -- boolean, whether Gnocchi is running
    compute_node -- compute node instance
    conf -- ConfigServer instance
    results -- results list
    error_plugins -- list of tuples with plug-in errors
    """
    plugin_label = 'OVS events'
    res = conf.enable_ovs_events(
        compute_node, plugin_label, error_plugins, create_backup=False)
    # Record and log the outcome for this node.
    _process_result(
        compute_node.get_id(), plugin_label, res, results)
    logger.info("Results for OVS Events = {}" .format(results))
def create_ovs_bridge():
    """Create OVS brides on compute nodes"""
    # NOTE(review): the handler arguments were not fully visible in the
    # original fragment; APEX_IP/APEX_USER_STACK/APEX_PKEY match the other
    # call sites in this module -- confirm.
    handler = factory.Factory.get_handler('apex',
                                          APEX_IP,
                                          APEX_USER_STACK,
                                          APEX_PKEY)
    nodes = handler.get_nodes()
    for node in nodes:
        if node.is_compute():
            node.run_cmd('sudo ovs-vsctl add-br br0')
            node.run_cmd('sudo ovs-vsctl set-manager ptcp:6640')
    logger.info('OVS Bridges created on compute nodes')
def mcelog_install():
    """Install mcelog on compute nodes."""
    _print_label('Enabling mcelog on compute nodes')
    # NOTE(review): handler arguments were not fully visible in the
    # original fragment; they mirror the other call sites -- confirm.
    handler = factory.Factory.get_handler('apex',
                                          APEX_IP,
                                          APEX_USER_STACK,
                                          APEX_PKEY)
    nodes = handler.get_nodes()
    for node in nodes:
        if node.is_compute():
            centos_release = node.run_cmd('uname -r')
            if '3.10.0-514.26.2.el7.x86_64' not in centos_release:
                logger.info(
                    'Mcelog will not be enabled '
                    + 'on node-{0}, '.format(node.get_dict()['id'])
                    # Bug fix: was '{1}', which raises IndexError with a
                    # single positional .format() argument.
                    + 'unsupported CentOS release found ({0}).'.format(
                        centos_release))
            else:
                logger.info(
                    'Checking if mcelog is enabled'
                    + ' on node-{}...'.format(node.get_dict()['id']))
                res = node.run_cmd('ls')
                # Bug fix: the original `'mce-inject_ea' and 'corrected'
                # in res` tested only the second membership (the first
                # literal is always truthy).
                if 'mce-inject_ea' in res and 'corrected' in res:
                    logger.info(
                        'Mcelog seems to be already installed '
                        + 'on node-{}.'.format(node.get_dict()['id']))
                    node.run_cmd('sudo modprobe mce-inject')
                    node.run_cmd('sudo ./mce-inject_ea < corrected')
                else:
                    logger.info(
                        'Mcelog will be enabled on node-{}...'.format(
                            node.get_dict()['id']))
                    # Ship the injector binary and a sample corrected-error
                    # record to the node, then inject once to verify.
                    node.put_file(
                        '/usr/local/lib/python2.7/dist-packages/baro_tests/'
                        + 'mce-inject_ea', 'mce-inject_ea')
                    node.run_cmd('chmod a+x mce-inject_ea')
                    node.run_cmd('echo "CPU 0 BANK 0" > corrected')
                    node.run_cmd(
                        'echo "STATUS 0xcc00008000010090" >>'
                        + ' corrected')
                    node.run_cmd(
                        'echo "ADDR 0x0010FFFFFFF" >> corrected')
                    node.run_cmd('sudo modprobe mce-inject')
                    node.run_cmd('sudo ./mce-inject_ea < corrected')
    logger.info('Mcelog is installed on all compute nodes')
def mcelog_delete():
    """Uninstall mcelog from compute nodes."""
    handler = factory.Factory.get_handler(
        'apex', APEX_IP, APEX_USER, APEX_PKEY)
    nodes = handler.get_nodes()
    for node in nodes:
        if node.is_compute():
            output = node.run_cmd('ls')
            # Remove the injector binary and sample record if present.
            if 'mce-inject_ea' in output:
                node.run_cmd('rm mce-inject_ea')
            if 'corrected' in output:
                node.run_cmd('rm corrected')
            node.run_cmd('sudo systemctl restart mcelog')
    logger.info('Mcelog is deleted from all compute nodes')
def get_ssh_keys():
    """Ensure the overcloud RSA key exists locally, fetching it if needed.

    NOTE(review): the original function header was not visible in this
    fragment; the name follows the module's conventions -- confirm.
    """
    if not os.path.isdir(ID_RSA_DST_DIR):
        os.makedirs(ID_RSA_DST_DIR)
    if not os.path.isfile(ID_RSA_DST):
        logger.info(
            "RSA key file {} doesn't exist".format(ID_RSA_DST)
            + ", it will be downloaded from installer node.")
        handler = factory.Factory.get_handler(
            'apex', APEX_IP, APEX_USER, APEX_PKEY)
        apex = handler.get_installer_node()
        # Copy the private key from the installer node to this host.
        apex.get_file(ID_RSA_SRC, ID_RSA_DST)
    else:
        logger.info("RSA key file {} exists.".format(ID_RSA_DST))
# NOTE(review): the `def _check_logger():` header appears to be missing
# from this view; the lines below look like its docstring and body.
    """Check whether there is global logger available and if not, define one."""
    if 'logger' not in globals():
        # NOTE(review): presumably preceded by a `global logger` statement;
        # the name on the right-hand side is a logger module imported
        # elsewhere in the file -- confirm how it resolves.
        logger = logger.Logger("barometercollectd").getLogger()
def main(bt_logger=None):
    """Check each compute node sends gnocchi metrics.

    Keyword arguments:
    bt_logger -- logger instance
    """
    # NOTE(review): this function is only partially visible -- a number of
    # if/elif headers, logger call openers and assignments are missing; the
    # surviving fragments are kept verbatim and annotated.
    # Quieten noisy third-party loggers.
    logging.getLogger("paramiko").setLevel(logging.WARNING)
    logging.getLogger("stevedore").setLevel(logging.WARNING)
    logging.getLogger("opnfv.deployment.manager").setLevel(logging.WARNING)
    if bt_logger is None:
    # NOTE(review): logger-setup branch bodies missing here.
    _print_label("Starting barometer tests suite")
    conf = config_server.ConfigServer(APEX_IP, APEX_USER, logger)
    controllers = conf.get_controllers()
    if len(controllers) == 0:
        logger.error('No controller nodes found!')
    computes = conf.get_computes()
    if len(computes) == 0:
        logger.error('No compute nodes found!')
    # NOTE(review): logger.info( opener missing before this fragment.
        'Display of Control and Compute nodes available in the set up')
    logger.info('controllers: {}'.format([('{0}: {1} ({2})'.format(
        node.get_id(), node.get_name(),
        node.get_ip())) for node in controllers]))
    logger.info('computes: {}'.format([('{0}: {1} ({2})'.format(
        node.get_id(), node.get_name(), node.get_ip()))
        for node in computes]))
    gnocchi_running_on_con = False
    aodh_running_on_con = False
    _print_label('Testing Gnocchi, AODH and SNMP on controller nodes')
    for controller in controllers:
        gnocchi_client = GnocchiClient()
        gnocchi_client.auth_token()
        # NOTE(review): assignment openers missing for the next fragments
        # (presumably gnocchi_running_on_con = ( ... and
        # aodh_running_on_con = ( ...).
            gnocchi_running_on_con and conf.is_gnocchi_running(controller))
        aodh_client = AodhClient()
        aodh_client.auth_token()
            aodh_running_on_con and conf.is_aodh_running(controller))
    # NOTE(review): the if/elif ladder reporting which services run on the
    # controllers is only partially visible below.
        logger.info("Gnocchi is running on controller.")
        logger.error("Gnocchi is not running on controller.")
        logger.info("AODH is running on controller.")
        logger.error("Gnocchi is not running on Controller")
        logger.error("AODH is not running on controller.")
        logger.info("SNMP is running on controller.")
        logger.error("Gnocchi is not running on Controller")
        logger.error("AODH is not running on controller.")
        logger.error("SNMP is not running on controller.")
        logger.info("CSV will be enabled on compute nodes.")
    compute_node_names = []
    # NOTE(review): plugin_labels dict opener and some entries missing.
        'intel_rdt': 'Intel RDT',
        'hugepages': 'Hugepages',
        'ovs_stats': 'OVS stats',
        'ovs_events': 'OVS events'}
    # NOTE(review): out-plug-in label dict opener missing.
        'gnocchi': 'Gnocchi',
    for compute_node in computes:
        node_id = compute_node.get_id()
        node_name = compute_node.get_name()
        # Default to CSV; upgraded below if Gnocchi/AODH/SNMP is usable.
        out_plugins[node_id] = 'CSV'
        compute_ids.append(node_id)
        compute_node_names.append(node_name)
        plugins_to_enable = []
        _print_label('NODE {}: Test Gnocchi Plug-in'.format(node_name))
        logger.info('Checking if gnocchi plug-in is included in compute nodes.')
        if not conf.check_gnocchi_plugin_included(compute_node):
            logger.error('Gnocchi plug-in is not included.')
            # NOTE(review): logger.info( opener missing.
                'Testcases on node {} will not be executed'.format(node_name))
        collectd_restarted, collectd_warnings = \
            conf.restart_collectd(compute_node)
        # NOTE(review): sleep_time assignment and logger.info( opener
        # missing around this fragment.
            'Sleeping for {} seconds after collectd restart...'.format(
        time.sleep(sleep_time)
        if not collectd_restarted:
            for warning in collectd_warnings:
                logger.warning(warning)
            # NOTE(review): logger.error( opener missing.
                'Restart of collectd on node {} failed'.format(node_name))
            # NOTE(review): logger.info( opener missing.
                'Testcases on node {} will not be executed'.format(
        for warning in collectd_warnings:
            logger.warning(warning)
        # NOTE(review): the if/elif ladder choosing the out plug-in
        # (Gnocchi / AODH / SNMP / CSV) is only partially visible below;
        # note the "withh" typo lives in a runtime string.
            out_plugins[node_id] = 'Gnocchi'
            logger.info("Gnocchi is active and collecting data")
            out_plugins[node_id] = 'AODH'
            logger.info("AODH withh be tested")
            _print_label('Node {}: Test AODH' .format(node_name))
            logger.info("Checking if AODH is running")
            logger.info("AODH is running")
            out_plugins[node_id] = 'SNMP'
            logger.info("SNMP will be tested.")
            _print_label('NODE {}: Test SNMP'.format(node_id))
            logger.info("Checking if SNMP is running.")
            logger.info("SNMP is running.")
            plugins_to_enable.append('csv')
            out_plugins[node_id] = 'CSV'
            logger.error("Gnocchi, AODH, SNMP are not running")
            # NOTE(review): logger.info( opener missing.
                "CSV will be enabled for verification "
                + "of test plugins.")
        if plugins_to_enable:
            # NOTE(review): _print_label( opener missing.
                'NODE {}: Enabling Test Plug-in '.format(node_name)
                + 'and Test case execution')
        if plugins_to_enable and not conf.enable_plugins(
                compute_node, plugins_to_enable, error_plugins,
                create_backup=False):
            # NOTE(review): logger.error( / logger.info( openers missing.
                'Failed to test plugins on node {}.'.format(node_id))
                'Testcases on node {} will not be executed'.format(
        if plugins_to_enable:
            collectd_restarted, collectd_warnings = \
                conf.restart_collectd(compute_node)
            # NOTE(review): logger.info( opener missing.
                'Sleeping for {} seconds'.format(sleep_time)
                + ' after collectd restart...')
            time.sleep(sleep_time)
        if plugins_to_enable and not collectd_restarted:
            for warning in collectd_warnings:
                logger.warning(warning)
            # NOTE(review): logger.error( opener missing.
                'Restart of collectd on node {} failed'.format(
            # NOTE(review): logger.info( opener missing.
                'Testcases on node {}'.format(node_id)
                + ' will not be executed.')
        if collectd_warnings:
            for warning in collectd_warnings:
                logger.warning(warning)
        for plugin_name in sorted(plugin_labels.keys()):
            # NOTE(review): _exec_testcase( opener missing.
                plugin_labels, plugin_name, gnocchi_running,
                aodh_running, snmp_running, controllers,
                compute_node, conf, results, error_plugins,
                out_plugins[node_id])
        # _print_label('NODE {}: Restoring config file'.format(node_name))
        # conf.restore_config(compute_node)
    print_overall_summary(compute_ids, plugin_labels, results, out_plugins)
    # Fail the suite when any test failed or some were never executed.
    if ((len([res for res in results if not res[2]]) > 0)
            or (len(results) < len(computes) * len(plugin_labels))):
        logger.error('Some tests have failed or have not been executed')
899 if __name__ == '__main__':