1 # -*- coding: utf-8 -*-
3 # Licensed under the Apache License, Version 2.0 (the "License"); you may
4 # not use this file except in compliance with the License. You may obtain
5 # a copy of the License at
7 # http://www.apache.org/licenses/LICENSE-2.0
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12 # License for the specific language governing permissions and limitations
15 """Executing test of plugins"""
18 from keystoneclient.v3 import client
26 from opnfv.deployment import factory
# Identifiers and paths used when talking to the Apex (TripleO) installer node.
GNOCCHI_NAME = 'gnocchi'
ID_RSA_SRC = '/root/.ssh/id_rsa'
ID_RSA_DST_DIR = '/root/.ssh'
ID_RSA_DST = ID_RSA_DST_DIR + '/id_rsa'
# NOTE(review): check_output returns the raw shell output including a trailing
# newline (and bytes on Python 3) -- confirm downstream users of APEX_IP cope
# with that, or it is stripped on a line missing from this copy.
APEX_IP = subprocess.check_output("echo $INSTALLER_IP", shell=True)
APEX_USER_STACK = 'stack'
APEX_PKEY = '/root/.ssh/id_rsa'
class KeystoneException(Exception):
    """Keystone exception class.

    Keyword arguments:
    message -- error message
    exc -- underlying exception, if any (default None)
    response -- HTTP response that triggered the error (default None)
    """
    def __init__(self, message, exc=None, response=None):
        # Only append a reason when an underlying exception was supplied;
        # otherwise the message would read "Reason: None".
        if exc:
            message += "\nReason: %s" % exc
        super(KeystoneException, self).__init__(message)
        # Keep both the original exception and the response for callers
        # that want to inspect the failure in detail.
        self.exception = exc
        self.response = response
class InvalidResponse(KeystoneException):
    """Raised when a Keystone/Gnocchi HTTP response cannot be parsed.

    Keyword arguments:
    exc -- the original exception raised while handling the response
    response -- the offending HTTP response object
    """
    def __init__(self, exc, response):
        # Delegate to the parent with a fixed summary message; the parent
        # appends the reason and stores the response.
        message = "Invalid response"
        super(InvalidResponse, self).__init__(message, exc, response)
# NOTE(review): this appears to be the body of a get_apex_nodes() helper
# (called later by CSVClient); the 'def' line and the remaining arguments of
# the get_handler() call are missing from this copy of the file.
handler = factory.Factory.get_handler('apex',
nodes = handler.get_nodes()
class GnocchiClient(object):
    # Gnocchi Client to authenticate and request meters
    # NOTE(review): several lines are missing from this copy of the file
    # (e.g. the 'def __init__(self):' and 'def auth_token(self):' headers);
    # the attribute assignments and bare 'return' below belong to them.
        self._auth_token = None   # Keystone token, filled in by _auth_server()
        self._gnocchi_url = None  # internal Gnocchi endpoint from the catalog
        self._meter_list = None   # last meter list fetched
        return self._auth_token

    def get_gnocchi_url(self):
        # Return the Gnocchi endpoint URL discovered during authentication.
        return self._gnocchi_url

    def get_gnocchi_metrics(self, criteria=None):
        # Subject to change if metric gathering is different for gnocchi
        self._request_meters(criteria)
        return self._meter_list

    def _auth_server(self):
        # Request token in authentication server
        logger.debug('Connecting to the auth server {}'.format(
            os.environ['OS_AUTH_URL']))
        # NOTE(review): tenant_name is populated from OS_USERNAME rather than
        # OS_TENANT_NAME -- confirm this is intentional.
        keystone = client.Client(username=os.environ['OS_USERNAME'],
                                 password=os.environ['OS_PASSWORD'],
                                 tenant_name=os.environ['OS_USERNAME'],
                                 auth_url=os.environ['OS_AUTH_URL'])
        self._auth_token = keystone.auth_token
        # Walk the service catalog looking for Gnocchi's internal endpoint.
        for service in keystone.service_catalog.get_data():
            if service['name'] == GNOCCHI_NAME:
                for service_type in service['endpoints']:
                    if service_type['interface'] == 'internal':
                        self._gnocchi_url = service_type['url']
        if self._gnocchi_url is None:
            logger.warning('Gnocchi is not registered in service catalog')

    def _request_meters(self, criteria):
        """Request meter list values from ceilometer

        Keyword arguments:
        criteria -- criteria for ceilometer meter list
        """
        # NOTE(review): the if/else choosing between the two URL forms and
        # the 'try:' opener are missing from this copy of the file.
        url = self._gnocchi_url + ('/v2/metric?limit=400')
        url = self._gnocchi_url \
            + ('/v3/metric/%s?q.field=metric&limit=400' % criteria)
        headers = {'X-Auth-Token': self._auth_token}
        resp = requests.get(url, headers=headers)
        resp.raise_for_status()
        self._meter_list = resp.json()
        except (KeyError, ValueError, requests.exceptions.HTTPError) as err:
            raise InvalidResponse(err, resp)
class AodhClient(object):
    """Aodh client: authenticate against Keystone and expose the Aodh URL.

    NOTE(review): get_aodh_metrics() calls self._request_meters(), which is
    not defined in this class in the visible source -- confirm it is
    provided/patched elsewhere before relying on it.
    """

    def __init__(self):
        self._auth_token = None  # Keystone token, filled in by _auth_server()
        self._aodh_url = None    # internal Aodh endpoint from the catalog
        self._meter_list = None  # last meter list fetched

    def auth_token(self):
        """Authenticate and return the Keystone token."""
        self._auth_server()
        return self._auth_token

    def get_aodh_url(self):
        """Return the Aodh endpoint URL discovered during authentication."""
        # Fixed: previously returned self._gnocchi_url, an attribute this
        # class never initialises under that name.
        return self._aodh_url

    def get_aodh_metrics(self, criteria=None):
        """Request and return the meter list.

        Keyword arguments:
        criteria -- criteria for the meter query (default None)
        """
        self._request_meters(criteria)
        return self._meter_list

    def _auth_server(self):
        """Request a token from the authentication server.

        Also walks the service catalog to record the internal Aodh endpoint.
        """
        logger.debug('Connecting to the AODH auth server {}'.format(
            os.environ['OS_AUTH_URL']))
        keystone = client.Client(username=os.environ['OS_USERNAME'],
                                 password=os.environ['OS_PASSWORD'],
                                 tenant_name=os.environ['OS_USERNAME'],
                                 auth_url=os.environ['OS_AUTH_URL'])
        self._auth_token = keystone.auth_token
        for service in keystone.service_catalog.get_data():
            if service['name'] == AODH_NAME:
                for service_type in service['endpoints']:
                    if service_type['interface'] == 'internal':
                        # Fixed: store the URL under _aodh_url (was
                        # _gnocchi_url), so the check below can ever succeed
                        # and get_aodh_url() returns the discovered endpoint.
                        self._aodh_url = service_type['url']
        if self._aodh_url is None:
            logger.warning('Aodh is not registered in service catalog')
class SNMPClient(object):
    """Client to request SNMP meters"""
    def __init__(self, conf, compute_node):
        """
        Keyword arguments:
        conf -- ConfigServer instance
        compute_node -- Compute node object
        """
        # NOTE(review): a 'self.conf = conf' assignment appears to be missing
        # from this copy of the file; get_snmp_metrics() relies on self.conf.
        self.compute_node = compute_node

    def get_snmp_metrics(self, compute_node, mib_file, mib_strings):
        # Walk the generic interface MIB first, then each requested MIB string.
        cmd = "snmpwalk -v 2c -c public localhost IF-MIB::interfaces"
        ip = compute_node.get_ip()
        snmp_output = self.conf.execute_command(cmd, ip)
        # NOTE(review): snmp_output is indexed like a dict below -- confirm
        # execute_command's return type supports item assignment, or that an
        # initialisation line is missing from this copy.
        for mib_string in mib_strings:
            snmp_output[mib_string] = self.conf.execute_command(
                "snmpwalk -v2c -m {} -c public localhost {}".format(
                    mib_file, mib_string), compute_node.get_ip())
        # NOTE(review): the 'return snmp_output' line appears to be missing
        # from this copy of the file.
class CSVClient(object):
    """Client to request CSV meters"""
    def __init__(self, conf):
        """
        Keyword arguments:
        conf -- ConfigServer instance
        """
        # NOTE(review): the 'self.conf = conf' assignment appears to be
        # missing from this copy of the file.

    # NOTE(review): the 'def get_csv_metrics(' header is missing from this
    # copy of the file; the parameter list below belongs to it.  The lines
    # computing 'date', initialising the metrics list, and the 'for node in
    # nodes:' loop are missing as well.
            self, compute_node, plugin_subdirectories, meter_categories):
        """Get CSV metrics.

        Keyword arguments:
        compute_node -- compute node instance
        plugin_subdirectories -- list of subdirectories of plug-in
        meter_categories -- categories which will be tested

        Return list of metrics.
        """
        compute_name = compute_node.get_name()
        nodes = get_apex_nodes()
        if compute_name == node.get_dict()['name']:
            for plugin_subdir in plugin_subdirectories:
                for meter_category in meter_categories:
                    # Read the last two samples of the category's CSV file.
                    stdout1 = node.run_cmd(
                        "tail -2 /var/lib/collectd/csv/"
                        + "{0}.jf.intel.com/{1}/{2}-{3}".format(
                            compute_node.get_name(), plugin_subdir,
                            meter_category, date))
                    stdout2 = node.run_cmd(
                        "tail -1 /var/lib/collectd/csv/"
                        + "{0}.jf.intel.com/{1}/{2}-{3}".format(
                            compute_node.get_name(), plugin_subdir,
                            meter_category, date))
                    # Storing last two values
                    # NOTE(review): the surrounding if/else and the opening of
                    # the logger call are missing from this copy of the file.
                        'Getting last two CSV entries of meter category'
                        + ' {0} in {1} subdir failed'.format(
                            meter_category, plugin_subdir))
                        values = values.split(',')
                        old_value = float(values[0])
                        stdout2 = stdout2.split(',')
                        new_value = float(stdout2[0])
                        # NOTE(review): the opening/closing of the
                        # metrics.append((...)) call are missing here.
                            plugin_subdir, meter_category, old_value,
def get_csv_categories_for_ipmi(conf, compute_node):
    """Get CSV metrics category names for the IPMI plug-in.

    Keyword arguments:
    conf -- ConfigServer instance
    compute_node -- compute node instance

    Return list of categories.
    """
    node_ip = compute_node.get_ip()
    date_output = conf.execute_command(
        "date '+%Y-%m-%d'", node_ip)
    today = date_output[0].strip()
    listing_cmd = (
        "ls /var/lib/collectd/csv/{0}.jf.intel.com/ipmi | grep {1}".format(
            compute_node.get_name(), today))
    entries = conf.execute_command(listing_cmd, node_ip)
    # Each file is named "<category>-YYYY-MM-DD"; drop the 11-character
    # date suffix to recover the category name.
    return [entry.strip()[:-11] for entry in entries]
def _process_result(compute_node, test, result, results_list):
    """Print test result and append it to results list.

    Keyword arguments:
    test -- testcase name
    result -- boolean test result
    results_list -- results list
    """
    # NOTE(review): the 'if result:' / 'else:' branch lines and the logger
    # call openers/closers are missing from this copy of the file.
        'Compute node {0} test case {1} PASSED.'.format(
        'Compute node {0} test case {1} FAILED.'.format(
    results_list.append((compute_node, test, result))
def _print_label(label):
    """Print label on the screen

    Keyword arguments:
    label -- label string
    """
    label = label.strip()
    # NOTE(review): the line defining 'length' (the total banner width) is
    # missing from this copy of the file.
    label = ' ' + label + ' '
    length_label = len(label)
    # NOTE(review): integer-style '/' division -- on Python 3 this yields a
    # float and '=' * float raises TypeError; confirm the file targets
    # Python 2 or change to '//'.
    length1 = (length - length_label) / 2
    length2 = length - length_label - length1
    # Keep at least three '=' on each side of the label.
    length1 = max(3, length1)
    length2 = max(3, length2)
    logger.info(('=' * length1) + label + ('=' * length2))
def _print_plugin_label(plugin, node_name):
    """Print plug-in label.

    Keyword arguments:
    plugin -- plug-in name
    node_name -- node name
    """
    # NOTE(review): the opening of the _print_label/logger call is missing
    # from this copy of the file.
        'Node {0}: Plug-in {1} Test case execution'.format(node_name, plugin))
def _print_final_result_of_plugin(
        plugin, compute_ids, results, out_plugins, out_plugin):
    """Print final results of plug-in.

    Keyword arguments:
    plugin -- plug-in name
    compute_ids -- list of compute node IDs
    results -- results list
    out_plugins -- list of out plug-ins
    out_plugin -- used out plug-in
    """
    # NOTE(review): the "print_line = ''" initialisation, some 'else:' lines
    # and the final 'return print_line' are missing from this copy of the
    # file.  Builds one "| PASS | FAIL | NOT EX |" row per compute node.
    for id in compute_ids:
        if out_plugins[id] == out_plugin:
            if (id, plugin, True) in results:
                print_line += ' PASS |'
            elif (id, plugin, False) in results \
                    and out_plugins[id] == out_plugin:
                print_line += ' FAIL |'
                print_line += ' NOT EX |'
        elif out_plugin == 'Gnocchi':
            print_line += ' NOT EX |'
            print_line += ' NOT EX |'
def print_overall_summary(compute_ids, tested_plugins, results, out_plugins):
    """Print overall summary table.

    Keyword arguments:
    compute_ids -- list of compute IDs
    tested_plugins -- list of plug-ins
    results -- results list
    out_plugins -- list of used out plug-ins
    """
    # NOTE(review): several scaffolding lines (logger.info( openers, the
    # range() argument, if/else lines) are missing from this copy of the
    # file; comments below are hedged accordingly.
    compute_node_names = ['Node-{}'.format(i) for i in range(
    # compute_node_names = ['Node-{}'.format(id) for id in compute_ids]
    all_computes_in_line = ''
    # Build the header row, padding each node name to a fixed column width.
    for compute in compute_node_names:
        all_computes_in_line += '| ' + compute + (' ' * (7 - len(compute)))
    line_of_nodes = '| Test ' + all_computes_in_line + '|'
    logger.info('=' * 70)
    logger.info('+' + ('-' * ((9 * len(compute_node_names))+16)) + '+')
    # NOTE(review): '/' on ints yields float on Python 3; ' ' * float would
    # raise TypeError -- confirm Python 2 target or change to '//'.
        '|' + ' ' * ((9*len(compute_node_names))/2)
        9*len(compute_node_names) - (9*len(compute_node_names))/2)
        '+' + ('-' * 16) + '+' + (('-' * 8) + '+') * len(compute_node_names))
    logger.info(line_of_nodes)
        '+' + ('-' * 16) + '+' + (('-' * 8) + '+') * len(compute_node_names))
    # Gnocchi is always shown; other out plug-ins only if actually used.
    out_plugins_print = ['Gnocchi']
    if 'SNMP' in out_plugins.values():
        out_plugins_print.append('SNMP')
    if 'AODH' in out_plugins.values():
        out_plugins_print.append('AODH')
    if 'CSV' in out_plugins.values():
        out_plugins_print.append('CSV')
    for out_plugin in out_plugins_print:
        output_plugins_line = ''
        for id in compute_ids:
            out_plugin_result = 'FAIL'
            if out_plugin == 'Gnocchi':
                out_plugin_result = \
                    'PASS' if out_plugins[id] == out_plugin else 'FAIL'
            if out_plugin == 'AODH':
                if out_plugins[id] == out_plugin:
                    out_plugin_result = \
                        'PASS' if out_plugins[id] == out_plugin else 'FAIL'
            if out_plugin == 'SNMP':
                if out_plugins[id] == out_plugin:
                    out_plugin_result = \
                        'PASS' if out_plugins[id] == out_plugin else 'FAIL'
            if out_plugin == 'CSV':
                if out_plugins[id] == out_plugin:
                    # CSV passes only if at least one result for this node
                    # succeeded ("'PASS' if [" opener missing in this copy).
                    out_plugin_result = \
                        plugin for comp_id, plugin, res in results
                        if comp_id == id and res] else 'FAIL'
                out_plugin_result = 'SKIP'
            output_plugins_line += '| ' + out_plugin_result + ' '
        '| OUT:{}'.format(out_plugin) + (' ' * (11 - len(out_plugin)))
        + output_plugins_line + '|')
        for plugin in sorted(tested_plugins.values()):
            line_plugin = _print_final_result_of_plugin(
                plugin, compute_ids, results, out_plugins, out_plugin)
            '| IN:{}'.format(plugin) + (' ' * (11-len(plugin)))
        '+' + ('-' * 16) + '+'
        + (('-' * 8) + '+') * len(compute_node_names))
    logger.info('=' * 70)
# NOTE(review): the 'def _exec_testcase(' header line is missing from this
# copy of the file; the parameter list below belongs to it.  Several other
# scaffolding lines (dict key/open lines, if/elif/else lines, logger call
# openers, '_process_result(' openers) are missing as well.
        test_labels, name, gnocchi_running, aodh_running, snmp_running,
        controllers, compute_node, conf, results, error_plugins, out_plugins):
    """Execute the testcase.

    Keyword arguments:
    test_labels -- dictionary of plug-in IDs and their display names
    name -- plug-in ID, key of test_labels dictionary
    ceilometer_running -- boolean indicating whether Ceilometer is running
    compute_node -- compute node ID
    conf -- ConfigServer instance
    results -- results list
    error_plugins -- list of tuples with plug-in errors
        (plugin, error_description, is_critical):
        plugin -- plug-in ID, key of test_labels dictionary
        error_decription -- description of the error
        is_critical -- boolean value indicating whether error is critical
    """
    # Gather OVS interface/bridge configuration for the prerequisite checks.
    ovs_interfaces = conf.get_ovs_interfaces(compute_node)
    ovs_configured_interfaces = conf.get_plugin_config_values(
        compute_node, 'ovs_events', 'Interfaces')
    ovs_configured_bridges = conf.get_plugin_config_values(
        compute_node, 'ovs_stats', 'Bridges')
    ovs_existing_configured_int = [
        interface for interface in ovs_interfaces
        if interface in ovs_configured_interfaces]
    ovs_existing_configured_bridges = [
        bridge for bridge in ovs_interfaces
        if bridge in ovs_configured_bridges]
    # Per-plugin prerequisites as (passed?, description) tuples; the dict
    # key lines are missing from this copy of the file.
    plugin_prerequisites = {
        conf.is_libpqos_on_node(compute_node),
        'libpqos must be installed.')],
        conf.is_installed(compute_node, 'mcelog'),
        'mcelog must be installed.')],
        len(ovs_existing_configured_int) > 0 or len(ovs_interfaces) > 0,
        'Interfaces must be configured.')],
        len(ovs_existing_configured_bridges) > 0,
        'Bridges must be configured.')]}
    # Search criteria per plug-in for the Gnocchi and Aodh verification paths.
    gnocchi_criteria_lists = {
        'hugepages': ['hugepages'],
        'mcelog': ['mcelog'],
        'ovs_events': ['interface-ovs-system'],
        'ovs_stats': ['ovs_stats-br0.br0']}
    aodh_criteria_lists = {
        'mcelog': ['mcelog.errors'],
        'ovs_events': ['ovs_events.gauge']}
    # MIB files per plug-in (the 'snmp_mib_files = {' opener is missing).
        'intel_rdt': '/usr/share/snmp/mibs/Intel-Rdt.txt',
        'hugepages': '/usr/share/snmp/mibs/Intel-Hugepages.txt',
        'mcelog': '/usr/share/snmp/mibs/Intel-Mcelog.txt'}
    # MIB OID strings per plug-in (dict/list openers missing in this copy).
        'INTEL-RDT-MIB::rdtLlc.1',
        'INTEL-RDT-MIB::rdtIpc.1',
        'INTEL-RDT-MIB::rdtMbmRemote.1',
        'INTEL-RDT-MIB::rdtMbmLocal.1'],
        'INTEL-HUGEPAGES-MIB::hugepagesPageFree'],
        'INTEL-MCELOG-MIB::memoryCorrectedErrors.1',
        'INTEL-MCELOG-MIB::memoryCorrectedErrors.2']}
    # Pseudo-random hugepages count so the injected value changes per run.
    nr_hugepages = int(time.time()) % 10000
    # Commands that make each plug-in emit fresh data over SNMP.
        'hugepages': 'echo {} > /sys/kernel/'.format(nr_hugepages)
        + 'mm/hugepages/hugepages-2048kB/nr_hugepages',
        'mcelog': '/root/mce-inject_df < /root/corrected'}
    # CSV subdirectories to inspect per plug-in (openers missing).
        'intel_rdt-{}'.format(core)
        for core in conf.get_plugin_config_values(
            compute_node, 'intel_rdt', 'Cores')],
        'hugepages-mm-2048Kb', 'hugepages-node0-2048Kb',
        'hugepages-node1-2048Kb', 'hugepages-mm-1048576Kb',
        'hugepages-node0-1048576Kb', 'hugepages-node1-1048576Kb'],
        'mcelog-SOCKET_0_CHANNEL_0_DIMM_any',
        'mcelog-SOCKET_0_CHANNEL_any_DIMM_any'],
        'ovs_stats-br0.br0'],
    # csv_meter_categories_ipmi = get_csv_categories_for_ipmi(conf,
    csv_meter_categories = {
        'bytes-llc', 'ipc', 'memory_bandwidth-local',
        'memory_bandwidth-remote'],
        'hugepages': ['vmpage_number-free', 'vmpage_number-used'],
        # 'ipmi': csv_meter_categories_ipmi,
        'errors-corrected_memory_errors',
        'errors-uncorrected_memory_errors',
        'errors-corrected_memory_errors_in_24h',
        'errors-uncorrected_memory_errors_in_24h'],
        'if_collisions', 'if_dropped', 'if_errors', 'if_packets',
        'if_rx_errors-crc', 'if_rx_errors-frame', 'if_rx_errors-over',
        'if_rx_octets', 'if_tx_octets'],
        'ovs_events': ['gauge-link_status']}
    # Announce the test ('_print_plugin_label(' opener missing).
        test_labels[name] if name in test_labels else name,
        compute_node.get_name())
    # Critical plug-in errors abort the testcase immediately.
    plugin_critical_errors = [
        error for plugin, error, critical in error_plugins
        if plugin == name and critical]
    if plugin_critical_errors:
        # NOTE(review): .format(name) has no placeholder in the string below.
        logger.error('Following critical errors occurred:'.format(name))
        for error in plugin_critical_errors:
            logger.error(' * ' + error)
        # '_process_result(' opener missing in this copy.
            compute_node.get_id(), test_labels[name], False, results)
    # Non-critical errors are logged but the testcase still runs.
        error for plugin, error, critical in error_plugins
        if plugin == name and not critical]
        logger.warning('Following non-critical errors occured:')
        for error in plugin_errors:
            logger.warning(' * ' + error)
    failed_prerequisites = []
    if name in plugin_prerequisites:
        failed_prerequisites = [
            prerequisite_name for prerequisite_passed,
            prerequisite_name in plugin_prerequisites[name]
            if not prerequisite_passed]
    if failed_prerequisites:
        # logger.error( opener missing in this copy.
            '{} test will not be executed, '.format(name)
            + 'following prerequisites failed:')
        for prerequisite in failed_prerequisites:
            logger.error(' * {}'.format(prerequisite))
    # Dispatch to the verification path for the active out plug-in; the
    # branch lines selecting Gnocchi/AODH/SNMP/CSV are missing in this copy.
        plugin_interval = conf.get_plugin_interval(compute_node, name)
        res = conf.test_plugins_with_gnocchi(
            compute_node.get_id(), plugin_interval, logger,
            criteria_list=gnocchi_criteria_lists[name])
        res = conf.test_plugins_with_aodh(
            compute_node.get_id(), plugin_interval,
            # NOTE(review): 'creteria_list' looks like a typo for
            # 'criteria_list' -- confirm against test_plugins_with_aodh.
            logger, creteria_list=aodh_criteria_lists[name])
        name in snmp_mib_files and name in snmp_mib_strings \
        and tests.test_snmp_sends_data(
            conf.get_plugin_interval(compute_node, name), logger,
            SNMPClient(conf, compute_node), snmp_mib_files[name],
            snmp_mib_strings[name], snmp_in_commands[name], conf)
        res = tests.test_csv_handles_plugin_data(
            compute_node, conf.get_plugin_interval(compute_node, name),
            name, csv_subdirs[name], csv_meter_categories[name],
            logger, CSVClient(conf))
    if res and plugin_errors:
        # logger.warning( opener missing in this copy.
            'Test works, but will be reported as failure,'
            + 'because of non-critical errors.')
    # '_process_result(' opener missing in this copy.
        compute_node.get_id(), test_labels[name], res, results)
def get_results_for_ovs_events(
        plugin_labels, plugin_name, gnocchi_running,
        compute_node, conf, results, error_plugins):
    """ Testing OVS Events with python plugin"""
    plugin_label = 'OVS events'
    res = conf.enable_ovs_events(
        compute_node, plugin_label, error_plugins, create_backup=False)
    # NOTE(review): the '_process_result(' opener for the call below is
    # missing from this copy of the file.
        compute_node.get_id(), plugin_label, res, results)
    logger.info("Results for OVS Events = {}" .format(results))
def create_ovs_bridge():
    """Create OVS bridges on compute nodes"""
    # NOTE(review): the remaining get_handler() arguments and the
    # 'for node in nodes:' line are missing from this copy of the file.
    handler = factory.Factory.get_handler('apex',
    nodes = handler.get_nodes()
        if node.is_compute():
            # Create the br0 bridge and expose the OVSDB manager socket that
            # the ovs_stats/ovs_events collectd plug-ins connect to.
            node.run_cmd('sudo ovs-vsctl add-br br0')
            node.run_cmd('sudo ovs-vsctl set-manager ptcp:6640')
    logger.info('OVS Bridges created on compute nodes')
def mcelog_install():
    """Install mcelog on compute nodes."""
    # NOTE(review): the remaining get_handler() arguments, the 'for node in
    # nodes:' line and several logger/run_cmd call openers are missing from
    # this copy of the file.
    _print_label('Enabling mcelog on compute nodes')
    handler = factory.Factory.get_handler('apex',
    nodes = handler.get_nodes()
        if node.is_compute():
            centos_release = node.run_cmd('uname -r')
            # mce-inject only works against this specific kernel build.
            if '3.10.0-514.26.2.el7.x86_64' not in centos_release:
                # NOTE(review): '{1}' below with (apparently) a single format
                # argument would raise IndexError -- verify the placeholder
                # index against the (missing) argument line.
                    'Mcelog will not be enabled '
                    + 'on node-{0}, '.format(node.get_dict()['id'])
                    + 'unsupported CentOS release found ({1}).'.format(
                    'Checking if mcelog is enabled'
                    + ' on node-{}...'.format(node.get_dict()['id']))
                res = node.run_cmd('ls')
                # NOTE(review): "'mce-inject_ea' and 'corrected' in res" only
                # tests 'corrected' -- a non-empty string literal is always
                # truthy; likely meant
                # "'mce-inject_ea' in res and 'corrected' in res".
                if 'mce-inject_ea' and 'corrected' in res:
                        'Mcelog seems to be already installed '
                        + 'on node-{}.'.format(node.get_dict()['id']))
                    node.run_cmd('sudo modprobe mce-inject')
                    node.run_cmd('sudo ./mce-inject_ea < corrected')
                        'Mcelog will be enabled on node-{}...'.format(
                            node.get_dict()['id']))
                    # Copy the injector binary over, then craft a fake
                    # corrected-memory-error record to inject.
                        '/usr/local/lib/python2.7/dist-packages/baro_tests/'
                        + 'mce-inject_ea', 'mce-inject_ea')
                    node.run_cmd('chmod a+x mce-inject_ea')
                    node.run_cmd('echo "CPU 0 BANK 0" > corrected')
                        'echo "STATUS 0xcc00008000010090" >>'
                        'echo "ADDR 0x0010FFFFFFF" >> corrected')
                    node.run_cmd('sudo modprobe mce-inject')
                    node.run_cmd('sudo ./mce-inject_ea < corrected')
    logger.info('Mcelog is installed on all compute nodes')
# NOTE(review): the 'def mcelog_delete():' header and the 'for node in
# nodes:' line are missing from this copy of the file.
    """Uninstall mcelog from compute nodes."""
    handler = factory.Factory.get_handler(
        'apex', APEX_IP, APEX_USER, APEX_PKEY)
    nodes = handler.get_nodes()
        if node.is_compute():
            output = node.run_cmd('ls')
            # Remove the injector binary and the fake error record, if any.
            if 'mce-inject_ea' in output:
                node.run_cmd('rm mce-inject_ea')
            if 'corrected' in output:
                node.run_cmd('rm corrected')
            node.run_cmd('sudo systemctl restart mcelog')
    logger.info('Mcelog is deleted from all compute nodes')
# NOTE(review): the 'def get_ssh_keys():' header, its docstring, the
# logger.info( opener and the 'else:' line are missing from this copy of
# the file.
    # Ensure the local key directory exists, then fetch the installer's
    # private key if we do not have it yet.
    if not os.path.isdir(ID_RSA_DST_DIR):
        os.makedirs(ID_RSA_DST_DIR)
    if not os.path.isfile(ID_RSA_DST):
            "RSA key file {} doesn't exist".format(ID_RSA_DST)
            + ", it will be downloaded from installer node.")
        handler = factory.Factory.get_handler(
            'apex', APEX_IP, APEX_USER, APEX_PKEY)
        apex = handler.get_installer_node()
        apex.get_file(ID_RSA_SRC, ID_RSA_DST)
        logger.info("RSA key file {} exists.".format(ID_RSA_DST))
# NOTE(review): the 'def _check_logger():' header and the 'global logger'
# declaration appear to be missing from this copy of the file.
    """Check whether there is global logger available and if not, define one."""
    if 'logger' not in globals():
        # NOTE(review): 'logger.Logger(...)' reads the name 'logger' while
        # defining it -- presumably a logger *module* import is shadowed
        # here; confirm against the full file's imports.
        logger = logger.Logger("barometercollectd").getLogger()
def main(bt_logger=None):
    """Check each compute node sends gnocchi metrics.

    Keyword arguments:
    bt_logger -- logger instance

    NOTE(review): many scaffolding lines (if/elif/else branches, logger call
    openers, dict openers, accumulator assignments) are missing from this
    copy of the file; comments below are hedged accordingly.
    """
    # Silence noisy third-party loggers.
    logging.getLogger("paramiko").setLevel(logging.WARNING)
    logging.getLogger("stevedore").setLevel(logging.WARNING)
    logging.getLogger("opnfv.deployment.manager").setLevel(logging.WARNING)
    if bt_logger is None:
    _print_label("Starting barometer tests suite")
    conf = config_server.ConfigServer(APEX_IP, APEX_USER, logger)
    controllers = conf.get_controllers()
    if len(controllers) == 0:
        logger.error('No controller nodes found!')
    computes = conf.get_computes()
    if len(computes) == 0:
        logger.error('No compute nodes found!')
    # Describe the discovered topology.
        'Display of Control and Compute nodes available in the set up')
    logger.info('controllers: {}'.format([('{0}: {1} ({2})'.format(
        node.get_id(), node.get_name(),
        node.get_ip())) for node in controllers]))
    logger.info('computes: {}'.format([('{0}: {1} ({2})'.format(
        node.get_id(), node.get_name(), node.get_ip()))
        for node in computes]))
    gnocchi_running_on_con = False
    aodh_running_on_con = False
    _print_label('Testing Gnocchi, AODH and SNMP on controller nodes')
    for controller in controllers:
        # Authenticate, then accumulate per-controller "running" flags;
        # the left-hand side of the accumulations is missing in this copy.
        gnocchi_client = GnocchiClient()
        gnocchi_client.auth_token()
            gnocchi_running_on_con and conf.is_gnocchi_running(controller))
        aodh_client = AodhClient()
        aodh_client.auth_token()
            aodh_running_on_con and conf.is_aodh_running(controller))
        logger.info("Gnocchi is running on controller.")
        logger.error("Gnocchi is not running on controller.")
        logger.info("AODH is running on controller.")
        logger.error("Gnocchi is not running on Controller")
        logger.error("AODH is not running on controller.")
        logger.info("SNMP is running on controller.")
        logger.error("Gnocchi is not running on Controller")
        logger.error("AODH is not running on controller.")
        logger.error("SNMP is not running on controller.")
        logger.info("CSV will be enabled on compute nodes.")
    compute_node_names = []
    # Plug-in ID -> display name maps (dict openers missing in this copy).
        'intel_rdt': 'Intel RDT',
        'hugepages': 'Hugepages',
        'ovs_stats': 'OVS stats',
        'ovs_events': 'OVS events'}
        'gnocchi': 'Gnocchi',
    for compute_node in computes:
        node_id = compute_node.get_id()
        node_name = compute_node.get_name()
        # Default to CSV; upgraded below if Gnocchi/AODH/SNMP are usable.
        out_plugins[node_id] = 'CSV'
        compute_ids.append(node_id)
        compute_node_names.append(node_name)
        plugins_to_enable = []
        _print_label('NODE {}: Test Gnocchi Plug-in'.format(node_name))
        logger.info('Checking if gnocchi plug-in is included in compute nodes.')
        if not conf.check_gnocchi_plugin_included(compute_node):
            logger.error('Gnocchi plug-in is not included.')
                'Testcases on node {} will not be executed'.format(node_name))
        collectd_restarted, collectd_warnings = \
            conf.restart_collectd(compute_node)
            'Sleeping for {} seconds after collectd restart...'.format(
        time.sleep(sleep_time)
        if not collectd_restarted:
            for warning in collectd_warnings:
                logger.warning(warning)
                'Restart of collectd on node {} failed'.format(node_name))
                'Testcases on node {} will not be executed'.format(
            for warning in collectd_warnings:
                logger.warning(warning)
        out_plugins[node_id] = 'Gnocchi'
        logger.info("Gnocchi is active and collecting data")
        out_plugins[node_id] = 'AODH'
        # NOTE(review): "withh" typo below is in a runtime string -- fixing
        # it is out of scope for a documentation-only pass.
        logger.info("AODH withh be tested")
        _print_label('Node {}: Test AODH' .format(node_name))
        logger.info("Checking if AODH is running")
        logger.info("AODH is running")
        out_plugins[node_id] = 'SNMP'
        logger.info("SNMP will be tested.")
        _print_label('NODE {}: Test SNMP'.format(node_id))
        logger.info("Checking if SNMP is running.")
        logger.info("SNMP is running.")
        plugins_to_enable.append('csv')
        out_plugins[node_id] = 'CSV'
        logger.error("Gnocchi, AODH, SNMP are not running")
            "CSV will be enabled for verification "
            + "of test plugins.")
        if plugins_to_enable:
                'NODE {}: Enabling Test Plug-in '.format(node_name)
                + 'and Test case execution')
        if plugins_to_enable and not conf.enable_plugins(
                compute_node, plugins_to_enable, error_plugins,
                create_backup=False):
                'Failed to test plugins on node {}.'.format(node_id))
                'Testcases on node {} will not be executed'.format(
        if plugins_to_enable:
            collectd_restarted, collectd_warnings = \
                conf.restart_collectd(compute_node)
                'Sleeping for {} seconds'.format(sleep_time)
                + ' after collectd restart...')
            time.sleep(sleep_time)
        if plugins_to_enable and not collectd_restarted:
            for warning in collectd_warnings:
                logger.warning(warning)
                'Restart of collectd on node {} failed'.format(
                'Testcases on node {}'.format(node_id)
                + ' will not be executed.')
            if collectd_warnings:
                for warning in collectd_warnings:
                    logger.warning(warning)
        # Run every testcase on this node ('_exec_testcase(' opener missing).
        for plugin_name in sorted(plugin_labels.keys()):
                plugin_labels, plugin_name, gnocchi_running,
                aodh_running, snmp_running, controllers,
                compute_node, conf, results, error_plugins,
                out_plugins[node_id])
    # _print_label('NODE {}: Restoring config file'.format(node_name))
    # conf.restore_config(compute_node)
    print_overall_summary(compute_ids, plugin_labels, results, out_plugins)
    # Fail if any testcase failed, or fewer results than expected were
    # produced (some testcases never ran).
    if ((len([res for res in results if not res[2]]) > 0)
            or (len(results) < len(computes) * len(plugin_labels))):
        logger.error('Some tests have failed or have not been executed')
900 if __name__ == '__main__':