1 # -*- coding: utf-8 -*-
3 # Licensed under the Apache License, Version 2.0 (the "License"); you may
4 # not use this file except in compliance with the License. You may obtain
5 # a copy of the License at
7 # http://www.apache.org/licenses/LICENSE-2.0
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12 # License for the specific language governing permissions and limitations
14 # Patch on October 10 2017
16 """Executing test of plugins"""
import logging
import os
import time
from distutils import version

import requests

from keystoneclient.v3 import client
from opnfv.deployment import factory
# Service names looked up in the Keystone service catalog.
# AODH_NAME restored: it is referenced by AodhClient._auth_server but was
# missing from this constants block.
AODH_NAME = 'aodh'
GNOCCHI_NAME = 'gnocchi'

# SSH private key copied from the installer node when not present locally.
ID_RSA_SRC = '/root/.ssh/id_rsa'
ID_RSA_DST_DIR = '/root/.ssh'
ID_RSA_DST = ID_RSA_DST_DIR + '/id_rsa'

# Apex installer connection details. Default to '' so a missing
# INSTALLER_IP env var does not raise AttributeError on .rstrip at import.
APEX_IP = os.getenv("INSTALLER_IP", "").rstrip('\n')
# APEX_USER restored: it is referenced by mcelog_delete/get_ssh_keys/main
# but was missing from this constants block.
APEX_USER = 'root'
APEX_USER_STACK = 'stack'
APEX_PKEY = '/root/.ssh/id_rsa'
class KeystoneException(Exception):
    """Keystone exception class"""
    def __init__(self, message, exc=None, response=None):
        """
        Keyword arguments:
        message -- error message
        exc -- original exception appended to the message as the reason
        response -- HTTP response associated with the error (may be None)
        """
        # Only append a reason when an underlying exception was supplied.
        if exc:
            message += "\nReason: %s" % exc
        super(KeystoneException, self).__init__(message)
        # Keep the original exception and response available to callers.
        self.exception = exc
        self.response = response
class InvalidResponse(KeystoneException):
    """Invalid Keystone exception class"""

    def __init__(self, exc, response):
        """Wrap a bad server reply under a fixed "Invalid response" message.

        Keyword arguments:
        exc -- exception raised while processing the response
        response -- the offending response object
        """
        message = "Invalid response"
        super(InvalidResponse, self).__init__(message, exc, response)
def get_apex_nodes():
    """Return all overcloud nodes known to the Apex installer.

    NOTE(review): the original 'def' and 'return' lines were lost in
    extraction; reconstructed from the call site in CSVClient, which
    expects an iterable of node objects. Confirm against upstream.
    """
    handler = factory.Factory.get_handler('apex',
                                          APEX_IP,
                                          APEX_USER_STACK,
                                          APEX_PKEY)
    nodes = handler.get_nodes()
    return nodes
class GnocchiClient(object):
    # Gnocchi Client to authenticate and request meters
    # NOTE(review): several lines of this class (method headers, the
    # criteria branch and the try: opener) were lost in extraction;
    # reconstructed conservatively — confirm against upstream.
    def __init__(self):
        self._auth_token = None
        self._gnocchi_url = None
        self._meter_list = None

    def auth_token(self):
        """Authenticate against Keystone and return the token."""
        self._auth_server()
        return self._auth_token

    def get_gnocchi_url(self):
        """Authenticate and return the internal Gnocchi endpoint URL."""
        self._auth_server()
        return self._gnocchi_url

    def get_gnocchi_metrics(self, criteria=None):
        # Subject to change if metric gathering is different for gnocchi
        self._request_meters(criteria)
        return self._meter_list

    def _auth_server(self):
        # Request token in authentication server
        logger.debug('Connecting to the auth server {}'.format(
            os.environ['OS_AUTH_URL']))
        keystone = client.Client(username=os.environ['OS_USERNAME'],
                                 password=os.environ['OS_PASSWORD'],
                                 tenant_name=os.environ['OS_USERNAME'],
                                 auth_url=os.environ['OS_AUTH_URL'])
        self._auth_token = keystone.auth_token
        # Scan the catalog for the internal Gnocchi endpoint.
        for service in keystone.service_catalog.get_data():
            if service['name'] == GNOCCHI_NAME:
                for service_type in service['endpoints']:
                    if service_type['interface'] == 'internal':
                        self._gnocchi_url = service_type['url']

        if self._gnocchi_url is None:
            logger.warning('Gnocchi is not registered in service catalog')

    def _request_meters(self, criteria):
        """Request meter list values from ceilometer

        Keyword arguments:
        criteria -- criteria for ceilometer meter list
        """
        if criteria is None:
            url = self._gnocchi_url + ('/v2/metric?limit=400')
        else:
            url = self._gnocchi_url \
                + ('/v3/metric/%s?q.field=metric&limit=400' % criteria)
        headers = {'X-Auth-Token': self._auth_token}
        resp = requests.get(url, headers=headers)
        try:
            resp.raise_for_status()
            self._meter_list = resp.json()
        except (KeyError, ValueError, requests.exceptions.HTTPError) as err:
            raise InvalidResponse(err, resp)
class AodhClient(object):
    # AODH client to authenticate and look up the AODH endpoint.
    def __init__(self):
        self._auth_token = None
        self._aodh_url = None
        self._meter_list = None

    def auth_token(self):
        """Authenticate against Keystone and return the token."""
        self._auth_server()
        return self._auth_token

    def get_aodh_url(self):
        """Authenticate and return the internal AODH endpoint URL."""
        # BUG FIX: originally returned self._gnocchi_url (copy/paste from
        # GnocchiClient), an attribute this class never defines.
        self._auth_server()
        return self._aodh_url

    def get_aodh_metrics(self, criteria=None):
        # Subject to change if metric gathering is different for aodh.
        # NOTE(review): _request_meters is not defined on this class in the
        # visible source — presumably inherited/missing; confirm upstream.
        self._request_meters(criteria)
        return self._meter_list

    def _auth_server(self):
        # Request token in authentication server
        logger.debug('Connecting to the AODH auth server {}'.format(
            os.environ['OS_AUTH_URL']))
        keystone = client.Client(username=os.environ['OS_USERNAME'],
                                 password=os.environ['OS_PASSWORD'],
                                 tenant_name=os.environ['OS_USERNAME'],
                                 auth_url=os.environ['OS_AUTH_URL'])
        self._auth_token = keystone.auth_token
        for service in keystone.service_catalog.get_data():
            if service['name'] == AODH_NAME:
                for service_type in service['endpoints']:
                    if service_type['interface'] == 'internal':
                        # BUG FIX: originally assigned self._gnocchi_url,
                        # so the check below always logged the warning.
                        self._aodh_url = service_type['url']

        if self._aodh_url is None:
            logger.warning('Aodh is not registered in service catalog')
class CSVClient(object):
    """Client to request CSV meters"""
    # NOTE(review): many physical lines of this class were lost in
    # extraction (the body of __init__, the 'def get_csv_metrics(' header,
    # docstring delimiters, the node loop header, the date computation,
    # the try/except around value parsing, metrics.append and the return).
    # Surviving code is kept verbatim with reconstructed indentation;
    # confirm every gap against upstream before editing.
    def __init__(self, conf):
        # NOTE(review): docstring quotes and the 'self.conf = conf'
        # assignment appear to be missing; fragment kept verbatim.
        conf -- ConfigServer instance

    # NOTE(review): the 'def get_csv_metrics(' header line is missing;
    # only the continuation of its parameter list survives.
            self, compute_node, plugin_subdirectories, meter_categories):
        # Keyword arguments (from the orphaned docstring fragments below):
        compute_node -- compute node instance
        plugin_subdirectories -- list of subdirectories of plug-in
        meter_categories -- categories which will be tested

        Return list of metrics.
        compute_name = compute_node.get_name()
        nodes = get_apex_nodes()
        # NOTE(review): 'for node in nodes:' header appears to be missing.
            if compute_name == node.get_dict()['name']:
                # NOTE(review): the assignment of 'date' is missing; it is
                # used below but never set in the surviving text.
                hostname = node.run_cmd('hostname -A')
                hostname = hostname.split()[0]
                for plugin_subdir in plugin_subdirectories:
                    for meter_category in meter_categories:
                        stdout1 = node.run_cmd(
                            "tail -2 /var/lib/collectd/csv/"
                            + "{0}/{1}/{2}-{3}".format(
                                hostname, plugin_subdir,
                                meter_category, date))
                        stdout2 = node.run_cmd(
                            "tail -1 /var/lib/collectd/csv/"
                            + "{0}/{1}/{2}-{3}".format(
                                hostname, plugin_subdir,
                                meter_category, date))
                        # Storing last two values
                        # NOTE(review): the assignments of values/values2
                        # and the 'if values is None:' / logger.error(
                        # openers are missing around these fragments.
                            'Getting last two CSV entries of meter category'
                            + ' {0} in {1} subdir failed'.format(
                                meter_category, plugin_subdir))
                        elif values2 is None:
                            # NOTE(review): logger.error( opener missing.
                            'Getting last CSV entries of meter category'
                            + ' {0} in {1} subdir failed'.format(
                                meter_category, plugin_subdir))
                        # NOTE(review): 'else:' / 'try:' openers missing.
                        values = values.split(',')
                        old_value = float(values[0])
                        values2 = values2.split(',')
                        new_value = float(values2[0])
                        # NOTE(review): the metrics.append(( opener is
                        # missing; only part of its argument list survives.
                            plugin_subdir, meter_category, old_value,
def get_csv_categories_for_ipmi(conf, compute_node):
    """Get CSV metrics categories for IPMI.

    Keyword arguments:
    conf -- ConfigServer instance
    compute_node -- compute node instance

    Return list of categories.
    """
    stdout = conf.execute_command(
        "date '+%Y-%m-%d'", compute_node.get_ip())
    date = stdout[0].strip()
    categories = conf.execute_command(
        "ls /var/lib/collectd/csv/{0}.jf.intel.com/ipmi | grep {1}".format(
            compute_node.get_name(), date), compute_node.get_ip())
    # Strip the trailing '-YYYY-MM-DD' suffix (11 characters) from each
    # matching file name to obtain the bare category names.
    return [category.strip()[:-11] for category in categories]
def _process_result(compute_node, out_plugin, test, result, results_list, node):
    """Print test result and append it to results list.

    Keyword arguments:
    compute_node -- compute node ID
    out_plugin -- out plug-in name
    test -- testcase name
    result -- boolean test result
    results_list -- results list
    node -- node name
    """
    # NOTE(review): the if/else + logger openers were lost in extraction;
    # reconstructed from the surviving PASSED/FAILED message fragments.
    if result:
        logger.info(
            'Test case for {0} with {1} PASSED on {2}.'.format(
                node, out_plugin, test))
    else:
        logger.error(
            'Test case for {0} with {1} FAILED on {2}.'.format(
                node, out_plugin, test))
    results_list.append((compute_node, out_plugin, test, result))
def _print_label(label):
    """Print label on the screen

    Keyword arguments:
    label -- label string
    """
    label = label.strip()
    # Total width of the printed separator line.
    length = 70
    if label != '':
        label = ' ' + label + ' '
    length_label = len(label)
    # Use floor division: the original '/' breaks under Python 3, where it
    # returns a float that cannot multiply the '=' padding string.
    length1 = (length - length_label) // 2
    length2 = length - length_label - length1
    length1 = max(3, length1)
    length2 = max(3, length2)
    logger.info(('=' * length1) + label + ('=' * length2))
def _print_plugin_label(plugin, node_name):
    """Print plug-in label.

    Keyword arguments:
    plugin -- plug-in name
    node_name -- node name
    """
    logger.info(
        'Node {0}: Plug-in {1} Test case execution'.format(node_name, plugin))
307 def _print_final_result_of_plugin(
308 plugin, compute_ids, results, out_plugins, out_plugin):
309 """Print final results of plug-in.
312 plugin -- plug-in name
313 compute_ids -- list of compute node IDs
314 results -- results list
315 out_plugins -- list of out plug-ins
316 out_plugin -- used out plug-in
319 for id in compute_ids:
320 if out_plugin == 'Gnocchi':
321 if (id, out_plugin, plugin, True) in results:
322 print_line += ' PASS |'
323 elif (id, out_plugin, plugin, False) in results:
324 print_line += ' FAIL |'
326 print_line += ' SKIP |'
327 elif out_plugin == 'AODH':
328 if (id, out_plugin, plugin, True) in results:
329 print_line += ' PASS |'
330 elif (id, out_plugin, plugin, False) in results:
331 print_line += ' FAIL |'
333 print_line += ' SKIP |'
334 elif out_plugin == 'SNMP':
335 if (id, out_plugin, plugin, True) in results:
336 print_line += ' PASS |'
337 elif (id, out_plugin, plugin, False) in results:
338 print_line += ' FAIL |'
340 print_line += ' SKIP |'
341 elif out_plugin == 'CSV':
342 if (id, out_plugin, plugin, True) in results:
343 print_line += ' PASS |'
344 elif (id, out_plugin, plugin, False) in results:
345 print_line += ' FAIL |'
347 print_line += ' SKIP |'
349 print_line += ' SKIP |'
def print_overall_summary(
        compute_ids, tested_plugins, aodh_plugins, results, out_plugins):
    """Print overall summary table.

    Keyword arguments:
    compute_ids -- list of compute IDs
    tested_plugins -- list of plug-ins
    aodh_plugins -- list of AODH plug-ins
    results -- results list
    out_plugins -- list of used out plug-ins
    """
    # NOTE(review): many physical lines of this function were lost in
    # extraction (several 'logger.info(' openers, the right-hand sides of
    # the out_plugin_result assignments, branch openers). Surviving code is
    # kept verbatim with reconstructed indentation; confirm against
    # upstream before editing. Also note the Python-2 integer division
    # ('/') below, which yields a float under Python 3 and would break the
    # ' ' * n padding expressions.
    compute_node_names = ['Node-{}'.format(i) for i in range(
    # NOTE(review): the closing of the range(...) call is missing.
    all_computes_in_line = ''
    for compute in compute_node_names:
        all_computes_in_line += '| ' + compute + (' ' * (7 - len(compute)))
    line_of_nodes = '| Test ' + all_computes_in_line + '|'
    logger.info('=' * 70)
    logger.info('+' + ('-' * ((9 * len(compute_node_names))+16)) + '+')
    # NOTE(review): 'logger.info(' opener missing for the next fragment.
        '|' + ' ' * ((9*len(compute_node_names))/2)
    # NOTE(review): intervening lines missing.
        9*len(compute_node_names) - (9*len(compute_node_names))/2)
    # NOTE(review): 'logger.info(' opener missing.
        '+' + ('-' * 16) + '+' + (('-' * 8) + '+') * len(compute_node_names))
    logger.info(line_of_nodes)
    # NOTE(review): 'logger.info(' opener missing.
        '+' + ('-' * 16) + '+' + (('-' * 8) + '+') * len(compute_node_names))
    # Deduplicated, ordered list of out plug-ins actually used on any node.
    out_plugins_print = []
    out_plugins_print1 = []
    for key in out_plugins.keys():
        if 'Gnocchi' in out_plugins[key]:
            out_plugins_print1.append('Gnocchi')
        if 'AODH' in out_plugins[key]:
            out_plugins_print1.append('AODH')
        if 'SNMP' in out_plugins[key]:
            out_plugins_print1.append('SNMP')
        if 'CSV' in out_plugins[key]:
            out_plugins_print1.append('CSV')
    for i in out_plugins_print1:
        if i not in out_plugins_print:
            out_plugins_print.append(i)
    for out_plugin in out_plugins_print:
        output_plugins_line = ''
        for id in compute_ids:
            out_plugin_result = '----'
            if out_plugin == 'Gnocchi':
                out_plugin_result = \
                # NOTE(review): right-hand side of this assignment missing.
            elif out_plugin == 'AODH':
                out_plugin_result = \
                # NOTE(review): right-hand side missing.
            elif out_plugin == 'SNMP':
                out_plugin_result = \
                # NOTE(review): right-hand side missing.
            elif out_plugin == 'CSV':
                out_plugin_result = \
                    # NOTE(review): only the tail of this PASS/FAIL
                    # comprehension survives.
                    plugin for comp_id, out_pl, plugin, res in results
                    if comp_id == id and res] else 'FAIL'
            # NOTE(review): 'else:' opener missing.
                out_plugin_result = \
                # NOTE(review): right-hand side missing.
            output_plugins_line += '| ' + out_plugin_result + ' '
        # NOTE(review): 'logger.info(' opener missing.
            '| OUT:{}'.format(out_plugin) + (' ' * (11 - len(out_plugin)))
            + output_plugins_line + '|')
        if out_plugin == 'AODH':
            for plugin in sorted(aodh_plugins.values()):
                line_plugin = _print_final_result_of_plugin(
                    plugin, compute_ids, results, out_plugins, out_plugin)
                # NOTE(review): 'logger.info(' opener missing.
                '| IN:{}'.format(plugin) + (' ' * (11-len(plugin)))
        # NOTE(review): branch opener (likely 'else:') missing before the
        # following loop over tested_plugins.
        for plugin in sorted(tested_plugins.values()):
            line_plugin = _print_final_result_of_plugin(
                plugin, compute_ids, results, out_plugins, out_plugin)
            # NOTE(review): 'logger.info(' opener missing.
            '| IN:{}'.format(plugin) + (' ' * (11-len(plugin)))
    # NOTE(review): 'logger.info(' opener missing.
        '+' + ('-' * 16) + '+'
        + (('-' * 8) + '+') * len(compute_node_names))
    logger.info('=' * 70)
# NOTE(review): the 'def _exec_testcase(' header of this function was lost
# in extraction, as were many statement openers (dict names, 'logger.*('
# and '_process_result(' calls, 'else:'/'try:' lines). Surviving code is
# kept verbatim with reconstructed indentation; confirm every gap against
# upstream before editing.
        test_labels, name, out_plugin, controllers, compute_node,
        conf, results, error_plugins, out_plugins):
    """Execute the testcase.

    Keyword arguments:
    test_labels -- dictionary of plug-in IDs and their display names
    name -- plug-in ID, key of test_labels dictionary
    ceilometer_running -- boolean indicating whether Ceilometer is running
    compute_node -- compute node ID
    conf -- ConfigServer instance
    results -- results list
    error_plugins -- list of tuples with plug-in errors
        (plugin, error_description, is_critical):
        plugin -- plug-in ID, key of test_labels dictionary
        error_description -- description of the error
        is_critical -- boolean value indicating whether error is critical
    """
    ovs_interfaces = conf.get_ovs_interfaces(compute_node)
    ovs_configured_interfaces = conf.get_plugin_config_values(
        compute_node, 'ovs_events', 'Interfaces')
    ovs_configured_bridges = conf.get_plugin_config_values(
        compute_node, 'ovs_stats', 'Bridges')
    ovs_existing_configured_int = [
        interface for interface in ovs_interfaces
        if interface in ovs_configured_interfaces]
    ovs_existing_configured_bridges = [
        bridge for bridge in ovs_interfaces
        if bridge in ovs_configured_bridges]
    # Maps plug-in name -> list of (prerequisite_passed, description).
    plugin_prerequisites = {
        # NOTE(review): the "'mcelog': [(" opener line is missing.
            conf.is_mcelog_installed(compute_node, 'mcelog'),
            'mcelog must be installed.')],
        # NOTE(review): the "'ovs_events': [(" opener line is missing.
            len(ovs_existing_configured_int) > 0 or len(ovs_interfaces) > 0,
            'Interfaces must be configured.')],
        # NOTE(review): the "'ovs_stats': [(" opener line is missing.
            len(ovs_existing_configured_bridges) > 0,
            'Bridges must be configured.')]}
    gnocchi_criteria_lists = {
        'hugepages': 'hugepages',
        # NOTE(review): entries appear to be missing here.
        'ovs_events': 'interface-ovs-system',
        'ovs_stats': 'ovs_stats-br0.br0'}
    aodh_criteria_lists = {
        # NOTE(review): an entry appears to be missing here.
        'ovs_events': 'ovs_events'}
    # NOTE(review): 'snmp_mib_files = {' opener missing.
        'intel_rdt': '/usr/share/snmp/mibs/Intel-Rdt.txt',
        'hugepages': '/usr/share/snmp/mibs/Intel-Hugepages.txt',
        'mcelog': '/usr/share/snmp/mibs/Intel-Mcelog.txt'}
    # NOTE(review): 'snmp_mib_strings = {' opener missing.
        'intel_rdt': 'INTEL-RDT-MIB::rdtLlc.1',
        'hugepages': 'INTEL-HUGEPAGES-MIB::hugepagesPageFree',
        'mcelog': 'INTEL-MCELOG-MIB::memoryCorrectedErrors.1'}
    # Pseudo-random page count so repeated runs change the value.
    nr_hugepages = int(time.time()) % 10000
    # NOTE(review): 'snmp_in_commands = {' opener missing.
        'hugepages': 'echo {} > /sys/kernel/'.format(nr_hugepages)
        + 'mm/hugepages/hugepages-2048kB/nr_hugepages',
        'mcelog': '/root/mce-inject_df < /root/corrected'}
    # NOTE(review): 'csv_subdirs = {' opener and several entries missing.
        'hugepages-mm-2048Kb', 'hugepages-node0-2048Kb',],
        'mcelog-SOCKET_0_CHANNEL_0_DIMM_any',
        'mcelog-SOCKET_0_CHANNEL_any_DIMM_any'],
        'ovs_stats-br0.br0'],
    # csv_meter_categories_ipmi = get_csv_categories_for_ipmi(conf,
    csv_meter_categories = {
        # NOTE(review): an entry appears to be missing here.
        'hugepages': ['vmpage_number-free', 'vmpage_number-used'],
        # 'ipmi': csv_meter_categories_ipmi,
        # NOTE(review): "'mcelog': [" opener missing.
            'errors-corrected_memory_errors',
            'errors-uncorrected_memory_errors'],
        # NOTE(review): "'ovs_stats': [" opener missing.
            'if_dropped', 'if_errors', 'if_packets'],
        'ovs_events': ['gauge-link_status']}
    # NOTE(review): '_print_plugin_label(' opener missing.
        test_labels[name] if name in test_labels else name,
        compute_node.get_name())
    plugin_critical_errors = [
        error for plugin, error, critical in error_plugins
        if plugin == name and critical]
    if plugin_critical_errors:
        # NOTE(review): the trailing .format(name) is a no-op — the
        # message string contains no placeholder.
        logger.error('Following critical errors occurred:'.format(name))
        for error in plugin_critical_errors:
            logger.error(' * ' + error)
        # NOTE(review): '_process_result(' opener missing.
            compute_node.get_id(), out_plugin, test_labels[name], False,
            results, compute_node.get_name())
    # NOTE(review): 'else:' and 'plugin_errors = [' openers missing.
        error for plugin, error, critical in error_plugins
        if plugin == name and not critical]
        # NOTE(review): 'if plugin_errors:' opener missing.
        logger.warning('Following non-critical errors occured:')
        for error in plugin_errors:
            logger.warning(' * ' + error)
        failed_prerequisites = []
        if name in plugin_prerequisites:
            failed_prerequisites = [
                prerequisite_name for prerequisite_passed,
                prerequisite_name in plugin_prerequisites[name]
                if not prerequisite_passed]
        if failed_prerequisites:
            # NOTE(review): 'logger.error(' opener missing.
                '{} test will not be executed, '.format(name)
                + 'following prerequisites failed:')
            for prerequisite in failed_prerequisites:
                logger.error(' * {}'.format(prerequisite))
        elif "intel_rdt" == name and not conf.is_rdt_available(compute_node):
            # TODO: print log message
            logger.info("RDT is not available on virtual nodes, skipping test.")
            print("Results for {}, pre-processing".format(str(test_labels[name])))
            # NOTE(review): '_process_result(' opener missing; 'res' is not
            # assigned at this point in the surviving text.
                compute_node.get_id(), out_plugin, test_labels[name],
                res, results, compute_node.get_name())
            print("Results for {}, post-processing".format(str(test_labels[name])))
        # NOTE(review): 'else:' opener missing.
            plugin_interval = conf.get_plugin_interval(compute_node, name)
            if out_plugin == 'Gnocchi':
                res = conf.test_plugins_with_gnocchi(
                    compute_node.get_name(), plugin_interval,
                    logger, criteria_list=gnocchi_criteria_lists[name])
            if out_plugin == 'AODH':
                res = conf.test_plugins_with_aodh(
                    compute_node.get_name(), plugin_interval,
                    logger, criteria_list=aodh_criteria_lists[name])
            if out_plugin == 'SNMP':
                # NOTE(review): 'res = \' opener missing.
                    name in snmp_mib_files and name in snmp_mib_strings \
                    and conf.test_plugins_with_snmp(
                        compute_node.get_name(), plugin_interval, logger, name,
                        snmp_mib_files[name], snmp_mib_strings[name],
                        snmp_in_commands[name])
            if out_plugin == 'CSV':
                res = tests.test_csv_handles_plugin_data(
                    compute_node, conf.get_plugin_interval(compute_node, name),
                    name, csv_subdirs[name], csv_meter_categories[name],
                    logger, CSVClient(conf))
            if res and plugin_errors:
                # NOTE(review): 'logger.warning(' opener missing; also a
                # missing space after the comma in the joined message.
                    'Test works, but will be reported as failure,'
                    + 'because of non-critical errors.')
            print("Results for {}, pre-processing".format(str(test_labels[name])))
            # NOTE(review): '_process_result(' opener missing.
                compute_node.get_id(), out_plugin, test_labels[name],
                res, results, compute_node.get_name())
            print("Results for {}, post-processing".format(str(test_labels[name])))
def get_results_for_ovs_events(
        plugin_labels, plugin_name, gnocchi_running,
        compute_node, conf, results, error_plugins):
    """ Testing OVS Events with python plugin
    """
    plugin_label = 'OVS events'
    res = conf.enable_ovs_events(
        compute_node, plugin_label, error_plugins, create_backup=False)
    # NOTE(review): the '_process_result(' opener for the next fragment was
    # lost in extraction. Note the surviving call passes 4 arguments while
    # _process_result takes 6 — confirm intended call against upstream.
        compute_node.get_id(), plugin_label, res, results)
    logger.info("Results for OVS Events = {}" .format(results))
def create_ovs_bridge():
    """Create OVS bridges on compute nodes.

    NOTE(review): the get_handler argument lines and the 'for node in
    nodes:' header were lost in extraction; reconstructed from the other
    get_handler call sites in this file — confirm against upstream.
    """
    handler = factory.Factory.get_handler('apex',
                                          APEX_IP,
                                          APEX_USER_STACK,
                                          APEX_PKEY)
    nodes = handler.get_nodes()
    logger.info("Creating OVS bridges on computes nodes")
    for node in nodes:
        if node.is_compute():
            node.run_cmd('sudo ovs-vsctl add-br br0')
            node.run_cmd('sudo ovs-vsctl set-manager ptcp:6640')
    logger.info('OVS Bridges created on compute nodes')
def mcelog_install():
    """Install mcelog on compute nodes."""
    # NOTE(review): several lines of this function were lost in extraction
    # (get_handler arguments, the 'for node in nodes:' header, most
    # 'logger.*(' and some 'node.run_cmd(' openers, 'else:' branches).
    # Surviving code kept verbatim with reconstructed indentation.
    _print_label('Enabling mcelog and OVS bridges on compute nodes')
    handler = factory.Factory.get_handler('apex',
    # NOTE(review): remaining get_handler arguments missing.
    nodes = handler.get_nodes()
    mce_bin = os.path.dirname(os.path.realpath(__file__)) + '/mce-inject_ea'
    # NOTE(review): 'for node in nodes:' header missing.
        if node.is_compute():
            centos_release = node.run_cmd('uname -r')
            if version.LooseVersion(centos_release) < version.LooseVersion('3.10.0-514.26.2.el7.x86_64'):
                # NOTE(review): logger call opener missing. Also only the
                # second string receives .format(), leaving the first '{}'
                # placeholder unfilled at runtime.
                    'Mcelog will NOT be enabled on node-{}.'
                    + ' Unsupported CentOS release found ({}).'.format(
                        node.get_dict()['name'],centos_release))
            # NOTE(review): 'else:' and logger call opener missing.
                    'Checking if mcelog is enabled'
                    + ' on node-{}...'.format(node.get_dict()['name']))
                res = node.run_cmd('ls')
                # BUG(review): "'mce-inject_ea' and 'corrected' in res" —
                # the first operand is a non-empty literal, hence always
                # truthy; the intended test is
                # "'mce-inject_ea' in res and 'corrected' in res".
                if 'mce-inject_ea' and 'corrected' in res:
                    # NOTE(review): logger call opener missing.
                        'Mcelog seems to be already installed '
                        + 'on node-{}.'.format(node.get_dict()['name']))
                    node.run_cmd('sudo modprobe mce-inject')
                    node.run_cmd('sudo ./mce-inject_ea < corrected')
                # NOTE(review): 'else:' and logger call opener missing.
                        'Mcelog will be enabled '
                        + 'on node-{}...'.format(node.get_dict()['name']))
                    node.put_file(mce_bin, 'mce-inject_ea')
                    node.run_cmd('chmod a+x mce-inject_ea')
                    node.run_cmd('echo "CPU 0 BANK 0" > corrected')
                    # NOTE(review): 'node.run_cmd(' opener and the tail of
                    # this command string are missing.
                        'echo "STATUS 0xcc00008000010090" >>'
                    # NOTE(review): 'node.run_cmd(' opener missing.
                        'echo "ADDR 0x0010FFFFFFF" >> corrected')
                    node.run_cmd('sudo modprobe mce-inject')
                    node.run_cmd('sudo ./mce-inject_ea < corrected')
                    # NOTE(review): logger call opener missing.
                        'Mcelog was installed '
                        + 'on node-{}.'.format(node.get_dict()['name']))
def mcelog_delete():
    """Uninstall mcelog from compute nodes.

    NOTE(review): the 'def' header and the 'for node in nodes:' line were
    lost in extraction; reconstructed from the orphaned body — confirm the
    function name against upstream.
    """
    handler = factory.Factory.get_handler(
        'apex', APEX_IP, APEX_USER, APEX_PKEY)
    nodes = handler.get_nodes()
    for node in nodes:
        if node.is_compute():
            output = node.run_cmd('ls')
            # Remove the injection binary and sample only if present.
            if 'mce-inject_ea' in output:
                node.run_cmd('rm mce-inject_ea')
            if 'corrected' in output:
                node.run_cmd('rm corrected')
            node.run_cmd('sudo systemctl restart mcelog')
    logger.info('Mcelog is deleted from all compute nodes')
def get_ssh_keys():
    """Ensure the RSA key used to reach overcloud nodes exists locally.

    Downloads it from the installer node when missing.

    NOTE(review): the 'def' header and the 'else:' line were lost in
    extraction; reconstructed from the orphaned body — confirm the function
    name against upstream.
    """
    if not os.path.isdir(ID_RSA_DST_DIR):
        os.makedirs(ID_RSA_DST_DIR)
    if not os.path.isfile(ID_RSA_DST):
        logger.info(
            "RSA key file {} doesn't exist".format(ID_RSA_DST)
            + ", it will be downloaded from installer node.")
        handler = factory.Factory.get_handler(
            'apex', APEX_IP, APEX_USER, APEX_PKEY)
        apex = handler.get_installer_node()
        apex.get_file(ID_RSA_SRC, ID_RSA_DST)
    else:
        logger.info("RSA key file {} exists.".format(ID_RSA_DST))
# NOTE(review): the 'def _check_logger():' header and a 'global logger'
# statement appear to have been lost in extraction; fragment kept verbatim.
    """Check whether there is global logger available and if not, define one."""
    if 'logger' not in globals():
        # 'logger' on the right-hand side is expected to be a logging
        # helper module imported at the top of the file (that import was
        # also lost in extraction) — confirm against upstream.
        logger = logger.Logger("barometercollectd").getLogger()
def main(bt_logger=None):
    """Check each compute node sends gnocchi metrics.

    Keyword arguments:
    bt_logger -- logger instance
    """
    # NOTE(review): many physical lines of this function were lost in
    # extraction (logger wiring, setup calls, 'logger.*(' and
    # '_exec_testcase(' openers, branch headers, return statements).
    # Surviving code kept verbatim with reconstructed indentation; confirm
    # every gap against upstream before editing.
    logging.getLogger("paramiko").setLevel(logging.WARNING)
    logging.getLogger("stevedore").setLevel(logging.WARNING)
    logging.getLogger("opnfv.deployment.manager").setLevel(logging.WARNING)
    if bt_logger is None:
        # NOTE(review): the body of this branch (and its 'else:') missing.
    _print_label("Starting barometer tests suite")
    conf = config_server.ConfigServer(APEX_IP, APEX_USER, logger)
    controllers = conf.get_controllers()
    if len(controllers) == 0:
        logger.error('No controller nodes found!')
        # NOTE(review): an early 'return' appears to be missing.
    computes = conf.get_computes()
    if len(computes) == 0:
        logger.error('No compute nodes found!')
        # NOTE(review): an early 'return' appears to be missing.
    # NOTE(review): '_print_label(' opener missing.
        'Display of Control and Compute nodes available in the set up')
    logger.info('controllers: {}'.format([('{0}: {1}'.format(
        node.get_name(), node.get_ip())) for node in controllers]))
    logger.info('computes: {}'.format([('{0}: {1}'.format(
        node.get_name(), node.get_ip())) for node in computes]))

    gnocchi_running_on_con = False
    aodh_running_on_con = False
    # Disabling SNMP write plug-in
    # NOTE(review): the SNMP-disabling call itself is missing.
    _print_label('Testing Gnocchi and AODH plugins on nodes')

    for controller in controllers:
        # NOTE(review): 'gnocchi_running_on_con = (' opener missing.
            gnocchi_running_on_con or conf.is_gnocchi_running(controller))
        # NOTE(review): 'aodh_running_on_con = (' opener missing.
            aodh_running_on_con or conf.is_aodh_running(controller))
    # NOTE(review): compute_ids/out_plugins/results/error_plugins
    # initializations appear to be missing here.
    compute_node_names = []
    # NOTE(review): 'plugin_labels = {' opener missing.
        'intel_rdt': 'Intel RDT',
        'hugepages': 'Hugepages',
        'ovs_stats': 'OVS stats',
        'ovs_events': 'OVS events'}
    aodh_plugin_labels = {
        # NOTE(review): entries appear to be missing here.
        'ovs_events': 'OVS events'}
    for compute_node in computes:
        node_id = compute_node.get_id()
        node_name = compute_node.get_name()
        out_plugins[node_id] = []
        compute_ids.append(node_id)
        compute_node_names.append(node_name)
        plugins_to_enable = []
        # NOTE(review): 'gnocchi_running = (' opener missing.
            gnocchi_running and conf.check_gnocchi_plugin_included(
        # NOTE(review): 'aodh_running = (' opener missing.
            aodh_running and conf.check_aodh_plugin_included(compute_node))
        # logger.info("SNMP enabled on {}" .format(node_name))
        # NOTE(review): 'if gnocchi_running:'/'if aodh_running:' guards
        # are missing around the following appends.
        out_plugins[node_id].append("Gnocchi")
        out_plugins[node_id].append("AODH")
        out_plugins[node_id].append("SNMP")
        if 'Gnocchi' in out_plugins[node_id]:
            plugins_to_enable.append('csv')
            out_plugins[node_id].append("CSV")
        if plugins_to_enable:
            # NOTE(review): '_print_label(' opener missing.
                'NODE {}: Enabling Test Plug-in '.format(node_name)
                + 'and Test case execution')
        if plugins_to_enable and not conf.enable_plugins(
                compute_node, plugins_to_enable, error_plugins,
                create_backup=False):
            # NOTE(review): 'logger.error(' openers missing.
                'Failed to test plugins on node {}.'.format(node_id))
                'Testcases on node {} will not be executed'.format(
        for i in out_plugins[node_id]:
            # NOTE(review): branch header (likely "if i == 'AODH':")
            # missing before this loop.
            for plugin_name in sorted(aodh_plugin_labels.keys()):
                # NOTE(review): '_exec_testcase(' opener missing.
                    aodh_plugin_labels, plugin_name, i,
                    controllers, compute_node, conf, results,
                    error_plugins, out_plugins[node_id])
            # NOTE(review): branch header (likely "elif i == 'CSV':")
            # missing before the CSV section.
            _print_label("Node {}: Executing CSV Testcases".format(
            logger.info("Restarting collectd for CSV tests")
            collectd_restarted, collectd_warnings = \
                conf.restart_collectd(compute_node)
            # NOTE(review): 'sleep_time = ...' assignment and a
            # 'logger.info(' opener are missing here.
                'Sleeping for {} seconds'.format(sleep_time)
                + ' after collectd restart...')
            time.sleep(sleep_time)
            if not collectd_restarted:
                for warning in collectd_warnings:
                    logger.warning(warning)
                # NOTE(review): 'logger.error(' openers missing.
                    'Restart of collectd on node {} failed'.format(
                    'CSV Testcases on node {}'.format(compute_node)
                    + ' will not be executed.')
            for plugin_name in sorted(plugin_labels.keys()):
                # NOTE(review): '_exec_testcase(' opener missing.
                    plugin_labels, plugin_name, i,
                    controllers, compute_node, conf, results,
                    error_plugins, out_plugins[node_id])
            # NOTE(review): 'else:' branch header missing before this loop.
            for plugin_name in sorted(plugin_labels.keys()):
                # NOTE(review): '_exec_testcase(' opener missing.
                    plugin_labels, plugin_name, i,
                    controllers, compute_node, conf, results,
                    error_plugins, out_plugins[node_id])
    print_overall_summary(
        compute_ids, plugin_labels, aodh_plugin_labels, results, out_plugins)
    # NOTE(review): the result-checking condition and return value are
    # missing around the following error reports.
        logger.error('Some tests have failed or have not been executed')
        logger.error('Overall Result is Fail')
879 if __name__ == '__main__':