2 # Licensed under the Apache License, Version 2.0 (the "License"); you may
3 # not use this file except in compliance with the License. You may obtain
4 # a copy of the License at
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
10 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
11 # License for the specific language governing permissions and limitations
# Heat parameters that every service template must declare; both
# validate_service() and validate_docker_service() below check each
# template's 'parameters' section against this list.
required_params = ['EndpointMap', 'ServiceNetMap', 'DefaultPasswords',
                   'RoleName', 'RoleParameters', 'ServiceData']
# NOTE(bnemec): The duplication in this list is intentional. During the
# transition to generated environments there are two copies of these
# files, so each name must appear twice. Once the deprecated copies are
# removed, the duplicate entries can go as well.
envs_containing_endpoint_map = ['tls-endpoints-public-dns.yaml',
                                'tls-endpoints-public-ip.yaml',
                                'tls-everywhere-endpoints-dns.yaml'] * 2
# Template that holds the authoritative EndpointMap parameter default.
ENDPOINT_MAP_FILE = 'endpoint_map.yaml'
# role_data sections tolerated in any service template.
OPTIONAL_SECTIONS = ['workflow_tasks', 'cellv2_discovery']
# role_data sections a containerized (docker) service must define.
REQUIRED_DOCKER_SECTIONS = [
    'service_name',
    'docker_config',
    'puppet_config',
    'config_settings',
    'step_config',
]
# role_data sections a containerized (docker) service may define.
OPTIONAL_DOCKER_SECTIONS = [
    'docker_puppet_tasks',
    'upgrade_tasks',
    'post_upgrade_tasks',
    'update_tasks',
    'service_config_settings',
    'host_prep_tasks',
    'metadata_settings',
    'kolla_config',
    'logging_source',
    'logging_groups',
    'docker_config_scripts',
]
# Keys required / allowed under role_data['puppet_config'] for
# containerized services.
# NOTE(review): the required list was truncated in this copy of the file;
# 'config_image' restored per upstream — confirm against the repository.
REQUIRED_DOCKER_PUPPET_CONFIG_SECTIONS = ['config_volume', 'step_config',
                                          'config_image']
# Fixed PEP 8 E201/E202: no whitespace immediately inside brackets.
OPTIONAL_DOCKER_PUPPET_CONFIG_SECTIONS = ['puppet_tags', 'volumes']
46 # Mapping of parameter names to a list of the fields we should _not_ enforce
47 # consistency across files on. This should only contain parameters whose
48 # definition we cannot change for backwards compatibility reasons. New
49 # parameters to the templates should not be added to this list.
50 PARAMETER_DEFINITION_EXCLUSIONS = {'CephPools': ['description',
53 'ManagementNetCidr': ['default'],
54 'ManagementAllocationPools': ['default'],
55 'ExternalNetCidr': ['default'],
56 'ExternalAllocationPools': ['default'],
57 'StorageNetCidr': ['default'],
58 'StorageAllocationPools': ['default'],
59 'StorageMgmtNetCidr': ['default'],
60 'StorageMgmtAllocationPools': ['default'],
61 'TenantNetCidr': ['default'],
62 'TenantAllocationPools': ['default'],
63 'InternalApiNetCidr': ['default'],
64 'InternalApiAllocationPools': ['default'],
65 'UpdateIdentifier': ['description'],
66 'key_name': ['default'],
67 # There's one template that defines this
68 # differently, and I'm not sure if we can
70 'EC2MetadataIp': ['default'],
71 # Same as EC2MetadataIp
72 'ControlPlaneDefaultRoute': ['default'],
73 # TODO(bnemec): Address these existing
75 'ServiceNetMap': ['description', 'default'],
76 'network': ['default'],
77 'ControlPlaneIP': ['default',
79 'ControlPlaneIp': ['default',
81 'NeutronBigswitchLLDPEnabled': ['default'],
82 'NeutronWorkers': ['description'],
83 'ServerMetadata': ['description'],
84 'server': ['description'],
85 'servers': ['description'],
86 'ExtraConfig': ['description'],
87 'DefaultPasswords': ['description',
89 'BondInterfaceOvsOptions': ['description',
92 'KeyName': ['constraints'],
93 'OVNSouthboundServerPort': ['description'],
94 'ExternalInterfaceDefaultRoute':
95 ['description', 'default'],
96 'ManagementInterfaceDefaultRoute':
97 ['description', 'default'],
98 'IPPool': ['description'],
99 'SSLCertificate': ['description',
102 'HostCpusList': ['default', 'constraints'],
103 'NodeIndex': ['description'],
104 'name': ['description', 'default'],
105 'image': ['description', 'default'],
106 'NeutronBigswitchAgentEnabled': ['default'],
107 'EndpointMap': ['description', 'default'],
108 'DockerManilaConfigImage': ['description',
110 'replacement_policy': ['default'],
111 'CloudDomain': ['description', 'default'],
112 'EnableLoadBalancer': ['description'],
113 'ControllerExtraConfig': ['description'],
114 'NovaComputeExtraConfig': ['description'],
115 'controllerExtraConfig': ['description'],
116 'DockerSwiftConfigImage': ['default']
119 PREFERRED_CAMEL_CASE = {
121 'haproxy': 'HAProxy',
# Overrides for docker/puppet validation:
#   <filename>: True  explicitly enables validation
#   <filename>: False explicitly disables validation
#
# If a filename is not found in the overrides then the top-level directory
# decides which validation method is used.
VALIDATE_PUPPET_OVERRIDE = {
    # docker/services/sshd.yaml is a variation of the puppet sshd service
    './docker/services/sshd.yaml': True,
    # qdr aliases rabbitmq service to provide alternative messaging backend
    './puppet/services/qdr.yaml': False,
}
VALIDATE_DOCKER_OVERRIDE = {
    # docker/services/sshd.yaml is a variation of the puppet sshd service,
    # so it is validated as puppet (see VALIDATE_PUPPET_OVERRIDE) not docker.
    './docker/services/sshd.yaml': False,
}
142 print('Usage %s <yaml file or directory>' % sys.argv[0])
def to_camel_case(string):
    """Convert a snake_case name to CamelCase.

    Spellings registered in PREFERRED_CAMEL_CASE (e.g. HAProxy) win over
    the mechanically generated form. Empty segments (double underscores)
    are preserved as a literal underscore.
    """
    generated = ''.join(part.capitalize() or '_'
                        for part in string.split('_'))
    return PREFERRED_CAMEL_CASE.get(string, generated)
def get_base_endpoint_map(filename):
    """Return the default value of *filename*'s EndpointMap parameter.

    Prints the traceback and returns None when the file cannot be read or
    lacks parameters -> EndpointMap -> default.
    """
    try:
        # 'with' closes the handle even on parse errors (the original
        # open(...).read() leaked the file object).
        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted input; kept for consistency with the rest of this tool.
        with open(filename) as f:
            tpl = yaml.load(f.read())
        return tpl['parameters']['EndpointMap']['default']
    except Exception:
        print(traceback.format_exc())
    return None
def get_endpoint_map_from_env(filename):
    """Return {'file': filename, 'map': <EndpointMap>} for an environment.

    The 'file' key is consumed by the endpoint-map comparison in main.
    Prints the traceback and returns None when the file cannot be read or
    has no parameter_defaults -> EndpointMap.
    """
    try:
        # 'with' closes the handle even on parse errors (the original
        # open(...).read() leaked the file object).
        with open(filename) as f:
            tpl = yaml.load(f.read())
        return {'file': filename,
                'map': tpl['parameter_defaults']['EndpointMap']}
    except Exception:
        print(traceback.format_exc())
    return None
def validate_endpoint_map(base_map, env_map):
    """Return True when *env_map* defines exactly the same endpoint names
    as *base_map* (values are deliberately not compared)."""
    base_endpoints = sorted(base_map.keys())
    env_endpoints = sorted(env_map.keys())
    return base_endpoints == env_endpoints
def validate_hci_compute_services_default(env_filename, env_tpl):
    """Validate environments/hyperconverged-ceph.yaml against the Compute role.

    The environment's ComputeServices must equal the Compute role's
    ServicesDefault plus CephOSD. Returns 0 on success, 1 on mismatch.
    """
    # Copy before removing CephOSD so the caller's parsed template is not
    # mutated (the original list.remove() altered env_tpl in place).
    env_services_list = list(env_tpl['parameter_defaults']['ComputeServices'])
    env_services_list.remove('OS::TripleO::Services::CephOSD')
    roles_filename = os.path.join(os.path.dirname(env_filename),
                                  '../roles/Compute.yaml')
    # 'with' closes the handle (the original open(...).read() leaked it).
    with open(roles_filename) as f:
        roles_tpl = yaml.load(f.read())
    for role in roles_tpl:
        if role['name'] == 'Compute':
            roles_services_list = role['ServicesDefault']
            if sorted(env_services_list) != sorted(roles_services_list):
                print('ERROR: ComputeServices in %s is different from '
                      'ServicesDefault in roles/Compute.yaml' % env_filename)
                return 1
    return 0
def validate_hci_computehci_role(hci_role_filename, hci_role_tpl):
    """Validate that the ComputeHCI role is the Compute role plus CephOSD.

    Returns 0 on success, 1 on mismatch.
    """
    compute_role_filename = os.path.join(os.path.dirname(hci_role_filename),
                                         './Compute.yaml')
    # 'with' closes the handle (the original open(...).read() leaked it).
    with open(compute_role_filename) as f:
        compute_role_tpl = yaml.load(f.read())
    compute_role_services = compute_role_tpl[0]['ServicesDefault']
    for role in hci_role_tpl:
        if role['name'] == 'ComputeHCI':
            # Copy before removing CephOSD so the caller's parsed template
            # is not mutated.
            hci_role_services = list(role['ServicesDefault'])
            hci_role_services.remove('OS::TripleO::Services::CephOSD')
            if sorted(hci_role_services) != sorted(compute_role_services):
                # Bug fix: the two fragments used to concatenate to
                # "...different fromServicesDefault" — missing space added.
                print('ERROR: ServicesDefault in %s is different from '
                      'ServicesDefault in roles/Compute.yaml'
                      % hci_role_filename)
                return 1
    return 0
def search(item, check_item, check_key):
    """Recursively walk a nested structure of lists and dicts.

    Returns True as soon as check_item(value) is true for any value
    reached, or check_key(key, value) is true for any dict entry reached;
    otherwise returns False. Scalars that match neither predicate simply
    return False.
    """
    if check_item(item):
        return True
    if isinstance(item, list):
        # Short-circuits on the first matching element / sub-structure.
        return any(search(elem, check_item, check_key) for elem in item)
    if isinstance(item, dict):
        # items() avoids the original's .keys() iteration with a repeated
        # item[k] lookup per entry.
        for key, value in item.items():
            if check_key(key, value) or search(value, check_item, check_key):
                return True
    return False
def validate_mysql_connection(settings):
    """Check mysql connection URIs in a config_settings mapping.

    Any key ending in 'connection' whose value references the
    EndpointMap/MysqlInternal/protocol token must also carry the
    read_default_file/read_default_group options (which let the client
    pick up bind_address). Returns 1 on a violation, 0 otherwise.
    """
    # def instead of lambda assignment (PEP 8 E731).
    def no_op(*args):
        return False

    def mysql_protocol(items):
        return items == ['EndpointMap', 'MysqlInternal', 'protocol']

    def client_bind_address(item):
        return 'read_default_file' in item and \
            'read_default_group' in item

    # Single-element list so the nested callback can flag an error.
    error_status = [0]

    def validate_mysql_uri(key, items):
        # Only consider a connection if it targets mysql
        if key.endswith('connection') and \
                search(items, mysql_protocol, no_op):
            # Assume the "bind_address" option is one of
            # the tokens that made up the uri
            if not search(items, client_bind_address, no_op):
                error_status[0] = 1
        return False

    search(settings, no_op, validate_mysql_uri)
    return error_status[0]
249 def validate_docker_service_mysql_usage(filename, tpl):
250 no_op = lambda *args: False
253 def match_included_res(item):
254 is_config_setting = isinstance(item, list) and len(item) > 1 and \
255 item[1:] == ['role_data', 'config_settings']
256 if is_config_setting:
257 included_res.append(item[0])
258 return is_config_setting
260 def match_use_mysql_protocol(items):
261 return items == ['EndpointMap', 'MysqlInternal', 'protocol']
265 def read_all(incfile, inctpl):
266 # search for included content
267 content = inctpl['outputs']['role_data']['value'].get('config_settings',{})
268 all_content.append(content)
270 if search(content, match_included_res, no_op):
271 files = [inctpl['resources'][x]['type'] for x in included_res]
272 # parse included content
273 for r, f in zip(included_res, files):
274 # disregard class names, only consider file names
277 newfile = os.path.normpath(os.path.dirname(incfile)+'/'+f)
278 newtmp = yaml.load(open(newfile).read())
279 read_all(newfile, newtmp)
281 read_all(filename, tpl)
282 if search(all_content, match_use_mysql_protocol, no_op):
283 # ensure this service includes the mysqlclient service
284 resources = tpl['resources']
285 mysqlclient = [x for x in resources
286 if resources[x]['type'].endswith('mysql-client.yaml')]
287 if len(mysqlclient) == 0:
288 print("ERROR: containerized service %s uses mysql but "
289 "resource mysql-client.yaml is not used"
293 # and that mysql::client puppet module is included in puppet-config
294 match_mysqlclient = \
295 lambda x: x == [mysqlclient[0], 'role_data', 'step_config']
296 role_data = tpl['outputs']['role_data']
297 puppet_config = role_data['value']['puppet_config']['step_config']
298 if not search(puppet_config, match_mysqlclient, no_op):
299 print("ERROR: containerized service %s uses mysql but "
300 "puppet_config section does not include "
301 "::tripleo::profile::base::database::mysql::client"
308 def validate_docker_service(filename, tpl):
309 if 'outputs' in tpl and 'role_data' in tpl['outputs']:
310 if 'value' not in tpl['outputs']['role_data']:
311 print('ERROR: invalid role_data for filename: %s'
314 role_data = tpl['outputs']['role_data']['value']
316 for section_name in REQUIRED_DOCKER_SECTIONS:
317 if section_name not in role_data:
318 print('ERROR: %s is required in role_data for %s.'
319 % (section_name, filename))
322 for section_name in role_data.keys():
323 if section_name in REQUIRED_DOCKER_SECTIONS:
326 if section_name in OPTIONAL_DOCKER_SECTIONS:
328 elif section_name in OPTIONAL_SECTIONS:
331 print('ERROR: %s is extra in role_data for %s.'
332 % (section_name, filename))
335 if 'puppet_config' in role_data:
336 if validate_docker_service_mysql_usage(filename, tpl):
337 print('ERROR: could not validate use of mysql service for %s.'
340 puppet_config = role_data['puppet_config']
341 for key in puppet_config:
342 if key in REQUIRED_DOCKER_PUPPET_CONFIG_SECTIONS:
345 if key in OPTIONAL_DOCKER_PUPPET_CONFIG_SECTIONS:
348 print('ERROR: %s should not be in puppet_config section.'
351 for key in REQUIRED_DOCKER_PUPPET_CONFIG_SECTIONS:
352 if key not in puppet_config:
353 print('ERROR: %s is required in puppet_config for %s.'
357 config_volume = puppet_config.get('config_volume')
358 expected_config_image_parameter = "Docker%sConfigImage" % to_camel_case(config_volume)
359 if config_volume and not expected_config_image_parameter in tpl.get('parameters', []):
360 print('ERROR: Missing %s heat parameter for %s config_volume.'
361 % (expected_config_image_parameter, config_volume))
364 if 'docker_config' in role_data:
365 docker_config = role_data['docker_config']
366 for _, step in docker_config.items():
367 if not isinstance(step, dict):
368 # NOTE(mandre) this skips everything that is not a dict
369 # so we may ignore some containers definitions if they
370 # are in a map_merge for example
372 for _, container in step.items():
373 if not isinstance(container, dict):
375 command = container.get('command', '')
376 if isinstance(command, list):
377 command = ' '.join(map(str, command))
378 if 'bootstrap_host_exec' in command \
379 and container.get('user') != 'root':
380 print('ERROR: bootstrap_host_exec needs to run as the root user.')
383 if 'parameters' in tpl:
384 for param in required_params:
385 if param not in tpl['parameters']:
386 print('ERROR: parameter %s is required for %s.'
def validate_service(filename, tpl):
    """Validate a puppet service template.

    Checks that role_data has a value, that service_name is present and
    matches the file name (dashes mapped to underscores), that mysql
    connection URIs use bind_address, and that every required parameter
    is declared. Returns 0 on success, 1 on the first failure found.
    """
    if 'outputs' in tpl and 'role_data' in tpl['outputs']:
        if 'value' not in tpl['outputs']['role_data']:
            print('ERROR: invalid role_data for filename: %s'
                  % filename)
            return 1
        role_data = tpl['outputs']['role_data']['value']
        if 'service_name' not in role_data:
            print('ERROR: service_name is required in role_data for %s.'
                  % filename)
            return 1
        # service_name must match the filename, but with an underscore
        if (role_data['service_name'] !=
                os.path.basename(filename).split('.')[0].replace("-", "_")):
            print('ERROR: service_name should match file name for service: %s.'
                  % filename)
            return 1
        # if service connects to mysql, the uri should use option
        # bind_address to avoid issues with VIP failover
        if 'config_settings' in role_data and \
                validate_mysql_connection(role_data['config_settings']):
            print('ERROR: mysql connection uri should use option bind_address')
            return 1
    if 'parameters' in tpl:
        for param in required_params:
            if param not in tpl['parameters']:
                print('ERROR: parameter %s is required for %s.'
                      % (param, filename))
                return 1
    return 0
424 def validate(filename, param_map):
425 """Validate a Heat template
427 :param filename: The path to the file to validate
428 :param param_map: A dict which will be populated with the details of the
429 parameters in the template. The dict will have the
433 {'filename': ./file1.yaml,
434 'data': {'description': '',
439 {'filename': ./file2.yaml,
440 'data': {'description': '',
448 print('Validating %s' % filename)
451 tpl = yaml.load(open(filename).read())
453 # The template alias version should be used instead a date, this validation
454 # will be applied to all templates not just for those in the services folder.
455 if 'heat_template_version' in tpl and not str(tpl['heat_template_version']).isalpha():
456 print('ERROR: heat_template_version needs to be the release alias not a date: %s'
460 if VALIDATE_PUPPET_OVERRIDE.get(filename, False) or (
461 filename.startswith('./puppet/services/') and
462 VALIDATE_PUPPET_OVERRIDE.get(filename, True)):
463 retval = validate_service(filename, tpl)
465 if VALIDATE_DOCKER_OVERRIDE.get(filename, False) or (
466 filename.startswith('./docker/services/') and
467 VALIDATE_DOCKER_OVERRIDE.get(filename, True)):
468 retval = validate_docker_service(filename, tpl)
470 if filename.endswith('hyperconverged-ceph.yaml'):
471 retval = validate_hci_compute_services_default(filename, tpl)
473 if filename.startswith('./roles/ComputeHCI.yaml'):
474 retval = validate_hci_computehci_role(filename, tpl)
477 print(traceback.format_exc())
479 # yaml is OK, now walk the parameters and output a warning for unused ones
480 if 'heat_template_version' in tpl:
481 for p, data in tpl.get('parameters', {}).items():
482 definition = {'data': data, 'filename': filename}
483 param_map.setdefault(p, []).append(definition)
484 if p in required_params:
487 in_resources = str_p in str(tpl.get('resources', {}))
488 in_outputs = str_p in str(tpl.get('outputs', {}))
489 if not in_resources and not in_outputs:
490 print('Warning: parameter %s in template %s '
491 'appears to be unused' % (p, filename))
495 if len(sys.argv) < 2:
498 path_args = sys.argv[1:]
501 base_endpoint_map = None
502 env_endpoint_maps = list()
505 for base_path in path_args:
506 if os.path.isdir(base_path):
507 for subdir, dirs, files in os.walk(base_path):
511 if f.endswith('.yaml') and not f.endswith('.j2.yaml'):
512 file_path = os.path.join(subdir, f)
513 failed = validate(file_path, param_map)
515 failed_files.append(file_path)
517 if f == ENDPOINT_MAP_FILE:
518 base_endpoint_map = get_base_endpoint_map(file_path)
519 if f in envs_containing_endpoint_map:
520 env_endpoint_map = get_endpoint_map_from_env(file_path)
522 env_endpoint_maps.append(env_endpoint_map)
523 elif os.path.isfile(base_path) and base_path.endswith('.yaml'):
524 failed = validate(base_path, param_map)
526 failed_files.append(base_path)
529 print('Unexpected argument %s' % base_path)
532 if base_endpoint_map and \
533 len(env_endpoint_maps) == len(envs_containing_endpoint_map):
534 for env_endpoint_map in env_endpoint_maps:
535 matches = validate_endpoint_map(base_endpoint_map,
536 env_endpoint_map['map'])
538 print("ERROR: %s needs to be updated to match changes in base "
539 "endpoint map" % env_endpoint_map['file'])
540 failed_files.append(env_endpoint_map['file'])
543 print("%s matches base endpoint map" % env_endpoint_map['file'])
545 print("ERROR: Did not find expected number of environments containing the "
546 "EndpointMap parameter. If you meant to add or remove one of these "
547 "environments then you also need to update this tool.")
548 if not base_endpoint_map:
549 failed_files.append(ENDPOINT_MAP_FILE)
550 if len(env_endpoint_maps) != len(envs_containing_endpoint_map):
551 matched_files = set(os.path.basename(matched_env_file['file'])
552 for matched_env_file in env_endpoint_maps)
553 failed_files.extend(set(envs_containing_endpoint_map) - matched_files)
556 # Validate that duplicate parameters defined in multiple files all have the
559 for p, defs in param_map.items():
560 # Nothing to validate if the parameter is only defined once
563 check_data = [d['data'] for d in defs]
564 # Override excluded fields so they don't affect the result
565 exclusions = PARAMETER_DEFINITION_EXCLUSIONS.get(p, [])
567 for field in exclusions:
568 ex_dict[field] = 'IGNORED'
571 # If all items in the list are not == the first, then the check fails
572 if check_data.count(check_data[0]) != len(check_data):
575 failed_files.extend([d['filename'] for d in defs])
576 print('Mismatched parameter definitions found for "%s"' % p)
577 print('Definitions found:')
579 print(' %s:\n %s' % (d['filename'], d['data']))
580 print('Mismatched parameter definitions: %d' % mismatch_count)
583 print('Validation failed on:')
584 for f in failed_files:
587 print('Validation successful!')