Merge "Add composable services for the Veritas HyperScale."
[apex-tripleo-heat-templates.git] / tools / yaml-validate.py
1 #!/usr/bin/env python
2 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
3 #    not use this file except in compliance with the License. You may obtain
4 #    a copy of the License at
5 #
6 #         http://www.apache.org/licenses/LICENSE-2.0
7 #
8 #    Unless required by applicable law or agreed to in writing, software
9 #    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
10 #    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
11 #    License for the specific language governing permissions and limitations
12 #    under the License.
13
14 import os
15 import sys
16 import traceback
17 import yaml
18
19
# Parameters that every service template must declare so the overcloud
# resource chains can pass them in uniformly.
required_params = ['EndpointMap', 'ServiceNetMap', 'DefaultPasswords',
                   'RoleName', 'RoleParameters', 'ServiceData']

# NOTE(bnemec): The duplication in this list is intentional.  For the
# transition to generated environments we have two copies of these files,
# so they need to be listed twice.  Once the deprecated version can be removed
# the duplicate entries can be as well.
envs_containing_endpoint_map = ['tls-endpoints-public-dns.yaml',
                                'tls-endpoints-public-ip.yaml',
                                'tls-everywhere-endpoints-dns.yaml',
                                'tls-endpoints-public-dns.yaml',
                                'tls-endpoints-public-ip.yaml',
                                'tls-everywhere-endpoints-dns.yaml']
# Template whose parameters:EndpointMap:default is the canonical endpoint
# map that the environments above are checked against.
ENDPOINT_MAP_FILE = 'endpoint_map.yaml'
# Keys that must appear in a docker service's role_data output.
REQUIRED_DOCKER_SECTIONS = ['service_name', 'docker_config', 'puppet_config',
                            'config_settings', 'step_config']
# Keys that may appear in role_data in addition to the required ones;
# anything outside these two lists is flagged as extra.
OPTIONAL_DOCKER_SECTIONS = ['docker_puppet_tasks', 'upgrade_tasks',
                            'service_config_settings', 'host_prep_tasks',
                            'metadata_settings', 'kolla_config']
# Same split for the keys inside the puppet_config section.
REQUIRED_DOCKER_PUPPET_CONFIG_SECTIONS = ['config_volume', 'step_config',
                                          'config_image']
# PEP8: no whitespace immediately inside brackets (was "[ 'puppet_tags' ... ]").
OPTIONAL_DOCKER_PUPPET_CONFIG_SECTIONS = ['puppet_tags', 'volumes']
# Mapping of parameter names to a list of the fields we should _not_ enforce
# consistency across files on.  This should only contain parameters whose
# definition we cannot change for backwards compatibility reasons.  New
# parameters to the templates should not be added to this list.
# The listed fields are overwritten with a placeholder value before the
# cross-file comparison, so differences in them are ignored.
PARAMETER_DEFINITION_EXCLUSIONS = {'ManagementNetCidr': ['default'],
                                   'ManagementAllocationPools': ['default'],
                                   'ExternalNetCidr': ['default'],
                                   'ExternalAllocationPools': ['default'],
                                   'StorageNetCidr': ['default'],
                                   'StorageAllocationPools': ['default'],
                                   'StorageMgmtNetCidr': ['default',
                                                          # FIXME
                                                          'description'],
                                   'StorageMgmtAllocationPools': ['default'],
                                   'TenantNetCidr': ['default'],
                                   'TenantAllocationPools': ['default'],
                                   'InternalApiNetCidr': ['default'],
                                   'UpdateIdentifier': ['description'],
                                   # TODO(bnemec): Address these existing
                                   # inconsistencies.
                                   'NeutronMetadataProxySharedSecret': [
                                       'description', 'hidden'],
                                   'ServiceNetMap': ['description', 'default'],
                                   'RedisPassword': ['description'],
                                   'EC2MetadataIp': ['default'],
                                   'network': ['default'],
                                   'ControlPlaneIP': ['default',
                                                      'description'],
                                   'ControlPlaneIp': ['default',
                                                      'description'],
                                   'NeutronBigswitchLLDPEnabled': ['default'],
                                   'NeutronEnableL2Pop': ['description'],
                                   'NeutronWorkers': ['description'],
                                   'TenantIpSubnet': ['description'],
                                   'ExternalNetName': ['description'],
                                   'AdminToken': ['description'],
                                   'ControlPlaneDefaultRoute': ['default'],
                                   'StorageMgmtNetName': ['description'],
                                   'ServerMetadata': ['description'],
                                   'InternalApiIpUri': ['description'],
                                   'UpgradeLevelNovaCompute': ['default'],
                                   'StorageMgmtIpUri': ['description'],
                                   'server': ['description'],
                                   'servers': ['description'],
                                   'FixedIPs': ['description'],
                                   'ExternalIpSubnet': ['description'],
                                   'NeutronBridgeMappings': ['description'],
                                   'ExtraConfig': ['description'],
                                   'InternalApiIpSubnet': ['description'],
                                   'DefaultPasswords': ['description',
                                                        'default'],
                                   'BondInterfaceOvsOptions': ['description',
                                                               'default',
                                                               'constraints'],
                                   'KeyName': ['constraints'],
                                   'TenantNetName': ['description'],
                                   'StorageIpSubnet': ['description'],
                                   'OVNSouthboundServerPort': ['description'],
                                   'ExternalInterfaceDefaultRoute':
                                       ['description', 'default'],
                                   'ExternalIpUri': ['description'],
                                   'IPPool': ['description'],
                                   'ControlPlaneNetwork': ['description'],
                                   'SSLCertificate': ['description',
                                                      'default',
                                                      'hidden'],
                                   'HostCpusList': ['default', 'constraints'],
                                   'InternalApiAllocationPools': ['default'],
                                   'NodeIndex': ['description'],
                                   'SwiftPassword': ['description'],
                                   'name': ['description', 'default'],
                                   'StorageNetName': ['description'],
                                   'ManagementNetName': ['description'],
                                   'NeutronPublicInterface': ['description'],
                                   'RoleParameters': ['description'],
                                   'AdminPassword': ['description', 'hidden'],
                                   'ManagementInterfaceDefaultRoute':
                                       ['default'],
                                   'NovaPassword': ['description'],
                                   'image': ['description', 'default'],
                                   'NeutronBigswitchAgentEnabled': ['default'],
                                   'EndpointMap': ['description', 'default'],
                                   'DockerManilaConfigImage': ['description',
                                                               'default'],
                                   'NetworkName': ['default', 'description'],
                                   'StorageIpUri': ['description'],
                                   'InternalApiNetName': ['description'],
                                   'NeutronTunnelTypes': ['description'],
                                   'replacement_policy': ['default'],
                                   'StorageMgmtIpSubnet': ['description'],
                                   'CloudDomain': ['description', 'default'],
                                   'key_name': ['default', 'description'],
                                   'EnableLoadBalancer': ['description'],
                                   'ControllerExtraConfig': ['description'],
                                   'NovaComputeExtraConfig': ['description'],
                                   'controllerExtraConfig': ['description'],
                                   'DockerSwiftConfigImage': ['default'],
                                   }
140
# Irregular CamelCase spellings that a naive per-word capitalization would
# get wrong; to_camel_case() consults this mapping first.
PREFERRED_CAMEL_CASE = {
    'ec2api': 'Ec2Api',
    'haproxy': 'HAProxy',
}
145
146
def exit_usage():
    """Print the command-line usage message and exit with status 1."""
    message = 'Usage %s <yaml file or directory>' % sys.argv[0]
    print(message)
    sys.exit(1)
150
151
def to_camel_case(string):
    """Convert a snake_case name to CamelCase.

    Irregular spellings (e.g. 'haproxy' -> 'HAProxy') come from
    PREFERRED_CAMEL_CASE; otherwise each underscore-separated word is
    capitalized and joined (an empty word becomes '_').
    """
    words = string.split('_')
    generated = ''.join(word.capitalize() or '_' for word in words)
    return PREFERRED_CAMEL_CASE.get(string, generated)
155
156
def get_base_endpoint_map(filename):
    """Return the canonical EndpointMap default from endpoint_map.yaml.

    :param filename: path to the endpoint_map.yaml template
    :returns: the parameters:EndpointMap:default mapping, or None if the
              file cannot be read or parsed (the traceback is printed).
    """
    try:
        # Use a context manager so the file handle is always closed
        # (the old open(...).read() leaked the handle).
        with open(filename) as f:
            tpl = yaml.load(f.read())
        return tpl['parameters']['EndpointMap']['default']
    except Exception:
        print(traceback.format_exc())
    return None
164
165
def get_endpoint_map_from_env(filename):
    """Extract the EndpointMap override from an environment file.

    :param filename: path to an environment yaml file
    :returns: a dict with 'file' (the path) and 'map' (the
              parameter_defaults:EndpointMap value), or None if the file
              cannot be read or parsed (the traceback is printed).
    """
    try:
        # Context manager prevents leaking the file handle.
        with open(filename) as f:
            tpl = yaml.load(f.read())
        return {
            'file': filename,
            'map': tpl['parameter_defaults']['EndpointMap']
        }
    except Exception:
        print(traceback.format_exc())
    return None
176
177
def validate_endpoint_map(base_map, env_map):
    """Return True when both endpoint maps define exactly the same keys."""
    # Dict keys are unique, so comparing key sets is equivalent to
    # comparing the sorted key lists.
    return set(base_map) == set(env_map)
180
181
def validate_hci_compute_services_default(env_filename, env_tpl):
    """Check the HCI environment's ComputeServices against the Compute role.

    ComputeServices in the hyperconverged-ceph environment must equal the
    Compute role's ServicesDefault plus CephOSD.

    :param env_filename: path of the environment file being validated
    :param env_tpl: the parsed environment template
    :returns: 0 on success, 1 after printing an ERROR on mismatch
    """
    # Copy before removing CephOSD so we don't mutate the caller's
    # template in place.
    env_services_list = list(env_tpl['parameter_defaults']['ComputeServices'])
    env_services_list.remove('OS::TripleO::Services::CephOSD')
    roles_filename = os.path.join(os.path.dirname(env_filename),
                                  '../roles/Compute.yaml')
    # Context manager prevents leaking the file handle.
    with open(roles_filename) as f:
        roles_tpl = yaml.load(f.read())
    for role in roles_tpl:
        if role['name'] == 'Compute':
            roles_services_list = role['ServicesDefault']
            if sorted(env_services_list) != sorted(roles_services_list):
                print('ERROR: ComputeServices in %s is different from '
                      'ServicesDefault in roles/Compute.yaml' % env_filename)
                return 1
    return 0
196
197
def validate_hci_computehci_role(hci_role_filename, hci_role_tpl):
    """Check the ComputeHCI role against the base Compute role.

    The ComputeHCI role's ServicesDefault must equal the Compute role's
    ServicesDefault plus CephOSD.

    :param hci_role_filename: path of the ComputeHCI role file
    :param hci_role_tpl: the parsed ComputeHCI role template
    :returns: 0 on success, 1 after printing an ERROR on mismatch
    """
    compute_role_filename = os.path.join(os.path.dirname(hci_role_filename),
                                         './Compute.yaml')
    # Context manager prevents leaking the file handle.
    with open(compute_role_filename) as f:
        compute_role_tpl = yaml.load(f.read())
    compute_role_services = compute_role_tpl[0]['ServicesDefault']
    for role in hci_role_tpl:
        if role['name'] == 'ComputeHCI':
            # Copy before removing CephOSD so the caller's template is
            # not mutated in place.
            hci_role_services = list(role['ServicesDefault'])
            hci_role_services.remove('OS::TripleO::Services::CephOSD')
            if sorted(hci_role_services) != sorted(compute_role_services):
                # Bug fix: the two literals previously joined as
                # "different fromServicesDefault" (missing space).
                print('ERROR: ServicesDefault in %s is different from '
                      'ServicesDefault in roles/Compute.yaml'
                      % hci_role_filename)
                return 1
    return 0
212
213
def validate_mysql_connection(settings):
    """Check config_settings for mysql connection uris missing bind_address.

    Recursively walks the arbitrarily nested settings structure looking for
    dict keys ending in 'connection' whose value references the
    MysqlInternal endpoint.  Such uris must contain the
    read_default_file/read_default_group tokens (which carry the client
    bind_address) to avoid issues with VIP failover.

    :param settings: the role_data config_settings structure
    :returns: 1 if an offending mysql uri is found, 0 otherwise
    """
    # Callback that matches nothing; used to disable one of search()'s
    # two hooks per call.
    no_op = lambda *args: False
    # One-element list so the nested closures can mutate the status
    # (no 'nonlocal' in Python 2).
    error_status = [0]

    def mysql_protocol(items):
        # Matches the get_param argument list that resolves the mysql
        # protocol from the endpoint map.
        return items == ['EndpointMap', 'MysqlInternal', 'protocol']

    def client_bind_address(item):
        # 'in' works for both substring tests on strings and key tests
        # on dicts, so this matches the uri token carrying the client
        # options in either representation.
        return 'read_default_file' in item and \
               'read_default_group' in item

    def validate_mysql_uri(key, items):
        # Only consider a connection if it targets mysql
        if key.endswith('connection') and \
           search(items, mysql_protocol, no_op):
            # Assume the "bind_address" option is one of
            # the token that made up the uri
            if not search(items, client_bind_address, no_op):
                error_status[0] = 1
        # Always return False so the outer search keeps visiting every
        # connection key rather than stopping at the first one.
        return False

    def search(item, check_item, check_key):
        # Generic depth-first walk: check_item runs on every value,
        # check_key on every dict key/value pair; stops at first match.
        if check_item(item):
            return True
        elif isinstance(item, list):
            for i in item:
                if search(i, check_item, check_key):
                    return True
        elif isinstance(item, dict):
            for k in item.keys():
                if check_key(k, item[k]):
                    return True
                elif search(item[k], check_item, check_key):
                    return True
        return False

    search(settings, no_op, validate_mysql_uri)
    return error_status[0]
252
253
def validate_docker_service(filename, tpl):
    """Validate the role_data produced by a docker service template.

    Checks that role_data contains all required sections and no unknown
    ones, that puppet_config is well formed and has a matching
    Docker*ConfigImage heat parameter, that any container using
    bootstrap_host_exec runs as root, and that the template declares all
    globally required parameters.

    :param filename: path of the template being validated
    :param tpl: the parsed template
    :returns: 0 on success, 1 after printing an ERROR for the first problem
    """
    if 'outputs' in tpl and 'role_data' in tpl['outputs']:
        if 'value' not in tpl['outputs']['role_data']:
            print('ERROR: invalid role_data for filename: %s'
                  % filename)
            return 1
        role_data = tpl['outputs']['role_data']['value']

        for section_name in REQUIRED_DOCKER_SECTIONS:
            if section_name not in role_data:
                print('ERROR: %s is required in role_data for %s.'
                      % (section_name, filename))
                return 1

        # Anything that is neither required nor optional is a typo or an
        # unsupported section.
        for section_name in role_data.keys():
            if (section_name not in REQUIRED_DOCKER_SECTIONS and
                    section_name not in OPTIONAL_DOCKER_SECTIONS):
                print('ERROR: %s is extra in role_data for %s.'
                      % (section_name, filename))
                return 1

        if 'puppet_config' in role_data:
            puppet_config = role_data['puppet_config']
            for key in puppet_config:
                if (key not in REQUIRED_DOCKER_PUPPET_CONFIG_SECTIONS and
                        key not in OPTIONAL_DOCKER_PUPPET_CONFIG_SECTIONS):
                    print('ERROR: %s should not be in puppet_config section.'
                          % key)
                    return 1
            for key in REQUIRED_DOCKER_PUPPET_CONFIG_SECTIONS:
                if key not in puppet_config:
                    print('ERROR: %s is required in puppet_config for %s.'
                          % (key, filename))
                    return 1

            # The config_volume name implies a Docker<Name>ConfigImage
            # parameter that must be declared by the template.
            config_volume = puppet_config.get('config_volume')
            expected_config_image_parameter = (
                "Docker%sConfigImage" % to_camel_case(config_volume))
            if config_volume and \
                    expected_config_image_parameter not in tpl.get(
                        'parameters', []):
                print('ERROR: Missing %s heat parameter for %s config_volume.'
                      % (expected_config_image_parameter, config_volume))
                return 1

        if 'docker_config' in role_data:
            docker_config = role_data['docker_config']
            for _, step in docker_config.items():
                for _, container in step.items():
                    if not isinstance(container, dict):
                        # NOTE(mandre) this skips everything that is not a dict
                        # so we may ignore some containers definitions if they
                        # are in a map_merge for example
                        continue
                    command = container.get('command', '')
                    if isinstance(command, list):
                        command = ' '.join(map(str, command))
                    if 'bootstrap_host_exec' in command \
                            and container.get('user') != 'root':
                        print('ERROR: bootstrap_host_exec needs to run '
                              'as the root user.')
                        return 1

    if 'parameters' in tpl:
        for param in required_params:
            if param not in tpl['parameters']:
                print('ERROR: parameter %s is required for %s.'
                      % (param, filename))
                return 1
    return 0
328
329
def validate_service(filename, tpl):
    """Validate the role_data output of a puppet service template.

    :param filename: path of the template being validated
    :param tpl: the parsed template
    :returns: 0 on success, 1 after printing an ERROR for the first problem
    """
    if 'outputs' in tpl and 'role_data' in tpl['outputs']:
        role_output = tpl['outputs']['role_data']
        if 'value' not in role_output:
            print('ERROR: invalid role_data for filename: %s'
                  % filename)
            return 1
        role_data = role_output['value']
        if 'service_name' not in role_data:
            print('ERROR: service_name is required in role_data for %s.'
                  % filename)
            return 1
        # service_name must match the filename, but with an underscore
        expected_name = os.path.basename(filename).split('.')[0].replace(
            "-", "_")
        if role_data['service_name'] != expected_name:
            print('ERROR: service_name should match file name for service: %s.'
                  % filename)
            return 1
        # if service connects to mysql, the uri should use option
        # bind_address to avoid issues with VIP failover
        if 'config_settings' in role_data:
            if validate_mysql_connection(role_data['config_settings']):
                print('ERROR: mysql connection uri should use option '
                      'bind_address')
                return 1
    if 'parameters' in tpl:
        declared = tpl['parameters']
        for required in required_params:
            if required not in declared:
                print('ERROR: parameter %s is required for %s.'
                      % (required, filename))
                return 1
    return 0
360
361
def validate(filename, param_map):
    """Validate a Heat template

    :param filename: The path to the file to validate
    :param param_map: A dict which will be populated with the details of the
                      parameters in the template.  The dict will have the
                      following structure:

                          {'ParameterName': [
                               {'filename': ./file1.yaml,
                                'data': {'description': '',
                                         'type': string,
                                         'default': '',
                                         ...}
                                },
                               {'filename': ./file2.yaml,
                                'data': {'description': '',
                                         'type': string,
                                         'default': '',
                                         ...}
                                },
                                ...
                           ]}
    :returns: 0 when the template passes all applicable checks, 1 otherwise.
    """
    print('Validating %s' % filename)
    retval = 0
    try:
        # Context manager so the file handle is closed even on parse
        # errors (the old open(...).read() leaked it).
        with open(filename) as f:
            tpl = yaml.load(f.read())

        # The template alias version should be used instead a date, this validation
        # will be applied to all templates not just for those in the services folder.
        if 'heat_template_version' in tpl and not str(tpl['heat_template_version']).isalpha():
            print('ERROR: heat_template_version needs to be the release alias not a date: %s'
                  % filename)
            return 1

        # NOTE(review): the branches below overwrite retval rather than
        # OR-ing it; that is fine while the path patterns are mutually
        # exclusive -- confirm before adding overlapping checks.

        # qdr aliases rabbitmq service to provide alternative messaging backend
        if (filename.startswith('./puppet/services/') and
                filename not in ['./puppet/services/qdr.yaml']):
            retval = validate_service(filename, tpl)

        if filename.startswith('./docker/services/'):
            retval = validate_docker_service(filename, tpl)

        if filename.endswith('hyperconverged-ceph.yaml'):
            retval = validate_hci_compute_services_default(filename, tpl)

        if filename.startswith('./roles/ComputeHCI.yaml'):
            retval = validate_hci_computehci_role(filename, tpl)

    except Exception:
        print(traceback.format_exc())
        return 1
    # yaml is OK, now walk the parameters and output a warning for unused ones
    if 'heat_template_version' in tpl:
        for p, data in tpl.get('parameters', {}).items():
            definition = {'data': data, 'filename': filename}
            param_map.setdefault(p, []).append(definition)
            if p in required_params:
                continue
            # Heuristic: a parameter counts as used if its quoted name
            # appears anywhere in the stringified resources or outputs.
            str_p = '\'%s\'' % p
            in_resources = str_p in str(tpl.get('resources', {}))
            in_outputs = str_p in str(tpl.get('outputs', {}))
            if not in_resources and not in_outputs:
                print('Warning: parameter %s in template %s '
                      'appears to be unused' % (p, filename))

    return retval
430
# Script entry point: at least one yaml file or directory argument is
# required.
if len(sys.argv) < 2:
    exit_usage()

path_args = sys.argv[1:]
exit_val = 0            # final process exit status, OR of all failures
failed_files = []       # paths that failed, for the summary report
base_endpoint_map = None     # canonical map loaded from endpoint_map.yaml
env_endpoint_maps = list()   # EndpointMap copies found in environment files
param_map = {}          # parameter name -> its definitions across all files
440
# Walk each argument: directories are traversed recursively for yaml files
# (skipping .tox and .j2.yaml jinja templates); plain .yaml files are
# validated directly; anything else is a usage error.
for base_path in path_args:
    if os.path.isdir(base_path):
        for subdir, dirs, files in os.walk(base_path):
            if '.tox' in dirs:
                dirs.remove('.tox')
            for f in files:
                if f.endswith('.yaml') and not f.endswith('.j2.yaml'):
                    file_path = os.path.join(subdir, f)
                    failed = validate(file_path, param_map)
                    if failed:
                        failed_files.append(file_path)
                    exit_val |= failed
                    if f == ENDPOINT_MAP_FILE:
                        # Remember the canonical endpoint map for the
                        # consistency check further below.
                        base_endpoint_map = get_base_endpoint_map(file_path)
                    if f in envs_containing_endpoint_map:
                        env_endpoint_map = get_endpoint_map_from_env(file_path)
                        if env_endpoint_map:
                            env_endpoint_maps.append(env_endpoint_map)
    elif os.path.isfile(base_path) and base_path.endswith('.yaml'):
        failed = validate(base_path, param_map)
        if failed:
            failed_files.append(base_path)
        exit_val |= failed
    else:
        print('Unexpected argument %s' % base_path)
        exit_usage()
467
# The EndpointMap copies embedded in the tls-* environments must stay in
# sync with the canonical map in endpoint_map.yaml.  This check only runs
# when the base map and every expected environment were encountered during
# the walk; otherwise the tool itself is considered out of date.
if base_endpoint_map and \
        len(env_endpoint_maps) == len(envs_containing_endpoint_map):
    for env_endpoint_map in env_endpoint_maps:
        matches = validate_endpoint_map(base_endpoint_map,
                                        env_endpoint_map['map'])
        if not matches:
            print("ERROR: %s needs to be updated to match changes in base "
                  "endpoint map" % env_endpoint_map['file'])
            failed_files.append(env_endpoint_map['file'])
            exit_val |= 1
        else:
            print("%s matches base endpoint map" % env_endpoint_map['file'])
else:
    print("ERROR: Did not find expected number of environments containing the "
          "EndpointMap parameter.  If you meant to add or remove one of these "
          "environments then you also need to update this tool.")
    if not base_endpoint_map:
        failed_files.append(ENDPOINT_MAP_FILE)
    if len(env_endpoint_maps) != len(envs_containing_endpoint_map):
        # Report the environments that were expected but never seen.
        matched_files = set(os.path.basename(matched_env_file['file'])
                            for matched_env_file in env_endpoint_maps)
        failed_files.extend(set(envs_containing_endpoint_map) - matched_files)
    exit_val |= 1
491
# Validate that duplicate parameters defined in multiple files all have the
# same definition.
mismatch_count = 0
for p, defs in param_map.items():
    # Nothing to validate if the parameter is only defined once
    if len(defs) == 1:
        continue
    check_data = [d['data'] for d in defs]
    # Override excluded fields so they don't affect the result
    # NOTE(review): d.update() mutates the 'data' dicts held in param_map,
    # so the diagnostic output below shows 'IGNORED' for excluded fields --
    # confirm that is intended.
    exclusions = PARAMETER_DEFINITION_EXCLUSIONS.get(p, [])
    ex_dict = {}
    for field in exclusions:
        ex_dict[field] = 'IGNORED'
    for d in check_data:
        d.update(ex_dict)
    # If all items in the list are not == the first, then the check fails
    if check_data.count(check_data[0]) != len(check_data):
        mismatch_count += 1
        exit_val |= 1
        failed_files.extend([d['filename'] for d in defs])
        print('Mismatched parameter definitions found for "%s"' % p)
        print('Definitions found:')
        for d in defs:
            print('  %s:\n    %s' % (d['filename'], d['data']))
print('Mismatched parameter definitions: %d' % mismatch_count)

# Final report: list every failing file, or confirm success, then exit
# with the accumulated status (nonzero when anything failed).
if failed_files:
    print('Validation failed on:')
    for f in failed_files:
        print(f)
else:
    print('Validation successful!')
sys.exit(exit_val)