# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# Shell script tool to run puppet inside of the given docker container image.
# Uses the config file at /var/lib/docker-puppet/docker-puppet.json as a source for a JSON
# array of [config_volume, puppet_tags, manifest, config_image, [volumes]] settings
# that can be used to generate config files or run ad-hoc puppet modules
# inside of a container.
import glob
import json
import logging
import multiprocessing
import os
import subprocess
import sys
import tempfile
import time
# Root logger setup: all output goes to stdout so the deployment tooling
# that invokes this script captures it.
logger = logging.getLogger()
ch = logging.StreamHandler(sys.stdout)
if os.environ.get('DEBUG', False):
    logger.setLevel(logging.DEBUG)
    ch.setLevel(logging.DEBUG)
else:
    # Default verbosity when DEBUG is not set in the environment.
    logger.setLevel(logging.INFO)
    ch.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s %(levelname)s: '
                              '%(process)s -- %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
# The rest of this module logs through the name 'log'; bind it here so the
# file is self-consistent. NOTE(review): reconstructed from context --
# confirm against the pristine upstream file.
log = logger
# this is to match what we do in deployed-server
def short_hostname():
    """Return this host's short name, i.e. the output of ``hostname -s``."""
    subproc = subprocess.Popen(['hostname', '-s'],
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    cmd_stdout, cmd_stderr = subproc.communicate()
    # stdout carries a trailing newline; strip it before returning.
    return cmd_stdout.rstrip()
def pull_image(name):
    """Pull a docker image, retrying a few times on failure.

    Transient registry/network errors are common during deployment, so the
    pull is retried (with a short pause) up to 5 attempts before giving up.

    :param name: image reference to pull (e.g. registry/image:tag)
    """
    log.info('Pulling image: %s' % name)
    retval = -1
    count = 0
    # NOTE(review): retry scaffolding reconstructed -- confirm attempt
    # count and sleep interval against the pristine upstream file.
    while retval != 0:
        count += 1
        subproc = subprocess.Popen(['/usr/bin/docker', 'pull', name],
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        cmd_stdout, cmd_stderr = subproc.communicate()
        retval = subproc.returncode
        if retval != 0:
            time.sleep(3)
            log.warning('docker pull failed: %s' % cmd_stderr)
            log.warning('retrying pulling image: %s' % name)
        if count >= 5:
            log.error('Failed to pull image: %s' % name)
            break
def match_config_volume(prefix, config):
    """Return the config-volume directory mounted by *config*, or None.

    Match the mounted config volume by scanning the container's volume
    list - we can't just use the key as e.g. "novacompute" consumes
    config-data/nova.

    :param prefix: path prefix the volume source must start with
    :param config: container startup config dict (may lack 'volumes')
    """
    volumes = config.get('volumes', [])
    config_volume = None
    for v in volumes:
        if v.startswith(prefix):
            # A volume spec is "src:dst[:opts]"; the config volume is the
            # directory containing the mounted source path.
            config_volume = os.path.dirname(v.split(":")[0])
            break
    return config_volume
def get_config_hash(config_volume):
    """Return the md5 checksum stored beside *config_volume*, or None.

    The checksum file <config_volume>.md5sum is written by the puppet
    container run (see the docker-puppet.sh heredoc below); it is used as a
    salt to restart containers when their config changes.
    """
    hashfile = "%s.md5sum" % config_volume
    log.debug("Looking for hashfile %s for config_volume %s" % (hashfile, config_volume))
    hash_data = None
    if os.path.isfile(hashfile):
        log.debug("Got hashfile %s for config_volume %s" % (hashfile, config_volume))
        with open(hashfile) as f:
            hash_data = f.read().rstrip()
    return hash_data
def rm_container(name):
    """Remove the named docker container, optionally logging its diff.

    When SHOW_DIFF is set in the environment, ``docker diff`` output is
    logged before removal.  A "no such container" error from the daemon is
    expected (first run) and only logged at debug level.
    """
    if os.environ.get('SHOW_DIFF', None):
        log.info('Diffing container: %s' % name)
        subproc = subprocess.Popen(['/usr/bin/docker', 'diff', name],
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        cmd_stdout, cmd_stderr = subproc.communicate()
        if cmd_stdout:
            log.debug(cmd_stdout)
        if cmd_stderr:
            log.debug(cmd_stderr)

    log.info('Removing container: %s' % name)
    subproc = subprocess.Popen(['/usr/bin/docker', 'rm', name],
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    cmd_stdout, cmd_stderr = subproc.communicate()
    if cmd_stdout:
        log.debug(cmd_stdout)
    if cmd_stderr and \
            cmd_stderr != 'Error response from daemon: ' \
            'No such container: {}\n'.format(name):
        log.debug(cmd_stderr)
# Number of parallel puppet runs; overridable via PROCESS_COUNT.
process_count = int(os.environ.get('PROCESS_COUNT',
                                   multiprocessing.cpu_count()))

log.info('Running docker-puppet')
config_file = os.environ.get('CONFIG', '/var/lib/docker-puppet/docker-puppet.json')
log.debug('CONFIG: %s' % config_file)
with open(config_file) as f:
    json_data = json.load(f)

# To save time we support configuring 'shared' services at the same
# time. For example configuring all of the heat services
# in a single container pass makes sense and will save some time.
# To support this we merge shared settings together here.
#
# We key off of config_volume as this should be the same for a
# given group of services.  We are also now specifying the container
# in which the services should be configured.  This should match
# in all instances where the volume name is also the same.

configs = {}

for service in (json_data or []):
    if service is None:
        continue

    # Services may be expressed either as a dict or as the legacy
    # positional list; normalize dicts to the list form.
    if isinstance(service, dict):
        service = [
            service.get('config_volume'),
            service.get('puppet_tags'),
            service.get('step_config'),
            service.get('config_image'),
            service.get('volumes', []),
        ]

    config_volume = service[0] or ''
    puppet_tags = service[1] or ''
    manifest = service[2] or ''
    config_image = service[3] or ''
    volumes = service[4] if len(service) > 4 else []

    # Nothing to do without a manifest and an image to run it in.
    if not manifest or not config_image:
        continue

    log.info('config_volume %s' % config_volume)
    log.info('puppet_tags %s' % puppet_tags)
    log.info('manifest %s' % manifest)
    log.info('config_image %s' % config_image)
    log.info('volumes %s' % volumes)
    # We key off of config volume for all configs.
    if config_volume in configs:
        # Append puppet tags and manifest.
        log.info("Existing service, appending puppet tags and manifest")
        if puppet_tags:
            configs[config_volume][1] = '%s,%s' % (configs[config_volume][1],
                                                   puppet_tags)
        if manifest:
            configs[config_volume][2] = '%s\n%s' % (configs[config_volume][2],
                                                    manifest)
        if configs[config_volume][3] != config_image:
            # logging.warn() is a deprecated alias; use warning().
            log.warning("Config containers do not match even though"
                        " shared volumes are the same!")
    else:
        log.info("Adding new service")
        configs[config_volume] = service

log.info('Service compilation completed.')
def mp_puppet_config(config):
    """Configure one config_volume by running puppet in a one-shot container.

    Designed to be called through multiprocessing.Pool.map, so it takes a
    single argument: the [config_volume, puppet_tags, manifest,
    config_image, volumes] list built by the main loop.  (The former
    Python-2-only tuple-parameter signature is unpacked in the body
    instead - see PEP 3113; the Pool.map caller is unaffected.)

    Side effects: writes /var/lib/docker-puppet/docker-puppet.sh, pulls
    config_image, runs and (on success) removes a docker-puppet-<volume>
    container that populates /var/lib/config-data/<volume>.

    :returns: the container's exit code (0 on success)
    """
    (config_volume, puppet_tags, manifest, config_image, volumes) = config

    log.info('Started processing puppet configs')
    log.debug('config_volume %s' % config_volume)
    log.debug('puppet_tags %s' % puppet_tags)
    log.debug('manifest %s' % manifest)
    log.debug('config_image %s' % config_image)
    log.debug('volumes %s' % volumes)
    sh_script = '/var/lib/docker-puppet/docker-puppet.sh'

    with open(sh_script, 'w') as script_file:
        os.chmod(script_file.name, 0o755)
        script_file.write("""#!/bin/bash
set -ex
mkdir -p /etc/puppet
cp -a /tmp/puppet-etc/* /etc/puppet
rm -Rf /etc/puppet/ssl # not in use and causes permission errors
echo "{\\"step\\": $STEP}" > /etc/puppet/hieradata/docker.json
TAGS=""
if [ -n "$PUPPET_TAGS" ]; then
    TAGS="--tags \"$PUPPET_TAGS\""
fi

# Create a reference timestamp to easily find all files touched by
# puppet. The sync ensures we get all the files we want due to
# different timestamp.
touch /tmp/the_origin_of_time
sync

FACTER_hostname=$HOSTNAME FACTER_uuid=docker /usr/bin/puppet apply \
    --color=false --logdest syslog --logdest console $TAGS /etc/config.pp

# Disables archiving
if [ -z "$NO_ARCHIVE" ]; then
    archivedirs=("/etc" "/root" "/opt" "/var/lib/ironic/tftpboot" "/var/lib/ironic/httpboot" "/var/www" "/var/spool/cron" "/var/lib/nova/.ssh")
    rsync_srcs=""
    for d in "${archivedirs[@]}"; do
        if [ -d "$d" ]; then
            rsync_srcs+=" $d"
        fi
    done
    rsync -a -R --delay-updates --delete-after $rsync_srcs /var/lib/config-data/${NAME}

    # Also make a copy of files modified during puppet run
    # This is useful for debugging
    mkdir -p /var/lib/config-data/puppet-generated/${NAME}
    rsync -a -R -0 --delay-updates --delete-after \
        --files-from=<(find $rsync_srcs -newer /tmp/the_origin_of_time -not -path '/etc/puppet*' -print0) \
        / /var/lib/config-data/puppet-generated/${NAME}

    # Write a checksum of the config-data dir, this is used as a
    # salt to trigger container restart when the config changes
    tar -c -f - /var/lib/config-data/${NAME} --mtime='1970-01-01' | md5sum | awk '{print $1}' > /var/lib/config-data/${NAME}.md5sum
    tar -c -f - /var/lib/config-data/puppet-generated/${NAME} --mtime='1970-01-01' | md5sum | awk '{print $1}' > /var/lib/config-data/puppet-generated/${NAME}.md5sum
fi
""")

    with tempfile.NamedTemporaryFile() as tmp_man:
        with open(tmp_man.name, 'w') as man_file:
            man_file.write('include ::tripleo::packages\n')
            man_file.write(manifest)

        rm_container('docker-puppet-%s' % config_volume)
        pull_image(config_image)

        dcmd = ['/usr/bin/docker', 'run',
                '--user', 'root',
                '--name', 'docker-puppet-%s' % config_volume,
                '--health-cmd', '/bin/true',
                '--env', 'PUPPET_TAGS=%s' % puppet_tags,
                '--env', 'NAME=%s' % config_volume,
                '--env', 'HOSTNAME=%s' % short_hostname(),
                '--env', 'NO_ARCHIVE=%s' % os.environ.get('NO_ARCHIVE', ''),
                '--env', 'STEP=%s' % os.environ.get('STEP', '6'),
                '--volume', '%s:/etc/config.pp:ro' % tmp_man.name,
                '--volume', '/etc/puppet/:/tmp/puppet-etc/:ro',
                '--volume', '/usr/share/openstack-puppet/modules/:/usr/share/openstack-puppet/modules/:ro',
                '--volume', '%s:/var/lib/config-data/:rw' % os.environ.get('CONFIG_VOLUME_PREFIX', '/var/lib/config-data'),
                '--volume', 'tripleo_logs:/var/log/tripleo/',
                # Syslog socket for puppet logs
                '--volume', '/dev/log:/dev/log',
                # OpenSSL trusted CA injection
                '--volume', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro',
                '--volume', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro',
                '--volume', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro',
                '--volume', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro',
                # script injection
                '--volume', '%s:%s:rw' % (sh_script, sh_script)]

        for volume in volumes:
            if volume:
                dcmd.extend(['--volume', volume])

        dcmd.extend(['--entrypoint', sh_script])

        env = {}
        # NOTE(flaper87): Always copy the DOCKER_* environment variables as
        # they contain the access data for the docker daemon.
        for k in filter(lambda k: k.startswith('DOCKER'), os.environ.keys()):
            env[k] = os.environ.get(k)

        if os.environ.get('NET_HOST', 'false') == 'true':
            log.debug('NET_HOST enabled')
            dcmd.extend(['--net', 'host', '--volume',
                         '/etc/hosts:/etc/hosts:ro'])
        dcmd.append(config_image)
        log.debug('Running docker command: %s' % ' '.join(dcmd))

        subproc = subprocess.Popen(dcmd, stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE, env=env)
        cmd_stdout, cmd_stderr = subproc.communicate()
        if subproc.returncode != 0:
            log.error('Failed running docker-puppet.py for %s' % config_volume)
            if cmd_stdout:
                log.error(cmd_stdout)
            if cmd_stderr:
                log.error(cmd_stderr)
        else:
            if cmd_stdout:
                log.debug(cmd_stdout)
            if cmd_stderr:
                log.debug(cmd_stderr)
            # only delete successful runs, for debugging
            rm_container('docker-puppet-%s' % config_volume)

    log.info('Finished processing puppet configs')
    return subproc.returncode
# Holds all the information for each process to consume.
# Instead of starting them all linearly we run them using a process
# pool.  This creates a list of arguments for the above function
# to consume.
process_map = []

for config_volume in configs:

    service = configs[config_volume]
    puppet_tags = service[1] or ''
    manifest = service[2] or ''
    config_image = service[3] or ''
    volumes = service[4] if len(service) > 4 else []

    # Always constrain puppet to the file-ish resource types; append any
    # service-specific tags on top.
    if puppet_tags:
        puppet_tags = "file,file_line,concat,augeas,cron,%s" % puppet_tags
    else:
        puppet_tags = "file,file_line,concat,augeas,cron"

    process_map.append([config_volume, puppet_tags, manifest, config_image, volumes])

for p in process_map:
    log.debug('- %s' % p)

# Fire off processes to perform each configuration.  Defaults
# to the number of CPUs on the system.
p = multiprocessing.Pool(process_count)
returncodes = list(p.map(mp_puppet_config, process_map))
config_volumes = [pm[0] for pm in process_map]
for returncode, config_volume in zip(returncodes, config_volumes):
    if returncode != 0:
        log.error('ERROR configuring %s' % config_volume)
# Update the startup configs with the config hash we generated above
config_volume_prefix = os.environ.get('CONFIG_VOLUME_PREFIX', '/var/lib/config-data')
log.debug('CONFIG_VOLUME_PREFIX: %s' % config_volume_prefix)
startup_configs = os.environ.get('STARTUP_CONFIG_PATTERN', '/var/lib/tripleo-config/docker-container-startup-config-step_*.json')
log.debug('STARTUP_CONFIG_PATTERN: %s' % startup_configs)
# Glob the configured pattern; previously a hard-coded copy of the default
# was globbed here, silently ignoring STARTUP_CONFIG_PATTERN.
infiles = glob.glob(startup_configs)
for infile in infiles:
    with open(infile) as f:
        infile_data = json.load(f)

    # For each container in the startup config, find its config volume and
    # inject the volume's checksum into the container environment so the
    # container is restarted whenever its config changes.
    for k, v in infile_data.items():
        config_volume = match_config_volume(config_volume_prefix, v)
        if config_volume:
            config_hash = get_config_hash(config_volume)
            if config_hash:
                env = v.get('environment', [])
                env.append("TRIPLEO_CONFIG_HASH=%s" % config_hash)
                log.debug("Updating config hash for %s, config_volume=%s hash=%s" % (k, config_volume, config_hash))
                infile_data[k]['environment'] = env

    outfile = os.path.join(os.path.dirname(infile), "hashed-" + os.path.basename(infile))
    with open(outfile, 'w') as out_f:
        # Restrict permissions: the startup config may carry secrets.
        os.chmod(out_f.name, 0o600)
        json.dump(infile_data, out_f)