X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?a=blobdiff_plain;f=docker%2Fdocker-puppet.py;h=132116765e724c10bfdbcffbd4c03f1aa414e787;hb=83ed275fae1b442fce10ef2b2faa12c2e62a1028;hp=eb6477371cb5f56aa43369ec9f38b908ac11c0c6;hpb=b07b4cc1a11499c01712a50bcbc5fa1aef5bef33;p=apex-tripleo-heat-templates.git

diff --git a/docker/docker-puppet.py b/docker/docker-puppet.py
index eb647737..13211676 100755
--- a/docker/docker-puppet.py
+++ b/docker/docker-puppet.py
@@ -18,13 +18,23 @@
 # that can be used to generate config files or run ad-hoc puppet modules
 # inside of a container.
 
+import glob
 import json
+import logging
 import os
+import sys
 import subprocess
 import sys
 import tempfile
 import multiprocessing
 
+log = logging.getLogger()
+log.setLevel(logging.DEBUG)
+ch = logging.StreamHandler(sys.stdout)
+ch.setLevel(logging.DEBUG)
+formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
+ch.setFormatter(formatter)
+log.addHandler(ch)
 
 # this is to match what we do in deployed-server
 def short_hostname():
@@ -36,39 +46,69 @@ def short_hostname():
 
 
 def pull_image(name):
-    print('Pulling image: %s' % name)
+    log.info('Pulling image: %s' % name)
     subproc = subprocess.Popen(['/usr/bin/docker', 'pull', name],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
     cmd_stdout, cmd_stderr = subproc.communicate()
-    print(cmd_stdout)
-    print(cmd_stderr)
+    if cmd_stdout:
+        log.debug(cmd_stdout)
+    if cmd_stderr:
+        log.debug(cmd_stderr)
+
+
+def match_config_volume(prefix, config):
+    # Match the mounted config volume - we can't just use the
+    # key as e.g "novacomute" consumes config-data/nova
+    volumes = config.get('volumes', [])
+    config_volume=None
+    for v in volumes:
+        if v.startswith(prefix):
+            config_volume = os.path.relpath(
+                v.split(":")[0], prefix).split("/")[0]
+            break
+    return config_volume
+
+
+def get_config_hash(prefix, config_volume):
+    hashfile = os.path.join(prefix, "%s.md5sum" % config_volume)
+    hash_data = None
+    if os.path.isfile(hashfile):
+        with open(hashfile) as f:
+            hash_data = f.read().rstrip()
+    return hash_data
 
 
 def rm_container(name):
     if os.environ.get('SHOW_DIFF', None):
-        print('Diffing container: %s' % name)
+        log.info('Diffing container: %s' % name)
         subproc = subprocess.Popen(['/usr/bin/docker', 'diff', name],
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
         cmd_stdout, cmd_stderr = subproc.communicate()
-        print(cmd_stdout)
-        print(cmd_stderr)
+        if cmd_stdout:
+            log.debug(cmd_stdout)
+        if cmd_stderr:
+            log.debug(cmd_stderr)
 
-    print('Removing container: %s' % name)
+    log.info('Removing container: %s' % name)
     subproc = subprocess.Popen(['/usr/bin/docker', 'rm', name],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
     cmd_stdout, cmd_stderr = subproc.communicate()
-    print(cmd_stdout)
-    print(cmd_stderr)
+    if cmd_stdout:
+        log.debug(cmd_stdout)
+    if cmd_stderr and \
+        cmd_stderr != 'Error response from daemon: ' \
+        'No such container: {}\n'.format(name):
+        log.debug(cmd_stderr)
 
 process_count = int(os.environ.get('PROCESS_COUNT',
                                    multiprocessing.cpu_count()))
 
+log.info('Running docker-puppet')
 config_file = os.environ.get('CONFIG', '/var/lib/docker-puppet/docker-puppet.json')
-print('docker-puppet')
-print('CONFIG: %s' % config_file)
+log.debug('CONFIG: %s' % config_file)
 with open(config_file) as f:
     json_data = json.load(f)
 
@@ -105,16 +145,15 @@ for service in (json_data or []):
     if not manifest or not config_image:
         continue
 
-    print('---------')
-    print('config_volume %s' % config_volume)
-    print('puppet_tags %s' % puppet_tags)
-    print('manifest %s' % manifest)
-    print('config_image %s' % config_image)
-    print('volumes %s' % volumes)
+    log.debug('config_volume %s' % config_volume)
+    log.debug('puppet_tags %s' % puppet_tags)
+    log.debug('manifest %s' % manifest)
+    log.debug('config_image %s' % config_image)
+    log.debug('volumes %s' % volumes)
     # We key off of config volume for all configs.
     if config_volume in configs:
         # Append puppet tags and manifest.
-        print("Existing service, appending puppet tags and manifest\n")
+        log.info("Existing service, appending puppet tags and manifest")
         if puppet_tags:
             configs[config_volume][1] = '%s,%s' % (configs[config_volume][1],
                                                    puppet_tags)
@@ -122,24 +161,22 @@ for service in (json_data or []):
             configs[config_volume][2] = '%s\n%s' % (configs[config_volume][2],
                                                     manifest)
         if configs[config_volume][3] != config_image:
-            print("WARNING: Config containers do not match even though"
-                  " shared volumes are the same!\n")
+            log.warn("Config containers do not match even though"
+                     " shared volumes are the same!")
     else:
-        print("Adding new service\n")
+        log.info("Adding new service")
        configs[config_volume] = service
 
-print('Service compilation completed.\n')
+log.info('Service compilation completed.')
 
 
 def mp_puppet_config((config_volume, puppet_tags, manifest, config_image, volumes)):
-    print('---------')
-    print('config_volume %s' % config_volume)
-    print('puppet_tags %s' % puppet_tags)
-    print('manifest %s' % manifest)
-    print('config_image %s' % config_image)
-    print('volumes %s' % volumes)
-    hostname = short_hostname()
-    sh_script = '/var/lib/docker-puppet/docker-puppet-%s.sh' % config_volume
+    log.debug('config_volume %s' % config_volume)
+    log.debug('puppet_tags %s' % puppet_tags)
+    log.debug('manifest %s' % manifest)
+    log.debug('config_image %s' % config_image)
+    log.debug('volumes %s' % volumes)
+    sh_script = '/var/lib/docker-puppet/docker-puppet.sh'
 
     with open(sh_script, 'w') as script_file:
         os.chmod(script_file.name, 0755)
@@ -148,43 +185,41 @@ def mp_puppet_config((config_volume, puppet_tags, manifest, config_image, volume
         mkdir -p /etc/puppet
         cp -a /tmp/puppet-etc/* /etc/puppet
         rm -Rf /etc/puppet/ssl # not in use and causes permission errors
-        echo '{"step": %(step)s}' > /etc/puppet/hieradata/docker.json
+        echo "{\\"step\\": $STEP}" > /etc/puppet/hieradata/docker.json
         TAGS=""
-        if [ -n "%(puppet_tags)s" ]; then
-            TAGS='--tags "%(puppet_tags)s"'
+        if [ -n "$PUPPET_TAGS" ]; then
+            TAGS="--tags \"$PUPPET_TAGS\""
         fi
-        FACTER_hostname=%(hostname)s FACTER_uuid=docker /usr/bin/puppet apply --verbose $TAGS /etc/config.pp
+
+        # workaround LP1696283
+        mkdir -p /etc/ssh
+        touch /etc/ssh/ssh_known_hosts
+
+        FACTER_hostname=$HOSTNAME FACTER_uuid=docker /usr/bin/puppet apply --verbose $TAGS /etc/config.pp
 
         # Disables archiving
-        if [ -z "%(no_archive)s" ]; then
-            rm -Rf /var/lib/config-data/%(name)s
-
-            # copying etc should be enough for most services
-            mkdir -p /var/lib/config-data/%(name)s/etc
-            cp -a /etc/* /var/lib/config-data/%(name)s/etc/
-
-            if [ -d /root/ ]; then
-                cp -a /root/ /var/lib/config-data/%(name)s/root/
-            fi
-            if [ -d /var/lib/ironic/tftpboot/ ]; then
-                mkdir -p /var/lib/config-data/%(name)s/var/lib/ironic/
-                cp -a /var/lib/ironic/tftpboot/ /var/lib/config-data/%(name)s/var/lib/ironic/tftpboot/
-            fi
-            if [ -d /var/lib/ironic/httpboot/ ]; then
-                mkdir -p /var/lib/config-data/%(name)s/var/lib/ironic/
-                cp -a /var/lib/ironic/httpboot/ /var/lib/config-data/%(name)s/var/lib/ironic/httpboot/
-            fi
-
-            # apache services may files placed in /var/www/
-            if [ -d /var/www/ ]; then
-                mkdir -p /var/lib/config-data/%(name)s/var/www
-                cp -a /var/www/* /var/lib/config-data/%(name)s/var/www/
-            fi
+        if [ -z "$NO_ARCHIVE" ]; then
+            archivedirs=("/etc" "/root" "/var/lib/ironic/tftpboot" "/var/lib/ironic/httpboot" "/var/www")
+            rsync_srcs=""
+            for d in "${archivedirs[@]}"; do
+                if [ -d "$d" ]; then
+                    rsync_srcs+=" $d"
+                fi
+            done
+            rsync -a -R --delay-updates --delete-after $rsync_srcs /var/lib/config-data/${NAME}
+
+            # Also make a copy of files modified during puppet run
+            # This is useful for debugging
+            mkdir -p /var/lib/config-data/puppet-generated/${NAME}
+            rsync -a -R -0 --delay-updates --delete-after \
+              --files-from=<(find $rsync_srcs -newer /etc/ssh/ssh_known_hosts -print0) \
+              / /var/lib/config-data/puppet-generated/${NAME}
+
+            # Write a checksum of the config-data dir, this is used as a
+            # salt to trigger container restart when the config changes
+            tar cf - /var/lib/config-data/${NAME} | md5sum | awk '{print $1}' > /var/lib/config-data/${NAME}.md5sum
         fi
-        """ % {'puppet_tags': puppet_tags, 'name': config_volume,
-               'hostname': hostname,
-               'no_archive': os.environ.get('NO_ARCHIVE', ''),
-               'step': os.environ.get('STEP', '6')})
+        """)
 
     with tempfile.NamedTemporaryFile() as tmp_man:
         with open(tmp_man.name, 'w') as man_file:
@@ -197,11 +232,22 @@ def mp_puppet_config((config_volume, puppet_tags, manifest, config_image, volume
         dcmd = ['/usr/bin/docker', 'run',
                 '--user', 'root',
                 '--name', 'docker-puppet-%s' % config_volume,
+                '--env', 'PUPPET_TAGS=%s' % puppet_tags,
+                '--env', 'NAME=%s' % config_volume,
+                '--env', 'HOSTNAME=%s' % short_hostname(),
+                '--env', 'NO_ARCHIVE=%s' % os.environ.get('NO_ARCHIVE', ''),
+                '--env', 'STEP=%s' % os.environ.get('STEP', '6'),
                 '--volume', '%s:/etc/config.pp:ro' % tmp_man.name,
                 '--volume', '/etc/puppet/:/tmp/puppet-etc/:ro',
                 '--volume', '/usr/share/openstack-puppet/modules/:/usr/share/openstack-puppet/modules/:ro',
                 '--volume', '/var/lib/config-data/:/var/lib/config-data/:rw',
                 '--volume', 'tripleo_logs:/var/log/tripleo/',
+                # OpenSSL trusted CA injection
+                '--volume', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro',
+                '--volume', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro',
+                '--volume', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro',
+                '--volume', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro',
+                # script injection
                 '--volume', '%s:%s:rw' % (sh_script, sh_script) ]
 
         for volume in volumes:
@@ -217,19 +263,24 @@ def mp_puppet_config((config_volume, puppet_tags, manifest, config_image, volume
             env[k] = os.environ.get(k)
 
         if os.environ.get('NET_HOST', 'false') == 'true':
-            print('NET_HOST enabled')
+            log.debug('NET_HOST enabled')
             dcmd.extend(['--net', 'host', '--volume',
                          '/etc/hosts:/etc/hosts:ro'])
         dcmd.append(config_image)
 
+        log.debug('Running docker command: %s' % ' '.join(dcmd))
        subproc = subprocess.Popen(dcmd, stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE, env=env)
         cmd_stdout, cmd_stderr = subproc.communicate()
-        print(cmd_stdout)
-        print(cmd_stderr)
+        if cmd_stdout:
+            log.debug(cmd_stdout)
+        if cmd_stderr:
+            log.debug(cmd_stderr)
         if subproc.returncode != 0:
-            print('Failed running docker-puppet.py for %s' % config_volume)
-        rm_container('docker-puppet-%s' % config_volume)
+            log.error('Failed running docker-puppet.py for %s' % config_volume)
+        else:
+            # only delete successful runs, for debugging
+            rm_container('docker-puppet-%s' % config_volume)
         return subproc.returncode
 
 # Holds all the information for each process to consume.
@@ -254,9 +305,43 @@ for config_volume in configs: process_map.append([config_volume, puppet_tags, manifest, config_image, volumes]) for p in process_map: - print '--\n%s' % p + log.debug('- %s' % p) # Fire off processes to perform each configuration. Defaults # to the number of CPUs on the system. p = multiprocessing.Pool(process_count) -p.map(mp_puppet_config, process_map) +returncodes = list(p.map(mp_puppet_config, process_map)) +config_volumes = [pm[0] for pm in process_map] +success = True +for returncode, config_volume in zip(returncodes, config_volumes): + if returncode != 0: + log.error('ERROR configuring %s' % config_volume) + success = False + + +# Update the startup configs with the config hash we generated above +config_volume_prefix = os.environ.get('CONFIG_VOLUME_PREFIX', '/var/lib/config-data') +log.debug('CONFIG_VOLUME_PREFIX: %s' % config_volume_prefix) +startup_configs = os.environ.get('STARTUP_CONFIG_PATTERN', '/var/lib/tripleo-config/docker-container-startup-config-step_*.json') +log.debug('STARTUP_CONFIG_PATTERN: %s' % startup_configs) +infiles = glob.glob('/var/lib/tripleo-config/docker-container-startup-config-step_*.json') +for infile in infiles: + with open(infile) as f: + infile_data = json.load(f) + + for k, v in infile_data.iteritems(): + config_volume = match_config_volume(config_volume_prefix, v) + if config_volume: + config_hash = get_config_hash(config_volume_prefix, config_volume) + if config_hash: + env = v.get('environment', []) + env.append("TRIPLEO_CONFIG_HASH=%s" % config_hash) + log.debug("Updating config hash for %s, config_volume=%s hash=%s" % (k, config_volume, config_hash)) + infile_data[k]['environment'] = env + + outfile = os.path.join(os.path.dirname(infile), "hashed-" + os.path.basename(infile)) + with open(outfile, 'w') as out_f: + json.dump(infile_data, out_f) + +if not success: + sys.exit(1)