Attempting to fix NFS issues
apex/builders/common_builder.py
##############################################################################
# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################

# Common building utilities for undercloud and overcloud

import datetime
import git
import json
import logging
import os
import platform
import pprint
import re
import urllib.parse
import yaml

import apex.builders.overcloud_builder as oc_builder
from apex import build_utils
from apex.builders import exceptions as exc
from apex.common import constants as con
from apex.common import utils
from apex.virtual import utils as virt_utils


def project_to_path(project, patch=None):
    """
    Translates project to absolute file path to use in patching
    :param project: name of project
    :param patch: the patch to be applied to the project
    :return: File path
    """
    if project.startswith('openstack/'):
        project = os.path.basename(project)
    if 'puppet' in project:
        return "/etc/puppet/modules/{}".format(project.replace('puppet-', ''))
    elif 'tripleo-heat-templates' in project:
        return "/usr/share/openstack-tripleo-heat-templates"
    elif ('tripleo-common' in project and
          build_utils.is_path_in_patch(patch, 'container-images/')):
        # tripleo-common has a python component and a yaml/container-images
        # component. Here we detect whether the patch changes the yaml
        # component and, if so, treat it as non-python. The caveat is that a
        # patch modifying both python and yaml will not be handled correctly.
        # FIXME(trozet): add ability to split tripleo-common patches that
        # modify both python and yaml
        return "/usr/share/openstack-tripleo-common-containers/"
    else:
        # Assume python. Python patches will apply to a project name subdir.
        # For example, a python-tripleoclient patch will apply to the
        # tripleoclient directory, which is the directory extracted during
        # python install into the PYTHONPATH. Therefore we only need to be
        # in the PYTHONPATH directory to apply a patch.
        return "/usr/lib/python2.7/site-packages/"
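
# Illustrative sketch of the mapping above (example project names, not an
# exhaustive list):
#   project_to_path('openstack/puppet-nova')
#       -> '/etc/puppet/modules/nova'
#   project_to_path('openstack/tripleo-heat-templates')
#       -> '/usr/share/openstack-tripleo-heat-templates'
#   project_to_path('openstack/python-tripleoclient')
#       -> '/usr/lib/python2.7/site-packages/'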


def project_to_docker_image(project, docker_url):
    """
    Translates OpenStack project to OOO services that are containerized
    :param project: short name of OpenStack project
    :param docker_url: URL of the docker hub repository to query
    :return: List of OOO docker service names
    """
    # Fetch all docker containers in docker hub with tripleo and filter
    # based on project
    logging.info("Checking for docker images matching project: {}".format(
        project))
    hub_output = utils.open_webpage(
        urllib.parse.urljoin(docker_url,
                             '?page_size=1024'), timeout=10)
    try:
        results = json.loads(hub_output.decode())['results']
    except Exception as e:
        logging.error("Unable to parse docker hub output for "
                      "tripleoupstream repository")
        logging.debug("HTTP response from dockerhub:\n{}".format(hub_output))
        raise exc.ApexCommonBuilderException(
            "Failed to parse docker image info from Docker Hub: {}".format(e))
    logging.debug("Docker Hub tripleoupstream entities found: {}".format(
        results))
    docker_images = list()
    for result in results:
        if result['name'].startswith("centos-binary-{}".format(project)):
            # add as docker image shortname (just service name)
            logging.debug("Adding docker image {} for project {} for "
                          "patching".format(result['name'], project))
            docker_images.append(result['name'].replace('centos-binary-', ''))

    return docker_images
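
# Illustrative example (not a real Docker Hub response): a results entry of
# {"name": "centos-binary-nova-api", ...} would cause
# project_to_docker_image('nova', docker_url) to include 'nova-api' in the
# returned list, since the 'centos-binary-' prefix is stripped.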


def is_patch_promoted(change, branch, docker_url, docker_image=None):
    """
    Checks to see if a patch that is merged exists in either the docker
    container or the promoted tripleo images
    :param change: gerrit change json output
    :param branch: branch to use when polling artifacts (does not include
    stable prefix)
    :param docker_url: docker hub URL to poll for docker artifacts
    :param docker_image: docker image this change applies to (defaults to
    None)
    :return: True if the patch exists in a promoted artifact upstream
    """
    assert isinstance(change, dict)
    assert 'status' in change

    # if the change is not merged then it cannot have been promoted into an
    # artifact, so bail out early
    if change['status'] != 'MERGED':
        return False
    assert 'submitted' in change
    # drop microseconds, they are not needed for this comparison
    stime = re.sub(r'\..*$', '', change['submitted'])
    submitted_date = datetime.datetime.strptime(stime, "%Y-%m-%d %H:%M:%S")
    # Patch applies to overcloud/undercloud
    if docker_image is None:
        oc_url = urllib.parse.urljoin(
            con.UPSTREAM_RDO.replace('master', branch), 'overcloud-full.tar')
        oc_mtime = utils.get_url_modified_date(oc_url)
        if oc_mtime > submitted_date:
            logging.debug("oc image was last modified at {}, which is "
                          "newer than merge date: {}".format(oc_mtime,
                                                             submitted_date))
            return True
    else:
        # must be a docker patch, check docker tag modified time
        docker_url = docker_url.replace('tripleomaster',
                                        "tripleo{}".format(branch))
        url_path = "{}/tags/{}".format(docker_image, con.DOCKER_TAG)
        docker_url = urllib.parse.urljoin(docker_url, url_path)
        logging.debug("docker url is: {}".format(docker_url))
        docker_output = utils.open_webpage(docker_url, 10)
        logging.debug('Docker web output: {}'.format(docker_output))
        hub_mtime = json.loads(docker_output.decode())['last_updated']
        hub_mtime = re.sub(r'\..*$', '', hub_mtime)
        # docker modified time is in this format '2018-06-11T15:23:55.135744Z'
        # and we drop microseconds
        hub_dtime = datetime.datetime.strptime(hub_mtime, "%Y-%m-%dT%H:%M:%S")
        if hub_dtime > submitted_date:
            logging.debug("docker image: {} was last modified at {}, which "
                          "is newer than merge date: {}".format(docker_image,
                                                                hub_dtime,
                                                                submitted_date))
            return True
    return False
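
# Usage sketch with illustrative values: for a merged gerrit change dict
# containing 'status': 'MERGED' and a 'submitted' timestamp,
#   is_patch_promoted(change, 'queens', con.DOCKERHUB_OOO, 'nova-api')
# returns True only if the matching artifact (overcloud-full.tar or the
# docker tag) was modified after the change's merge time.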


def add_upstream_patches(patches, image, tmp_dir,
                         default_branch=os.path.join('stable',
                                                     con.DEFAULT_OS_VERSION),
                         uc_ip=None, docker_tag=None):
    """
    Adds patches from upstream OpenStack gerrit to Undercloud for deployment
    :param patches: list of patches
    :param image: undercloud image
    :param tmp_dir: directory used to store temporary patch files
    :param default_branch: default branch to fetch commit (if not specified
    in patch)
    :param uc_ip: undercloud IP (required only for docker patches)
    :param docker_tag: Docker Tag (required only for docker patches)
    :return: Set of docker services patched (if applicable)
    """
    virt_ops = [{con.VIRT_INSTALL: 'patch'}]
    logging.debug("Evaluating upstream patches:\n{}".format(patches))
    docker_services = set()
    for patch in patches:
        assert isinstance(patch, dict)
        assert all(i in patch.keys() for i in ['project', 'change-id'])
        if 'branch' in patch.keys():
            branch = patch['branch']
        else:
            branch = default_branch
        patch_diff = build_utils.get_patch(patch['change-id'],
                                           patch['project'], branch)
        project_path = project_to_path(patch['project'], patch_diff)
        # If we have a docker tag and the project path is python, we know
        # this patch belongs in the docker container for a docker service.
        # Therefore we build the dockerfile and move the patch into the
        # containers directory. We also assume this builder call is for the
        # overcloud, because we do not support undercloud containers.
        if platform.machine() == 'aarch64':
            docker_url = con.DOCKERHUB_AARCH64
        else:
            docker_url = con.DOCKERHUB_OOO
        if docker_tag and 'python' in project_path:
            # Projects map to multiple THT services, need to check which
            # are supported
            project_short_name = os.path.basename(patch['project'])
            ooo_docker_services = project_to_docker_image(project_short_name,
                                                          docker_url)
            if not ooo_docker_services:
                logging.error("Did not find any matching docker containers "
                              "for project: {}".format(project_short_name))
                raise exc.ApexCommonBuilderException(
                    'Unable to find docker services for python project in '
                    'patch')
            # Just use the first image to see if patch was promoted into it
            docker_img = ooo_docker_services[0]
        else:
            ooo_docker_services = []
            docker_img = None
        change = build_utils.get_change(con.OPENSTACK_GERRIT,
                                        patch['project'], branch,
                                        patch['change-id'])
        patch_promoted = is_patch_promoted(change,
                                           branch.replace('stable/', ''),
                                           docker_url,
                                           docker_img)

        if patch_diff and not patch_promoted:
            patch_file = "{}.patch".format(patch['change-id'])
            patch_file_paths = []
            # If we found services, then we treat the patch like it applies to
            # docker only
            if ooo_docker_services:
                os_version = default_branch.replace('stable/', '')
                for service in ooo_docker_services:
                    docker_services = docker_services.union({service})
                    # We need to become root to be able to install the patch
                    # and then switch back to the previous user. Some
                    # containers that have the same name as the project do
                    # not necessarily contain the project code. For example,
                    # novajoin-notifier does not contain nova package code.
                    # Therefore we must try to patch and unfortunately ignore
                    # failures until we have a better way of checking this.
                    docker_cmds = [
                        "WORKDIR {}".format(project_path),
                        "USER root",
                        "ARG REAL_USER",
                        "RUN yum -y install patch",
                        "ADD {} {}".format(patch_file, project_path),
                        "RUN patch -p1 < {} || echo "
                        "'Patching failed'".format(patch_file),
                        "USER $REAL_USER"
                    ]
                    src_img_uri = "{}:8787/tripleo{}/centos-binary-{}:" \
                                  "{}".format(uc_ip, os_version, service,
                                              docker_tag)
                    oc_builder.build_dockerfile(service, tmp_dir, docker_cmds,
                                                src_img_uri)
                    patch_file_paths.append(os.path.join(
                        tmp_dir, "containers/{}".format(service), patch_file))
            else:
                patch_file_path = os.path.join(tmp_dir, patch_file)
                virt_ops.extend([
                    {con.VIRT_UPLOAD: "{}:{}".format(patch_file_path,
                                                     project_path)},
                    {con.VIRT_RUN_CMD: "cd {} && patch -p1 < {}".format(
                        project_path, patch_file)}])
                logging.info("Adding patch {} to {}".format(patch_file,
                                                            image))
                patch_file_paths.append(patch_file_path)
            for patch_fp in patch_file_paths:
                with open(patch_fp, 'w') as fh:
                    fh.write(patch_diff)
        else:
            logging.info("Ignoring patch:\n{}".format(patch))
    if len(virt_ops) > 1:
        virt_utils.virt_customize(virt_ops, image)
    return docker_services
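
# Each entry in 'patches' is expected to be a dict like (illustrative values):
#   {'change-id': 'I<gerrit-change-id>',
#    'project': 'openstack/neutron',
#    'branch': 'stable/queens'}
# where 'branch' is optional and falls back to default_branch.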


def add_repo(repo_url, repo_name, image, tmp_dir):
    assert repo_name is not None
    assert repo_url is not None
    repo_file = "{}.repo".format(repo_name)
    repo_file_path = os.path.join(tmp_dir, repo_file)
    content = [
        "[{}]".format(repo_name),
        "name={}".format(repo_name),
        "baseurl={}".format(repo_url),
        "gpgcheck=0"
    ]
    logging.debug("Creating repo file {}".format(repo_name))
    with open(repo_file_path, 'w') as fh:
        fh.writelines("{}\n".format(line) for line in content)
    logging.debug("Adding repo {} to {}".format(repo_file, image))
    virt_utils.virt_customize([
        {con.VIRT_UPLOAD: "{}:/etc/yum.repos.d/".format(repo_file_path)}],
        image
    )
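
# For example (illustrative arguments),
#   add_repo('http://mirror.example.com/repo', 'example-repo', image, tmp_dir)
# writes an example-repo.repo file containing:
#   [example-repo]
#   name=example-repo
#   baseurl=http://mirror.example.com/repo
#   gpgcheck=0
# and uploads it into /etc/yum.repos.d/ inside the image.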


def create_git_archive(repo_url, repo_name, tmp_dir,
                       branch='master', prefix=''):
    repo = git.Repo.clone_from(repo_url, os.path.join(tmp_dir, repo_name))
    repo_git = repo.git
    if branch != str(repo.active_branch):
        repo_git.checkout("origin/{}".format(branch))
    archive_path = os.path.join(tmp_dir, "{}.tar".format(repo_name))
    with open(archive_path, 'wb') as fh:
        repo.archive(fh, prefix=prefix)
    logging.debug("Wrote archive file: {}".format(archive_path))
    return archive_path


def get_neutron_driver(ds_opts):
    sdn = ds_opts.get('sdn_controller', None)

    if sdn == 'opendaylight':
        return 'odl'
    elif sdn == 'ovn':
        return sdn
    elif ds_opts.get('vpp', False):
        return 'vpp'
    else:
        return None
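
# Illustrative deploy-settings examples: {'sdn_controller': 'opendaylight'}
# maps to 'odl', {'sdn_controller': 'ovn'} maps to 'ovn', {'vpp': True} maps
# to 'vpp', and anything else returns None.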


def prepare_container_images(prep_file, branch='master', neutron_driver=None):
    if not os.path.isfile(prep_file):
        raise exc.ApexCommonBuilderException("Prep file does not exist: "
                                             "{}".format(prep_file))
    with open(prep_file) as fh:
        data = yaml.safe_load(fh)
    try:
        p_set = data['parameter_defaults']['ContainerImagePrepare'][0]['set']
        if neutron_driver:
            p_set['neutron_driver'] = neutron_driver
        p_set['namespace'] = "docker.io/tripleo{}".format(branch)
        if platform.machine() == 'aarch64':
            p_set['namespace'] = "docker.io/armbandapex"
            p_set['ceph_tag'] = 'v3.1.0-stable-3.1-luminous-centos-7-aarch64'

    except KeyError:
        logging.error("Invalid prep file format: {}".format(prep_file))
        raise exc.ApexCommonBuilderException("Invalid format for prep file")

    logging.debug("Writing new container prep file:\n{}".format(
        pprint.pformat(data)))
    with open(prep_file, 'w') as fh:
        yaml.safe_dump(data, fh, default_flow_style=False)
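
# A minimal sketch of the prep file structure this function expects, based on
# the keys accessed above (illustrative values):
#
#   parameter_defaults:
#     ContainerImagePrepare:
#       - set:
#           namespace: docker.io/tripleomaster
#           neutron_driver: ovn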