Enables containerized overcloud deployments 31/48331/27
author: Tim Rozet <trozet@redhat.com>
Mon, 4 Dec 2017 16:20:23 +0000 (11:20 -0500)
committer: Tim Rozet <trozet@redhat.com>
Fri, 16 Mar 2018 18:51:33 +0000 (14:51 -0400)
Changes Include:
  - For upstream deployments, Docker local registry will be updated with
    latest current RDO containers, regular deployments will use latest
    stable
  - Upstream container images will then be patched/modified and then
    re-uploaded into local docker registry with 'apex' tag
  - Deployment command modified to deploy with containers
  - Adds a --no-fetch deployment argument to disable pulling latest
    from upstream, and instead using what already exists in cache
  - Moves Undercloud NAT setup to just after undercloud is installed.
    This provides internet during overcloud install which is now
    required for upstream container deployments.
  - Creates loop device for Ceph deployment when no device is
    provided in deploy settings (for container deployment only)
  - Updates NIC J2 template to use the new format in OOO since
    the os-apply-config method is now deprecated in > Queens

JIRA: APEX-566
JIRA: APEX-549

Change-Id: I0652c194c059b915a942ac7401936e8f5c69d1fa
Signed-off-by: Tim Rozet <trozet@redhat.com>
34 files changed:
apex/build_utils.py
apex/builders/common_builder.py
apex/builders/exceptions.py [new file with mode: 0644]
apex/builders/overcloud_builder.py
apex/builders/undercloud_builder.py
apex/common/constants.py
apex/common/exceptions.py
apex/common/utils.py
apex/deploy.py
apex/overcloud/deploy.py
apex/settings/deploy_settings.py
apex/tests/config/98faaca.diff [new file with mode: 0644]
apex/tests/test_apex_build_utils.py
apex/tests/test_apex_common_builder.py
apex/tests/test_apex_common_utils.py
apex/tests/test_apex_deploy.py
apex/tests/test_apex_overcloud_builder.py
apex/tests/test_apex_overcloud_deploy.py
apex/tests/test_apex_undercloud.py
apex/undercloud/undercloud.py
build/nics-template.yaml.jinja2
build/rpm_specs/opnfv-apex-common.spec
build/upstream-environment.yaml
config/deploy/deploy_settings.yaml
config/deploy/os-nosdn-master_upstream-noha.yaml [new file with mode: 0644]
config/deploy/os-nosdn-pike_upstream-noha.yaml [moved from config/deploy/os-nosdn-pike-noha.yaml with 100% similarity]
config/deploy/os-nosdn-queens_upstream-noha.yaml [new file with mode: 0644]
config/deploy/os-odl-master_upstream-noha.yaml [moved from config/deploy/os-odl-pike-noha.yaml with 64% similarity]
config/deploy/os-odl-pike_upstream-noha.yaml [new file with mode: 0644]
config/deploy/os-odl-queens_upstream-noha.yaml [new file with mode: 0644]
docs/contributor/upstream-overcloud-container-design.rst [new file with mode: 0644]
lib/ansible/playbooks/configure_undercloud.yml
lib/ansible/playbooks/post_deploy_undercloud.yml
lib/ansible/playbooks/prepare_overcloud_containers.yml [new file with mode: 0644]

index c9d8472..1c413df 100644 (file)
@@ -90,6 +90,31 @@ def clone_fork(args):
         logging.info('Checked out commit:\n{}'.format(ws.head.commit.message))


+def strip_patch_sections(patch, sections=['releasenotes']):
+    """
+    Removes patch sections from a diff which contain a file path
+    :param patch:  patch to strip
+    :param sections: list of keywords to use to strip out of the patch file
+    :return: stripped patch
+    """
+
+    append_line = True
+    tmp_patch = []
+    for line in patch.split("\n"):
+        if re.match(r'diff\s', line):
+            for section in sections:
+                if re.search(section, line):
+                    logging.debug("Stripping {} from patch: {}".format(
+                        section, line))
+                    append_line = False
+                    break
+                else:
+                    append_line = True
+        if append_line:
+            tmp_patch.append(line)
+    return '\n'.join(tmp_patch)
+
+
 def get_patch(change_id, repo, branch, url=con.OPENSTACK_GERRIT):
     logging.info("Fetching patch for change id {}".format(change_id))
     change = get_change(url, repo, branch, change_id)
@@ -100,7 +125,7 @@ def get_patch(change_id, repo, branch, url=con.OPENSTACK_GERRIT):
                                         change_id)
         patch_url = "changes/{}/revisions/{}/patch".format(change_path,
                                                            current_revision)
-        return rest.get(patch_url)
+        return strip_patch_sections(rest.get(patch_url))
 
 
 def get_parser():
 
 
 def get_parser():
index fd3bcc3..05a81ef 100644 (file)
 # Common building utilities for undercloud and overcloud
 
 import git
 # Common building utilities for undercloud and overcloud
 
 import git
+import json
 import logging
 import os
 import logging
 import os
+import re
 
 
+import apex.builders.overcloud_builder as oc_builder
 from apex import build_utils
 from apex import build_utils
+from apex.builders import exceptions as exc
 from apex.common import constants as con
 from apex.common import constants as con
+from apex.common import utils
 from apex.virtual import utils as virt_utils
 
 
 from apex.virtual import utils as virt_utils
 
 
@@ -35,9 +40,38 @@ def project_to_path(project):
         return "/usr/lib/python2.7/site-packages/{}".format(project)
 
 
         return "/usr/lib/python2.7/site-packages/{}".format(project)
 
 
+def project_to_docker_image(project):
+    """
+    Translates OpenStack project to OOO services that are containerized
+    :param project: name of OpenStack project
+    :return: List of OOO docker service names
+    """
+    # Fetch all docker containers in docker hub with tripleo and filter
+    # based on project
+    hub_output = utils.open_webpage(con.DOCKERHUB_OOO, timeout=10)
+    try:
+        results = json.loads(hub_output.decode())['results']
+    except Exception as e:
+        logging.error("Unable to parse docker hub output for "
+                      "tripleoupstream repository")
+        logging.debug("HTTP response from dockerhub:\n{}".format(hub_output))
+        raise exc.ApexCommonBuilderException(
+            "Failed to parse docker image info from Docker Hub: {}".format(e))
+    logging.debug("Docker Hub tripleoupstream entities found: {}".format(
+        results))
+    docker_images = list()
+    for result in results:
+        if result['name'].startswith("centos-binary-{}".format(project)):
+            # add as docker image shortname (just service name)
+            docker_images.append(result['name'].replace('centos-binary-', ''))
+
+    return docker_images
+
+
 def add_upstream_patches(patches, image, tmp_dir,
                          default_branch=os.path.join('stable',
 def add_upstream_patches(patches, image, tmp_dir,
                          default_branch=os.path.join('stable',
-                                                     con.DEFAULT_OS_VERSION)):
+                                                     con.DEFAULT_OS_VERSION),
+                         uc_ip=None, docker_tag=None):
     """
     Adds patches from upstream OpenStack gerrit to Undercloud for deployment
     :param patches: list of patches
     """
     Adds patches from upstream OpenStack gerrit to Undercloud for deployment
     :param patches: list of patches
@@ -45,10 +79,13 @@ def add_upstream_patches(patches, image, tmp_dir,
     :param tmp_dir: to store temporary patch files
     :param default_branch: default branch to fetch commit (if not specified
     in patch)
     :param tmp_dir: to store temporary patch files
     :param default_branch: default branch to fetch commit (if not specified
     in patch)
-    :return: None
+    :param uc_ip: undercloud IP (required only for docker patches)
+    :param docker_tag: Docker Tag (required only for docker patches)
+    :return: Set of docker services patched (if applicable)
     """
     virt_ops = [{con.VIRT_INSTALL: 'patch'}]
     logging.debug("Evaluating upstream patches:\n{}".format(patches))
     """
     virt_ops = [{con.VIRT_INSTALL: 'patch'}]
     logging.debug("Evaluating upstream patches:\n{}".format(patches))
+    docker_services = set()
     for patch in patches:
         assert isinstance(patch, dict)
         assert all(i in patch.keys() for i in ['project', 'change-id'])
     for patch in patches:
         assert isinstance(patch, dict)
         assert all(i in patch.keys() for i in ['project', 'change-id'])
@@ -60,21 +97,52 @@ def add_upstream_patches(patches, image, tmp_dir,
                                            patch['project'], branch)
         if patch_diff:
             patch_file = "{}.patch".format(patch['change-id'])
                                            patch['project'], branch)
         if patch_diff:
             patch_file = "{}.patch".format(patch['change-id'])
-            patch_file_path = os.path.join(tmp_dir, patch_file)
+            project_path = project_to_path(patch['project'])
+            # If docker tag and python we know this patch belongs on docker
+            # container for a docker service. Therefore we build the dockerfile
+            # and move the patch into the containers directory.  We also assume
+            # this builder call is for overcloud, because we do not support
+            # undercloud containers
+            if docker_tag and 'python' in project_path:
+                # Projects map to multiple THT services, need to check which
+                # are supported
+                ooo_docker_services = project_to_docker_image(patch['project'])
+            else:
+                ooo_docker_services = []
+            # If we found services, then we treat the patch like it applies to
+            # docker only
+            if ooo_docker_services:
+                os_version = default_branch.replace('stable/', '')
+                for service in ooo_docker_services:
+                    docker_services = docker_services.union({service})
+                    docker_cmds = [
+                        "WORKDIR {}".format(project_path),
+                        "ADD {} {}".format(patch_file, project_path),
+                        "RUN patch -p1 < {}".format(patch_file)
+                    ]
+                    src_img_uri = "{}:8787/{}/centos-binary-{}:" \
+                                  "{}".format(uc_ip, os_version, service,
+                                              docker_tag)
+                    oc_builder.build_dockerfile(service, tmp_dir, docker_cmds,
+                                                src_img_uri)
+                patch_file_path = os.path.join(tmp_dir, 'containers',
+                                               patch_file)
+            else:
+                patch_file_path = os.path.join(tmp_dir, patch_file)
+                virt_ops.extend([
+                    {con.VIRT_UPLOAD: "{}:{}".format(patch_file_path,
+                                                     project_path)},
+                    {con.VIRT_RUN_CMD: "cd {} && patch -p1 < {}".format(
+                        project_path, patch_file)}])
+                logging.info("Adding patch {} to {}".format(patch_file,
+                                                            image))
             with open(patch_file_path, 'w') as fh:
                 fh.write(patch_diff)
             with open(patch_file_path, 'w') as fh:
                 fh.write(patch_diff)
-            project_path = project_to_path(patch['project'])
-            virt_ops.extend([
-                {con.VIRT_UPLOAD: "{}:{}".format(patch_file_path,
-                                                 project_path)},
-                {con.VIRT_RUN_CMD: "cd {} && patch -p1 < {}".format(
-                    project_path, patch_file)}])
-            logging.info("Adding patch {} to {}".format(patch_file,
-                                                        image))
         else:
             logging.info("Ignoring patch:\n{}".format(patch))
     if len(virt_ops) > 1:
         virt_utils.virt_customize(virt_ops, image)
         else:
             logging.info("Ignoring patch:\n{}".format(patch))
     if len(virt_ops) > 1:
         virt_utils.virt_customize(virt_ops, image)
+    return docker_services
 
 
 def add_repo(repo_url, repo_name, image, tmp_dir):
 
 
 def add_repo(repo_url, repo_name, image, tmp_dir):
diff --git a/apex/builders/exceptions.py b/apex/builders/exceptions.py
new file mode 100644 (file)
index 0000000..b88f02b
--- /dev/null
@@ -0,0 +1,12 @@
+##############################################################################
+# Copyright (c) 2018 Tim Rozet (trozet@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+
+class ApexCommonBuilderException(Exception):
+    pass
index e7b0796..a84d100 100644 (file)
 # Used to modify overcloud qcow2 image
 
 import logging
 # Used to modify overcloud qcow2 image
 
 import logging
+import os
+import tarfile
 
 
-from apex.builders import common_builder as c_builder
+import apex.builders.common_builder
 from apex.common import constants as con
 from apex.common import constants as con
+from apex.common.exceptions import ApexBuildException
 from apex.virtual import utils as virt_utils
 
 
 from apex.virtual import utils as virt_utils
 
 
-def inject_opendaylight(odl_version, image, tmp_dir):
+def inject_opendaylight(odl_version, image, tmp_dir, uc_ip,
+                        os_version, docker_tag=None):
     assert odl_version in con.VALID_ODL_VERSIONS
     # add repo
     if odl_version == 'master':
     assert odl_version in con.VALID_ODL_VERSIONS
     # add repo
     if odl_version == 'master':
@@ -28,18 +32,77 @@ def inject_opendaylight(odl_version, image, tmp_dir):
     odl_url = "https://nexus.opendaylight.org/content/repositories" \
               "/opendaylight-{}-epel-7-x86_64-devel/".format(odl_pkg_version)
     repo_name = "opendaylight-{}".format(odl_pkg_version)
     odl_url = "https://nexus.opendaylight.org/content/repositories" \
               "/opendaylight-{}-epel-7-x86_64-devel/".format(odl_pkg_version)
     repo_name = "opendaylight-{}".format(odl_pkg_version)
-    c_builder.add_repo(odl_url, repo_name, image, tmp_dir)
+    apex.builders.common_builder.add_repo(odl_url, repo_name, image, tmp_dir)
     # download puppet-opendaylight
     # download puppet-opendaylight
-    archive = c_builder.create_git_archive(
+    archive = apex.builders.common_builder.create_git_archive(
         repo_url=con.PUPPET_ODL_URL, repo_name='puppet-opendaylight',
         tmp_dir=tmp_dir, branch=branch, prefix='opendaylight/')
     # install ODL, puppet-odl
     virt_ops = [
         repo_url=con.PUPPET_ODL_URL, repo_name='puppet-opendaylight',
         tmp_dir=tmp_dir, branch=branch, prefix='opendaylight/')
     # install ODL, puppet-odl
     virt_ops = [
-        {con.VIRT_INSTALL: 'opendaylight'},
         {con.VIRT_UPLOAD: "{}:/etc/puppet/modules/".format(archive)},
         {con.VIRT_RUN_CMD: 'rm -rf /etc/puppet/modules/opendaylight'},
         {con.VIRT_RUN_CMD: "cd /etc/puppet/modules/ && tar xvf "
                            "puppet-opendaylight.tar"}
     ]
         {con.VIRT_UPLOAD: "{}:/etc/puppet/modules/".format(archive)},
         {con.VIRT_RUN_CMD: 'rm -rf /etc/puppet/modules/opendaylight'},
         {con.VIRT_RUN_CMD: "cd /etc/puppet/modules/ && tar xvf "
                            "puppet-opendaylight.tar"}
     ]
+    if docker_tag:
+        docker_cmds = [
+            "RUN yum remove opendaylight -y",
+            "RUN echo $'[opendaylight]\\n\\",
+            "baseurl={}\\n\\".format(odl_url),
+            "gpgcheck=0\\n\\",
+            "enabled=1' > /etc/yum.repos.d/opendaylight.repo",
+            "RUN yum -y install opendaylight"
+        ]
+        src_img_uri = "{}:8787/{}/centos-binary-{}:" \
+                      "{}".format(uc_ip, os_version, 'opendaylight',
+                                  docker_tag)
+        build_dockerfile('opendaylight', tmp_dir, docker_cmds, src_img_uri)
+    else:
+        virt_ops.append({con.VIRT_INSTALL: 'opendaylight'})
     virt_utils.virt_customize(virt_ops, image)
     logging.info("OpenDaylight injected into {}".format(image))
     virt_utils.virt_customize(virt_ops, image)
     logging.info("OpenDaylight injected into {}".format(image))
+
+
+def build_dockerfile(service, tmp_dir, docker_cmds, src_image_uri):
+    """
+    Builds docker file per service and stores it in a
+    tmp_dir/containers/<service> directory.  If the Dockerfile already exists,
+    simply append the docker cmds to it.
+    :param service: name of sub-directory to store Dockerfile in
+    :param tmp_dir: Temporary directory to store the container's dockerfile in
+    :param docker_cmds: List of commands to insert into the dockerfile
+    :param src_image_uri: Docker URI format for where the source image exists
+    :return: None
+    """
+    logging.debug("Building Dockerfile for {} with docker_cmds: {}".format(
+        service, docker_cmds))
+    c_dir = os.path.join(tmp_dir, 'containers')
+    service_dir = os.path.join(c_dir, service)
+    if not os.path.isdir(service_dir):
+        os.makedirs(service_dir, exist_ok=True)
+    from_cmd = "FROM {}\n".format(src_image_uri)
+    service_file = os.path.join(service_dir, 'Dockerfile')
+    assert isinstance(docker_cmds, list)
+    if os.path.isfile(service_file):
+        append_cmds = True
+    else:
+        append_cmds = False
+    with open(service_file, "a+") as fh:
+        if not append_cmds:
+            fh.write(from_cmd)
+        fh.write('\n'.join(docker_cmds))
+
+
+def archive_docker_patches(tmp_dir):
+    """
+    Archives Overcloud docker patches into a tar file for upload to Undercloud
+    :param tmp_dir: temporary directory where containers folder is stored
+    :return: None
+    """
+    container_path = os.path.join(tmp_dir, 'containers')
+    if not os.path.isdir(container_path):
+        raise ApexBuildException("Docker directory for patches not found: "
+                                 "{}".format(container_path))
+    archive_file = os.path.join(tmp_dir, 'docker_patches.tar.gz')
+    with tarfile.open(archive_file, "w:gz") as tar:
+        tar.add(container_path, arcname=os.path.basename(container_path))
index baba8a5..268bad7 100644 (file)
@@ -20,6 +20,11 @@ def add_upstream_packages(image):
     :return: None
     """
     virt_ops = list()
     :return: None
     """
     virt_ops = list()
+    # FIXME(trozet): we have to lock to this beta ceph ansible package because
+    # the current RPM versioning is wrong and an older package has a higher
+    # version than this package.  We should change to just 'ceph-ansible'
+    # once the package/repo has been fixed.  Note: luminous is fine here
+    # because Apex will only support container deployment for Queens and later
     pkgs = [
         'openstack-utils',
         'ceph-common',
     pkgs = [
         'openstack-utils',
         'ceph-common',
@@ -29,6 +34,8 @@ def add_upstream_packages(image):
         'docker-distribution',
         'openstack-tripleo-validations',
         'libguestfs-tools',
         'docker-distribution',
         'openstack-tripleo-validations',
         'libguestfs-tools',
+        'http://mirror.centos.org/centos/7/storage/x86_64/ceph-luminous' +
+        '/ceph-ansible-3.1.0-0.beta3.1.el7.noarch.rpm'
     ]
 
     for pkg in pkgs:
     ]
 
     for pkg in pkgs:
index a2b9a63..0aa6a6c 100644 (file)
@@ -39,10 +39,14 @@ VIRT_PW = '--root-password'
 
 THT_DIR = '/usr/share/openstack-tripleo-heat-templates'
 THT_ENV_DIR = os.path.join(THT_DIR, 'environments')
 
 THT_DIR = '/usr/share/openstack-tripleo-heat-templates'
 THT_ENV_DIR = os.path.join(THT_DIR, 'environments')
+THT_DOCKER_ENV_DIR = os.path.join(THT_ENV_DIR, 'services-docker')
 
 DEFAULT_OS_VERSION = 'pike'
 DEFAULT_ODL_VERSION = 'nitrogen'
 VALID_ODL_VERSIONS = ['carbon', 'nitrogen', 'oxygen', 'master']
 
 DEFAULT_OS_VERSION = 'pike'
 DEFAULT_ODL_VERSION = 'nitrogen'
 VALID_ODL_VERSIONS = ['carbon', 'nitrogen', 'oxygen', 'master']
+CEPH_VERSION_MAP = {'pike': 'jewel',
+                    'queens': 'luminous',
+                    'master': 'luminous'}
 PUPPET_ODL_URL = 'https://git.opendaylight.org/gerrit/integration/packaging' \
                  '/puppet-opendaylight'
 DEBUG_OVERCLOUD_PW = 'opnfvapex'
 PUPPET_ODL_URL = 'https://git.opendaylight.org/gerrit/integration/packaging' \
                  '/puppet-opendaylight'
 DEBUG_OVERCLOUD_PW = 'opnfvapex'
@@ -50,3 +54,15 @@ NET_ENV_FILE = 'network-environment.yaml'
 DEPLOY_TIMEOUT = 90
 UPSTREAM_RDO = 'https://images.rdoproject.org/pike/delorean/current-tripleo/'
 OPENSTACK_GERRIT = 'https://review.openstack.org'
 DEPLOY_TIMEOUT = 90
 UPSTREAM_RDO = 'https://images.rdoproject.org/pike/delorean/current-tripleo/'
 OPENSTACK_GERRIT = 'https://review.openstack.org'
+
+DOCKER_TAG = 'current-tripleo-rdo'
+# Maps regular service files to docker versions
+# None value means mapping is same as key
+VALID_DOCKER_SERVICES = {
+    'neutron-opendaylight.yaml': None,
+    'neutron-opendaylight-dpdk.yaml': None,
+    'neutron-opendaylight-sriov.yaml': None,
+    'neutron-ml2-ovn.yaml': 'neutron-ovn.yaml'
+}
+DOCKERHUB_OOO = ('https://registry.hub.docker.com/v2/repositories'
+                 '/tripleoupstream/?page_size=1024')
index 54d9983..a4d390a 100644 (file)
@@ -18,3 +18,7 @@ class JumpHostNetworkException(Exception):
 
 class ApexCleanException(Exception):
     pass
 
 class ApexCleanException(Exception):
     pass
+
+
+class ApexBuildException(Exception):
+    pass
index b727b11..cb7cbe1 100644 (file)
@@ -22,6 +22,8 @@ import urllib.request
 import urllib.parse
 import yaml
 
 import urllib.parse
 import yaml
 
+from apex.common import exceptions as exc
+
 
 def str2bool(var):
     if isinstance(var, bool):
 
 def str2bool(var):
     if isinstance(var, bool):
@@ -139,30 +141,45 @@ def run_ansible(ansible_vars, playbook, host='localhost', user='root',
         raise Exception(e)
 
 
         raise Exception(e)
 
 
-def fetch_upstream_and_unpack(dest, url, targets):
+def fetch_upstream_and_unpack(dest, url, targets, fetch=True):
     """
     Fetches targets from a url destination and downloads them if they are
     newer.  Also unpacks tar files in dest dir.
     :param dest: Directory to download and unpack files to
     :param url: URL where target files are located
     :param targets: List of target files to download
     """
     Fetches targets from a url destination and downloads them if they are
     newer.  Also unpacks tar files in dest dir.
     :param dest: Directory to download and unpack files to
     :param url: URL where target files are located
     :param targets: List of target files to download
+    :param fetch: Whether or not to fetch latest from internet (boolean)
     :return: None
     """
     os.makedirs(dest, exist_ok=True)
     assert isinstance(targets, list)
     for target in targets:
     :return: None
     """
     os.makedirs(dest, exist_ok=True)
     assert isinstance(targets, list)
     for target in targets:
-        download_target = True
         target_url = urllib.parse.urljoin(url, target)
         target_dest = os.path.join(dest, target)
         target_url = urllib.parse.urljoin(url, target)
         target_dest = os.path.join(dest, target)
-        logging.debug("Fetching and comparing upstream target: \n{}".format(
-            target_url))
-        try:
-            u = urllib.request.urlopen(target_url)
-        except urllib.error.URLError as e:
-            logging.error("Failed to fetch target url. Error: {}".format(
-                e.reason))
-            raise
-        if os.path.isfile(target_dest):
+        target_exists = os.path.isfile(target_dest)
+        if fetch:
+            download_target = True
+        elif not target_exists:
+            logging.warning("no-fetch requested but target: {} is not "
+                            "cached, will download".format(target_dest))
+            download_target = True
+        else:
+            logging.info("no-fetch requested and previous cache exists for "
+                         "target: {}.  Will skip download".format(target_dest))
+            download_target = False
+
+        if download_target:
+            logging.debug("Fetching and comparing upstream"
+                          " target: \n{}".format(target_url))
+            try:
+                u = urllib.request.urlopen(target_url)
+            except urllib.error.URLError as e:
+                logging.error("Failed to fetch target url. Error: {}".format(
+                    e.reason))
+                raise
+        # Check if previous file and fetch we need to compare files to
+        # determine if download is necessary
+        if target_exists and download_target:
             logging.debug("Previous file found: {}".format(target_dest))
             metadata = u.info()
             headers = metadata.items()
             logging.debug("Previous file found: {}".format(target_dest))
             metadata = u.info()
             headers = metadata.items()
@@ -186,6 +203,7 @@ def fetch_upstream_and_unpack(dest, url, targets):
                     download_target = False
             else:
                 logging.debug('Unable to find last modified url date')
                     download_target = False
             else:
                 logging.debug('Unable to find last modified url date')
+
         if download_target:
             urllib.request.urlretrieve(target_url, filename=target_dest)
             logging.info("Target downloaded: {}".format(target))
         if download_target:
             urllib.request.urlretrieve(target_url, filename=target_dest)
             logging.info("Target downloaded: {}".format(target))
@@ -220,3 +238,26 @@ def internet_connectivity():
     except (urllib.request.URLError, socket.timeout):
         logging.debug('No internet connectivity detected')
         return False
     except (urllib.request.URLError, socket.timeout):
         logging.debug('No internet connectivity detected')
         return False
+
+
+def open_webpage(url, timeout=5):
+    try:
+        response = urllib.request.urlopen(url, timeout=timeout)
+        return response.read()
+    except (urllib.request.URLError, socket.timeout):
+        logging.error("Unable to open URL: {}".format(url))
+        raise
+
+
+def edit_tht_env(env_file, section, settings):
+    assert isinstance(settings, dict)
+    with open(env_file) as fh:
+        data = yaml.safe_load(fh)
+
+    if section not in data.keys():
+        data[section] = {}
+    for setting, value in settings.items():
+        data[section][setting] = value
+    with open(env_file, 'w') as fh:
+        yaml.safe_dump(data, fh, default_flow_style=False)
+    logging.debug("Data written to env file {}:\n{}".format(env_file, data))
index b9267a3..7bc5568 100644 (file)
@@ -21,13 +21,13 @@ import tempfile
 
 import apex.virtual.configure_vm as vm_lib
 import apex.virtual.utils as virt_utils
 
 import apex.virtual.configure_vm as vm_lib
 import apex.virtual.utils as virt_utils
+import apex.builders.common_builder as c_builder
+import apex.builders.overcloud_builder as oc_builder
+import apex.builders.undercloud_builder as uc_builder
 from apex import DeploySettings
 from apex import Inventory
 from apex import NetworkEnvironment
 from apex import NetworkSettings
 from apex import DeploySettings
 from apex import Inventory
 from apex import NetworkEnvironment
 from apex import NetworkSettings
-from apex.builders import common_builder as c_builder
-from apex.builders import overcloud_builder as oc_builder
-from apex.builders import undercloud_builder as uc_builder
 from apex.common import utils
 from apex.common import constants
 from apex.common import parsers
 from apex.common import utils
 from apex.common import constants
 from apex.common import parsers
@@ -181,6 +181,10 @@ def create_deploy_parser():
                                default=False,
                                help='Force deployment to use upstream '
                                     'artifacts')
                                default=False,
                                help='Force deployment to use upstream '
                                     'artifacts')
+    deploy_parser.add_argument('--no-fetch', action='store_true',
+                               default=False,
+                               help='Ignore fetching latest upstream and '
+                                    'use what is in cache')
     return deploy_parser
 
 
     return deploy_parser
 
 
@@ -352,15 +356,9 @@ def main():
                 constants.DEFAULT_OS_VERSION, os_version)
             upstream_targets = ['overcloud-full.tar', 'undercloud.qcow2']
             utils.fetch_upstream_and_unpack(args.image_dir, upstream_url,
                 constants.DEFAULT_OS_VERSION, os_version)
             upstream_targets = ['overcloud-full.tar', 'undercloud.qcow2']
             utils.fetch_upstream_and_unpack(args.image_dir, upstream_url,
-                                            upstream_targets)
+                                            upstream_targets,
+                                            fetch=not args.no_fetch)
             sdn_image = os.path.join(args.image_dir, 'overcloud-full.qcow2')
             sdn_image = os.path.join(args.image_dir, 'overcloud-full.qcow2')
-            if ds_opts['sdn_controller'] == 'opendaylight':
-                logging.info("Preparing upstream image with OpenDaylight")
-                oc_builder.inject_opendaylight(
-                    odl_version=ds_opts['odl_version'],
-                    image=sdn_image,
-                    tmp_dir=APEX_TEMP_DIR
-                )
             # copy undercloud so we don't taint upstream fetch
             uc_image = os.path.join(args.image_dir, 'undercloud_mod.qcow2')
             uc_fetch_img = os.path.join(args.image_dir, 'undercloud.qcow2')
             # copy undercloud so we don't taint upstream fetch
             uc_image = os.path.join(args.image_dir, 'undercloud_mod.qcow2')
             uc_fetch_img = os.path.join(args.image_dir, 'undercloud.qcow2')
@@ -372,12 +370,12 @@ def main():
             patches = deploy_settings['global_params']['patches']
             c_builder.add_upstream_patches(patches['undercloud'], uc_image,
                                            APEX_TEMP_DIR, branch)
             patches = deploy_settings['global_params']['patches']
             c_builder.add_upstream_patches(patches['undercloud'], uc_image,
                                            APEX_TEMP_DIR, branch)
-            logging.info('Adding patches to overcloud')
-            c_builder.add_upstream_patches(patches['overcloud'], sdn_image,
-                                           APEX_TEMP_DIR, branch)
         else:
             sdn_image = os.path.join(args.image_dir, SDN_IMAGE)
             uc_image = 'undercloud.qcow2'
         else:
             sdn_image = os.path.join(args.image_dir, SDN_IMAGE)
             uc_image = 'undercloud.qcow2'
+            # patches are ignored in non-upstream deployments
+            patches = {'overcloud': [], 'undercloud': []}
+        # Create/Start Undercloud VM
         undercloud = uc_lib.Undercloud(args.image_dir,
                                        args.deploy_dir,
                                        root_pw=root_pw,
         undercloud = uc_lib.Undercloud(args.image_dir,
                                        args.deploy_dir,
                                        root_pw=root_pw,
@@ -385,6 +383,13 @@ def main():
                                        image_name=os.path.basename(uc_image),
                                        os_version=os_version)
         undercloud.start()
                                        image_name=os.path.basename(uc_image),
                                        os_version=os_version)
         undercloud.start()
+        undercloud_admin_ip = net_settings['networks'][
+            constants.ADMIN_NETWORK]['installer_vm']['ip']
+
+        if upstream and ds_opts['containers']:
+            tag = constants.DOCKER_TAG
+        else:
+            tag = None
 
         # Generate nic templates
         for role in 'compute', 'controller':
 
         # Generate nic templates
         for role in 'compute', 'controller':
@@ -394,7 +399,7 @@ def main():
         undercloud.configure(net_settings, deploy_settings,
                              os.path.join(args.lib_dir, ANSIBLE_PATH,
                                           'configure_undercloud.yml'),
         undercloud.configure(net_settings, deploy_settings,
                              os.path.join(args.lib_dir, ANSIBLE_PATH,
                                           'configure_undercloud.yml'),
-                             APEX_TEMP_DIR)
+                             APEX_TEMP_DIR, virtual_oc=args.virtual)
 
         # Prepare overcloud-full.qcow2
         logging.info("Preparing Overcloud for deployment...")
 
         # Prepare overcloud-full.qcow2
         logging.info("Preparing Overcloud for deployment...")
@@ -410,22 +415,57 @@ def main():
             args.env_file = 'upstream-environment.yaml'
         opnfv_env = os.path.join(args.deploy_dir, args.env_file)
         if not upstream:
             args.env_file = 'upstream-environment.yaml'
         opnfv_env = os.path.join(args.deploy_dir, args.env_file)
         if not upstream:
+            # TODO(trozet): Invoke with containers after Fraser migration
             oc_deploy.prep_env(deploy_settings, net_settings, inventory,
                                opnfv_env, net_env_target, APEX_TEMP_DIR)
             oc_deploy.prep_env(deploy_settings, net_settings, inventory,
                                opnfv_env, net_env_target, APEX_TEMP_DIR)
-            oc_deploy.prep_image(deploy_settings, net_settings, sdn_image,
-                                 APEX_TEMP_DIR, root_pw=root_pw)
         else:
         else:
-            shutil.copyfile(sdn_image, os.path.join(APEX_TEMP_DIR,
-                                                    'overcloud-full.qcow2'))
             shutil.copyfile(
                 opnfv_env,
                 os.path.join(APEX_TEMP_DIR, os.path.basename(opnfv_env))
             )
             shutil.copyfile(
                 opnfv_env,
                 os.path.join(APEX_TEMP_DIR, os.path.basename(opnfv_env))
             )
+        patched_containers = oc_deploy.prep_image(
+            deploy_settings, net_settings, sdn_image, APEX_TEMP_DIR,
+            root_pw=root_pw, docker_tag=tag, patches=patches['overcloud'],
+            upstream=upstream)
 
         oc_deploy.create_deploy_cmd(deploy_settings, net_settings, inventory,
                                     APEX_TEMP_DIR, args.virtual,
                                     os.path.basename(opnfv_env),
                                     net_data=net_data)
 
         oc_deploy.create_deploy_cmd(deploy_settings, net_settings, inventory,
                                     APEX_TEMP_DIR, args.virtual,
                                     os.path.basename(opnfv_env),
                                     net_data=net_data)
+        # Prepare undercloud with containers
+        docker_playbook = os.path.join(args.lib_dir, ANSIBLE_PATH,
+                                       'prepare_overcloud_containers.yml')
+        if ds_opts['containers']:
+            ceph_version = constants.CEPH_VERSION_MAP[ds_opts['os_version']]
+            ceph_docker_image = "ceph/daemon:tag-build-master-" \
+                                "{}-centos-7".format(ceph_version)
+            logging.info("Preparing Undercloud with Docker containers")
+            if patched_containers:
+                oc_builder.archive_docker_patches(APEX_TEMP_DIR)
+            container_vars = dict()
+            container_vars['apex_temp_dir'] = APEX_TEMP_DIR
+            container_vars['patched_docker_services'] = list(
+                patched_containers)
+            container_vars['container_tag'] = constants.DOCKER_TAG
+            container_vars['stackrc'] = 'source /home/stack/stackrc'
+            container_vars['upstream'] = upstream
+            container_vars['sdn'] = ds_opts['sdn_controller']
+            container_vars['undercloud_ip'] = undercloud_admin_ip
+            container_vars['os_version'] = os_version
+            container_vars['ceph_docker_image'] = ceph_docker_image
+            container_vars['sdn_env_file'] = \
+                oc_deploy.get_docker_sdn_file(ds_opts)
+            try:
+                utils.run_ansible(container_vars, docker_playbook,
+                                  host=undercloud.ip, user='stack',
+                                  tmp_dir=APEX_TEMP_DIR)
+                logging.info("Container preparation complete")
+            except Exception:
+                logging.error("Unable to complete container prep on "
+                              "Undercloud")
+                os.remove(os.path.join(APEX_TEMP_DIR, 'overcloud-full.qcow2'))
+                raise
+
         deploy_playbook = os.path.join(args.lib_dir, ANSIBLE_PATH,
                                        'deploy_overcloud.yml')
         virt_env = 'virtual-environment.yaml'
         deploy_playbook = os.path.join(args.lib_dir, ANSIBLE_PATH,
                                        'deploy_overcloud.yml')
         virt_env = 'virtual-environment.yaml'
@@ -494,19 +534,14 @@ def main():
         else:
             deploy_vars['congress'] = False
         deploy_vars['calipso'] = ds_opts.get('calipso', False)
         else:
             deploy_vars['congress'] = False
         deploy_vars['calipso'] = ds_opts.get('calipso', False)
-        deploy_vars['calipso_ip'] = net_settings['networks']['admin'][
-            'installer_vm']['ip']
-        # TODO(trozet): this is probably redundant with getting external
-        # network info from undercloud.py
-        if 'external' in net_settings.enabled_network_list:
-            ext_cidr = net_settings['networks']['external'][0]['cidr']
+        deploy_vars['calipso_ip'] = undercloud_admin_ip
+        # overcloudrc.v3 removed and set as default in queens and later
+        if os_version == 'pike':
+            deploy_vars['overcloudrc_files'] = ['overcloudrc',
+                                                'overcloudrc.v3']
         else:
         else:
-            ext_cidr = net_settings['networks']['admin']['cidr']
-        deploy_vars['external_cidr'] = str(ext_cidr)
-        if ext_cidr.version == 6:
-            deploy_vars['external_network_ipv6'] = True
-        else:
-            deploy_vars['external_network_ipv6'] = False
+            deploy_vars['overcloudrc_files'] = ['overcloudrc']
+
         post_undercloud = os.path.join(args.lib_dir, ANSIBLE_PATH,
                                        'post_deploy_undercloud.yml')
         logging.info("Executing post deploy configuration undercloud playbook")
         post_undercloud = os.path.join(args.lib_dir, ANSIBLE_PATH,
                                        'post_deploy_undercloud.yml')
         logging.info("Executing post deploy configuration undercloud playbook")
index 33641ed..0701c18 100644 (file)
@@ -16,10 +16,13 @@ import shutil
 import uuid
 import struct
 import time
 import uuid
 import struct
 import time
+import apex.builders.overcloud_builder as oc_builder
+import apex.builders.common_builder as c_builder
 
 from apex.common import constants as con
 from apex.common.exceptions import ApexDeployException
 from apex.common import parsers
 
 from apex.common import constants as con
 from apex.common.exceptions import ApexDeployException
 from apex.common import parsers
+from apex.common import utils
 from apex.virtual import utils as virt_utils
 from cryptography.hazmat.primitives import serialization as \
     crypto_serialization
 from apex.virtual import utils as virt_utils
 from cryptography.hazmat.primitives import serialization as \
     crypto_serialization
@@ -72,6 +75,21 @@ OVS_NSH_RPM = "openvswitch-2.6.1-1.el7.centos.x86_64.rpm"
 ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
                       ".noarch.rpm"
 
 ODL_NETVIRT_VPP_RPM = "/root/opendaylight-7.0.0-0.1.20170531snap665.el7" \
                       ".noarch.rpm"
 
+LOSETUP_SERVICE = """[Unit]
+Description=Setup loop devices
+Before=network.target
+
+[Service]
+Type=oneshot
+ExecStart=/sbin/losetup /dev/loop3 /srv/data.img
+ExecStop=/sbin/losetup -d /dev/loop3
+TimeoutSec=60
+RemainAfterExit=yes
+
+[Install]
+WantedBy=multi-user.target
+"""
+
 
 def build_sdn_env_list(ds, sdn_map, env_list=None):
     """
 
 def build_sdn_env_list(ds, sdn_map, env_list=None):
     """
@@ -118,6 +136,25 @@ def build_sdn_env_list(ds, sdn_map, env_list=None):
     return env_list
 
 
     return env_list
 
 
+def get_docker_sdn_file(ds_opts):
+    """
+    Returns docker env file for detected SDN
+    :param ds_opts: deploy options
+    :return: docker THT env file for an SDN
+    """
+    # FIXME(trozet): We assume right now there is only one docker SDN file
+    docker_services = con.VALID_DOCKER_SERVICES
+    sdn_env_list = build_sdn_env_list(ds_opts, SDN_FILE_MAP)
+    for sdn_file in sdn_env_list:
+        sdn_base = os.path.basename(sdn_file)
+        if sdn_base in docker_services:
+            if docker_services[sdn_base] is not None:
+                return os.path.join(con.THT_DOCKER_ENV_DIR,
+                                    docker_services[sdn_base])
+            else:
+                return os.path.join(con.THT_DOCKER_ENV_DIR, sdn_base)
+
+
 def create_deploy_cmd(ds, ns, inv, tmp_dir,
                       virtual, env_file='opnfv-environment.yaml',
                       net_data=False):
 def create_deploy_cmd(ds, ns, inv, tmp_dir,
                       virtual, env_file='opnfv-environment.yaml',
                       net_data=False):
@@ -125,26 +162,47 @@ def create_deploy_cmd(ds, ns, inv, tmp_dir,
     logging.info("Creating deployment command")
     deploy_options = ['network-environment.yaml']
 
     logging.info("Creating deployment command")
     deploy_options = ['network-environment.yaml']
 
+    ds_opts = ds['deploy_options']
+
+    if ds_opts['containers']:
+        deploy_options.append(os.path.join(con.THT_ENV_DIR,
+                                           'docker.yaml'))
+
+    if ds['global_params']['ha_enabled']:
+        if ds_opts['containers']:
+            deploy_options.append(os.path.join(con.THT_ENV_DIR,
+                                               'docker-ha.yaml'))
+        else:
+            deploy_options.append(os.path.join(con.THT_ENV_DIR,
+                                               'puppet-pacemaker.yaml'))
+
     if env_file:
         deploy_options.append(env_file)
     if env_file:
         deploy_options.append(env_file)
-    ds_opts = ds['deploy_options']
-    deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)
+
+    if ds_opts['containers']:
+        deploy_options.append('docker-images.yaml')
+        sdn_docker_file = get_docker_sdn_file(ds_opts)
+        if sdn_docker_file:
+            deploy_options.append(sdn_docker_file)
+            deploy_options.append('sdn-images.yaml')
+    else:
+        deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)
 
     for k, v in OTHER_FILE_MAP.items():
         if k in ds_opts and ds_opts[k]:
 
     for k, v in OTHER_FILE_MAP.items():
         if k in ds_opts and ds_opts[k]:
-            deploy_options.append(os.path.join(con.THT_ENV_DIR, v))
+            if ds_opts['containers']:
+                deploy_options.append(os.path.join(con.THT_DOCKER_ENV_DIR,
+                                                   "{}.yaml".format(k)))
+            else:
+                deploy_options.append(os.path.join(con.THT_ENV_DIR, v))
 
     if ds_opts['ceph']:
 
     if ds_opts['ceph']:
-        prep_storage_env(ds, tmp_dir)
+        prep_storage_env(ds, ns, virtual, tmp_dir)
         deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                            'storage-environment.yaml'))
     if ds_opts['sriov']:
         prep_sriov_env(ds, tmp_dir)
 
         deploy_options.append(os.path.join(con.THT_ENV_DIR,
                                            'storage-environment.yaml'))
     if ds_opts['sriov']:
         prep_sriov_env(ds, tmp_dir)
 
-    if ds['global_params']['ha_enabled']:
-        deploy_options.append(os.path.join(con.THT_ENV_DIR,
-                                           'puppet-pacemaker.yaml'))
-
     if virtual:
         deploy_options.append('virtual-environment.yaml')
     else:
     if virtual:
         deploy_options.append('virtual-environment.yaml')
     else:
@@ -190,7 +248,8 @@ def create_deploy_cmd(ds, ns, inv, tmp_dir,
     return cmd
 
 
     return cmd
 
 
-def prep_image(ds, ns, img, tmp_dir, root_pw=None):
+def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
+               patches=None, upstream=False):
     """
     Locates sdn image and preps for deployment.
     :param ds: deploy settings
     """
     Locates sdn image and preps for deployment.
     :param ds: deploy settings
@@ -198,6 +257,9 @@ def prep_image(ds, ns, img, tmp_dir, root_pw=None):
     :param img: sdn image
     :param tmp_dir: dir to store modified sdn image
     :param root_pw: password to configure for overcloud image
     :param img: sdn image
     :param tmp_dir: dir to store modified sdn image
     :param root_pw: password to configure for overcloud image
+    :param docker_tag: Docker image tag for RDO version (default None)
+    :param patches: List of patches to apply to overcloud image
+    :param upstream: (boolean) Indicates if upstream deployment or not
     :return: None
     """
     # TODO(trozet): Come up with a better way to organize this logic in this
     :return: None
     """
     # TODO(trozet): Come up with a better way to organize this logic in this
@@ -210,6 +272,7 @@ def prep_image(ds, ns, img, tmp_dir, root_pw=None):
     ds_opts = ds['deploy_options']
     virt_cmds = list()
     sdn = ds_opts['sdn_controller']
     ds_opts = ds['deploy_options']
     virt_cmds = list()
     sdn = ds_opts['sdn_controller']
+    patched_containers = set()
     # we need this due to rhbz #1436021
     # fixed in systemd-219-37.el7
     if sdn is not False:
     # we need this due to rhbz #1436021
     # fixed in systemd-219-37.el7
     if sdn is not False:
@@ -298,7 +361,13 @@ def prep_image(ds, ns, img, tmp_dir, root_pw=None):
                                    "/root/nosdn_vpp_rpms/*.rpm"}
             ])
 
                                    "/root/nosdn_vpp_rpms/*.rpm"}
             ])
 
-    if sdn == 'opendaylight':
+    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
+    shutil.copyfile(img, tmp_oc_image)
+    logging.debug("Temporary overcloud image stored as: {}".format(
+        tmp_oc_image))
+
+    # TODO (trozet): remove this if block after Fraser
+    if sdn == 'opendaylight' and not upstream:
         if ds_opts['odl_version'] != con.DEFAULT_ODL_VERSION:
             virt_cmds.extend([
                 {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
         if ds_opts['odl_version'] != con.DEFAULT_ODL_VERSION:
             virt_cmds.extend([
                 {con.VIRT_RUN_CMD: "yum -y remove opendaylight"},
@@ -325,6 +394,19 @@ def prep_image(ds, ns, img, tmp_dir, root_pw=None):
                 {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
                     ODL_NETVIRT_VPP_RPM)}
             ])
                 {con.VIRT_RUN_CMD: "yum -y install /root/{}/*".format(
                     ODL_NETVIRT_VPP_RPM)}
             ])
+    elif sdn == 'opendaylight':
+        undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
+            'installer_vm']['ip']
+        oc_builder.inject_opendaylight(
+            odl_version=ds_opts['odl_version'],
+            image=tmp_oc_image,
+            tmp_dir=tmp_dir,
+            uc_ip=undercloud_admin_ip,
+            os_version=ds_opts['os_version'],
+            docker_tag=docker_tag,
+        )
+        if docker_tag:
+            patched_containers = patched_containers.union({'opendaylight'})
 
     if sdn == 'ovn':
         virt_cmds.extend([
 
     if sdn == 'ovn':
         virt_cmds.extend([
@@ -334,12 +416,35 @@ def prep_image(ds, ns, img, tmp_dir, root_pw=None):
                                "*openvswitch*"}
         ])
 
                                "*openvswitch*"}
         ])
 
-    tmp_oc_image = os.path.join(tmp_dir, 'overcloud-full.qcow2')
-    shutil.copyfile(img, tmp_oc_image)
-    logging.debug("Temporary overcloud image stored as: {}".format(
-        tmp_oc_image))
+    if patches:
+        if ds_opts['os_version'] == 'master':
+            branch = ds_opts['os_version']
+        else:
+            branch = "stable/{}".format(ds_opts['os_version'])
+        logging.info('Adding patches to overcloud')
+        patched_containers = patched_containers.union(
+            c_builder.add_upstream_patches(patches,
+                                           tmp_oc_image, tmp_dir,
+                                           branch,
+                                           uc_ip=undercloud_admin_ip,
+                                           docker_tag=docker_tag))
+    # if containers with ceph, and no ceph device we need to use a
+    # persistent loop device for Ceph OSDs
+    if docker_tag and not ds_opts.get('ceph_device', None):
+        tmp_losetup = os.path.join(tmp_dir, 'losetup.service')
+        with open(tmp_losetup, 'w') as fh:
+            fh.write(LOSETUP_SERVICE)
+        virt_cmds.extend([
+            {con.VIRT_UPLOAD: "{}:/usr/lib/systemd/system/".format(tmp_losetup)
+             },
+            {con.VIRT_RUN_CMD: 'truncate /srv/data.img --size 10G'},
+            {con.VIRT_RUN_CMD: 'mkfs.ext4 -F /srv/data.img'},
+            {con.VIRT_RUN_CMD: 'systemctl daemon-reload'},
+            {con.VIRT_RUN_CMD: 'systemctl enable losetup.service'},
+        ])
     virt_utils.virt_customize(virt_cmds, tmp_oc_image)
     logging.info("Overcloud image customization complete")
     virt_utils.virt_customize(virt_cmds, tmp_oc_image)
     logging.info("Overcloud image customization complete")
+    return patched_containers
 
 
 def make_ssh_key():
 
 
 def make_ssh_key():
@@ -541,11 +646,13 @@ def generate_ceph_key():
     return base64.b64encode(header + key)
 
 
     return base64.b64encode(header + key)
 
 
-def prep_storage_env(ds, tmp_dir):
+def prep_storage_env(ds, ns, virtual, tmp_dir):
     """
     Creates storage environment file for deployment.  Source file is copied by
     undercloud playbook to host.
     :param ds:
     """
     Creates storage environment file for deployment.  Source file is copied by
     undercloud playbook to host.
     :param ds:
+    :param ns:
+    :param virtual:
     :param tmp_dir:
     :return:
     """
     :param tmp_dir:
     :return:
     """
@@ -572,7 +679,40 @@ def prep_storage_env(ds, tmp_dir):
                 'utf-8')))
         else:
             print(line)
                 'utf-8')))
         else:
             print(line)
-    if 'ceph_device' in ds_opts and ds_opts['ceph_device']:
+
+    if ds_opts['containers']:
+        undercloud_admin_ip = ns['networks'][con.ADMIN_NETWORK][
+            'installer_vm']['ip']
+        ceph_version = con.CEPH_VERSION_MAP[ds_opts['os_version']]
+        docker_image = "{}:8787/ceph/daemon:tag-build-master-" \
+                       "{}-centos-7".format(undercloud_admin_ip,
+                                            ceph_version)
+        ceph_params = {
+            'DockerCephDaemonImage': docker_image,
+        }
+        if not ds['global_params']['ha_enabled']:
+            ceph_params['CephPoolDefaultSize'] = 1
+
+        if virtual:
+            ceph_params['CephAnsibleExtraConfig'] = {
+                'centos_package_dependencies': [],
+                'ceph_osd_docker_memory_limit': '1g',
+                'ceph_mds_docker_memory_limit': '1g',
+            }
+            ceph_params['CephPoolDefaultPgNum'] = 32
+        if 'ceph_device' in ds_opts and ds_opts['ceph_device']:
+            ceph_device = ds_opts['ceph_device']
+        else:
+            # TODO(trozet): make this DS default after Fraser
+            ceph_device = '/dev/loop3'
+
+        ceph_params['CephAnsibleDisksConfig'] = {
+            'devices': [ceph_device],
+            'journal_size': 512,
+            'osd_scenario': 'collocated'
+        }
+        utils.edit_tht_env(storage_file, 'parameter_defaults', ceph_params)
+    elif 'ceph_device' in ds_opts and ds_opts['ceph_device']:
         with open(storage_file, 'a') as fh:
             fh.write('  ExtraConfig:\n')
             fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
         with open(storage_file, 'a') as fh:
             fh.write('  ExtraConfig:\n')
             fh.write("    ceph::profile::params::osds:{{{}:{{}}}}\n".format(
index c05922b..4f887ed 100644 (file)
@@ -25,7 +25,8 @@ REQ_DEPLOY_SETTINGS = ['sdn_controller',
                        'rt_kvm',
                        'os_version',
                        'l2gw',
                        'rt_kvm',
                        'os_version',
                        'l2gw',
-                       'sriov']
+                       'sriov',
+                       'containers']
 
 OPT_DEPLOY_SETTINGS = ['performance',
                        'vsperf',
 
 OPT_DEPLOY_SETTINGS = ['performance',
                        'vsperf',
diff --git a/apex/tests/config/98faaca.diff b/apex/tests/config/98faaca.diff
new file mode 100644 (file)
index 0000000..68a66fb
--- /dev/null
@@ -0,0 +1,331 @@
+From 98faacad44e39a456d9fe1a1d21f5a65e8de4fc1 Mon Sep 17 00:00:00 2001
+From: Janki Chhatbar <jchhatba@redhat.com>
+Date: Tue, 23 Jan 2018 22:43:49 +0530
+Subject: [PATCH] Minor update steps for ODL
+
+Updating OpenStack (within release) means updating ODL from v1 to v1.1.
+This is done by "openstack overcloud update" which collects
+update_tasks. ODL needs 2 different steps to achieve this
+minor update. These are called Level1 and Level2. L1 is
+simple - stop ODL, update, start. This is taken care by paunch
+and no separate implementation is needed. L2 has extra steps
+which are implemented in update_tasks and post_update_tasks.
+
+Updating ODL within the same major release (1->1.1) consists of either
+L1 or L2 steps. These steps are decided from ODLUpdateLevel parameter
+specified in environments/services-docker/update-odl.yaml.
+
+Upgrading ODL to the next major release (1.1->2) requires
+only the L2 steps. These are implemented as upgrade_tasks and
+post_upgrade_tasks in https://review.openstack.org/489201.
+
+Steps involved in level 2 update are
+ 1. Block OVS instances to connect to ODL
+ 2. Set ODL upgrade flag to True
+ 3. Start ODL
+ 4. Start Neutron re-sync and wait for it to finish
+ 5. Delete OVS groups and ports
+ 6. Stop OVS
+ 7. Unblock OVS ports
+ 8. Start OVS
+ 9. Unset ODL upgrade flag
+
+These steps are exactly same as upgrade_tasks.
+The logic implemented is:
+follow upgrade_tasks; when update_level == 2
+
+Change-Id: Ie532800663dd24313a7350b5583a5080ddb796e7
+---
+
+diff --git a/common/deploy-steps.j2 b/common/deploy-steps.j2
+index 595e16c..c4fb05f 100644
+--- a/common/deploy-steps.j2
++++ b/common/deploy-steps.j2
+@@ -23,6 +23,7 @@
+ {% set post_upgrade_steps_max = 4 -%}
+ {% set fast_forward_upgrade_steps_max = 9 -%}
+ {% set fast_forward_upgrade_prep_steps_max = 3 -%}
++{% set post_update_steps_max = 4 -%}
+ heat_template_version: queens
+@@ -590,3 +591,15 @@
+                 - include_tasks: {{role.name}}/fast_forward_upgrade_tasks.yaml
+                   when: role_name == '{{role.name}}' and ansible_hostname == {{role.name}}[0]
+ {%- endfor %}
++      post_update_steps_tasks: |
++{%- for role in roles %}
++            - include: {{role.name}}/post_update_tasks.yaml
++              when: role_name == '{{role.name}}'
++{%- endfor %}
++      post_update_steps_playbook: |
++        - hosts: overcloud
++          tasks:
++            - include: post_update_steps_tasks.yaml
++              with_sequence: start=0 end={{post_update_steps_max-1}}
++              loop_control:
++                loop_var: step
+diff --git a/common/services.yaml b/common/services.yaml
+index 2a62c1b..c197b05 100644
+--- a/common/services.yaml
++++ b/common/services.yaml
+@@ -283,6 +283,16 @@
+           expression: coalesce($.data, []).where($ != null).select($.get('update_tasks')).where($ != null).flatten().distinct()
+           data: {get_attr: [ServiceChain, role_data]}
++  PostUpdateTasks:
++    type: OS::Heat::Value
++    properties:
++      type: comma_delimited_list
++      value:
++        yaql:
++          # Note we use distinct() here to filter any identical tasks, e.g yum update for all services
++          expression: coalesce($.data, []).where($ != null).select($.get('post_update_tasks')).where($ != null).flatten().distinct()
++          data: {get_attr: [ServiceChain, role_data]}
++
+   UpgradeBatchTasks:
+     type: OS::Heat::Value
+     properties:
+@@ -349,6 +359,7 @@
+       upgrade_tasks: {get_attr: [UpgradeTasks, value]}
+       post_upgrade_tasks: {get_attr: [PostUpgradeTasks, value]}
+       update_tasks: {get_attr: [UpdateTasks, value]}
++      post_update_tasks: {get_attr: [PostUpdateTasks, value]}
+       upgrade_batch_tasks: {get_attr: [UpgradeBatchTasks, value]}
+       service_metadata_settings: {get_attr: [ServiceServerMetadataHook, metadata]}
+diff --git a/docker/services/opendaylight-api.yaml b/docker/services/opendaylight-api.yaml
+index 6175db9..3cafe53 100644
+--- a/docker/services/opendaylight-api.yaml
++++ b/docker/services/opendaylight-api.yaml
+@@ -44,6 +44,14 @@
+     type: string
+     description: Specifies the default CA cert to use if TLS is used for
+                  services in the internal network.
++  ODLUpdateLevel:
++    default: 1
++    description: Specify the level of update
++    type: number
++    constraints:
++      - allowed_values:
++          - 1
++          - 2
+ conditions:
+@@ -167,23 +175,25 @@
+             - opendaylight_enabled.rc == 0
+           service: name=opendaylight state=stopped enabled=no
+         # Containarised deployment upgrade steps
+-        - name: remove journal and snapshots
+-          when: step|int == 0
+-          file:
+-            path: /var/lib/opendaylight/{{item}}
+-            state: absent
+-            with_items:
+-            - snapshots
+-            - journal
+-        - name: Set ODL upgrade flag to True
+-          copy:
+-            dest: /var/lib/opendaylight/etc/opendaylight/datastore/initial/config/genius-mdsalutil-config.xml
+-            content: |
+-              <config xmlns="urn:opendaylight:params:xml:ns:yang:mdsalutil">
+-                  <upgradeInProgress>true</upgradeInProgress>
+-              </config>
+-          when: step|int == 1
+-      post_upgrade_tasks:
++        - name: ODL container L2 update and upgrade tasks
++          block: &odl_container_upgrade_tasks
++            - name: remove journal and snapshots
++              when: step|int == 0
++              file:
++                path: /var/lib/opendaylight/{{item}}
++                state: absent
++                with_items:
++                - snapshots
++                - journal
++            - name: Set ODL upgrade flag to True
++              copy:
++                dest: /var/lib/opendaylight/etc/opendaylight/datastore/initial/config/genius-mdsalutil-config.xml
++                content: |
++                  <config xmlns="urn:opendaylight:params:xml:ns:yang:mdsalutil">
++                      <upgradeInProgress>true</upgradeInProgress>
++                  </config>
++              when: step|int == 1
++      post_upgrade_tasks: &odl_container_post_upgrade_tasks
+         - name: Unset upgrade flag in ODL
+           shell:
+             str_replace:
+@@ -192,7 +202,20 @@
+                          -H "Content-Type: application/json" \
+                          $ODL_URI/restconf/config/genius-mdsalutil:config'
+               params:
+-                $ODL_USERNAME: {get_param: [OpenDaylightBase, OpenDaylightUsername]}
+-                $ODL_PASSWORD: {get_param: [OpenDaylightBase, OpenDaylightPassword]}
++                $ODL_USERNAME: {get_attr: [OpenDaylightBase, role_data, config_settings, 'opendaylight::username']}
++                $ODL_PASSWORD: {get_attr: [OpenDaylightBase, role_data, config_settings, 'opendaylight::password']}
+                 $ODL_URI: {get_param: [EndpointMap, OpenDaylightInternal, uri]}
+           when: step|int == 0
++      update_tasks:
++        - name: Get ODL update level
++          block: &get_odl_update_level
++            - name: store update level to update_level variable
++              set_fact:
++                odl_update_level: {get_param: ODLUpdateLevel}
++        - name: Run L2 update tasks that are similar to upgrade_tasks when update level is 2
++          block: *odl_container_upgrade_tasks
++          when: odl_update_level == 2
++      post_update_tasks:
++        - block: *get_odl_update_level
++        - block: *odl_container_post_upgrade_tasks
++          when: odl_update_level == 2
+\ No newline at end of file
+diff --git a/environments/services-docker/update-odl.yaml b/environments/services-docker/update-odl.yaml
+new file mode 100644
+index 0000000..87d74ef
+--- /dev/null
++++ b/environments/services-docker/update-odl.yaml
+@@ -0,0 +1,11 @@
++# This file describes parameters needed for ODL update.
++# This file is to be used along with other env files during
++# level 2 minor update.
++# Level 2 update involves yang changes in ODL within same ODL release and
++# hence needs DB wipe and resync.
++# Level 1 is simple update - stop ODL, pull new image, start ODL
++# This file is not be used during level1 update or major upgrade.
++# In case doubt, please reach out to ODL developers on #tripleo IRC channel
++
++parameter_defaults:
++  ODLUpdateLevel: 2
+\ No newline at end of file
+diff --git a/puppet/services/opendaylight-ovs.yaml b/puppet/services/opendaylight-ovs.yaml
+index 3390645..958e1bb 100644
+--- a/puppet/services/opendaylight-ovs.yaml
++++ b/puppet/services/opendaylight-ovs.yaml
+@@ -104,6 +104,14 @@
+     type: string
+     description: Specifies the default CA cert to use if TLS is used for
+                  services in the internal network.
++  ODLUpdateLevel:
++    default: 1
++    description: Specify the level of update
++    type: number
++    constraints:
++      - allowed_values:
++          - 1
++          - 2
+ parameter_groups:
+ - label: deprecated
+@@ -230,14 +238,16 @@
+                 - openvswitch_enabled.rc == 0
+               service: name=openvswitch state=stopped
+               # Container upgrade steps.
+-            - name: Block connections to ODL. #This rule will be inserted at the top.
+-              iptables: chain=OUTPUT action=insert protocol=tcp destination_port={{ item }} jump=DROP
+-              when: step|int == 0
+-              with_items:
+-                - 6640
+-                - 6653
+-                - 6633
+-      post_upgrade_tasks:
++            - name: ODL container L2 update and upgrade tasks
++              block: &odl_container_upgrade_tasks
++              - name: Block connections to ODL. #This rule will be inserted at the top.
++                iptables: chain=OUTPUT action=insert protocol=tcp destination_port={{ item }} jump=DROP
++                when: step|int == 0
++                with_items:
++                  - 6640
++                  - 6653
++                  - 6633
++      post_upgrade_tasks: &odl_container_post_upgrade_tasks
+         - name: Check service openvswitch is running
+           command: systemctl is-active --quiet openvswitch
+           tags: common
+@@ -260,6 +270,20 @@
+         - name: start openvswitch service
+           when: step|int == 3
+           service : name=openvswitch state=started
++      update_tasks:
++        - name: Get ODL update level
++          block: &get_odl_update_level
++            - name: store update level to update_level variable
++              set_fact:
++                odl_update_level: {get_param: ODLUpdateLevel}
++        - name: Run L2 update tasks that are similar to upgrade_tasks when update level is 2
++          block: *odl_container_upgrade_tasks
++          when: odl_update_level == 2
++      post_update_tasks:
++        - block: *get_odl_update_level
++        - block: *odl_container_post_upgrade_tasks
++          when: odl_update_level == 2
++
+       metadata_settings:
+         if:
+           - internal_tls_enabled
+@@ -267,4 +291,4 @@
+             - service: ovs
+               network: {get_param: [ServiceNetMap, OpendaylightApiNetwork]}
+               type: node
+-          - null
++          - null
+\ No newline at end of file
+diff --git a/releasenotes/notes/odl_upgrade-f5540d242b9a6b52.yaml b/releasenotes/notes/odl_upgrade-f5540d242b9a6b52.yaml
+index 45703d0..e2943de 100644
+--- a/releasenotes/notes/odl_upgrade-f5540d242b9a6b52.yaml
++++ b/releasenotes/notes/odl_upgrade-f5540d242b9a6b52.yaml
+@@ -1,6 +1,6 @@
+ ---
+-features:
++upgrade:
+   - Add ODL upgradability
+     Steps of upgrade are as follows
+     1. Block OVS instances to connect to ODL done in upgrade_tasks
+diff --git a/releasenotes/notes/update_odl-cb997ce5c136ebb7.yaml b/releasenotes/notes/update_odl-cb997ce5c136ebb7.yaml
+new file mode 100644
+index 0000000..1bcf8ed
+--- /dev/null
++++ b/releasenotes/notes/update_odl-cb997ce5c136ebb7.yaml
+@@ -0,0 +1,19 @@
++---
++features:
++  - Minor update ODL steps are added. ODL minor update (within same ODL
++    release) can have 2 different workflow. These are called level 1 and
++    level2. Level 1 is simple - stop, update and start ODL. Level 2 is
++    complex and involved yang model changes. This requires wiping of
++    DB and resync to repopulate the data.
++    Steps involved in level 2 update are
++     1. Block OVS instances to connect to ODL
++     2. Set ODL upgrade flag to True
++     3. Start ODL
++     4. Start Neutron re-sync and wait for it to finish
++     5. Delete OVS groups and ports
++     6. Stop OVS
++     7. Unblock OVS ports
++     8. Start OVS
++     9. Unset ODL upgrade flag
++    To achieve L2 update, use "-e environments/services-docker/
++    update-odl.yaml" along with other env files to the update command.
+\ No newline at end of file
+diff --git a/tools/yaml-validate.py b/tools/yaml-validate.py
+index 59473f5..9ab6a87 100755
+--- a/tools/yaml-validate.py
++++ b/tools/yaml-validate.py
+@@ -46,11 +46,11 @@
+ OPTIONAL_DOCKER_SECTIONS = ['docker_puppet_tasks', 'upgrade_tasks',
+                             'fast_forward_upgrade_tasks',
+                             'post_upgrade_tasks',  'update_tasks',
+-                            'service_config_settings', 'host_prep_tasks',
+-                            'metadata_settings', 'kolla_config',
+-                            'global_config_settings', 'logging_source',
+-                            'logging_groups', 'external_deploy_tasks',
+-                            'external_post_deploy_tasks',
++                            'post_update_tasks', 'service_config_settings',
++                            'host_prep_tasks', 'metadata_settings',
++                            'kolla_config', 'global_config_settings',
++                            'logging_source', 'logging_groups',
++                            'external_deploy_tasks', 'external_post_deploy_tasks',
+                             'docker_config_scripts', 'step_config']
+ REQUIRED_DOCKER_PUPPET_CONFIG_SECTIONS = ['config_volume', 'step_config',
+                                           'config_image']
index d9d542d..f18103c 100644 (file)
@@ -9,17 +9,20 @@
 
 import argparse
 import git
 
 import argparse
 import git
+import os
+import unittest
 
 from mock import patch
 
 from apex import build_utils
 
 from mock import patch
 
 from apex import build_utils
+from apex.tests import constants as con
 
 from nose.tools import (
     assert_is_instance,
     assert_raises)
 
 
 
 from nose.tools import (
     assert_is_instance,
     assert_raises)
 
 
-class TestBuildUtils(object):
+class TestBuildUtils(unittest.TestCase):
     @classmethod
     def setup_class(cls):
         """This method is run once for each class before any tests are run"""
     @classmethod
     def setup_class(cls):
         """This method is run once for each class before any tests are run"""
@@ -165,3 +168,19 @@ class TestBuildUtils(object):
     def test_main_debug(self, mock_get_parser):
         with patch.object(build_utils.sys, 'argv', self.sys_argv_debug):
             build_utils.main()
     def test_main_debug(self, mock_get_parser):
         with patch.object(build_utils.sys, 'argv', self.sys_argv_debug):
             build_utils.main()
+
+    def test_strip_patch_sections(self):
+        with open(os.path.join(con.TEST_DUMMY_CONFIG, '98faaca.diff')) as fh:
+            dummy_patch = fh.read()
+        tmp_patch = build_utils.strip_patch_sections(dummy_patch)
+        self.assertNotRegex(tmp_patch, 'releasenotes')
+        self.assertNotRegex(tmp_patch, 'Minor update ODL steps')
+        self.assertNotRegex(tmp_patch, 'Steps of upgrade are as follows')
+        self.assertNotRegex(tmp_patch, 'Steps invlolved in level 2 update')
+
+    def test_strip_no_patch_sections(self):
+        with open(os.path.join(con.TEST_DUMMY_CONFIG, '98faaca.diff')) as fh:
+            dummy_patch = fh.read()
+        tmp_patch = build_utils.strip_patch_sections(dummy_patch,
+                                                     sections=[])
+        self.assertEqual(dummy_patch, tmp_patch)
index c32f72c..d501746 100644 (file)
 import unittest
 
 from apex.builders import common_builder as c_builder
 import unittest
 
 from apex.builders import common_builder as c_builder
+from apex.builders import exceptions
 from apex.common import constants as con
 from mock import patch
 from mock import mock_open
 from mock import MagicMock
 
 from apex.common import constants as con
 from mock import patch
 from mock import mock_open
 from mock import MagicMock
 
+DOCKER_YAML = {
+    'resource_registry': {
+        'OS::TripleO::Services::NovaApi': '../docker/services/nova-api.yaml',
+        'OS::TripleO::Services::NovaConductor':
+            '../docker/services/nova-conductor.yaml'
+    }
+}
+
 
 class TestCommonBuilder(unittest.TestCase):
     @classmethod
 
 class TestCommonBuilder(unittest.TestCase):
     @classmethod
@@ -67,6 +76,54 @@ class TestCommonBuilder(unittest.TestCase):
         c_builder.add_upstream_patches(patches, 'dummy.qcow2', '/dummytmp/')
         mock_customize.assert_called_once_with(test_virt_ops, 'dummy.qcow2')
 
         c_builder.add_upstream_patches(patches, 'dummy.qcow2', '/dummytmp/')
         mock_customize.assert_called_once_with(test_virt_ops, 'dummy.qcow2')
 
+    @patch('builtins.open', mock_open())
+    @patch('apex.build_utils.get_patch')
+    @patch('apex.virtual.utils.virt_customize')
+    def test_add_upstream_patches_docker_puppet(
+            self, mock_customize, mock_get_patch):
+        change_id = 'I301370fbf47a71291614dd60e4c64adc7b5ebb42'
+        patches = [{
+            'change-id': change_id,
+            'project': 'openstack/puppet-tripleo'
+        }]
+        project_path = '/etc/puppet/modules/tripleo'
+        patch_file = "{}.patch".format(change_id)
+        patch_file_path = "/dummytmp/{}".format(patch_file)
+        test_virt_ops = [
+            {con.VIRT_INSTALL: 'patch'},
+            {con.VIRT_UPLOAD: "{}:{}".format(patch_file_path,
+                                             project_path)},
+            {con.VIRT_RUN_CMD: "cd {} && patch -p1 < {}".format(
+                project_path, patch_file)}]
+        mock_get_patch.return_value = 'some random diff'
+        c_builder.add_upstream_patches(patches, 'dummy.qcow2', '/dummytmp/',
+                                       uc_ip='192.0.2.1',
+                                       docker_tag='latest')
+        mock_customize.assert_called_once_with(test_virt_ops, 'dummy.qcow2')
+
+    @patch('builtins.open', mock_open())
+    @patch('apex.builders.common_builder.project_to_docker_image')
+    @patch('apex.builders.overcloud_builder.build_dockerfile')
+    @patch('apex.build_utils.get_patch')
+    @patch('apex.virtual.utils.virt_customize')
+    def test_add_upstream_patches_docker_python(
+            self, mock_customize, mock_get_patch, mock_build_docker_file,
+            mock_project2docker):
+        mock_project2docker.return_value = ['NovaApi']
+        change_id = 'I301370fbf47a71291614dd60e4c64adc7b5ebb42'
+        patches = [{
+            'change-id': change_id,
+            'project': 'openstack/nova'
+        }]
+        mock_get_patch.return_value = 'some random diff'
+        services = c_builder.add_upstream_patches(patches, 'dummy.qcow2',
+                                                  '/dummytmp/',
+                                                  uc_ip='192.0.2.1',
+                                                  docker_tag='latest')
+        assert mock_customize.not_called
+        assert mock_build_docker_file.called
+        self.assertSetEqual(services, {'NovaApi'})
+
     @patch('builtins.open', mock_open())
     @patch('apex.virtual.utils.virt_customize')
     def test_add_repo(self, mock_customize):
     @patch('builtins.open', mock_open())
     @patch('apex.virtual.utils.virt_customize')
     def test_add_repo(self, mock_customize):
@@ -85,3 +142,15 @@ class TestCommonBuilder(unittest.TestCase):
         self.assertEqual(c_builder.create_git_archive('fake/url', 'dummyrepo',
                                                       '/dummytmp/'),
                          '/dummytmp/dummyrepo.tar')
         self.assertEqual(c_builder.create_git_archive('fake/url', 'dummyrepo',
                                                       '/dummytmp/'),
                          '/dummytmp/dummyrepo.tar')
+
+    def test_project_to_docker_image(self):
+        found_services = c_builder.project_to_docker_image(project='nova')
+        assert 'nova-api' in found_services
+
+    @patch('apex.common.utils.open_webpage')
+    def test_project_to_docker_image_bad_web_content(
+            self, mock_open_web):
+        mock_open_web.return_value = b'{"blah": "blah"}'
+        self.assertRaises(exceptions.ApexCommonBuilderException,
+                          c_builder.project_to_docker_image,
+                          'nova')
index 6f2a947..0e4041c 100644 (file)
@@ -12,12 +12,14 @@ import os
 import shutil
 import urllib.error
 
 import shutil
 import urllib.error
 
+from apex.common import exceptions
 from apex.common import utils
 from apex.settings.network_settings import NetworkSettings
 from apex.tests.constants import (
     TEST_CONFIG_DIR,
     TEST_PLAYBOOK_DIR)
 
 from apex.common import utils
 from apex.settings.network_settings import NetworkSettings
 from apex.tests.constants import (
     TEST_CONFIG_DIR,
     TEST_PLAYBOOK_DIR)
 
+from mock import patch, mock_open
 from nose.tools import (
     assert_equal,
     assert_is_instance,
 from nose.tools import (
     assert_equal,
     assert_is_instance,
@@ -25,6 +27,7 @@ from nose.tools import (
     assert_raises)
 
 NET_SETS = os.path.join(TEST_CONFIG_DIR, 'network', 'network_settings.yaml')
     assert_raises)
 
 NET_SETS = os.path.join(TEST_CONFIG_DIR, 'network', 'network_settings.yaml')
+a_mock_open = mock_open(read_data=None)
 
 
 class TestCommonUtils:
 
 
 class TestCommonUtils:
@@ -100,3 +103,48 @@ class TestCommonUtils:
                                         url, ['dummy_test.tar'])
         assert os.path.isfile('/tmp/fetch_test/test.txt')
         shutil.rmtree('/tmp/fetch_test')
                                         url, ['dummy_test.tar'])
         assert os.path.isfile('/tmp/fetch_test/test.txt')
         shutil.rmtree('/tmp/fetch_test')
+
+    def test_nofetch_upstream_and_unpack(self):
+        test_file = 'overcloud-full.tar.md5'
+        url = 'https://images.rdoproject.org/master/delorean/' \
+              'current-tripleo/stable/'
+        os.makedirs('/tmp/fetch_test', exist_ok=True)
+        target = "/tmp/fetch_test/{}".format(test_file)
+        open(target, 'w').close()
+        target_mtime = os.path.getmtime(target)
+        utils.fetch_upstream_and_unpack('/tmp/fetch_test',
+                                        url, [test_file], fetch=False)
+        post_target_mtime = os.path.getmtime(target)
+        shutil.rmtree('/tmp/fetch_test')
+        assert_equal(target_mtime, post_target_mtime)
+
+    def test_nofetch_upstream_and_unpack_no_target(self):
+        test_file = 'overcloud-full.tar.md5'
+        url = 'https://images.rdoproject.org/master/delorean/' \
+              'current-tripleo/stable/'
+        utils.fetch_upstream_and_unpack('/tmp/fetch_test',
+                                        url, [test_file])
+        assert os.path.isfile("/tmp/fetch_test/{}".format(test_file))
+        shutil.rmtree('/tmp/fetch_test')
+
+    def test_open_webpage(self):
+        output = utils.open_webpage('http://opnfv.org')
+        assert output is not None
+
+    def test_open_invalid_webpage(self):
+        assert_raises(urllib.request.URLError, utils.open_webpage,
+                      'http://inv4lIdweb-page.com')
+
+    @patch('builtins.open', a_mock_open)
+    @patch('yaml.safe_dump')
+    @patch('yaml.safe_load')
+    def test_edit_tht_env(self, mock_yaml_load, mock_yaml_dump):
+        settings = {'SomeParameter': 'some_value'}
+        mock_yaml_load.return_value = {
+            'parameter_defaults': {'SomeParameter': 'dummy'}
+        }
+        utils.edit_tht_env('/dummy-environment.yaml', 'parameter_defaults',
+                           settings)
+        new_data = {'parameter_defaults': settings}
+        mock_yaml_dump.assert_called_once_with(new_data, a_mock_open(),
+                                               default_flow_style=False)
index 403b709..6c2a185 100644 (file)
@@ -143,7 +143,8 @@ class TestDeploy(unittest.TestCase):
                                            'sfc': False,
                                            'vpn': False,
                                            'yardstick': 'test',
                                            'sfc': False,
                                            'vpn': False,
                                            'yardstick': 'test',
-                                           'os_version': DEFAULT_OS_VERSION}}
+                                           'os_version': DEFAULT_OS_VERSION,
+                                           'containers': False}}
         args = mock_parser.return_value.parse_args.return_value
         args.virtual = False
         args.quickstart = False
         args = mock_parser.return_value.parse_args.return_value
         args.virtual = False
         args.quickstart = False
@@ -216,7 +217,8 @@ class TestDeploy(unittest.TestCase):
                                            'sfc': False,
                                            'vpn': False,
                                            'yardstick': 'test',
                                            'sfc': False,
                                            'vpn': False,
                                            'yardstick': 'test',
-                                           'os_version': DEFAULT_OS_VERSION}}
+                                           'os_version': DEFAULT_OS_VERSION,
+                                           'containers': False}}
         args = mock_parser.return_value.parse_args.return_value
         args.virtual = True
         args.quickstart = False
         args = mock_parser.return_value.parse_args.return_value
         args.virtual = True
         args.quickstart = False
@@ -236,3 +238,67 @@ class TestDeploy(unittest.TestCase):
         args.virt_compute_ram = 16
         args.virt_default_ram = 10
         main()
         args.virt_compute_ram = 16
         args.virt_default_ram = 10
         main()
+
+    @patch('apex.deploy.c_builder')
+    @patch('apex.deploy.uc_builder')
+    @patch('apex.deploy.oc_builder')
+    @patch('apex.deploy.network_data.create_network_data')
+    @patch('apex.deploy.shutil')
+    @patch('apex.deploy.oc_deploy')
+    @patch('apex.deploy.uc_lib')
+    @patch('apex.deploy.build_vms')
+    @patch('apex.deploy.Inventory')
+    @patch('apex.deploy.virt_utils')
+    @patch('apex.deploy.oc_cfg')
+    @patch('apex.deploy.parsers')
+    @patch('apex.deploy.utils')
+    @patch('apex.deploy.NetworkEnvironment')
+    @patch('apex.deploy.NetworkSettings')
+    @patch('apex.deploy.DeploySettings')
+    @patch('apex.deploy.os')
+    @patch('apex.deploy.json')
+    @patch('apex.deploy.jumphost')
+    @patch('apex.deploy.validate_cross_settings')
+    @patch('apex.deploy.validate_deploy_args')
+    @patch('apex.deploy.create_deploy_parser')
+    @patch('builtins.open', a_mock_open, create=True)
+    def test_main_virt_containers_upstream(
+            self, mock_parser, mock_val_args, mock_cross_sets, mock_jumphost,
+            mock_json, mock_os, mock_deploy_sets, mock_net_sets, mock_net_env,
+            mock_utils, mock_parsers, mock_oc_cfg, mock_virt_utils,
+            mock_inv, mock_build_vms, mock_uc_lib, mock_oc_deploy,
+            mock_shutil, mock_network_data, mock_oc_builder,
+            mock_uc_builder, mock_c_builder):
+
+        ds_opts_dict = {'global_params': MagicMock(),
+                        'deploy_options': {'gluon': False,
+                                           'congress': False,
+                                           'sdn_controller': 'opendaylight',
+                                           'dataplane': 'ovs',
+                                           'sfc': False,
+                                           'vpn': False,
+                                           'yardstick': 'test',
+                                           'os_version': DEFAULT_OS_VERSION,
+                                           'containers': True}}
+        args = mock_parser.return_value.parse_args.return_value
+        args.virtual = True
+        args.quickstart = False
+        args.debug = True
+        args.virt_default_ram = 10
+        args.ha_enabled = True
+        args.virt_compute_nodes = 1
+        args.virt_compute_ram = None
+        args.virt_default_ram = 12
+        args.upstream = True
+        net_sets = mock_net_sets.return_value
+        net_sets.enabled_network_list = ['admin']
+        deploy_sets = mock_deploy_sets.return_value
+        deploy_sets.__getitem__.side_effect = ds_opts_dict.__getitem__
+        deploy_sets.__contains__.side_effect = ds_opts_dict.__contains__
+        main()
+        args.virt_compute_ram = 16
+        args.virt_default_ram = 10
+        main()
+        mock_oc_deploy.prep_image.assert_called
+        # TODO(trozet) add assertions here with arguments for functions in
+        # deploy main
index e9a6e6c..46b5f87 100644 (file)
@@ -11,7 +11,9 @@ import unittest
 
 from apex.builders import overcloud_builder as oc_builder
 from apex.common import constants as con
 
 from apex.builders import overcloud_builder as oc_builder
 from apex.common import constants as con
-from mock import patch
+from mock import patch, mock_open
+
+a_mock_open = mock_open(read_data=None)
 
 
 class TestOvercloudBuilder(unittest.TestCase):
 
 
 class TestOvercloudBuilder(unittest.TestCase):
@@ -37,14 +39,69 @@ class TestOvercloudBuilder(unittest.TestCase):
         mock_git_archive.return_value = '/dummytmp/puppet-opendaylight.tar'
         archive = '/dummytmp/puppet-opendaylight.tar'
         test_virt_ops = [
         mock_git_archive.return_value = '/dummytmp/puppet-opendaylight.tar'
         archive = '/dummytmp/puppet-opendaylight.tar'
         test_virt_ops = [
-            {con.VIRT_INSTALL: 'opendaylight'},
             {con.VIRT_UPLOAD: "{}:/etc/puppet/modules/".format(archive)},
             {con.VIRT_RUN_CMD: 'rm -rf /etc/puppet/modules/opendaylight'},
             {con.VIRT_RUN_CMD: "cd /etc/puppet/modules/ && tar xvf "
             {con.VIRT_UPLOAD: "{}:/etc/puppet/modules/".format(archive)},
             {con.VIRT_RUN_CMD: 'rm -rf /etc/puppet/modules/opendaylight'},
             {con.VIRT_RUN_CMD: "cd /etc/puppet/modules/ && tar xvf "
-                               "puppet-opendaylight.tar"}
+                               "puppet-opendaylight.tar"},
+            {con.VIRT_INSTALL: 'opendaylight'}
         ]
         oc_builder.inject_opendaylight(con.DEFAULT_ODL_VERSION, 'dummy.qcow2',
         ]
         oc_builder.inject_opendaylight(con.DEFAULT_ODL_VERSION, 'dummy.qcow2',
-                                       '/dummytmp/')
+                                       '/dummytmp/', uc_ip='192.0.2.2',
+                                       os_version=con.DEFAULT_OS_VERSION)
+        assert mock_git_archive.called
+        assert mock_add_repo.called
+        mock_customize.assert_called_once_with(test_virt_ops, 'dummy.qcow2')
+
+    @patch('apex.builders.overcloud_builder.build_dockerfile')
+    @patch('apex.builders.common_builder.create_git_archive')
+    @patch('apex.builders.common_builder.add_repo')
+    @patch('apex.virtual.utils.virt_customize')
+    def test_inject_opendaylight_docker(self, mock_customize, mock_add_repo,
+                                        mock_git_archive, mock_build_docker):
+        mock_git_archive.return_value = '/dummytmp/puppet-opendaylight.tar'
+        archive = '/dummytmp/puppet-opendaylight.tar'
+        test_virt_ops = [
+            {con.VIRT_UPLOAD: "{}:/etc/puppet/modules/".format(archive)},
+            {con.VIRT_RUN_CMD: 'rm -rf /etc/puppet/modules/opendaylight'},
+            {con.VIRT_RUN_CMD: "cd /etc/puppet/modules/ && tar xvf "
+                               "puppet-opendaylight.tar"},
+        ]
+        oc_builder.inject_opendaylight('oxygen', 'dummy.qcow2',
+                                       '/dummytmp/', uc_ip='192.0.2.2',
+                                       os_version=con.DEFAULT_OS_VERSION,
+                                       docker_tag='latest')
+        odl_url = "https://nexus.opendaylight.org/content/repositories" \
+                  "/opendaylight-oxygen-epel-7-x86_64-devel/"
+        docker_cmds = [
+            "RUN yum remove opendaylight -y",
+            "RUN echo $'[opendaylight]\\n\\",
+            "baseurl={}\\n\\".format(odl_url),
+            "gpgcheck=0\\n\\",
+            "enabled=1' > /etc/yum.repos.d/opendaylight.repo",
+            "RUN yum -y install opendaylight"
+        ]
+        src_img_uri = "192.0.2.1:8787/nova-api/centos-binary-master:latest"
         assert mock_git_archive.called
         assert mock_add_repo.called
         assert mock_git_archive.called
         assert mock_add_repo.called
+        assert mock_build_docker.called_once_with(
+            'opendaylight', '/dummytmp', docker_cmds, src_img_uri
+        )
         mock_customize.assert_called_once_with(test_virt_ops, 'dummy.qcow2')
         mock_customize.assert_called_once_with(test_virt_ops, 'dummy.qcow2')
+
+    @patch('builtins.open', a_mock_open)
+    @patch('os.makedirs')
+    @patch('os.path.isfile')
+    @patch('os.path.isdir')
+    def test_build_dockerfile(self, mock_isdir, mock_isfile, mock_makedirs):
+        src_img_uri = "192.0.2.1:8787/nova-api/centos-binary-master:latest"
+        oc_builder.build_dockerfile('nova-api', '/tmpdummy/', ['RUN dummy'],
+                                    src_img_uri)
+        a_mock_open.assert_called_with(
+            '/tmpdummy/containers/nova-api/Dockerfile', 'a+')
+        a_mock_open().write.assert_called_once_with('RUN dummy')
+
+    @patch('tarfile.open')
+    @patch('os.path.isdir')
+    def test_archive_docker_patches(self, mock_isdir, mock_tarfile):
+        oc_builder.archive_docker_patches('/tmpdummy/')
+        assert mock_tarfile.assert_called
index b5b1b75..54f95bb 100644 (file)
@@ -29,6 +29,7 @@ from apex.overcloud.deploy import prep_sriov_env
 from apex.overcloud.deploy import external_network_cmds
 from apex.overcloud.deploy import create_congress_cmds
 from apex.overcloud.deploy import SDN_FILE_MAP
 from apex.overcloud.deploy import external_network_cmds
 from apex.overcloud.deploy import create_congress_cmds
 from apex.overcloud.deploy import SDN_FILE_MAP
+from apex.overcloud.deploy import get_docker_sdn_file
 
 from nose.tools import (
     assert_regexp_matches,
 
 from nose.tools import (
     assert_regexp_matches,
@@ -88,14 +89,17 @@ class TestOvercloudDeploy(unittest.TestCase):
     def test_create_deploy_cmd(self, mock_sdn_list, mock_prep_storage,
                                mock_prep_sriov):
         mock_sdn_list.return_value = []
     def test_create_deploy_cmd(self, mock_sdn_list, mock_prep_storage,
                                mock_prep_sriov):
         mock_sdn_list.return_value = []
-        ds = {'deploy_options': MagicMock(),
+        ds = {'deploy_options':
+              {'ha_enabled': True,
+               'congress': True,
+               'tacker': True,
+               'containers': False,
+               'barometer': True,
+               'ceph': False,
+               'sriov': False
+               },
               'global_params': MagicMock()}
               'global_params': MagicMock()}
-        ds['global_params'].__getitem__.side_effect = \
-            lambda i: True if i == 'ha_enabled' else MagicMock()
-        ds['deploy_options'].__getitem__.side_effect = \
-            lambda i: True if i == 'congress' else MagicMock()
-        ds['deploy_options'].__contains__.side_effect = \
-            lambda i: True if i == 'congress' else MagicMock()
+
         ns = {'ntp': ['ntp']}
         inv = MagicMock()
         inv.get_node_counts.return_value = (3, 2)
         ns = {'ntp': ['ntp']}
         inv = MagicMock()
         inv.get_node_counts.return_value = (3, 2)
@@ -109,6 +113,44 @@ class TestOvercloudDeploy(unittest.TestCase):
         assert_in('--control-scale 3', result_cmd)
         assert_in('--compute-scale 2', result_cmd)
 
         assert_in('--control-scale 3', result_cmd)
         assert_in('--compute-scale 2', result_cmd)
 
+    @patch('apex.overcloud.deploy.prep_sriov_env')
+    @patch('apex.overcloud.deploy.prep_storage_env')
+    @patch('builtins.open', mock_open())
+    def test_create_deploy_cmd_containers_sdn(self, mock_prep_storage,
+                                              mock_prep_sriov):
+        ds = {'deploy_options':
+              {'ha_enabled': True,
+               'congress': False,
+               'tacker': False,
+               'containers': True,
+               'barometer': False,
+               'ceph': True,
+               'sdn_controller': 'opendaylight',
+               'sriov': False
+               },
+              'global_params': MagicMock()}
+
+        ns = {'ntp': ['ntp']}
+        inv = MagicMock()
+        inv.get_node_counts.return_value = (3, 2)
+        virt = True
+        result_cmd = create_deploy_cmd(ds, ns, inv, '/tmp', virt)
+        assert_in('--ntp-server ntp', result_cmd)
+        assert_not_in('enable_tacker.yaml', result_cmd)
+        assert_not_in('enable_congress.yaml', result_cmd)
+        assert_not_in('enable_barometer.yaml', result_cmd)
+        assert_in('virtual-environment.yaml', result_cmd)
+        assert_in('--control-scale 3', result_cmd)
+        assert_in('--compute-scale 2', result_cmd)
+        assert_in('docker-images.yaml', result_cmd)
+        assert_in('sdn-images.yaml', result_cmd)
+        assert_in('/usr/share/openstack-tripleo-heat-templates/environments'
+                  '/docker.yaml', result_cmd)
+        assert_in('/usr/share/openstack-tripleo-heat-templates/environments/'
+                  'storage-environment.yaml', result_cmd)
+        assert_in('/usr/share/openstack-tripleo-heat-templates/environments'
+                  '/services-docker/neutron-opendaylight.yaml', result_cmd)
+
     @patch('apex.overcloud.deploy.prep_sriov_env')
     @patch('apex.overcloud.deploy.prep_storage_env')
     @patch('apex.overcloud.deploy.build_sdn_env_list')
     @patch('apex.overcloud.deploy.prep_sriov_env')
     @patch('apex.overcloud.deploy.prep_storage_env')
     @patch('apex.overcloud.deploy.build_sdn_env_list')
@@ -155,7 +197,8 @@ class TestOvercloudDeploy(unittest.TestCase):
     def test_prep_image(self, mock_os_path, mock_shutil, mock_virt_utils):
         ds_opts = {'dataplane': 'fdio',
                    'sdn_controller': 'opendaylight',
     def test_prep_image(self, mock_os_path, mock_shutil, mock_virt_utils):
         ds_opts = {'dataplane': 'fdio',
                    'sdn_controller': 'opendaylight',
-                   'odl_version': 'master'}
+                   'odl_version': 'master',
+                   'sriov': False}
         ds = {'deploy_options': MagicMock(),
               'global_params': MagicMock()}
         ds['deploy_options'].__getitem__.side_effect = \
         ds = {'deploy_options': MagicMock(),
               'global_params': MagicMock()}
         ds['deploy_options'].__getitem__.side_effect = \
@@ -200,6 +243,35 @@ class TestOvercloudDeploy(unittest.TestCase):
         prep_image(ds, ns, 'undercloud.qcow2', '/tmp', root_pw='test')
         mock_virt_utils.virt_customize.assert_called()
 
         prep_image(ds, ns, 'undercloud.qcow2', '/tmp', root_pw='test')
         mock_virt_utils.virt_customize.assert_called()
 
+    @patch('apex.overcloud.deploy.c_builder')
+    @patch('apex.overcloud.deploy.oc_builder')
+    @patch('apex.overcloud.deploy.virt_utils')
+    @patch('apex.overcloud.deploy.shutil')
+    @patch('apex.overcloud.deploy.os.path')
+    @patch('builtins.open', mock_open())
+    def test_prep_image_sdn_odl_upstream_containers_patches(
+            self, mock_os_path, mock_shutil, mock_virt_utils,
+            mock_oc_builder, mock_c_builder):
+        ds_opts = {'dataplane': 'ovs',
+                   'sdn_controller': 'opendaylight',
+                   'odl_version': con.DEFAULT_ODL_VERSION,
+                   'odl_vpp_netvirt': True}
+        ds = {'deploy_options': MagicMock(),
+              'global_params': MagicMock()}
+        ds['deploy_options'].__getitem__.side_effect = \
+            lambda i: ds_opts.get(i, MagicMock())
+        ds['deploy_options'].__contains__.side_effect = \
+            lambda i: True if i in ds_opts else MagicMock()
+        ns = MagicMock()
+        mock_c_builder.add_upstream_patches.return_value = ['nova-api']
+        patches = ['dummy_nova_patch']
+        rv = prep_image(ds, ns, 'undercloud.qcow2', '/tmp', root_pw='test',
+                        docker_tag='latest', patches=patches, upstream=True)
+        mock_oc_builder.inject_opendaylight.assert_called
+        mock_virt_utils.virt_customize.assert_called()
+        mock_c_builder.add_upstream_patches.assert_called
+        self.assertListEqual(sorted(rv), ['nova-api', 'opendaylight'])
+
     @patch('apex.overcloud.deploy.virt_utils')
     @patch('apex.overcloud.deploy.shutil')
     @patch('apex.overcloud.deploy.os.path')
     @patch('apex.overcloud.deploy.virt_utils')
     @patch('apex.overcloud.deploy.shutil')
     @patch('apex.overcloud.deploy.os.path')
@@ -380,19 +452,60 @@ class TestOvercloudDeploy(unittest.TestCase):
                               mock_ceph_key):
         mock_fileinput.input.return_value = \
             ['CephClusterFSID', 'CephMonKey', 'CephAdminKey', 'random_key']
                               mock_ceph_key):
         mock_fileinput.input.return_value = \
             ['CephClusterFSID', 'CephMonKey', 'CephAdminKey', 'random_key']
-        ds = {'deploy_options': MagicMock()}
-        ds['deploy_options'].__getitem__.side_effect = \
-            lambda i: '/dev/sdx' if i == 'ceph_device' else MagicMock()
-        ds['deploy_options'].__contains__.side_effect = \
-            lambda i: True if i == 'ceph_device' else MagicMock()
-        prep_storage_env(ds, '/tmp')
+        ds = {'deploy_options': {
+            'ceph_device': '/dev/sdx',
+            'containers': False
+        }}
+        ns = {}
+        prep_storage_env(ds, ns, virtual=False, tmp_dir='/tmp')
+
+    @patch('apex.overcloud.deploy.utils.edit_tht_env')
+    @patch('apex.overcloud.deploy.generate_ceph_key')
+    @patch('apex.overcloud.deploy.fileinput')
+    @patch('apex.overcloud.deploy.os.path.isfile')
+    @patch('builtins.open', mock_open())
+    def test_prep_storage_env_containers(self, mock_isfile, mock_fileinput,
+                                         mock_ceph_key, mock_edit_tht):
+        mock_fileinput.input.return_value = \
+            ['CephClusterFSID', 'CephMonKey', 'CephAdminKey', 'random_key']
+        ds = {'deploy_options': {
+              'ceph_device': '/dev/sdx',
+              'containers': True,
+              'os_version': 'master'
+              }, 'global_params': {'ha_enabled': False}}
+        ns = {'networks': {con.ADMIN_NETWORK: {'installer_vm':
+                                               {'ip': '192.0.2.1'}}}
+              }
+        prep_storage_env(ds, ns, virtual=True, tmp_dir='/tmp')
+        ceph_params = {
+            'DockerCephDaemonImage':
+                '192.0.2.1:8787/ceph/daemon:tag-build-master-luminous-centos'
+                '-7',
+            'CephPoolDefaultSize': 1,
+            'CephAnsibleExtraConfig': {
+                'centos_package_dependencies': [],
+                'ceph_osd_docker_memory_limit': '1g',
+                'ceph_mds_docker_memory_limit': '1g'
+            },
+            'CephPoolDefaultPgNum': 32,
+            'CephAnsibleDisksConfig': {
+                'devices': ['/dev/sdx'],
+                'journal_size': 512,
+                'osd_scenario': 'collocated'
+            }
+        }
+        mock_edit_tht.assert_called_with('/tmp/storage-environment.yaml',
+                                         'parameter_defaults',
+                                         ceph_params)
 
     @patch('apex.overcloud.deploy.os.path.isfile')
     @patch('builtins.open', mock_open())
     def test_prep_storage_env_raises(self, mock_isfile):
         mock_isfile.return_value = False
         ds = {'deploy_options': MagicMock()}
 
     @patch('apex.overcloud.deploy.os.path.isfile')
     @patch('builtins.open', mock_open())
     def test_prep_storage_env_raises(self, mock_isfile):
         mock_isfile.return_value = False
         ds = {'deploy_options': MagicMock()}
-        assert_raises(ApexDeployException, prep_storage_env, ds, '/tmp')
+        ns = {}
+        assert_raises(ApexDeployException, prep_storage_env, ds,
+                      ns, virtual=False, tmp_dir='/tmp')
 
     @patch('apex.overcloud.deploy.generate_ceph_key')
     @patch('apex.overcloud.deploy.fileinput')
 
     @patch('apex.overcloud.deploy.generate_ceph_key')
     @patch('apex.overcloud.deploy.fileinput')
@@ -487,3 +600,19 @@ class TestOvercloudDeploy(unittest.TestCase):
     def test_create_congress_cmds_raises(self, mock_parsers):
         mock_parsers.return_value.__getitem__.side_effect = KeyError()
         assert_raises(KeyError, create_congress_cmds, 'overcloud_file')
     def test_create_congress_cmds_raises(self, mock_parsers):
         mock_parsers.return_value.__getitem__.side_effect = KeyError()
         assert_raises(KeyError, create_congress_cmds, 'overcloud_file')
+
+    def test_get_docker_sdn_file(self):
+        ds_opts = {'ha_enabled': True,
+                   'congress': True,
+                   'tacker': True,
+                   'containers': False,
+                   'barometer': True,
+                   'ceph': False,
+                   'sdn_controller': 'opendaylight'
+                   }
+        output = get_docker_sdn_file(ds_opts)
+        self.assertEqual(output,
+                         ('/usr/share/openstack-tripleo-heat-templates'
+                          '/environments/services-docker/neutron-opendaylight'
+                          '.yaml')
+                         )
index 0df785f..9e2752a 100644 (file)
@@ -7,6 +7,7 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 
+import ipaddress
 import libvirt
 import os
 import subprocess
 import libvirt
 import os
 import subprocess
@@ -187,13 +188,27 @@ class TestUndercloud(unittest.TestCase):
     @patch.object(Undercloud, '_get_vm', return_value=None)
     @patch.object(Undercloud, 'create')
     def test_generate_config(self, mock_get_vm, mock_create):
     @patch.object(Undercloud, '_get_vm', return_value=None)
     @patch.object(Undercloud, 'create')
     def test_generate_config(self, mock_get_vm, mock_create):
-        ns_net = MagicMock()
-        ns_net.__getitem__.side_effect = \
-            lambda i: '1234/24' if i is 'cidr' else MagicMock()
-        ns = {'apex': MagicMock(),
-              'dns-domain': 'dns',
-              'networks': {'admin': ns_net,
-                           'external': [ns_net]}}
+        ns = MagicMock()
+        ns.enabled_network_list = ['admin', 'external']
+        ns_dict = {
+            'apex': MagicMock(),
+            'dns-domain': 'dns',
+            'networks': {'admin':
+                         {'cidr': ipaddress.ip_network('192.0.2.0/24'),
+                          'installer_vm': {'ip': '192.0.2.1',
+                                           'vlan': 'native'},
+                          'dhcp_range': ['192.0.2.15', '192.0.2.30']
+                          },
+                         'external':
+                         [{'enabled': True,
+                           'cidr': ipaddress.ip_network('192.168.0.0/24'),
+                          'installer_vm': {'ip': '192.168.0.1',
+                                           'vlan': 'native'}
+                           }]
+                         }
+        }
+        ns.__getitem__.side_effect = ns_dict.__getitem__
+        ns.__contains__.side_effect = ns_dict.__contains__
         ds = {'global_params': {}}
 
         Undercloud('img_path', 'tplt_path').generate_config(ns, ds)
         ds = {'global_params': {}}
 
         Undercloud('img_path', 'tplt_path').generate_config(ns, ds)
index 915c85f..e799d37 100644 (file)
@@ -115,13 +115,14 @@ class Undercloud:
                 "correctly")
 
     def configure(self, net_settings, deploy_settings,
                 "correctly")
 
     def configure(self, net_settings, deploy_settings,
-                  playbook, apex_temp_dir):
+                  playbook, apex_temp_dir, virtual_oc=False):
         """
         Configures undercloud VM
         :param net_settings: Network settings for deployment
         :param deploy_settings: Deployment settings for deployment
         :param playbook: playbook to use to configure undercloud
         :param apex_temp_dir: temporary apex directory to hold configs/logs
         """
         Configures undercloud VM
         :param net_settings: Network settings for deployment
         :param deploy_settings: Deployment settings for deployment
         :param playbook: playbook to use to configure undercloud
         :param apex_temp_dir: temporary apex directory to hold configs/logs
+        :param virtual_oc: Boolean to determine if overcloud is virt
         :return: None
         """
 
         :return: None
         """
 
@@ -130,6 +131,7 @@ class Undercloud:
         ansible_vars = Undercloud.generate_config(net_settings,
                                                   deploy_settings)
         ansible_vars['apex_temp_dir'] = apex_temp_dir
         ansible_vars = Undercloud.generate_config(net_settings,
                                                   deploy_settings)
         ansible_vars['apex_temp_dir'] = apex_temp_dir
+        ansible_vars['virtual_overcloud'] = virtual_oc
         try:
             utils.run_ansible(ansible_vars, playbook, host=self.ip,
                               user='stack')
         try:
             utils.run_ansible(ansible_vars, playbook, host=self.ip,
                               user='stack')
@@ -239,7 +241,16 @@ class Undercloud:
             "prefix": str(ns_external['cidr']).split('/')[1],
             "enabled": ns_external['enabled']
         }
             "prefix": str(ns_external['cidr']).split('/')[1],
             "enabled": ns_external['enabled']
         }
-
+        # TODO(trozet): clean this logic up and merge with above
+        if 'external' in ns.enabled_network_list:
+            nat_cidr = ns_external['cidr']
+        else:
+            nat_cidr = ns['networks']['admin']['cidr']
+        config['nat_cidr'] = str(nat_cidr)
+        if nat_cidr.version == 6:
+            config['nat_network_ipv6'] = True
+        else:
+            config['nat_network_ipv6'] = False
         config['http_proxy'] = ns.get('http_proxy', '')
         config['https_proxy'] = ns.get('https_proxy', '')
 
         config['http_proxy'] = ns.get('http_proxy', '')
         config['https_proxy'] = ns.get('https_proxy', '')
 
index b1069d2..1608329 100644 (file)
@@ -1,4 +1,4 @@
-heat_template_version: 2015-04-30
+heat_template_version: 2016-10-14
 
 description: >
   Software Config to drive os-net-config to configure multiple interfaces
 
 description: >
   Software Config to drive os-net-config to configure multiple interfaces
@@ -78,196 +78,200 @@ parameters:
 
 resources:
   OsNetConfigImpl:
 
 resources:
   OsNetConfigImpl:
-    type: OS::Heat::StructuredConfig
+    type: OS::Heat::SoftwareConfig
     properties:
     properties:
-      group: os-apply-config
+      group: script
       config:
       config:
-        os_net_config:
-          network_config:
-            -
-            {%- if not nets['external'][0]['enabled'] or nets['tenant']['nic_mapping'][role]['vlan'] is number or nets['storage']['nic_mapping'][role]['vlan'] is number or nets['api']['nic_mapping'][role]['vlan'] is number or  nets['external'][0]['nic_mapping'][role]['vlan'] is number %}
-              type: ovs_bridge
-            {%- if nets['external'][0]['enabled'] and nets['external'][0]['nic_mapping'][role]['vlan'] == 'native' %}
-              name: br-isolated
-            {%- else %}
-              name: {get_input: bridge_name}
-            {%- endif %}
-              members:
-                -
-                  type: interface
-                  name: {{ nets['admin']['nic_mapping'][role]['members'][0] }}
-                  # force the MAC address of the bridge to this interface
-                  primary: true
-                {%- if nets['external'][0]['enabled'] and nets['external'][0]['nic_mapping'][role]['vlan'] is number %}
-                -
-                  type: vlan
-                  vlan_id: {get_param: ExternalNetworkVlanID}
-                  addresses:
+        str_replace:
+          template:
+            get_file: /usr/share/openstack-tripleo-heat-templates/network/scripts/run-os-net-config.sh
+          params:
+            $network_config:
+              network_config:
+              -
+              {%- if not nets['external'][0]['enabled'] or nets['tenant']['nic_mapping'][role]['vlan'] is number or nets['storage']['nic_mapping'][role]['vlan'] is number or nets['api']['nic_mapping'][role]['vlan'] is number or  nets['external'][0]['nic_mapping'][role]['vlan'] is number %}
+                type: ovs_bridge
+              {%- if nets['external'][0]['enabled'] and nets['external'][0]['nic_mapping'][role]['vlan'] == 'native' %}
+                name: br-isolated
+              {%- else %}
+                name: br-ex
+              {%- endif %}
+                members:
                   -
                   -
-                    ip_netmask: {get_param: ExternalIpSubnet}
-                  routes:
-                    -
-                      default: true
-                      next_hop: {get_param: ExternalInterfaceDefaultRoute}
-                {%- endif %}
-                {%- if nets['tenant']['enabled'] and nets['tenant']['nic_mapping'][role]['vlan'] is number %}
-                -
-                  type: vlan
-                  vlan_id: {get_param: TenantNetworkVlanID}
-                  addresses:
-                    -
-                      ip_netmask: {get_param: TenantIpSubnet}
-                {%- endif %}
-                {%- if nets['storage']['enabled'] and nets['storage']['nic_mapping'][role]['vlan'] is number %}
-                -
-                  type: vlan
-                  vlan_id: {get_param: StorageNetworkVlanID}
-                  addresses:
-                    -
-                      ip_netmask: {get_param: StorageIpSubnet}
-                {%- endif %}
-                {%- if nets['api']['enabled'] and nets['api']['nic_mapping'][role]['vlan'] is number %}
-                -
-                  type: vlan
-                  vlan_id: {get_param: InternalApiNetworkVlanID}
-                  addresses:
+                    type: interface
+                    name: {{ nets['admin']['nic_mapping'][role]['members'][0] }}
+                    # force the MAC address of the bridge to this interface
+                    primary: true
+                  {%- if nets['external'][0]['enabled'] and nets['external'][0]['nic_mapping'][role]['vlan'] is number %}
+                  -
+                    type: vlan
+                    vlan_id: {get_param: ExternalNetworkVlanID}
+                    addresses:
                     -
                     -
-                      ip_netmask: {get_param: InternalApiIpSubnet}
-                {%- endif %}
-            {%- else %}
-              type: {{ nets['admin']['nic_mapping'][role]['phys_type'] }}
-              {%- if nets['admin']['nic_mapping'][role]['phys_type'] == 'linux_bridge' %}
-              name: br-ctlplane
-              members:
-                -
-                  type: interface
-                  name: {{ nets['admin']['nic_mapping'][role]['members'][0] }}
-                  primary: true
+                      ip_netmask: {get_param: ExternalIpSubnet}
+                    routes:
+                      -
+                        default: true
+                        next_hop: {get_param: ExternalInterfaceDefaultRoute}
+                  {%- endif %}
+                  {%- if nets['tenant']['enabled'] and nets['tenant']['nic_mapping'][role]['vlan'] is number %}
+                  -
+                    type: vlan
+                    vlan_id: {get_param: TenantNetworkVlanID}
+                    addresses:
+                      -
+                        ip_netmask: {get_param: TenantIpSubnet}
+                  {%- endif %}
+                  {%- if nets['storage']['enabled'] and nets['storage']['nic_mapping'][role]['vlan'] is number %}
+                  -
+                    type: vlan
+                    vlan_id: {get_param: StorageNetworkVlanID}
+                    addresses:
+                      -
+                        ip_netmask: {get_param: StorageIpSubnet}
+                  {%- endif %}
+                  {%- if nets['api']['enabled'] and nets['api']['nic_mapping'][role]['vlan'] is number %}
+                  -
+                    type: vlan
+                    vlan_id: {get_param: InternalApiNetworkVlanID}
+                    addresses:
+                      -
+                        ip_netmask: {get_param: InternalApiIpSubnet}
+                  {%- endif %}
               {%- else %}
               {%- else %}
-              name: {{ nets['admin']['nic_mapping'][role]['members'][0] }}
-              {%- endif %}
-            {%- endif %}
-              use_dhcp: false
-              dns_servers: {get_param: DnsServers}
-              addresses:
-                -
-                  ip_netmask:
-                    list_join:
-                      - '/'
-                      - - {get_param: ControlPlaneIp}
-                        - {get_param: ControlPlaneSubnetCidr}
-              routes:
-                -
-                  ip_netmask: 169.254.169.254/32
-                  next_hop: {get_param: EC2MetadataIp}
-                {%- if external_net_af == 6 or role == 'compute' or not nets['external'][0]['enabled'] %}
-                -
-                  default: true
-                  next_hop: {get_param: ControlPlaneDefaultRoute}
+                type: {{ nets['admin']['nic_mapping'][role]['phys_type'] }}
+                {%- if nets['admin']['nic_mapping'][role]['phys_type'] == 'linux_bridge' %}
+                name: br-ctlplane
+                members:
+                  -
+                    type: interface
+                    name: {{ nets['admin']['nic_mapping'][role]['members'][0] }}
+                    primary: true
+                {%- else %}
+                name: {{ nets['admin']['nic_mapping'][role]['members'][0] }}
                 {%- endif %}
                 {%- endif %}
-
-            {%- if nets['tenant']['enabled'] and nets['tenant']['nic_mapping'][role]['vlan'] == 'native' %}
-            {%- if ovs_dpdk_bridge == 'br-phy' and role == 'compute' %}
-            -
-              type: ovs_user_bridge
-              name: {{ ovs_dpdk_bridge }}
-              use_dhcp: false
-              addresses:
-                -
-                  ip_netmask: {get_param: TenantIpSubnet}
-              members:
-                -
-                  type: ovs_dpdk_port
-                  name: dpdk0
-                  driver: {{ nets['tenant']['nic_mapping'][role]['uio_driver'] }}
-                  members:
-                    -
-                      type: interface
-                      name: {{ nets['tenant']['nic_mapping'][role]['members'][0] }}
-                      # force the MAC address of the bridge to this interface
-                      primary: true
-            {%- else %}
-            -
-              type: {{ nets['tenant']['nic_mapping'][role]['phys_type'] }}
-              name: {{ nets['tenant']['nic_mapping'][role]['members'][0] }}
-              {%- if 'uio-driver' in nets['tenant']['nic_mapping'][role] %}
-              uio_driver: {{ nets['tenant']['nic_mapping'][role]['uio-driver'] }}
-              {%- endif %}
-              {%- if 'interface-options' in nets['tenant']['nic_mapping'][role] %}
-              options: '{{ nets['tenant']['nic_mapping'][role]['interface-options'] }}'
-              {%- endif %}
-              use_dhcp: false
-              addresses:
-                -
-                  ip_netmask: {get_param: TenantIpSubnet}
-            {%- endif %}
-            {%- endif %}
-            {%- if nets['external'][0]['enabled'] and external_net_type != 'br-ex' and nets['external'][0]['nic_mapping'][role]['vlan'] == 'native' %}
-            -
-              type: {{ nets['external'][0]['nic_mapping'][role]['phys_type'] }}
-              name: {{ nets['external'][0]['nic_mapping'][role]['members'][0] }}
-              {%- if 'uio-driver' in nets['external'][0]['nic_mapping'][role] %}
-              uio_driver: {{ nets['external'][0]['nic_mapping'][role]['uio-driver'] }}
-              {%- endif %}
-              {%- if role == 'controller' %}
-              dns_servers: {get_param: DnsServers}
               {%- endif %}
               {%- endif %}
-              use_dhcp: false
-              addresses:
-                -
-                  ip_netmask: {get_param: ExternalIpSubnet}
-              routes:
-                -
-                  {%- if role == 'controller' %}
-                  default: true
+                use_dhcp: false
+                dns_servers: {get_param: DnsServers}
+                addresses:
+                  -
+                    ip_netmask:
+                      list_join:
+                        - '/'
+                        - - {get_param: ControlPlaneIp}
+                          - {get_param: ControlPlaneSubnetCidr}
+                routes:
+                  -
+                    ip_netmask: 169.254.169.254/32
+                    next_hop: {get_param: EC2MetadataIp}
+                  {%- if external_net_af == 6 or role == 'compute' or not nets['external'][0]['enabled'] %}
+                  -
+                    default: true
+                    next_hop: {get_param: ControlPlaneDefaultRoute}
                   {%- endif %}
                   {%- endif %}
-                  ip_netmask: 0.0.0.0/0
-                  next_hop: {get_param: ExternalInterfaceDefaultRoute}
-            {%- elif nets['external'][0]['enabled'] and external_net_type == 'br-ex' and nets['external'][0]['nic_mapping'][role]['vlan'] == 'native' %}
-            -
+
+              {%- if nets['tenant']['enabled'] and nets['tenant']['nic_mapping'][role]['vlan'] == 'native' %}
               {%- if ovs_dpdk_bridge == 'br-phy' and role == 'compute' %}
               {%- if ovs_dpdk_bridge == 'br-phy' and role == 'compute' %}
-              type: ovs_user_bridge
+              -
+                type: ovs_user_bridge
+                name: {{ ovs_dpdk_bridge }}
+                use_dhcp: false
+                addresses:
+                  -
+                    ip_netmask: {get_param: TenantIpSubnet}
+                members:
+                  -
+                    type: ovs_dpdk_port
+                    name: dpdk0
+                    driver: {{ nets['tenant']['nic_mapping'][role]['uio_driver'] }}
+                    members:
+                      -
+                        type: interface
+                        name: {{ nets['tenant']['nic_mapping'][role]['members'][0] }}
+                        # force the MAC address of the bridge to this interface
+                        primary: true
               {%- else %}
               {%- else %}
-              type: ovs_bridge
+              -
+                type: {{ nets['tenant']['nic_mapping'][role]['phys_type'] }}
+                name: {{ nets['tenant']['nic_mapping'][role]['members'][0] }}
+                {%- if 'uio-driver' in nets['tenant']['nic_mapping'][role] %}
+                uio_driver: {{ nets['tenant']['nic_mapping'][role]['uio-driver'] }}
+                {%- endif %}
+                {%- if 'interface-options' in nets['tenant']['nic_mapping'][role] %}
+                options: '{{ nets['tenant']['nic_mapping'][role]['interface-options'] }}'
+                {%- endif %}
+                use_dhcp: false
+                addresses:
+                  -
+                    ip_netmask: {get_param: TenantIpSubnet}
+              {%- endif %}
               {%- endif %}
               {%- endif %}
-              name: {get_input: bridge_name}
-              use_dhcp: false
-              members:
-                -
-                  type: interface
-                  name: {{ nets['external'][0]['nic_mapping'][role]['members'][0] }}
-                  # force the MAC address of the bridge to this interface
-                  primary: true
-              {%- if role == 'controller' %}
-              dns_servers: {get_param: DnsServers}
-              addresses:
-                -
-                  ip_netmask: {get_param: ExternalIpSubnet}
-              routes:
-                -
-                  default: true
-                  ip_netmask: 0.0.0.0/0
-                  next_hop: {get_param: ExternalInterfaceDefaultRoute}
+              {%- if nets['external'][0]['enabled'] and external_net_type != 'br-ex' and nets['external'][0]['nic_mapping'][role]['vlan'] == 'native' %}
+              -
+                type: {{ nets['external'][0]['nic_mapping'][role]['phys_type'] }}
+                name: {{ nets['external'][0]['nic_mapping'][role]['members'][0] }}
+                {%- if 'uio-driver' in nets['external'][0]['nic_mapping'][role] %}
+                uio_driver: {{ nets['external'][0]['nic_mapping'][role]['uio-driver'] }}
+                {%- endif %}
+                {%- if role == 'controller' %}
+                dns_servers: {get_param: DnsServers}
+                {%- endif %}
+                use_dhcp: false
+                addresses:
+                  -
+                    ip_netmask: {get_param: ExternalIpSubnet}
+                routes:
+                  -
+                    {%- if role == 'controller' %}
+                    default: true
+                    {%- endif %}
+                    ip_netmask: 0.0.0.0/0
+                    next_hop: {get_param: ExternalInterfaceDefaultRoute}
+              {%- elif nets['external'][0]['enabled'] and external_net_type == 'br-ex' and nets['external'][0]['nic_mapping'][role]['vlan'] == 'native' %}
+              -
+                {%- if ovs_dpdk_bridge == 'br-phy' and role == 'compute' %}
+                type: ovs_user_bridge
+                {%- else %}
+                type: ovs_bridge
+                {%- endif %}
+                name: br-ex
+                use_dhcp: false
+                members:
+                  -
+                    type: interface
+                    name: {{ nets['external'][0]['nic_mapping'][role]['members'][0] }}
+                    # force the MAC address of the bridge to this interface
+                    primary: true
+                {%- if role == 'controller' %}
+                dns_servers: {get_param: DnsServers}
+                addresses:
+                  -
+                    ip_netmask: {get_param: ExternalIpSubnet}
+                routes:
+                  -
+                    default: true
+                    ip_netmask: 0.0.0.0/0
+                    next_hop: {get_param: ExternalInterfaceDefaultRoute}
+                {%- endif %}
+              {%- endif %}
+              {%- if nets['storage']['enabled'] and nets['storage']['nic_mapping'][role]['vlan'] == 'native' %}
+              -
+                type: interface
+                name: {{ nets['storage']['nic_mapping'][role]['members'][0] }}
+                use_dhcp: false
+                addresses:
+                  -
+                    ip_netmask: {get_param: StorageIpSubnet}
+              {%- endif %}
+              {%- if nets['api']['enabled'] and nets['api']['nic_mapping'][role]['vlan'] == 'native' %}
+              -
+                type: interface
+                name: {{ nets['api']['nic_mapping'][role]['members'][0] }}
+                use_dhcp: false
+                addresses:
+                  -
+                    ip_netmask: {get_param: InternalApiIpSubnet}
               {%- endif %}
               {%- endif %}
-            {%- endif %}
-            {%- if nets['storage']['enabled'] and nets['storage']['nic_mapping'][role]['vlan'] == 'native' %}
-            -
-              type: interface
-              name: {{ nets['storage']['nic_mapping'][role]['members'][0] }}
-              use_dhcp: false
-              addresses:
-                -
-                  ip_netmask: {get_param: StorageIpSubnet}
-            {%- endif %}
-            {%- if nets['api']['enabled'] and nets['api']['nic_mapping'][role]['vlan'] == 'native' %}
-            -
-              type: interface
-              name: {{ nets['api']['nic_mapping'][role]['members'][0] }}
-              use_dhcp: false
-              addresses:
-                -
-                  ip_netmask: {get_param: InternalApiIpSubnet}
-            {%- endif %}
 
 outputs:
   OS::stack_id:
 
 outputs:
   OS::stack_id:
index 124f252..5b82b72 100644 (file)
@@ -76,8 +76,12 @@ install config/inventory/pod_example_settings.yaml %{buildroot}%{_docdir}/opnfv/
 %{_sysconfdir}/opnfv-apex/os-nosdn-ovs_dpdk-noha.yaml
 %{_sysconfdir}/opnfv-apex/os-nosdn-nofeature-ha.yaml
 %{_sysconfdir}/opnfv-apex/os-nosdn-performance-ha.yaml
 %{_sysconfdir}/opnfv-apex/os-nosdn-ovs_dpdk-noha.yaml
 %{_sysconfdir}/opnfv-apex/os-nosdn-nofeature-ha.yaml
 %{_sysconfdir}/opnfv-apex/os-nosdn-performance-ha.yaml
-%{_sysconfdir}/opnfv-apex/os-nosdn-pike-noha.yaml
-%{_sysconfdir}/opnfv-apex/os-odl-pike-noha.yaml
+%{_sysconfdir}/opnfv-apex/os-nosdn-pike_upstream-noha.yaml
+%{_sysconfdir}/opnfv-apex/os-nosdn-queens_upstream-noha.yaml
+%{_sysconfdir}/opnfv-apex/os-nosdn-master_upstream-noha.yaml
+%{_sysconfdir}/opnfv-apex/os-odl-pike_upstream-noha.yaml
+%{_sysconfdir}/opnfv-apex/os-odl-queens_upstream-noha.yaml
+%{_sysconfdir}/opnfv-apex/os-odl-master_upstream-noha.yaml
 %{_sysconfdir}/opnfv-apex/os-nosdn-ovs_dpdk-ha.yaml
 %{_sysconfdir}/opnfv-apex/os-nosdn-kvm-ha.yaml
 %{_sysconfdir}/opnfv-apex/os-nosdn-kvm-noha.yaml
 %{_sysconfdir}/opnfv-apex/os-nosdn-ovs_dpdk-ha.yaml
 %{_sysconfdir}/opnfv-apex/os-nosdn-kvm-ha.yaml
 %{_sysconfdir}/opnfv-apex/os-nosdn-kvm-noha.yaml
@@ -120,6 +124,8 @@ install config/inventory/pod_example_settings.yaml %{buildroot}%{_docdir}/opnfv/
 %doc %{_docdir}/opnfv/inventory.yaml.example
 
 %changelog
 %doc %{_docdir}/opnfv/inventory.yaml.example
 
 %changelog
+* Fri Mar 09 2018 Tim Rozet <trozet@redhat.com> - 6.0-2
+  Add upstream deploy files with containers
 * Wed Feb 14 2018 Tim Rozet <trozet@redhat.com> - 6.0-1
   Fix docutils requirement and add python34-distro
 * Wed Nov 29 2017 Tim Rozet <trozet@redhat.com> - 6.0-0
 * Wed Feb 14 2018 Tim Rozet <trozet@redhat.com> - 6.0-1
   Fix docutils requirement and add python34-distro
 * Wed Nov 29 2017 Tim Rozet <trozet@redhat.com> - 6.0-0
index ef6cdb6..debe6f3 100644 (file)
@@ -6,6 +6,7 @@ parameters:
   CloudDomain: opnfvlf.org
 
 parameter_defaults:
   CloudDomain: opnfvlf.org
 
 parameter_defaults:
+  DockerPuppetProcessCount: 10
   NeutronNetworkVLANRanges: 'datacentre:500:525'
   SshServerOptions:
     HostKey:
   NeutronNetworkVLANRanges: 'datacentre:500:525'
   SshServerOptions:
     HostKey:
index a6721b4..b8f0100 100644 (file)
@@ -15,6 +15,9 @@ global_params:
   ipxe: true
 
 deploy_options:
   ipxe: true
 
 deploy_options:
+  # Whether or not to use containers for the overcloud services
+  containers: true
+
   # Which SDN controller to use. Valid options are 'opendaylight', 'onos',
   # 'opendaylight-external', 'opencontrail' or false. A value of false will
   # use Neutron's OVS ML2 controller.
   # Which SDN controller to use. Valid options are 'opendaylight', 'onos',
   # 'opendaylight-external', 'opencontrail' or false. A value of false will
   # use Neutron's OVS ML2 controller.
diff --git a/config/deploy/os-nosdn-master_upstream-noha.yaml b/config/deploy/os-nosdn-master_upstream-noha.yaml
new file mode 100644 (file)
index 0000000..e775811
--- /dev/null
@@ -0,0 +1,11 @@
+---
+global_params:
+  ha_enabled: false
+deploy_options:
+  containers: true
+  os_version: master
+  sdn_controller: false
+  tacker: false
+  congress: false
+  sfc: false
+  vpn: false
diff --git a/config/deploy/os-nosdn-queens_upstream-noha.yaml b/config/deploy/os-nosdn-queens_upstream-noha.yaml
new file mode 100644 (file)
index 0000000..efadc31
--- /dev/null
@@ -0,0 +1,11 @@
+---
+global_params:
+  ha_enabled: false
+deploy_options:
+  containers: true
+  os_version: queens
+  sdn_controller: false
+  tacker: false
+  congress: false
+  sfc: false
+  vpn: false
similarity index 64%
rename from config/deploy/os-odl-pike-noha.yaml
rename to config/deploy/os-odl-master_upstream-noha.yaml
index 44eff66..39ced49 100644 (file)
@@ -3,12 +3,13 @@ global_params:
   ha_enabled: false
   patches:
     undercloud:
   ha_enabled: false
   patches:
     undercloud:
-      - change-id: I301370fbf47a71291614dd60e4c64adc7b5ebb42
+      - change-id: Ie380cc41ca50a294a2647d673f339d02111bf6b3
         project: openstack/tripleo-heat-templates
 deploy_options:
         project: openstack/tripleo-heat-templates
 deploy_options:
-  os_version: pike
+  containers: true
+  os_version: master
   sdn_controller: opendaylight
   sdn_controller: opendaylight
-  odl_version: carbon
+  odl_version: master
   tacker: false
   congress: false
   sfc: false
   tacker: false
   congress: false
   sfc: false
diff --git a/config/deploy/os-odl-pike_upstream-noha.yaml b/config/deploy/os-odl-pike_upstream-noha.yaml
new file mode 100644 (file)
index 0000000..3fe1c73
--- /dev/null
@@ -0,0 +1,12 @@
+---
+global_params:
+  ha_enabled: false
+deploy_options:
+  containers: false
+  os_version: pike
+  sdn_controller: opendaylight
+  odl_version: carbon
+  tacker: false
+  congress: false
+  sfc: false
+  vpn: false
diff --git a/config/deploy/os-odl-queens_upstream-noha.yaml b/config/deploy/os-odl-queens_upstream-noha.yaml
new file mode 100644 (file)
index 0000000..75a7346
--- /dev/null
@@ -0,0 +1,16 @@
+---
+global_params:
+  ha_enabled: false
+  patches:
+    undercloud:
+      - change-id: Ie380cc41ca50a294a2647d673f339d02111bf6b3
+        project: openstack/tripleo-heat-templates
+deploy_options:
+  containers: true
+  os_version: queens
+  sdn_controller: opendaylight
+  odl_version: oxygen
+  tacker: false
+  congress: false
+  sfc: false
+  vpn: false
diff --git a/docs/contributor/upstream-overcloud-container-design.rst b/docs/contributor/upstream-overcloud-container-design.rst
new file mode 100644 (file)
index 0000000..4b368c2
--- /dev/null
@@ -0,0 +1,126 @@
+=======================================
+Overcloud Container Design/Architecture
+=======================================
+
+This document describes the changes done to implement container deployments in
+Apex.
+
+ * OOO container architecture
+ * Upstream vs Downstream deployment
+ * Apex container deployment overview
+
+OOO container architecture
+--------------------------
+
+Typically in OOO each OpenStack service is represented by a TripleO Heat
+Template stored under the puppet/services directory in the THT code base.  For
+containers, there are new templates created in the docker/services directory
+which include templates for most of the previously defined puppet services.
+These docker templates in almost all cases inherit their puppet template
+counterpart and then build off of that to provide OOO docker specific
+configuration.
+
+The containers configuration in OOO is still done via puppet, and config files
+are then copied into a host directory to be later mounted in the service
+container during deployment.  The docker template contains docker specific
+settings to the service, including what files to mount into the container,
+along with which puppet resources to execute, etc.  Note, the puppet code is
+still stored locally on the host, while the service python code is stored in
+the container image.
+
+RDO has its own registry which stores the Docker images per service to use in
+deployments.  The container image is usually just a CentOS 7 container with the
+relevant service RPM installed.
+
+In addition, Ceph no longer uses puppet to deploy.  puppet-ceph was previously
+used to configure Ceph on the overcloud, but has been replaced with
+Ceph-Ansible.  During container deployment, the undercloud calls a mistral
+workflow to initiate a Ceph-Ansible playbook that will download the Ceph Daemon
+container image to the overcloud and configure it.
+
+Upstream vs. Downstream deployment
+----------------------------------
+
+In Apex we typically build artifacts and then deploy from them.  This worked in
+the past as we usually modified disk images (qcow2s) with files or patches and
+distributed them as RPMs.  However, with containers space becomes an issue.  The
+size of each container image ranges from 800 MB to over 2GB.  This makes it
+unfeasible to download all of the possible images and store them into a disk
+image for distribution.
+
+Therefore for container deployments the only option is to deploy using
+upstream.  This means that only upstream undercloud/overcloud images are pulled
+at deploy time, and the required containers are docker pulled during deployment
+into the undercloud.  For upstream deployments the modified time of the
+RDO images are checked and cached locally, to refrain from unnecessary
+downloading of artifacts.  Also, the optional '--no-fetch' argument may be
+provided at deploy time, to ignore pulling any new images, as long as previous
+artifacts are cached locally.
+
+Apex container deployment
+-------------------------
+
+For deploying containers with Apex, a new deploy setting is available,
+'containers'.  When this flag is used, along with '--upstream' the following
+workflow occurs:
+
+  1. The upstream RDO images for undercloud/overcloud are checked and
+     downloaded if necessary.
+  2. The undercloud VM is installed and configured as a normal deployment.
+  3. The overcloud prep image method is called which is modified now for
+     patches and containers.  The method will now return a set of container
+     images which are going to be patched.  These can be either due to a change
+     in OpenDaylight version for example, or patches included in the deploy
+     settings for the overcloud that include a python path.
+  4. During the overcloud image prep, a new directory in the Apex tmp dir is
+     created called 'containers' which then includes sub-directories for each
+     docker image which is being patched (for example, 'containers/nova-api').
+  5. A Dockerfile is created inside of the directory created in step 4, which
+     holds Dockerfile operations to rebuild the container with patches or any
+     required changes.  Several container images could be used for different
+     services inside of an OS project.  For example, there are different images
+     for each nova service (nova-api, nova-conductor, nova-compute). Therefore
+     a lookup is done to figure out all of the container images that a
+     hypothetically provided nova patch would apply to.  Then a directory and
+     Dockerfile is created for each image.  All of this is tar'ed and
+     compressed into an archive which will be copied to the undercloud.
+  6. Next, the deployment is checked to see if a Ceph device was provided in
+     Apex settings.  If it is not, then a persistent loop device is created
+     in the overcloud image to serve as storage backend for Ceph OSDs.  Apex
+     previously used a directory '/srv/data' to serve as the backend to the
+     OSDs, but that is no longer supported with Ceph-Ansible.
+  7. The deployment command is then created, as usual, but with minor changes
+     to add docker.yaml and docker-ha.yaml files which are required to deploy
+     containers with OOO.
+  8. Next a new playbook is executed, 'prepare_overcloud_containers.yaml',
+     which includes several steps:
+
+     a. The previously archived docker image patches are copied and unpacked
+        into /home/stack.
+     b. 'overcloud_containers' and 'sdn_containers' image files are then
+        prepared which are basically just yaml files which indicate which
+        docker images to pull and where to store them.  Which in our case is a
+        local docker registry.
+     c. The docker images are then pulled and stored into the local registry.
+        The reason for using a local registry is to then have a static source
+        of images that do not change every time a user deploys.  This allows
+        for more control and predictability in deployments.
+     d. Next, the images in the local registry are cross-checked against
+        the images that were previously collected as requiring patches.  Any
+        image which then exists in the local registry and also requires changes
+        is then rebuilt by the docker build command, tagged with 'apex' and
+        then pushed into the local registry.  This helps the user distinguish
+        which containers have been modified by Apex, in case any debugging is
+        needed in comparing upstream docker images with Apex modifications.
+     e. Then new OOO image files are created, to indicate to OOO that the
+        docker images to use for deployment are the ones in the local registry.
+        Also, the ones modified by Apex are modified with the 'apex' tag.
+     f. The relevant Ceph Daemon Docker image is pulled and pushed into the
+        local registry for deployment.
+  9. At this point the OOO deployment command is initiated as in regular
+     Apex deployments.  Each container will be started on the overcloud and
+     puppet executed in it to gather the configuration files in Step 1.  This
+     leads to Step 1 taking longer than it used to in non-containerized
+     deployments.  Following this step, the containers are then brought up in
+     their regular step order, while mounting the previously generated
+     configuration files.
index 9ef0d88..fbac6ee 100644 (file)
         - external_network.enabled
         - aarch64
       become: yes
         - external_network.enabled
         - aarch64
       become: yes
+    - block:
+        - name: Undercloud NAT - MASQUERADE interface
+          iptables:
+            table: nat
+            chain: POSTROUTING
+            out_interface: eth0
+            jump: MASQUERADE
+        - name: Undercloud NAT - MASQUERADE interface with subnet
+          iptables:
+            table: nat
+            chain: POSTROUTING
+            out_interface: eth0
+            jump: MASQUERADE
+            source: "{{ nat_cidr }}"
+        - name: Undercloud NAT - Allow Forwarding
+          iptables:
+            chain: FORWARD
+            in_interface: eth2
+            jump: ACCEPT
+        - name: Undercloud NAT - Allow Stateful Forwarding
+          iptables:
+            chain: FORWARD
+            in_interface: eth2
+            jump: ACCEPT
+            source: "{{ nat_cidr }}"
+            ctstate: ESTABLISHED,RELATED
+        - name: Undercloud NAT - Save iptables
+          shell: service iptables save
+      become: yes
+      when:
+        - not nat_network_ipv6
+        - virtual_overcloud
     - name: fetch storage environment file
       fetch:
         src: /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
     - name: fetch storage environment file
       fetch:
         src: /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
index a8f1cd5..d0206f8 100644 (file)
@@ -26,9 +26,7 @@
         group: stack
         mode: 0644
       become: yes
         group: stack
         mode: 0644
       become: yes
-      with_items:
-        - overcloudrc
-        - overcloudrc.v3
+      with_items: "{{ overcloudrc_files }}"
     - name: Inject OS_PROJECT_ID and OS_TENANT_NAME into overcloudrc
       lineinfile:
         line: "{{ item }}"
     - name: Inject OS_PROJECT_ID and OS_TENANT_NAME into overcloudrc
       lineinfile:
         line: "{{ item }}"
@@ -74,9 +72,7 @@
       when: sdn != false
       become: yes
       become_user: stack
       when: sdn != false
       become: yes
       become_user: stack
-      with_items:
-        - overcloudrc
-        - overcloudrc.v3
+      with_items: "{{ overcloudrc_files }}"
     - name: Register OS Region
       shell: "{{ overcloudrc }} && openstack endpoint list -c Region -f json"
       register: region
     - name: Register OS Region
       shell: "{{ overcloudrc }} && openstack endpoint list -c Region -f json"
       register: region
         path: "/home/stack/{{ item }}"
       become: yes
       become_user: stack
         path: "/home/stack/{{ item }}"
       become: yes
       become_user: stack
-      with_items:
-        - overcloudrc
-        - overcloudrc.v3
-    - name: Undercloud NAT - MASQUERADE interface
-      iptables:
-        table: nat
-        chain: POSTROUTING
-        out_interface: eth0
-        jump: MASQUERADE
-      when:
-        - virtual
-        - not external_network_ipv6
-      become: yes
-    - name: Undercloud NAT - MASQUERADE interface with subnet
-      iptables:
-        table: nat
-        chain: POSTROUTING
-        out_interface: eth0
-        jump: MASQUERADE
-        source: "{{ external_cidr }}"
-      when:
-        - virtual
-        - not external_network_ipv6
-      become: yes
-    - name: Undercloud NAT - Allow Forwarding
-      iptables:
-        chain: FORWARD
-        in_interface: eth2
-        jump: ACCEPT
-      when:
-        - virtual
-        - not external_network_ipv6
-      become: yes
-    - name: Undercloud NAT - Allow Stateful Forwarding
-      iptables:
-        chain: FORWARD
-        in_interface: eth2
-        jump: ACCEPT
-        source: "{{ external_cidr }}"
-        ctstate: ESTABLISHED,RELATED
-      when:
-        - virtual
-        - not external_network_ipv6
-      become: yes
-    - name: Undercloud NAT - Save iptables
-      shell: service iptables save
-      become: yes
-      when:
-        - virtual
-        - not external_network_ipv6
+      with_items: "{{ overcloudrc_files }}"
     - name: Create congress datasources
       shell: "{{ overcloudrc }} && openstack congress datasource create {{ item }}"
       become: yes
     - name: Create congress datasources
       shell: "{{ overcloudrc }} && openstack congress datasource create {{ item }}"
       become: yes
diff --git a/lib/ansible/playbooks/prepare_overcloud_containers.yml b/lib/ansible/playbooks/prepare_overcloud_containers.yml
new file mode 100644 (file)
index 0000000..88a8df1
--- /dev/null
@@ -0,0 +1,105 @@
+---
+- hosts: all
+  tasks:
+    - name: Upload container patches archive
+      copy:
+        src: "{{ apex_temp_dir }}/docker_patches.tar.gz"
+        dest: "/home/stack/docker_patches.tar.gz"
+        owner: stack
+        group: stack
+        mode: 0644
+      when: patched_docker_services|length > 0
+    - name: Unpack container patches archive
+      unarchive:
+        src: /home/stack/docker_patches.tar.gz
+        remote_src: yes
+        list_files: yes
+        group: stack
+        owner: stack
+        dest: /home/stack/
+      when: patched_docker_services|length > 0
+    - name: Prepare generic docker registry image file
+      shell: >
+        {{ stackrc }} && openstack overcloud container image prepare
+        --namespace trunk.registry.rdoproject.org/{{ os_version }}
+        --tag {{ container_tag }}
+        --push-destination {{ undercloud_ip }}:8787
+        -e /usr/share/openstack-tripleo-heat-templates/environments/docker.yaml
+        --output-images-file overcloud_containers.yml
+      become: yes
+      become_user: stack
+    - name: Prepare SDN docker registry image file
+      shell: >
+        {{ stackrc }} && openstack overcloud container image prepare
+        --namespace trunk.registry.rdoproject.org/{{ os_version }}
+        --tag {{ container_tag }}
+        --push-destination {{ undercloud_ip }}:8787
+        -e {{ sdn_env_file }}
+        --output-images-file sdn_containers.yml
+      become: yes
+      become_user: stack
+      when: sdn != false
+    - name: Upload docker images to local registry
+      shell: >
+        {{ stackrc }} && openstack overcloud container image upload
+        --config-file /home/stack/overcloud_containers.yml
+    - name: Upload SDN docker images to local registry
+      shell: >
+        {{ stackrc }} && openstack overcloud container image upload
+        --config-file /home/stack/sdn_containers.yml
+      when: sdn != false
+    - name: Collect docker images in registry
+      uri:
+        url: http://{{ undercloud_ip }}:8787/v2/_catalog
+        body_format: json
+      register: response
+    - name: Patch Docker images
+      shell: >
+        cd /home/stack/containers/{{ item }} && docker build
+        -t {{ undercloud_ip }}:8787/{{ os_version }}/centos-binary-{{ item }}:apex .
+      when:
+        - patched_docker_services|length > 0
+        - item in (response.json)['repositories']|join(" ")
+      with_items: "{{ patched_docker_services }}"
+    - name: Push patched docker images to local registry
+      shell: docker push {{ undercloud_ip }}:8787/{{ os_version }}/centos-binary-{{ item }}:apex
+      when:
+        - patched_docker_services|length > 0
+        - item in (response.json)['repositories']|join(" ")
+      with_items: "{{ patched_docker_services }}"
+    - name: Prepare deployment generic docker image file
+      shell: >
+        {{ stackrc }} && openstack overcloud container image prepare
+        --namespace {{ undercloud_ip }}:8787/{{ os_version }}
+        --tag {{ container_tag }}
+        -e /usr/share/openstack-tripleo-heat-templates/environments/docker.yaml
+        --output-env-file docker-images.yaml
+      become: yes
+      become_user: stack
+    - name: Prepare deployment SDN docker image file
+      shell: >
+        {{ stackrc }} && openstack overcloud container image prepare
+        --namespace {{ undercloud_ip }}:8787/{{ os_version }}
+        --tag {{ container_tag }}
+        -e {{ sdn_env_file }}
+        --output-env-file sdn-images.yaml
+      when: sdn != false
+      become: yes
+      become_user: stack
+    - name: Modify Images with Apex tag
+      replace:
+        path: "{{ item[0] }}"
+        regexp: "(\\s*Docker.*?:.*?centos-binary-{{ item[1] }}):.*"
+        replace: '\1:apex'
+      with_nested:
+        - [ '/home/stack/sdn-images.yaml', '/home/stack/docker-images.yaml']
+        - "{{ patched_docker_services }}"
+    - name: Pull Ceph docker image
+      shell: docker pull {{ ceph_docker_image }}
+      become: yes
+    - name: Tag Ceph image for local registry
+      shell: docker tag {{ ceph_docker_image }} {{ undercloud_ip }}:8787/{{ ceph_docker_image }}
+      become: yes
+    - name: Push Ceph docker image to local registry
+      shell: docker push {{ undercloud_ip }}:8787/{{ ceph_docker_image }}
+      become: yes