Adds ability to deploy from upstream openstack 95/41795/25
authorTim Rozet <trozet@redhat.com>
Tue, 12 Sep 2017 21:32:56 +0000 (17:32 -0400)
committerTim Rozet <trozet@redhat.com>
Mon, 6 Nov 2017 04:35:02 +0000 (04:35 +0000)
To deploy with upstream openstack branch, use new deploy setting 'os_version'.
A default scenario file for nosdn with pike has been included in this patch.
If 'os_version' is a version other than the default version for this OPNFV
release, then upstream is used.

In order to use upstream with the current OS version, use the '--upstream'
argument to the deploy command to force an upstream deployment.

Also include '-e upstream-environment.yaml' to use default upstream
deployment settings.

Supports nosdn and odl-nofeature deployments.

Change-Id: Ic07e308827b449637b4e86cdd086434e4de2fb69
Signed-off-by: Tim Rozet <trozet@redhat.com>
28 files changed:
apex/build_utils.py
apex/builders/__init__.py [new file with mode: 0644]
apex/builders/common_builder.py [new file with mode: 0644]
apex/builders/overcloud_builder.py [new file with mode: 0644]
apex/builders/undercloud_builder.py [new file with mode: 0644]
apex/common/constants.py
apex/common/utils.py
apex/deploy.py
apex/network/network_data.py [new file with mode: 0644]
apex/network/network_environment.py
apex/overcloud/deploy.py
apex/settings/deploy_settings.py
apex/tests/test_apex_common_builder.py [new file with mode: 0644]
apex/tests/test_apex_common_utils.py
apex/tests/test_apex_deploy.py
apex/tests/test_apex_network_data.py [new file with mode: 0644]
apex/tests/test_apex_overcloud_builder.py [new file with mode: 0644]
apex/tests/test_apex_undercloud_builder.py [new file with mode: 0644]
apex/undercloud/undercloud.py
build/network-environment.yaml
build/opnfv-environment.yaml
build/rpm_specs/opnfv-apex-common.spec
build/upstream-environment.yaml [new file with mode: 0644]
config/deploy/os-nosdn-pike-noha.yaml [new file with mode: 0644]
config/deploy/os-odl-pike-noha.yaml [new file with mode: 0644]
lib/ansible/playbooks/deploy_overcloud.yml
requirements.txt
setup.cfg

index 66a63d3..c9d8472 100644 (file)
@@ -16,9 +16,41 @@ import re
 import shutil
 import sys
 
+from apex.common import constants as con
 from urllib.parse import quote_plus
 
 
def get_change(url, repo, branch, change_id):
    """
    Fetches a change from upstream repo
    :param url: URL of upstream gerrit
    :param repo: name of repo
    :param branch: branch of repo
    :param change_id: SHA change id
    :return: change if found and not merged, None if merged
    :raises AssertionError: if the change is in ABANDONED or CLOSED state
    :raises KeyError: if gerrit returns an unexpected data structure
    """
    rest = GerritRestAPI(url=url)
    change_path = "{}~{}~{}".format(quote_plus(repo), quote_plus(branch),
                                    change_id)
    change_str = "changes/{}?o=CURRENT_REVISION".format(change_path)
    change = rest.get(change_str)
    try:
        # membership must be tested against a tuple of states; the previous
        # form "'ABANDONED' 'CLOSED'" concatenated the two literals and
        # performed a substring check instead of a state comparison
        assert change['status'] not in ('ABANDONED', 'CLOSED'), \
            'Change {} is in {} state'.format(change_id, change['status'])
        if change['status'] == 'MERGED':
            logging.info('Change {} is merged, ignoring...'
                         .format(change_id))
            return None
        else:
            return change

    except KeyError:
        # log the gerrit url (not the change id) so the failing request
        # can be reproduced; the message already reports a url
        logging.error('Failed to get valid change data structure from url '
                      '{}/{}, data returned: \n{}'
                      .format(url, change_str, change))
        raise
+
+
 def clone_fork(args):
     ref = None
     logging.info("Cloning {}".format(args.repo))
@@ -36,26 +68,11 @@ def clone_fork(args):
     if m:
         change_id = m.group(1)
         logging.info("Using change ID {} from {}".format(change_id, args.repo))
-        rest = GerritRestAPI(url=args.url)
-        change_path = "{}~{}~{}".format(args.repo, quote_plus(args.branch),
-                                        change_id)
-        change_str = "changes/{}?o=CURRENT_REVISION".format(change_path)
-        change = rest.get(change_str)
-        try:
-            assert change['status'] not in 'ABANDONED' 'CLOSED',\
-                'Change {} is in {} state'.format(change_id, change['status'])
-            if change['status'] == 'MERGED':
-                logging.info('Change {} is merged, ignoring...'
-                             .format(change_id))
-            else:
-                current_revision = change['current_revision']
-                ref = change['revisions'][current_revision]['ref']
-                logging.info('setting ref to {}'.format(ref))
-        except KeyError:
-            logging.error('Failed to get valid change data structure from url '
-                          '{}/{}, data returned: \n{}'
-                          .format(change_id, change_str, change))
-            raise
+        change = get_change(args.url, args.repo, args.branch, change_id)
+        if change:
+            current_revision = change['current_revision']
+            ref = change['revisions'][current_revision]['ref']
+            logging.info('setting ref to {}'.format(ref))
 
     # remove existing file or directory named repo
     if os.path.exists(args.repo):
@@ -73,6 +90,19 @@ def clone_fork(args):
         logging.info('Checked out commit:\n{}'.format(ws.head.commit.message))
 
 
def get_patch(change_id, repo, branch, url=con.OPENSTACK_GERRIT):
    """
    Fetches the raw patch for a gerrit change
    :param change_id: SHA change id
    :param repo: name of repo
    :param branch: branch of repo
    :param url: URL of gerrit instance (defaults to OpenStack gerrit)
    :return: patch contents as returned by the gerrit REST API, or None
    implicitly when the change is merged, abandoned-check passed but
    absent, or otherwise filtered out by get_change
    """
    logging.info("Fetching patch for change id {}".format(change_id))
    change = get_change(url, repo, branch, change_id)
    if change:
        current_revision = change['current_revision']
        rest = GerritRestAPI(url=url)
        change_path = "{}~{}~{}".format(quote_plus(repo), quote_plus(branch),
                                        change_id)
        patch_url = "changes/{}/revisions/{}/patch".format(change_path,
                                                           current_revision)
        return rest.get(patch_url)
+
+
 def get_parser():
     parser = argparse.ArgumentParser()
     parser.add_argument('--debug', action='store_true', default=False,
diff --git a/apex/builders/__init__.py b/apex/builders/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/apex/builders/common_builder.py b/apex/builders/common_builder.py
new file mode 100644 (file)
index 0000000..101860c
--- /dev/null
@@ -0,0 +1,111 @@
+##############################################################################
+# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Common building utilities for undercloud and overcloud
+
+import git
+import logging
+import os
+
+from apex import build_utils
+from apex.common import constants as con
+from apex.virtual import utils as virt_utils
+
+
def project_to_path(project):
    """
    Translates project to absolute file path
    :param project: name of project
    :return: File path
    """
    # strip the gerrit namespace, e.g. openstack/nova -> nova
    if project.startswith('openstack/'):
        project = os.path.basename(project)
    if 'puppet' in project:
        module = project.replace('puppet-', '')
        return "/etc/puppet/modules/{}".format(module)
    if 'tripleo-heat-templates' in project:
        return "/usr/share/openstack-tripleo-heat-templates"
    # anything else is assumed to be a python project
    return "/usr/lib/python2.7/site-packages/{}".format(project)
+
+
def add_upstream_patches(patches, image, tmp_dir,
                         default_branch=os.path.join('stable',
                                                     con.DEFAULT_OS_VERSION)):
    """
    Adds patches from upstream OpenStack gerrit to Undercloud for deployment
    :param patches: list of patches; each entry is a dict that must contain
    'project' and 'change-id' keys and may contain a 'branch' key
    :param image: undercloud image
    :param tmp_dir: to store temporary patch files
    :param default_branch: default branch to fetch commit (if not specified
    in patch)
    :return: None
    """
    # collected virt-customize operations, applied to the image in one pass
    virt_ops = list()
    logging.debug("Evaluating upstream patches:\n{}".format(patches))
    for patch in patches:
        assert isinstance(patch, dict)
        assert all(i in patch.keys() for i in ['project', 'change-id'])
        if 'branch' in patch.keys():
            branch = patch['branch']
        else:
            branch = default_branch
        # get_patch returns None for merged changes, which are skipped below
        patch_diff = build_utils.get_patch(patch['change-id'],
                                           patch['project'], branch)
        if patch_diff:
            patch_file = "{}.patch".format(patch['change-id'])
            patch_file_path = os.path.join(tmp_dir, patch_file)
            with open(patch_file_path, 'w') as fh:
                fh.write(patch_diff)
            # upload the patch file next to the project inside the image,
            # then apply it from the project root with patch -p1
            project_path = project_to_path(patch['project'])
            virt_ops.extend([
                {con.VIRT_UPLOAD: "{}:{}".format(patch_file_path,
                                                 project_path)},
                {con.VIRT_RUN_CMD: "cd {} && patch -p1 < {}".format(
                    project_path, patch_file)}])
            logging.info("Adding patch {} to {}".format(patch_file,
                                                        image))
        else:
            logging.info("Ignoring patch:\n{}".format(patch))
    # only touch the image when there is at least one patch to apply
    if virt_ops:
        virt_utils.virt_customize(virt_ops, image)
+
+
def add_repo(repo_url, repo_name, image, tmp_dir):
    """
    Writes a yum .repo file and uploads it into an image
    :param repo_url: baseurl of the repository
    :param repo_name: name of the repository (also used as the file name)
    :param image: image to add the repo file to
    :param tmp_dir: directory to stage the repo file in
    :return: None
    """
    assert repo_name is not None
    assert repo_url is not None
    repo_file = "{}.repo".format(repo_name)
    repo_file_path = os.path.join(tmp_dir, repo_file)
    # gpgcheck=0: package signature verification is disabled for this repo
    content = [
        "[{}]".format(repo_name),
        "name={}".format(repo_name),
        "baseurl={}".format(repo_url),
        "gpgcheck=0"
    ]
    logging.debug("Creating repo file {}".format(repo_name))
    with open(repo_file_path, 'w') as fh:
        fh.writelines("{}\n".format(line) for line in content)
    logging.debug("Adding repo {} to {}".format(repo_file, image))
    virt_utils.virt_customize([
        {con.VIRT_UPLOAD: "{}:/etc/yum.repos.d/".format(repo_file_path)}],
        image
    )
+
+
def create_git_archive(repo_url, repo_name, tmp_dir,
                       branch='master', prefix=''):
    """
    Clones a git repository and writes its tree out as a tar archive
    :param repo_url: URL of the repo to clone
    :param repo_name: name used for the clone directory and archive file
    :param tmp_dir: directory to clone into and write the archive to
    :param branch: branch to archive (checked out from origin when it is
    not the clone's active branch)
    :param prefix: path prefix prepended to each entry in the archive
    :return: path to the created tar archive
    """
    repo = git.Repo.clone_from(repo_url, os.path.join(tmp_dir, repo_name))
    repo_git = repo.git
    if branch != str(repo.active_branch):
        # detached checkout of the remote branch; only the tree is archived
        repo_git.checkout("origin/{}".format(branch))
    archive_path = os.path.join(tmp_dir, "{}.tar".format(repo_name))
    with open(archive_path, 'wb') as fh:
        repo.archive(fh, prefix=prefix)
    logging.debug("Wrote archive file: {}".format(archive_path))
    return archive_path
diff --git a/apex/builders/overcloud_builder.py b/apex/builders/overcloud_builder.py
new file mode 100644 (file)
index 0000000..e7b0796
--- /dev/null
@@ -0,0 +1,45 @@
+##############################################################################
+# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Used to modify overcloud qcow2 image
+
+import logging
+
+from apex.builders import common_builder as c_builder
+from apex.common import constants as con
+from apex.virtual import utils as virt_utils
+
+
def inject_opendaylight(odl_version, image, tmp_dir):
    """
    Installs OpenDaylight and puppet-opendaylight into an overcloud image
    :param odl_version: ODL version; must be in con.VALID_ODL_VERSIONS
    :param image: overcloud image to modify
    :param tmp_dir: directory to stage the puppet-opendaylight archive in
    :return: None
    """
    assert odl_version in con.VALID_ODL_VERSIONS
    # add repo
    if odl_version == 'master':
        # use the newest non-master entry for the package repo name
        # (presumably no nexus devel repo exists for master -- confirm)
        odl_pkg_version = con.VALID_ODL_VERSIONS[-2]
        branch = odl_version
    else:
        odl_pkg_version = odl_version
        branch = "stable/{}".format(odl_version)
    odl_url = "https://nexus.opendaylight.org/content/repositories" \
              "/opendaylight-{}-epel-7-x86_64-devel/".format(odl_pkg_version)
    repo_name = "opendaylight-{}".format(odl_pkg_version)
    c_builder.add_repo(odl_url, repo_name, image, tmp_dir)
    # download puppet-opendaylight
    archive = c_builder.create_git_archive(
        repo_url=con.PUPPET_ODL_URL, repo_name='puppet-opendaylight',
        tmp_dir=tmp_dir, branch=branch, prefix='opendaylight/')
    # install ODL, puppet-odl; any pre-existing module dir is replaced by
    # the contents of the uploaded archive
    virt_ops = [
        {con.VIRT_INSTALL: 'opendaylight'},
        {con.VIRT_UPLOAD: "{}:/etc/puppet/modules/".format(archive)},
        {con.VIRT_RUN_CMD: 'rm -rf /etc/puppet/modules/opendaylight'},
        {con.VIRT_RUN_CMD: "cd /etc/puppet/modules/ && tar xvf "
                           "puppet-opendaylight.tar"}
    ]
    virt_utils.virt_customize(virt_ops, image)
    logging.info("OpenDaylight injected into {}".format(image))
diff --git a/apex/builders/undercloud_builder.py b/apex/builders/undercloud_builder.py
new file mode 100644 (file)
index 0000000..baba8a5
--- /dev/null
@@ -0,0 +1,38 @@
+##############################################################################
+# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Used to modify undercloud qcow2 image
+
+from apex.common import constants as con
+from apex.virtual import utils as virt_utils
+
+
def add_upstream_packages(image):
    """
    Adds required base upstream packages to Undercloud for deployment
    :param image:
    :return: None
    """
    pkgs = (
        'openstack-utils',
        'ceph-common',
        'python2-networking-sfc',
        'openstack-ironic-inspector',
        'subunit-filters',
        'docker-distribution',
        'openstack-tripleo-validations',
        'libguestfs-tools',
    )
    # one install operation per package, applied in a single
    # virt-customize pass
    virt_ops = [{con.VIRT_INSTALL: pkg} for pkg in pkgs]
    virt_utils.virt_customize(virt_ops, image)
+
+# TODO(trozet): add rest of build for undercloud here as well
index 0df7152..943e322 100644 (file)
@@ -33,14 +33,20 @@ DEFAULT_ROOT_DEV = 'sda'
 LIBVIRT_VOLUME_PATH = '/var/lib/libvirt/images'
 
 VIRT_UPLOAD = '--upload'
-VIRT_INSTALL = '-install'
+VIRT_INSTALL = '--install'
 VIRT_RUN_CMD = '--run-command'
 VIRT_PW = '--root-password'
 
 THT_DIR = '/usr/share/openstack-tripleo-heat-templates'
 THT_ENV_DIR = os.path.join(THT_DIR, 'environments')
 
+DEFAULT_OS_VERSION = 'ocata'
 DEFAULT_ODL_VERSION = 'carbon'
+VALID_ODL_VERSIONS = ['carbon', 'nitrogen', 'oxygen', 'master']
+PUPPET_ODL_URL = 'https://git.opendaylight.org/gerrit/integration/packaging' \
+                 '/puppet-opendaylight'
 DEBUG_OVERCLOUD_PW = 'opnfvapex'
 NET_ENV_FILE = 'network-environment.yaml'
 DEPLOY_TIMEOUT = 90
+UPSTREAM_RDO = 'https://images.rdoproject.org/ocata/delorean/current-tripleo/'
+OPENSTACK_GERRIT = 'https://review.openstack.org'
index f418d42..13250a4 100644 (file)
@@ -7,11 +7,17 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 
+import datetime
 import json
 import logging
 import os
 import pprint
 import subprocess
+import tarfile
+import time
+import urllib.error
+import urllib.request
+import urllib.parse
 import yaml
 
 
@@ -129,3 +135,60 @@ def run_ansible(ansible_vars, playbook, host='localhost', user='root',
         e = "Ansible playbook failed. See Ansible logs for details."
         logging.error(e)
         raise Exception(e)
+
+
def fetch_upstream_and_unpack(dest, url, targets):
    """
    Fetches targets from a url destination and downloads them if they are
    newer.  Also unpacks tar files in dest dir.
    :param dest: Directory to download and unpack files to
    :param url: URL where target files are located
    :param targets: List of target files to download
    :return: None
    :raises urllib.error.URLError: if a target url cannot be fetched
    """
    os.makedirs(dest, exist_ok=True)
    assert isinstance(targets, list)
    for target in targets:
        download_target = True
        target_url = urllib.parse.urljoin(url, target)
        target_dest = os.path.join(dest, target)
        logging.debug("Fetching and comparing upstream target: \n{}".format(
            target_url))
        try:
            u = urllib.request.urlopen(target_url)
        except urllib.error.URLError as e:
            logging.error("Failed to fetch target url. Error: {}".format(
                e.reason))
            raise
        try:
            if os.path.isfile(target_dest):
                logging.debug("Previous file found: {}".format(target_dest))
                # compare the server's Last-Modified header against the
                # mtime of the copy on disk; header lookup via Message.get
                # is case-insensitive, unlike a literal key comparison
                target_url_date = u.info().get('Last-Modified')
                if target_url_date is not None:
                    target_dest_mtime = os.path.getmtime(target_dest)
                    target_url_mtime = time.mktime(
                        datetime.datetime.strptime(target_url_date,
                                                   "%a, %d %b %Y %X "
                                                   "GMT").timetuple())
                    if target_url_mtime > target_dest_mtime:
                        logging.debug('URL target is newer than disk...will '
                                      'download')
                    else:
                        logging.info('URL target does not need to be '
                                     'downloaded')
                        download_target = False
                else:
                    logging.debug('Unable to find last modified url date')
        finally:
            # close the probe response; urlretrieve opens its own connection
            u.close()
        if download_target:
            urllib.request.urlretrieve(target_url, filename=target_dest)
            logging.info("Target downloaded: {}".format(target))
        if target.endswith('.tar'):
            logging.info('Unpacking tar file')
            # NOTE(review): extractall trusts member paths in the archive;
            # assumes upstream RDO artifacts are trusted -- confirm
            with tarfile.open(target_dest) as tar:
                tar.extractall(path=dest)
index 4b1ef85..5485d15 100644 (file)
@@ -25,11 +25,15 @@ from apex import DeploySettings
 from apex import Inventory
 from apex import NetworkEnvironment
 from apex import NetworkSettings
+from apex.builders import common_builder as c_builder
+from apex.builders import overcloud_builder as oc_builder
+from apex.builders import undercloud_builder as uc_builder
 from apex.common import utils
 from apex.common import constants
 from apex.common import parsers
 from apex.common.exceptions import ApexDeployException
 from apex.network import jumphost
+from apex.network import network_data
 from apex.undercloud import undercloud as uc_lib
 from apex.overcloud import config as oc_cfg
 from apex.overcloud import deploy as oc_deploy
@@ -120,7 +124,8 @@ def create_deploy_parser():
     deploy_parser.add_argument('-e', '--environment-file',
                                dest='env_file',
                                default='opnfv-environment.yaml',
-                               help='Provide alternate base env file')
+                               help='Provide alternate base env file located '
+                                    'in deploy_dir')
     deploy_parser.add_argument('-v', '--virtual', action='store_true',
                                default=False,
                                dest='virtual',
@@ -172,6 +177,10 @@ def create_deploy_parser():
     deploy_parser.add_argument('--quickstart', action='store_true',
                                default=False,
                                help='Use tripleo-quickstart to deploy')
+    deploy_parser.add_argument('--upstream', action='store_true',
+                               default=False,
+                               help='Force deployment to use upstream '
+                                    'artifacts')
     return deploy_parser
 
 
@@ -233,8 +242,10 @@ def main():
     net_settings = NetworkSettings(args.network_settings_file)
     logging.info("Network settings are:\n {}".format(pprint.pformat(
                  net_settings)))
+    os_version = deploy_settings['deploy_options']['os_version']
     net_env_file = os.path.join(args.deploy_dir, constants.NET_ENV_FILE)
-    net_env = NetworkEnvironment(net_settings, net_env_file)
+    net_env = NetworkEnvironment(net_settings, net_env_file,
+                                 os_version=os_version)
     net_env_target = os.path.join(APEX_TEMP_DIR, constants.NET_ENV_FILE)
     utils.dump_yaml(dict(net_env), net_env_target)
     ha_enabled = deploy_settings['global_params']['ha_enabled']
@@ -268,7 +279,7 @@ def main():
     inventory = Inventory(args.inventory_file, ha_enabled, args.virtual)
 
     validate_cross_settings(deploy_settings, net_settings, inventory)
-
+    ds_opts = deploy_settings['deploy_options']
     if args.quickstart:
         deploy_settings_file = os.path.join(APEX_TEMP_DIR,
                                             'apex_deploy_settings.yaml')
@@ -312,7 +323,6 @@ def main():
                         'members'][0]
                 bridge = "br-{}".format(network)
                 jumphost.attach_interface_to_ovs(bridge, iface, network)
-        # Dump all settings out to temp bash files to be sourced
         instackenv_json = os.path.join(APEX_TEMP_DIR, 'instackenv.json')
         with open(instackenv_json, 'w') as fh:
             json.dump(inventory, fh)
@@ -322,10 +332,52 @@ def main():
             root_pw = constants.DEBUG_OVERCLOUD_PW
         else:
             root_pw = None
+
+        upstream = (os_version != constants.DEFAULT_OS_VERSION or
+                    args.upstream)
+        if os_version == 'master':
+            branch = 'master'
+        else:
+            branch = "stable/{}".format(os_version)
+        if upstream:
+            logging.info("Deploying with upstream artifacts for OpenStack "
+                         "{}".format(os_version))
+            args.image_dir = os.path.join(args.image_dir, os_version)
+            upstream_url = constants.UPSTREAM_RDO.replace(
+                constants.DEFAULT_OS_VERSION, os_version)
+            upstream_targets = ['overcloud-full.tar', 'undercloud.qcow2']
+            utils.fetch_upstream_and_unpack(args.image_dir, upstream_url,
+                                            upstream_targets)
+            sdn_image = os.path.join(args.image_dir, 'overcloud-full.qcow2')
+            if ds_opts['sdn_controller'] == 'opendaylight':
+                logging.info("Preparing upstream image with OpenDaylight")
+                oc_builder.inject_opendaylight(
+                    odl_version=ds_opts['odl_version'],
+                    image=sdn_image,
+                    tmp_dir=APEX_TEMP_DIR
+                )
+            # copy undercloud so we don't taint upstream fetch
+            uc_image = os.path.join(args.image_dir, 'undercloud_mod.qcow2')
+            uc_fetch_img = os.path.join(args.image_dir, 'undercloud.qcow2')
+            shutil.copyfile(uc_fetch_img, uc_image)
+            # prep undercloud with required packages
+            uc_builder.add_upstream_packages(uc_image)
+            # add patches from upstream to undercloud and overcloud
+            logging.info('Adding patches to undercloud')
+            patches = deploy_settings['global_params']['patches']
+            c_builder.add_upstream_patches(patches['undercloud'], uc_image,
+                                           APEX_TEMP_DIR, branch)
+            logging.info('Adding patches to overcloud')
+            c_builder.add_upstream_patches(patches['overcloud'], sdn_image,
+                                           APEX_TEMP_DIR, branch)
+        else:
+            sdn_image = os.path.join(args.image_dir, SDN_IMAGE)
+            uc_image = 'undercloud.qcow2'
         undercloud = uc_lib.Undercloud(args.image_dir,
                                        args.deploy_dir,
                                        root_pw=root_pw,
-                                       external_network=uc_external)
+                                       external_network=uc_external,
+                                       image_name=os.path.basename(uc_image))
         undercloud.start()
 
         # Generate nic templates
@@ -340,15 +392,34 @@ def main():
 
         # Prepare overcloud-full.qcow2
         logging.info("Preparing Overcloud for deployment...")
-        sdn_image = os.path.join(args.image_dir, SDN_IMAGE)
-        oc_deploy.prep_image(deploy_settings, sdn_image, APEX_TEMP_DIR,
-                             root_pw=root_pw)
+        if os_version != 'ocata':
+            net_data_file = os.path.join(APEX_TEMP_DIR, 'network_data.yaml')
+            net_data = network_data.create_network_data(net_settings,
+                                                        net_data_file)
+        else:
+            net_data = False
+        if upstream and args.env_file == 'opnfv-environment.yaml':
+            # Override the env_file if it is defaulted to opnfv
+            # opnfv env file will not work with upstream
+            args.env_file = 'upstream-environment.yaml'
         opnfv_env = os.path.join(args.deploy_dir, args.env_file)
-        oc_deploy.prep_env(deploy_settings, net_settings, inventory,
-                           opnfv_env, net_env_target, APEX_TEMP_DIR)
-        oc_deploy.create_deploy_cmd(deploy_settings, net_settings,
-                                    inventory, APEX_TEMP_DIR,
-                                    args.virtual, args.env_file)
+        if not upstream:
+            oc_deploy.prep_env(deploy_settings, net_settings, inventory,
+                               opnfv_env, net_env_target, APEX_TEMP_DIR)
+            oc_deploy.prep_image(deploy_settings, sdn_image, APEX_TEMP_DIR,
+                                 root_pw=root_pw)
+        else:
+            shutil.copyfile(sdn_image, os.path.join(APEX_TEMP_DIR,
+                                                    'overcloud-full.qcow2'))
+            shutil.copyfile(
+                opnfv_env,
+                os.path.join(APEX_TEMP_DIR, os.path.basename(opnfv_env))
+            )
+
+        oc_deploy.create_deploy_cmd(deploy_settings, net_settings, inventory,
+                                    APEX_TEMP_DIR, args.virtual,
+                                    os.path.basename(opnfv_env),
+                                    net_data=net_data)
         deploy_playbook = os.path.join(args.lib_dir, ANSIBLE_PATH,
                                        'deploy_overcloud.yml')
         virt_env = 'virtual-environment.yaml'
@@ -365,8 +436,11 @@ def main():
         deploy_vars['aarch64'] = platform.machine() == 'aarch64'
         deploy_vars['dns_server_args'] = ''
         deploy_vars['apex_temp_dir'] = APEX_TEMP_DIR
+        deploy_vars['apex_env_file'] = os.path.basename(opnfv_env)
         deploy_vars['stackrc'] = 'source /home/stack/stackrc'
         deploy_vars['overcloudrc'] = 'source /home/stack/overcloudrc'
+        deploy_vars['upstream'] = upstream
+        deploy_vars['os_version'] = os_version
         for dns_server in net_settings['dns_servers']:
             deploy_vars['dns_server_args'] += " --dns-nameserver {}".format(
                 dns_server)
@@ -393,7 +467,6 @@ def main():
         deploy_vars['external_network_cmds'] = \
             oc_deploy.external_network_cmds(net_settings)
         # TODO(trozet): just parse all ds_opts as deploy vars one time
-        ds_opts = deploy_settings['deploy_options']
         deploy_vars['gluon'] = ds_opts['gluon']
         deploy_vars['sdn'] = ds_opts['sdn_controller']
         for dep_option in 'yardstick', 'dovetail', 'vsperf':
@@ -459,5 +532,7 @@ def main():
         logging.info("Undercloud IP: {}, please connect by doing "
                      "'opnfv-util undercloud'".format(undercloud.ip))
         # TODO(trozet): add logging here showing controller VIP and horizon url
+
+
 if __name__ == '__main__':
     main()
diff --git a/apex/network/network_data.py b/apex/network/network_data.py
new file mode 100644 (file)
index 0000000..1177af0
--- /dev/null
@@ -0,0 +1,104 @@
+##############################################################################
+# Copyright (c) 2017 Tim Rozet (trozet@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import copy
+import logging
+import pprint
+
+from apex.common import utils
+from apex.common.constants import (
+    ADMIN_NETWORK,
+    TENANT_NETWORK,
+    STORAGE_NETWORK,
+    EXTERNAL_NETWORK,
+    API_NETWORK
+)
+from apex import NetworkSettings
+
+
class NetworkDataException(Exception):
    """Raised when network settings cannot be rendered into network data."""
    pass
+
+
def create_network_data(ns, target=None):
    """
    Creates network data file for deployments
    :param ns: Network Settings
    :param target: Target file to write
    :return: list of networks and properties
    :raises NetworkDataException: if ns is not a NetworkSettings instance,
    or an enabled network is missing overcloud_ip_range or cidr
    """
    network_data = list()
    if not isinstance(ns, NetworkSettings):
        raise NetworkDataException('Invalid network settings given')

    nets = ns['networks']

    # TODO(trozet) change this all to be dynamic after TripleO bug
    # https://bugs.launchpad.net/tripleo/+bug/1720849 is fixed

    for net in nets.keys():
        if net == ADMIN_NETWORK:
            # we dont need to add ctlplane network to network data
            continue
        elif net == EXTERNAL_NETWORK:
            # external is a list of networks; only the first is rendered
            network = nets[net][0]
            net_name = net.title()
            net_lower = net.lower()
        elif net == API_NETWORK:
            # TripleO names the api network InternalApi / internal_api
            network = nets[net]
            net_name = 'InternalApi'
            net_lower = 'internal_api'
        else:
            network = nets[net]
            net_name = net.title()
            net_lower = net.lower()
        # TODO(trozet): add ipv6 support
        # tenant network is the only one that does not carry a VIP
        tmp_net = {'name': net_name,
                   'name_lower': net_lower,
                   'vip': net != TENANT_NETWORK,
                   'enabled': net in ns.enabled_network_list}
        if 'gateway' in network:
            tmp_net['gateway_ip'] = str(network['gateway'])
        if 'overcloud_ip_range' in network:
            net_range = network['overcloud_ip_range']
            tmp_net['allocation_pools'] = [{'start': str(net_range[0]),
                                           'end': str(net_range[1])}]
        elif tmp_net['enabled']:
            # an enabled network must define an allocation range
            logging.error("overcloud ip range is missing and must be provided "
                          "in network settings when network is enabled for "
                          "network {}".format(net))
            raise NetworkDataException("overcloud_ip_range missing from "
                                       "network: {}".format(net))
        if 'cidr' in network:
            tmp_net['ip_subnet'] = str(network['cidr'])
        elif tmp_net['enabled']:
            # an enabled network must define a subnet
            logging.error("cidr is missing and must be provided in network "
                          "settings when network is enabled for network "
                          "{}".format(net))
            raise NetworkDataException("cidr is null for network {}".format(
                net))

        # deepcopy so appended entries never alias the working dict
        network_data.append(copy.deepcopy(tmp_net))

    # have to do this due to the aforementioned bug
    storage_mgmt_net = {
        'name': 'StorageMgmt',
        'enabled': False,
        'name_lower': 'storage_mgmt',
        'ip_subnet': '172.16.3.0/24',
        'allocation_pools': [{'start': '172.16.3.4', 'end': '172.16.3.250'}],
        'vip': True,
    }
    network_data.append(storage_mgmt_net)
    if target:
        logging.debug("Writing network data to {}".format(target))
        utils.dump_yaml(network_data, target)
    logging.debug("Network data parsed as:\n "
                  "{}".format(pprint.pformat(network_data)))
    return network_data
index c2e9991..ea71e0f 100644 (file)
@@ -8,7 +8,6 @@
 ##############################################################################
 
 import re
-
 import yaml
 
 from apex.settings.network_settings import NetworkSettings
@@ -19,7 +18,8 @@ from apex.common.constants import (
     TENANT_NETWORK,
     STORAGE_NETWORK,
     EXTERNAL_NETWORK,
-    API_NETWORK
+    API_NETWORK,
+    DEFAULT_OS_VERSION,
 )
 
 HEAT_NONE = 'OS::Heat::None'
@@ -40,6 +40,12 @@ API_RESOURCES = {'OS::TripleO::Network::InternalApi': None,
                  'OS::TripleO::Network::Ports::InternalApiVipPort': PORTS,
                  'OS::TripleO::Controller::Ports::InternalApiPort': PORTS,
                  'OS::TripleO::Compute::Ports::InternalApiPort': PORTS}
+STORAGE_MGMT_RESOURCES = {
+    'OS::TripleO::Network::StorageMgmt': None,
+    'OS::TripleO::Network::Ports::StorageMgmtVipPort': PORTS,
+    'OS::TripleO::Controller::Ports::StorageMgmtPort': PORTS,
+    'OS::TripleO::Compute::Ports::StorageMgmtPort': PORTS
+}
 
 # A list of flags that will be set to true when IPv6 is enabled
 IPV6_FLAGS = ["NovaIPv6", "MongoDbIPv6", "CorosyncIPv6", "CephIPv6",
@@ -58,23 +64,20 @@ class NetworkEnvironment(dict):
     based on a NetworkSettings object.
     """
     def __init__(self, net_settings, filename, compute_pre_config=False,
-                 controller_pre_config=False):
+                 controller_pre_config=False, os_version=DEFAULT_OS_VERSION):
         """
         Create Network Environment according to Network Settings
         """
         init_dict = {}
+        if not isinstance(net_settings, NetworkSettings):
+            raise NetworkEnvException('Invalid Network Settings object')
         if isinstance(filename, str):
             with open(filename, 'r') as net_env_fh:
                 init_dict = yaml.safe_load(net_env_fh)
-
         super().__init__(init_dict)
-        if not isinstance(net_settings, NetworkSettings):
-            raise NetworkEnvException('Invalid Network Settings object')
-
         self._set_tht_dir()
-
         nets = net_settings['networks']
-
+        self.os_version = os_version
         admin_cidr = nets[ADMIN_NETWORK]['cidr']
         admin_prefix = str(admin_cidr.prefixlen)
         self[param_def]['ControlPlaneSubnetCidr'] = admin_prefix
@@ -173,6 +176,9 @@ class NetworkEnvironment(dict):
         # apply resource registry update for API_RESOURCES
         self._config_resource_reg(API_RESOURCES, postfix)
 
+        if self.os_version != 'ocata':
+            self._config_resource_reg(STORAGE_MGMT_RESOURCES, '/noop.yaml')
+
         # Set IPv6 related flags to True. Not that we do not set those to False
         # when IPv4 is configured, we'll use the default or whatever the user
         # may have set.
@@ -204,7 +210,10 @@ class NetworkEnvironment(dict):
         for key, prefix in resources.items():
             if prefix is None:
                 if postfix == '/noop.yaml':
-                    self[reg][key] = HEAT_NONE
+                    if self.os_version == 'ocata':
+                        self[reg][key] = HEAT_NONE
+                    else:
+                        del self[reg][key]
                     continue
                 prefix = ''
             self[reg][key] = self.tht_dir + prefix + postfix
index 495743b..3ddb5f4 100644 (file)
@@ -93,10 +93,14 @@ def build_sdn_env_list(ds, sdn_map, env_list=None):
 
 
 def create_deploy_cmd(ds, ns, inv, tmp_dir,
-                      virtual, env_file='opnfv-environment.yaml'):
+                      virtual, env_file='opnfv-environment.yaml',
+                      net_data=False):
 
     logging.info("Creating deployment command")
-    deploy_options = [env_file, 'network-environment.yaml']
+    deploy_options = ['network-environment.yaml']
+
+    if env_file:
+        deploy_options.append(env_file)
     ds_opts = ds['deploy_options']
     deploy_options += build_sdn_env_list(ds_opts, SDN_FILE_MAP)
 
@@ -133,6 +137,8 @@ def create_deploy_cmd(ds, ns, inv, tmp_dir,
     cmd += " --control-scale {}".format(num_control)
     cmd += " --compute-scale {}".format(num_compute)
     cmd += ' --control-flavor control --compute-flavor compute'
+    if net_data:
+        cmd += ' --networks-file network_data.yaml'
     logging.info("Deploy command set: {}".format(cmd))
 
     with open(os.path.join(tmp_dir, 'deploy_command'), 'w') as fh:
@@ -356,7 +362,7 @@ def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
         perf = False
 
     # Modify OPNFV environment
-    # TODO: Change to build a dict and outputing yaml rather than parsing
+    # TODO: Change to build a dict and outputting yaml rather than parsing
     for line in fileinput.input(tmp_opnfv_env, inplace=True):
         line = line.strip('\n')
         output_line = line
@@ -370,6 +376,14 @@ def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
             output_line += key_out
         elif 'replace_public_key' in line:
             output_line = "    public_key: '{}'".format(public_key)
+        elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
+                'resource_registry' in line:
+            output_line = "resource_registry:\n" \
+                          "  OS::TripleO::NodeUserData: first-boot.yaml"
+        elif 'ComputeExtraConfigPre' in line and \
+                ds_opts['dataplane'] == 'ovs_dpdk':
+            output_line = '  OS::TripleO::ComputeExtraConfigPre: ' \
+                          './ovs-dpdk-preconfig.yaml'
 
         if ds_opts['sdn_controller'] == 'opendaylight' and \
                 'odl_vpp_routing_node' in ds_opts:
@@ -430,46 +444,32 @@ def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
                     if perf_line:
                         output_line = ("  {}:{}".format(cfg, perf_line))
 
-            # kernel args
-            # (FIXME) use compute's kernel settings for all nodes for now.
-            if 'ComputeKernelArgs' in line and perf_kern_comp:
-                kernel_args = ''
-                for k, v in perf_kern_comp.items():
-                    kernel_args += "{}={} ".format(k, v)
-                if kernel_args:
-                    output_line = "  ComputeKernelArgs: '{}'".\
-                        format(kernel_args)
             if ds_opts['dataplane'] == 'ovs_dpdk' and perf_ovs_comp:
                 for k, v in OVS_PERF_MAP.items():
                     if k in line and v in perf_ovs_comp:
                         output_line = "  {}: '{}'".format(k, perf_ovs_comp[v])
 
+            # kernel args
+            # (FIXME) use compute's kernel settings for all nodes for now.
+            if perf_kern_comp:
+                if 'NovaSchedulerDefaultFilters' in line:
+                    output_line = \
+                        "  NovaSchedulerDefaultFilters: 'RamFilter," \
+                        "ComputeFilter,AvailabilityZoneFilter," \
+                        "ComputeCapabilitiesFilter," \
+                        "ImagePropertiesFilter,NUMATopologyFilter'"
+                elif 'ComputeKernelArgs' in line:
+                    kernel_args = ''
+                    for k, v in perf_kern_comp.items():
+                        kernel_args += "{}={} ".format(k, v)
+                    if kernel_args:
+                        output_line = "  ComputeKernelArgs: '{}'".\
+                            format(kernel_args)
+
         print(output_line)
 
     logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
 
-    # Modify Network environment
-    for line in fileinput.input(net_env, inplace=True):
-        line = line.strip('\n')
-        if 'ComputeExtraConfigPre' in line and \
-                ds_opts['dataplane'] == 'ovs_dpdk':
-            print('  OS::TripleO::ComputeExtraConfigPre: '
-                  './ovs-dpdk-preconfig.yaml')
-        elif ((perf and perf_kern_comp) or ds_opts.get('rt_kvm')) and \
-                'resource_registry' in line:
-            print("resource_registry:\n"
-                  "  OS::TripleO::NodeUserData: first-boot.yaml")
-        elif perf and perf_kern_comp and \
-                'NovaSchedulerDefaultFilters' in line:
-            print("  NovaSchedulerDefaultFilters: 'RamFilter,"
-                  "ComputeFilter,AvailabilityZoneFilter,"
-                  "ComputeCapabilitiesFilter,ImagePropertiesFilter,"
-                  "NUMATopologyFilter'")
-        else:
-            print(line)
-
-    logging.info("network-environment file written to {}".format(net_env))
-
 
 def generate_ceph_key():
     key = os.urandom(16)
index c059405..f2012b2 100644 (file)
@@ -10,7 +10,6 @@
 
 import yaml
 
-from apex.common import utils
 from apex.common import constants
 
 REQ_DEPLOY_SETTINGS = ['sdn_controller',
@@ -23,7 +22,8 @@ REQ_DEPLOY_SETTINGS = ['sdn_controller',
                        'vpp',
                        'ceph',
                        'gluon',
-                       'rt_kvm']
+                       'rt_kvm',
+                       'os_version']
 
 OPT_DEPLOY_SETTINGS = ['performance',
                        'vsperf',
@@ -39,7 +39,8 @@ OPT_DEPLOY_SETTINGS = ['performance',
 VALID_ROLES = ['Controller', 'Compute', 'ObjectStorage']
 VALID_PERF_OPTS = ['kernel', 'nova', 'vpp', 'ovs']
 VALID_DATAPLANES = ['ovs', 'ovs_dpdk', 'fdio']
-VALID_ODL_VERSIONS = ['carbon', 'nitrogen', 'oxygen', 'master']
+REQ_PATCH_CRITERIA = ['change-id', 'project']
+OPT_PATCH_CRITERIA = ['branch']
 
 
 class DeploySettings(dict):
@@ -104,10 +105,13 @@ class DeploySettings(dict):
                 elif req_set == 'odl_version':
                     self['deploy_options'][req_set] = \
                         constants.DEFAULT_ODL_VERSION
+                elif req_set == 'os_version':
+                    self['deploy_options'][req_set] = \
+                        constants.DEFAULT_OS_VERSION
                 else:
                     self['deploy_options'][req_set] = False
             elif req_set == 'odl_version' and self['deploy_options'][
-                    'odl_version'] not in VALID_ODL_VERSIONS:
+                    'odl_version'] not in constants.VALID_ODL_VERSIONS:
                 raise DeploySettingsException(
                     "Invalid ODL version: {}".format(self[deploy_options][
                         'odl_version']))
@@ -137,11 +141,30 @@ class DeploySettings(dict):
                                                           " ".join(
                                                               VALID_PERF_OPTS)
                                                       ))
+        # validate global params
+        if 'ha_enabled' not in self['global_params']:
+
+            raise DeploySettingsException('ha_enabled is missing in global '
+                                          'parameters of deploy settings file')
+        if 'patches' not in self['global_params']:
+            self['global_params']['patches'] = dict()
+        for node in ('undercloud', 'overcloud'):
+            if node not in self['global_params']['patches']:
+                self['global_params']['patches'][node] = list()
+            else:
+                patches = self['global_params']['patches'][node]
+                assert isinstance(patches, list)
+                for patch in patches:
+                    assert isinstance(patch, dict)
+                    # Assert all required criteria exists for each patch
+                    assert all(i in patch.keys() for i in REQ_PATCH_CRITERIA)
+                    patch_criteria = REQ_PATCH_CRITERIA + OPT_PATCH_CRITERIA
+                    # Assert all patch keys are valid criteria
+                    assert all(i in patch_criteria for i in patch.keys())
 
     def _dump_performance(self):
         """
         Creates performance settings string for bash consumption.
-
         Output will be in the form of a list that can be iterated over in
         bash, with each string being the direct input to the performance
         setting script in the form <role> <category> <key> <value> to
diff --git a/apex/tests/test_apex_common_builder.py b/apex/tests/test_apex_common_builder.py
new file mode 100644 (file)
index 0000000..d042d2b
--- /dev/null
@@ -0,0 +1,86 @@
+##############################################################################
+# Copyright (c) 2017 Tim Rozet (Red Hat)
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import unittest
+
+from apex.builders import common_builder as c_builder
+from apex.common import constants as con
+from mock import patch
+from mock import mock_open
+from mock import MagicMock
+
+
+class TestCommonBuilder(unittest.TestCase):
+    @classmethod
+    def setup_class(cls):
+        """This method is run once for each class before any tests are run"""
+
+    @classmethod
+    def teardown_class(cls):
+        """This method is run once for each class _after_ all tests are run"""
+
+    def setup(self):
+        """This method is run once before _each_ test method is executed"""
+
+    def teardown(self):
+        """This method is run once after _each_ test method is executed"""
+
+    def test_project_to_path(self):
+        project = 'openstack/tripleo-heat-templates'
+        path = '/usr/share/openstack-tripleo-heat-templates'
+        self.assertEquals(c_builder.project_to_path(project), path)
+        project = 'openstack/puppet-tripleo'
+        path = '/etc/puppet/modules/tripleo'
+        self.assertEquals(c_builder.project_to_path(project), path)
+        project = 'openstack/nova'
+        path = '/usr/lib/python2.7/site-packages/nova'
+        self.assertEquals(c_builder.project_to_path(project), path)
+
+    @patch('builtins.open', mock_open())
+    @patch('apex.build_utils.get_patch')
+    @patch('apex.virtual.utils.virt_customize')
+    def test_add_upstream_patches(self, mock_customize, mock_get_patch):
+        mock_get_patch.return_value = None
+        change_id = 'I301370fbf47a71291614dd60e4c64adc7b5ebb42'
+        patches = [{
+            'change-id': change_id,
+            'project': 'openstack/tripleo-heat-templates'
+        }]
+        c_builder.add_upstream_patches(patches, 'dummy.qcow2', '/dummytmp/')
+        assert mock_customize.not_called
+        project_path = '/usr/share/openstack-tripleo-heat-templates'
+        patch_file = "{}.patch".format(change_id)
+        patch_file_path = "/dummytmp/{}".format(patch_file)
+        test_virt_ops = [
+            {con.VIRT_UPLOAD: "{}:{}".format(patch_file_path,
+                                             project_path)},
+            {con.VIRT_RUN_CMD: "cd {} && patch -p1 < {}".format(
+                project_path, patch_file)}]
+        mock_get_patch.return_value = 'some random diff'
+        c_builder.add_upstream_patches(patches, 'dummy.qcow2', '/dummytmp/')
+        mock_customize.assert_called_once_with(test_virt_ops, 'dummy.qcow2')
+
+    @patch('builtins.open', mock_open())
+    @patch('apex.virtual.utils.virt_customize')
+    def test_add_repo(self, mock_customize):
+        c_builder.add_repo('fake/url', 'dummyrepo', 'dummy.qcow2',
+                           '/dummytmp/')
+        repo_file_path = '/dummytmp/dummyrepo.repo'
+        test_virt_ops = [
+            {con.VIRT_UPLOAD: "{}:/etc/yum.repos.d/".format(repo_file_path)}
+        ]
+        mock_customize.assert_called_once_with(test_virt_ops, 'dummy.qcow2')
+
+    @patch('builtins.open', mock_open())
+    @patch('git.Repo.clone_from')
+    def test_create_git_archive(self, mock_git):
+        mock_git.return_value = MagicMock()
+        self.assertEqual(c_builder.create_git_archive('fake/url', 'dummyrepo',
+                                                      '/dummytmp/'),
+                         '/dummytmp/dummyrepo.tar')
index aee39a7..6f2a947 100644 (file)
@@ -9,6 +9,8 @@
 
 import ipaddress
 import os
+import shutil
+import urllib.error
 
 from apex.common import utils
 from apex.settings.network_settings import NetworkSettings
@@ -66,3 +68,35 @@ class TestCommonUtils:
         playbook = 'apex/tests/playbooks/test_failed_playbook.yaml'
         assert_raises(Exception, utils.run_ansible, None,
                       os.path.join(playbook), dry_run=True)
+
+    def test_fetch_upstream_and_unpack(self):
+        url = 'https://github.com/opnfv/apex/blob/master/'
+        utils.fetch_upstream_and_unpack('/tmp/fetch_test',
+                                        url, ['INFO'])
+        assert os.path.isfile('/tmp/fetch_test/INFO')
+        shutil.rmtree('/tmp/fetch_test')
+
+    def test_fetch_upstream_previous_file(self):
+        test_file = 'overcloud-full.tar.md5'
+        url = 'https://images.rdoproject.org/master/delorean/' \
+              'current-tripleo/stable/'
+        os.makedirs('/tmp/fetch_test', exist_ok=True)
+        open("/tmp/fetch_test/{}".format(test_file), 'w').close()
+        utils.fetch_upstream_and_unpack('/tmp/fetch_test',
+                                        url, [test_file])
+        assert os.path.isfile("/tmp/fetch_test/{}".format(test_file))
+        shutil.rmtree('/tmp/fetch_test')
+
+    def test_fetch_upstream_invalid_url(self):
+        url = 'http://notavalidsite.com/'
+        assert_raises(urllib.error.URLError,
+                      utils.fetch_upstream_and_unpack, '/tmp/fetch_test',
+                      url, ['INFO'])
+        shutil.rmtree('/tmp/fetch_test')
+
+    def test_fetch_upstream_and_unpack_tarball(self):
+        url = 'http://artifacts.opnfv.org/apex/tests/'
+        utils.fetch_upstream_and_unpack('/tmp/fetch_test',
+                                        url, ['dummy_test.tar'])
+        assert os.path.isfile('/tmp/fetch_test/test.txt')
+        shutil.rmtree('/tmp/fetch_test')
index 4a0c983..0a9b6c1 100644 (file)
@@ -16,6 +16,7 @@ from mock import MagicMock
 from mock import mock_open
 
 from apex.common.exceptions import ApexDeployException
+from apex.common.constants import DEFAULT_OS_VERSION
 from apex.deploy import deploy_quickstart
 from apex.deploy import validate_cross_settings
 from apex.deploy import build_vms
@@ -140,11 +141,13 @@ class TestDeploy(unittest.TestCase):
                                            'dataplane': 'ovs',
                                            'sfc': False,
                                            'vpn': False,
-                                           'yardstick': 'test'}}
+                                           'yardstick': 'test',
+                                           'os_version': DEFAULT_OS_VERSION}}
         args = mock_parser.return_value.parse_args.return_value
         args.virtual = False
         args.quickstart = False
         args.debug = False
+        args.upstream = False
         net_sets = mock_net_sets.return_value
         net_sets.enabled_network_list = ['external']
         net_sets.__getitem__.side_effect = net_sets_dict.__getitem__
@@ -210,7 +213,8 @@ class TestDeploy(unittest.TestCase):
                                            'dataplane': 'ovs',
                                            'sfc': False,
                                            'vpn': False,
-                                           'yardstick': 'test'}}
+                                           'yardstick': 'test',
+                                           'os_version': DEFAULT_OS_VERSION}}
         args = mock_parser.return_value.parse_args.return_value
         args.virtual = True
         args.quickstart = False
@@ -220,6 +224,7 @@ class TestDeploy(unittest.TestCase):
         args.virt_compute_nodes = 1
         args.virt_compute_ram = None
         args.virt_default_ram = 12
+        args.upstream = False
         net_sets = mock_net_sets.return_value
         net_sets.enabled_network_list = ['admin']
         deploy_sets = mock_deploy_sets.return_value
diff --git a/apex/tests/test_apex_network_data.py b/apex/tests/test_apex_network_data.py
new file mode 100644 (file)
index 0000000..9197e1a
--- /dev/null
@@ -0,0 +1,76 @@
+##############################################################################
+# Copyright (c) 2017 Tim Rozet (Red Hat)
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import os
+
+from nose.tools import (
+    assert_equal,
+    assert_is_instance,
+    assert_raises
+)
+
+from apex.common.constants import (
+    EXTERNAL_NETWORK,
+    STORAGE_NETWORK,
+    ADMIN_NETWORK,
+)
+from apex import NetworkSettings
+from apex.network import network_data
+from apex.settings.network_settings import NetworkSettingsException
+from apex.tests.constants import TEST_CONFIG_DIR
+
+files_dir = os.path.join(TEST_CONFIG_DIR, 'network')
+
+REQUIRED_KEYS = [
+    'name',
+    'vip',
+    'name_lower',
+    'enabled'
+]
+
+
+class TestNetworkData:
+    @classmethod
+    def setup_class(cls):
+        """This method is run once for each class before any tests are run"""
+
+    @classmethod
+    def teardown_class(cls):
+        """This method is run once for each class _after_ all tests are run"""
+
+    def setup(self):
+        """This method is run once before _each_ test method is executed"""
+
+    def teardown(self):
+        """This method is run once after _each_ test method is executed"""
+
+    def test_create_network_data(self):
+        ns = NetworkSettings(os.path.join(files_dir, 'network_settings.yaml'))
+        output = network_data.create_network_data(ns)
+        assert_is_instance(output, list)
+        # TODO(trozet): change this back to 4 after the TripleO (OOO) bug is fixed
+        assert len(output) is 5
+        for net in output:
+            assert_is_instance(net, dict)
+            for key in REQUIRED_KEYS:
+                assert key in net
+                if key == 'vip' or key == 'enabled':
+                    assert_is_instance(net[key], bool)
+                else:
+                    assert net[key] is not None
+
+    def test_negative_create_network_data(self):
+        assert_raises(network_data.NetworkDataException,
+                      network_data.create_network_data, 'blah')
+
+    def test_create_network_data_with_write(self):
+        ns = NetworkSettings(os.path.join(files_dir, 'network_settings.yaml'))
+        network_data.create_network_data(ns, '/tmp/blah_network_data.yaml')
+        assert os.path.isfile('/tmp/blah_network_data.yaml')
+        os.remove('/tmp/blah_network_data.yaml')
diff --git a/apex/tests/test_apex_overcloud_builder.py b/apex/tests/test_apex_overcloud_builder.py
new file mode 100644 (file)
index 0000000..e9a6e6c
--- /dev/null
@@ -0,0 +1,50 @@
+##############################################################################
+# Copyright (c) 2017 Tim Rozet (Red Hat)
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import unittest
+
+from apex.builders import overcloud_builder as oc_builder
+from apex.common import constants as con
+from mock import patch
+
+
+class TestOvercloudBuilder(unittest.TestCase):
+    @classmethod
+    def setup_class(cls):
+        """This method is run once for each class before any tests are run"""
+
+    @classmethod
+    def teardown_class(cls):
+        """This method is run once for each class _after_ all tests are run"""
+
+    def setup(self):
+        """This method is run once before _each_ test method is executed"""
+
+    def teardown(self):
+        """This method is run once after _each_ test method is executed"""
+
+    @patch('apex.builders.common_builder.create_git_archive')
+    @patch('apex.builders.common_builder.add_repo')
+    @patch('apex.virtual.utils.virt_customize')
+    def test_inject_opendaylight(self, mock_customize, mock_add_repo,
+                                 mock_git_archive):
+        mock_git_archive.return_value = '/dummytmp/puppet-opendaylight.tar'
+        archive = '/dummytmp/puppet-opendaylight.tar'
+        test_virt_ops = [
+            {con.VIRT_INSTALL: 'opendaylight'},
+            {con.VIRT_UPLOAD: "{}:/etc/puppet/modules/".format(archive)},
+            {con.VIRT_RUN_CMD: 'rm -rf /etc/puppet/modules/opendaylight'},
+            {con.VIRT_RUN_CMD: "cd /etc/puppet/modules/ && tar xvf "
+                               "puppet-opendaylight.tar"}
+        ]
+        oc_builder.inject_opendaylight(con.DEFAULT_ODL_VERSION, 'dummy.qcow2',
+                                       '/dummytmp/')
+        assert mock_git_archive.called
+        assert mock_add_repo.called
+        mock_customize.assert_called_once_with(test_virt_ops, 'dummy.qcow2')
diff --git a/apex/tests/test_apex_undercloud_builder.py b/apex/tests/test_apex_undercloud_builder.py
new file mode 100644 (file)
index 0000000..c749888
--- /dev/null
@@ -0,0 +1,34 @@
+##############################################################################
+# Copyright (c) 2017 Tim Rozet (Red Hat)
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import unittest
+
+from apex.builders import undercloud_builder as uc_builder
+from mock import patch
+
+
+class TestUndercloudBuilder(unittest.TestCase):
+    @classmethod
+    def setup_class(cls):
+        """This method is run once for each class before any tests are run"""
+
+    @classmethod
+    def teardown_class(cls):
+        """This method is run once for each class _after_ all tests are run"""
+
+    def setup(self):
+        """This method is run once before _each_ test method is executed"""
+
+    def teardown(self):
+        """This method is run once after _each_ test method is executed"""
+
+    @patch('apex.virtual.utils.virt_customize')
+    def test_add_upstream_pkgs(self, mock_customize):
+        uc_builder.add_upstream_packages('dummy.qcow2')
+        assert mock_customize.called
index 5003563..5d73dd4 100644 (file)
@@ -30,13 +30,15 @@ class Undercloud:
     This class represents an Apex Undercloud VM
     """
     def __init__(self, image_path, template_path,
-                 root_pw=None, external_network=False):
+                 root_pw=None, external_network=False,
+                 image_name='undercloud.qcow2'):
         self.ip = None
         self.root_pw = root_pw
         self.external_net = external_network
         self.volume = os.path.join(constants.LIBVIRT_VOLUME_PATH,
                                    'undercloud.qcow2')
         self.image_path = image_path
+        self.image_name = image_name
         self.template_path = template_path
         self.vm = None
         if Undercloud._get_vm():
@@ -134,9 +136,14 @@ class Undercloud:
 
     def setup_volumes(self):
         for img_file in ('overcloud-full.vmlinuz', 'overcloud-full.initrd',
-                         'undercloud.qcow2'):
+                         self.image_name):
             src_img = os.path.join(self.image_path, img_file)
-            dest_img = os.path.join(constants.LIBVIRT_VOLUME_PATH, img_file)
+            if img_file == self.image_name:
+                dest_img = os.path.join(constants.LIBVIRT_VOLUME_PATH,
+                                        'undercloud.qcow2')
+            else:
+                dest_img = os.path.join(constants.LIBVIRT_VOLUME_PATH,
+                                        img_file)
             if not os.path.isfile(src_img):
                 raise ApexUndercloudException(
                     "Required source file does not exist:{}".format(src_img))
@@ -147,7 +154,6 @@ class Undercloud:
         # TODO(trozet):check if resize needed right now size is 50gb
         # there is a lib called vminspect which has some dependencies and is
         # not yet available in pip.  Consider switching to this lib later.
-        # execute ansible playbook
 
     def inject_auth(self):
         virt_ops = list()
index 247dd27..3fd22e3 100644 (file)
@@ -59,15 +59,6 @@ resource_registry:
   OS::TripleO::Compute::Net::SoftwareConfig: nics/compute.yaml
   OS::TripleO::Controller::Net::SoftwareConfig: nics/controller.yaml
 
-  # Services
-  OS::TripleO::Services::SwiftStorage: OS::Heat::None
-  OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None
-  OS::TripleO::Services::SwiftProxy: OS::Heat::None
-
-  # Extra Config
-  OS::TripleO::ComputeExtraConfigPre: OS::Heat::None
-  OS::TripleO::ControllerExtraConfigPre: OS::Heat::None
-
 parameter_defaults:
   NeutronExternalNetworkBridge: 'br-ex'
 
index c94ba31..70243a5 100644 (file)
@@ -180,3 +180,12 @@ parameter_defaults:
     - OS::TripleO::Services::NeutronHoneycombAgent
     - OS::TripleO::Services::NeutronVppAgent
     - OS::TripleO::Services::Vpp
+
+resource_registry:
+  # Services
+  OS::TripleO::Services::SwiftStorage: OS::Heat::None
+  OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None
+  OS::TripleO::Services::SwiftProxy: OS::Heat::None
+  # Extra Config
+  OS::TripleO::ComputeExtraConfigPre: OS::Heat::None
+  OS::TripleO::ControllerExtraConfigPre: OS::Heat::None
index 51094a9..7b7cac1 100644 (file)
@@ -17,6 +17,7 @@ Requires:       initscripts net-tools iputils iproute iptables python34 python34
 Requires:       ipxe-roms-qemu >= 20160127-1
 Requires:       libvirt-devel ansible
 Requires:       python34-iptables python34-cryptography python34-pbr
+Requires:       python34-GitPython python34-pygerrit2
 
 %description
 Scripts for OPNFV deployment using Apex
@@ -75,6 +76,8 @@ install config/inventory/pod_example_settings.yaml %{buildroot}%{_docdir}/opnfv/
 %{_sysconfdir}/opnfv-apex/os-nosdn-ovs_dpdk-noha.yaml
 %{_sysconfdir}/opnfv-apex/os-nosdn-nofeature-ha.yaml
 %{_sysconfdir}/opnfv-apex/os-nosdn-performance-ha.yaml
+%{_sysconfdir}/opnfv-apex/os-nosdn-pike-noha.yaml
+%{_sysconfdir}/opnfv-apex/os-odl-pike-noha.yaml
 %{_sysconfdir}/opnfv-apex/os-nosdn-ovs_dpdk-ha.yaml
 %{_sysconfdir}/opnfv-apex/os-nosdn-kvm-ha.yaml
 %{_sysconfdir}/opnfv-apex/os-nosdn-kvm-noha.yaml
@@ -113,6 +116,10 @@ install config/inventory/pod_example_settings.yaml %{buildroot}%{_docdir}/opnfv/
 %doc %{_docdir}/opnfv/inventory.yaml.example
 
 %changelog
+* Wed Oct 25 2017 Tim Rozet <trozet@redhat.com> - 5.0-9
+- Adds GitPython and pygerrit2 dependencies
+* Mon Oct 02 2017 Tim Rozet <trozet@redhat.com> - 5.0-8
+- Adds upstream deployment scenario
 * Wed Sep 20 2017 Tim Rozet <trozet@redhat.com> - 5.0-7
 - Add calipso
 * Fri Sep 08 2017 Tim Rozet <trozet@redhat.com> - 5.0-6
diff --git a/build/upstream-environment.yaml b/build/upstream-environment.yaml
new file mode 100644 (file)
index 0000000..ef6cdb6
--- /dev/null
@@ -0,0 +1,31 @@
+---
+# Environment file used to list common parameters required for all deployment
+# types
+
+parameters:
+  CloudDomain: opnfvlf.org
+
+parameter_defaults:
+  NeutronNetworkVLANRanges: 'datacentre:500:525'
+  SshServerOptions:
+    HostKey:
+      - '/etc/ssh/ssh_host_rsa_key'
+      - '/etc/ssh/ssh_host_ecdsa_key'
+      - '/etc/ssh/ssh_host_ed25519_key'
+    SyslogFacility: 'AUTHPRIV'
+    AuthorizedKeysFile: '.ssh/authorized_keys'
+    PasswordAuthentication: 'no'
+    ChallengeResponseAuthentication: 'no'
+    GSSAPIAuthentication: 'no'
+    GSSAPICleanupCredentials: 'no'
+    UsePAM: 'yes'
+    X11Forwarding: 'yes'
+    UsePrivilegeSeparation: 'sandbox'
+    AcceptEnv:
+      - 'LANG LC_CTYPE LC_NUMERIC LC_TIME LC_COLLATE LC_MONETARY LC_MESSAGES'
+      - 'LC_PAPER LC_NAME LC_ADDRESS LC_TELEPHONE LC_MEASUREMENT'
+      - 'LC_IDENTIFICATION LC_ALL LANGUAGE'
+      - 'XMODIFIERS'
+    Subsystem: 'sftp  /usr/libexec/openssh/sftp-server'
+    UseDNS: 'no'
+  #ExtraConfig:
diff --git a/config/deploy/os-nosdn-pike-noha.yaml b/config/deploy/os-nosdn-pike-noha.yaml
new file mode 100644 (file)
index 0000000..1141784
--- /dev/null
@@ -0,0 +1,14 @@
+---
+global_params:
+  ha_enabled: false
+  patches:
+    undercloud:
+      - change-id: I301370fbf47a71291614dd60e4c64adc7b5ebb42
+        project: openstack/tripleo-heat-templates
+deploy_options:
+  os_version: pike
+  sdn_controller: false
+  tacker: false
+  congress: false
+  sfc: false
+  vpn: false
diff --git a/config/deploy/os-odl-pike-noha.yaml b/config/deploy/os-odl-pike-noha.yaml
new file mode 100644 (file)
index 0000000..44eff66
--- /dev/null
@@ -0,0 +1,15 @@
+---
+global_params:
+  ha_enabled: false
+  patches:
+    undercloud:
+      - change-id: I301370fbf47a71291614dd60e4c64adc7b5ebb42
+        project: openstack/tripleo-heat-templates
+deploy_options:
+  os_version: pike
+  sdn_controller: opendaylight
+  odl_version: carbon
+  tacker: false
+  congress: false
+  sfc: false
+  vpn: false
index 19e4638..8acfa76 100644 (file)
       with_items:
         - network-environment.yaml
         - instackenv.json
-        - opnfv-environment.yaml
         - overcloud-full.qcow2
         - deploy_command
         - virtual-environment.yaml
         - baremetal-environment.yaml
+        - "{{ apex_env_file }}"
+    - name: Copy network data to undercloud
+      copy:
+        src: "{{ apex_temp_dir }}/network_data.yaml"
+        dest: "/home/stack/network_data.yaml"
+        owner: stack
+        group: stack
+        mode: 0644
+      when: os_version != 'ocata'
     - copy:
         src: "{{ apex_temp_dir }}/storage-environment.yaml"
         dest: /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
index 617ec23..0326a8c 100644 (file)
@@ -9,3 +9,5 @@ cryptography
 python-ipmi
 PyYAML
 Jinja2>=2.8
+GitPython
+pygerrit2
index 87cb6b9..52ad12f 100644 (file)
--- a/setup.cfg
+++ b/setup.cfg
@@ -33,6 +33,7 @@ data_files =
     share/opnfv-apex/ =
         build/network-environment.yaml
         build/opnfv-environment.yaml
+        build/upstream-environment.yaml
         build/nics-template.yaml.jinja2
         build/csit-environment.yaml
         build/virtual-environment.yaml