Updates ODL Pipeline scripts for CSIT
author     Tim Rozet <trozet@redhat.com>
           Thu, 12 Jan 2017 17:27:41 +0000 (12:27 -0500)
committer  Tim Rozet <trozet@redhat.com>
           Mon, 30 Jan 2017 19:36:24 +0000 (14:36 -0500)
Changes Include:
 - Changes TripleOManager to TripleOIntrospector, which only introspects
   the current Apex deployment and dumps a YAML config to be bundled with
   snapshots.
 - Adds TripleoHelper, which holds all of the TripleO helper functions.
   Many of these are done via virsh, so the idea is to have, at some
   point in time, a libvirtHelper, or to use another libvirt Python
   library. It is a class so that it can be inherited from later on.
 - Adds a new argument to the service utils for passing the SSH private
   key used to connect to nodes (see the usage sketch after this list).
 - Some general cleanup and consolidation of logic.
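
A rough usage sketch of the reworked entry points (the tarball name, key
path, and working directory below are illustrative only, not part of this
change):

    # On the undercloud: introspect the running Apex deployment and
    # dump the pod config (defaults to ./node.yaml)
    ./tripleo_introspector.sh --out-file ./node.yaml

    # Reinstall/upgrade OpenDaylight using the generated pod config,
    # an ODL tarball, and an explicit SSH private key
    bash ./odl_reinstaller.sh --pod-config ./node.yaml \
        --odl-artifact /tmp/distribution-karaf-0.6.0-SNAPSHOT.tar.gz \
        --ssh-key-file /home/stack/.ssh/id_rsa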

JIRA: APEX-363

Change-Id: I792db0fac3f4e81969fe85c05fc298fe5af02537
Signed-off-by: Tim Rozet <trozet@redhat.com>
21 files changed:
odl-pipeline/lib/common/config.py [deleted file]
odl-pipeline/lib/common/constants.py [new file with mode: 0644]
odl-pipeline/lib/deployment_cloner.sh [deleted file]
odl-pipeline/lib/deployment_cloner/deployment_cloner.py [deleted file]
odl-pipeline/lib/odl_reinstaller.sh [changed mode: 0755->0644]
odl-pipeline/lib/odl_reinstaller/__init__.py [changed mode: 0755->0644]
odl-pipeline/lib/odl_reinstaller/install_odl.pp [deleted file]
odl-pipeline/lib/odl_reinstaller/odl_reinstaller.py [changed mode: 0755->0644]
odl-pipeline/lib/tripleo_introspector.sh [moved from odl-pipeline/lib/tripleo_manager.sh with 75% similarity]
odl-pipeline/lib/tripleo_introspector/__init__.py [moved from odl-pipeline/lib/deployment_cloner/__init__.py with 100% similarity]
odl-pipeline/lib/tripleo_introspector/tripleo_introspector.py [new file with mode: 0755]
odl-pipeline/lib/tripleo_manager/__init__.py [deleted file]
odl-pipeline/lib/tripleo_manager/tripleo_manager.py [deleted file]
odl-pipeline/lib/utils/node_manager.py
odl-pipeline/lib/utils/processutils.py
odl-pipeline/lib/utils/service.py
odl-pipeline/lib/utils/shutil.py
odl-pipeline/lib/utils/ssh_util.py
odl-pipeline/lib/utils/tripleo_helper.py [new file with mode: 0644]
odl-pipeline/lib/utils/utils_log.py
odl-pipeline/lib/utils/utils_yaml.py

diff --git a/odl-pipeline/lib/common/config.py b/odl-pipeline/lib/common/config.py
deleted file mode 100644 (file)
index 58ebff5..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-
-# inside the clone info folder
-ID_RSA_PATH = '/undercloud_ssh/'
-NODE_YAML_PATH = '/node.yaml'
-OVERCLOUDRC_PATH = '/openstack.cred'
diff --git a/odl-pipeline/lib/common/constants.py b/odl-pipeline/lib/common/constants.py
new file mode 100644 (file)
index 0000000..bf5de63
--- /dev/null
@@ -0,0 +1,2 @@
+# inside the pod_config dir
+NODE_YAML_PATH = './node.yaml'
diff --git a/odl-pipeline/lib/deployment_cloner.sh b/odl-pipeline/lib/deployment_cloner.sh
deleted file mode 100755 (executable)
index 3cb9354..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/bash
-DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
-set -e
-export PYTHONPATH=$PYTHONPATH:$DIR
-mkdir -p $DIR/tmp
-cd $DIR
-python ./deployment_cloner/deployment_cloner.py $@
diff --git a/odl-pipeline/lib/deployment_cloner/deployment_cloner.py b/odl-pipeline/lib/deployment_cloner/deployment_cloner.py
deleted file mode 100755 (executable)
index dc2f3ba..0000000
+++ /dev/null
@@ -1,66 +0,0 @@
-#!/bin/python
-from utils import utils_yaml
-from utils.utils_log import for_all_methods, log_enter_exit
-from utils.service import Service
-from utils.node_manager import NodeManager
-from utils.processutils import execute
-from common import config as CONFIG
-
-
-@for_all_methods(log_enter_exit)
-class DeploymentCloner(Service):
-
-    undercloud_root_dir = '~/DeploymentCloner/'
-
-    def create_cli_parser(self, parser):
-        parser.add_argument('--undercloud-ip', help="ip of undercloud",
-                            required=True)
-        parser.add_argument('--dest-dir', help="where everything should go to",
-                            required=True)
-        return parser
-
-    def undercloud_dict(self, undercloud_ip):
-        return {'address': undercloud_ip,
-                'user': 'stack'}
-
-    def run(self, sys_args, config):
-        dest_dir = sys_args.dest_dir if sys_args.dest_dir[:-1] == '/'\
-            else (sys_args.dest_dir + '/')
-        self.node_manager = NodeManager()
-        underlcloud = self.node_manager.add_node(
-            'undercloud',
-            self.undercloud_dict(sys_args.undercloud_ip))
-        # copy all files to undercloud
-        underlcloud.copy('to', '.', self.undercloud_root_dir)
-        # generate the undercloud yaml
-        underlcloud.execute('cd %s; ./tripleo_manager.sh --out ./cloner-info/'
-                            % self.undercloud_root_dir, log_true=True)
-        underlcloud.copy('from', dest_dir,
-                         self.undercloud_root_dir + '/cloner-info/')
-        node_yaml_path = dest_dir + '/cloner-info/' + CONFIG.NODE_YAML_PATH
-        node_yaml = utils_yaml.read_dict_from_yaml(node_yaml_path)
-        for name, node in node_yaml['servers'].iteritems():
-            node['vNode-name'] = self.get_virtual_node_name_from_mac(
-                node['orig-ctl-mac'])
-        utils_yaml.write_dict_to_yaml(node_yaml, node_yaml_path)
-        # TODO copy qcow and tar it
-
-    def get_virtual_node_name_from_mac(self, mac):
-        vNode_names, _ = execute('virsh list|awk \'{print $2}\'', shell=True)
-        for node in vNode_names.split('\n'):
-            if 'baremetal' in node:
-                admin_net_mac, _ = execute(
-                    'virsh domiflist %s |grep admin |awk \'{print $5}\''
-                    % node, shell=True)
-                if admin_net_mac.replace('\n', '') == mac:
-                    return node
-        raise Exception('Could not find corresponding virtual node for MAC: %s'
-                        % mac)
-
-
-def main():
-    main = DeploymentCloner()
-    main.start()
-
-if __name__ == '__main__':
-    main()
diff --git a/odl-pipeline/lib/odl_reinstaller.sh b/odl-pipeline/lib/odl_reinstaller.sh
old mode 100755 (executable)
new mode 100644 (file)
diff --git a/odl-pipeline/lib/odl_reinstaller/__init__.py b/odl-pipeline/lib/odl_reinstaller/__init__.py
old mode 100755 (executable)
new mode 100644 (file)
diff --git a/odl-pipeline/lib/odl_reinstaller/install_odl.pp b/odl-pipeline/lib/odl_reinstaller/install_odl.pp
deleted file mode 100755 (executable)
index 97a00bd..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-include ::tripleo::packages
-
-if count(hiera('ntp::servers')) > 0 {
-  include ::ntp
-}
-
-class {"opendaylight":
-  extra_features => any2array(hiera('opendaylight::extra_features', 'odl-netvirt-openstack')),
-  odl_rest_port  => hiera('opendaylight::odl_rest_port'),
-  enable_l3      => hiera('opendaylight::enable_l3', 'no'),
-  #tarball_url    =>  'file:///home/heat-admin/distribution-karaf-0.6.0-SNAPSHOT.tar.gz',
-  #unitfile_url   =>  'file:///home/heat-admin/opendaylight-unitfile.tar.gz' 
-}
-
-
diff --git a/odl-pipeline/lib/odl_reinstaller/odl_reinstaller.py b/odl-pipeline/lib/odl_reinstaller/odl_reinstaller.py
old mode 100755 (executable)
new mode 100644 (file)
index 25ec21b..c7a78c5
@@ -1,56 +1,97 @@
 #!/bin/python
-import os
+import re
+import time
+
+from utils.processutils import ProcessExecutionError
+from tripleo_introspector.tripleo_introspector import TripleOIntrospector
+from utils import processutils
 from utils.utils_log import LOG, for_all_methods, log_enter_exit
 from utils.service import Service
 from utils.node_manager import NodeManager
-from utils.ssh_util import SSH_CONFIG
-from common import config as CONFIG
 from utils import utils_yaml
 
 
 @for_all_methods(log_enter_exit)
 class ODLReInstaller(Service):
+    def __init__(self):
+        self.netvirt_url = "restconf/operational/network-topology:" \
+                           "network-topology/topology/netvirt:1"
+        self.nodes = None
+        self.odl_node = None
 
     def run(self, sys_args, config):
-        cloner_info_path = sys_args.cloner_info
-        SSH_CONFIG['ID_RSA_PATH'] = (cloner_info_path + CONFIG.ID_RSA_PATH +
-                                     'id_rsa')
-        node_config = utils_yaml.read_dict_from_yaml(
-            cloner_info_path + CONFIG.NODE_YAML_PATH)
+        pod_config = sys_args.pod_config
+        odl_artifact = sys_args.odl_artifact
+        node_config = utils_yaml.read_dict_from_yaml(pod_config)
+        # TODO Add validation of incoming node config
+        # self.check_node_config()
+
         # copy ODL to all nodes where it need to be copied
         self.nodes = NodeManager(node_config['servers']).get_nodes()
         for node in self.nodes:
-            LOG.info('Disconnecting OpenVSwitch from controller on node %s'
-                     % node.name)
             node.execute('ovs-vsctl del-controller br-int', as_root=True)
+        for node in self.nodes:
+            # Check if ODL runs on this node
+            rv, _ = node.execute('ps aux |grep -v grep |grep karaf',
+                                 as_root=True, check_exit_code=[0, 1])
+            if 'java' in rv:
+                self.odl_node = node
+                LOG.info("ODL node found: {}".format(self.odl_node.name))
+                node.execute('systemctl stop opendaylight', as_root=True)
+
+            self.disconnect_ovs(node)
+
+        # Upgrade ODL
+        self.reinstall_odl(self.odl_node, odl_artifact)
 
+        # Wait for ODL to come back up
+        full_netvirt_url = "http://{}:8081/{}".format(
+            self.odl_node.config['address'], self.netvirt_url)
+        counter = 1
+        while counter <= 10:
+            try:
+                self.odl_node.execute("curl --fail -u admin:admin {}".format(
+                    full_netvirt_url))
+                LOG.info("New OpenDaylight NetVirt is Up")
+                break
+            except processutils.ProcessExecutionError:
+                LOG.warning("NetVirt not up. Attempt: {}".format(counter))
+                if counter >= 10:
+                    LOG.warning("NetVirt not detected as up after 10 "
+                                "attempts...deployment may be unstable!")
+            counter += 1
+            time.sleep(10)
+
+        # Reconnect OVS instances
+        LOG.info("Reconnecting OVS instances")
         for node in self.nodes:
-            if 'ODL' in node.config:
-                tar_tmp_path = '/tmp/odl-artifact/'
-                if node.config['ODL'].get('active'):
-                    tarball_name = os.path.basename(sys_args.odl_artifact)
-                    node.copy('to', sys_args.odl_artifact,
-                              '/tmp/odl-artifact/' + tarball_name)
-                    node.execute('rm -rf /opt/opendaylight/*', as_root=True)
-                    node.execute('mkdir -p /opt/opendaylight/*', as_root=True)
-                    LOG.info('Extracting %s to /opt/opendaylight/ on node %s'
-                             % (tarball_name, node.name))
-                    node.execute('tar -zxf %s --strip-components=1 -C '
-                                 '/opt/opendaylight/'
-                                 % (tar_tmp_path + tarball_name), as_root=True)
-                    node.execute('chown -R odl:odl /opt/opendaylight',
-                                 as_root=True)
-                    node.execute('rm -rf ' + tar_tmp_path, as_root=True)
-                    LOG.info('Installing and Starting Opendaylight on node %s'
-                             % node.name)
-                    node.copy('to', 'odl_reinstaller/install_odl.pp',
-                              tar_tmp_path)
-                    node.execute('puppet apply --modulepath='
-                                 '/etc/puppet/modules/ %sinstall_odl.pp '
-                                 '--verbose --debug --trace '
-                                 '--detailed-exitcodes'
-                                 % tar_tmp_path, check_exit_code=[2],
-                                 as_root=True)
+            self.connect_ovs(node)
+        # Sleep for a few seconds to allow TCP connections to come up
+        time.sleep(5)
+        # Validate OVS instances
+        LOG.info("Validating OVS configuration")
+        for node in self.nodes:
+            self.validate_ovs(node)
+        LOG.info("OpenDaylight Upgrade Successful!")
+
+    @staticmethod
+    def reinstall_odl(node, odl_tarball):
+        tar_tmp_path = '/tmp/odl-artifact/'
+        node.copy('to', odl_tarball, tar_tmp_path + odl_tarball)
+        node.execute('rm -rf /opt/opendaylight/*', as_root=True)
+        node.execute('mkdir -p /opt/opendaylight/*', as_root=True)
+        LOG.info('Extracting %s to /opt/opendaylight/ on node %s'
+                 % (odl_tarball, node.name))
+        node.execute('tar -zxf %s --strip-components=1 -C '
+                     '/opt/opendaylight/'
+                     % (tar_tmp_path + odl_tarball), as_root=True)
+        node.execute('chown -R odl:odl /opt/opendaylight', as_root=True)
+        node.execute('rm -rf ' + tar_tmp_path, as_root=True)
+        LOG.info('Installing and Starting Opendaylight on node %s' % node.name)
+        node.execute('puppet apply -e "include opendaylight" '
+                     '--modulepath=/etc/puppet/modules/ '
+                     '--verbose --debug --trace --detailed-exitcodes',
+                     check_exit_code=[2], as_root=True)
         # --detailed-exitcodes: Provide extra information about the run via
         # exit codes. If enabled, 'puppet apply' will use the following exit
         # codes:
@@ -61,27 +102,98 @@ class ODLReInstaller(Service):
         # 4: The run succeeded, and some resources failed.
         # 6: The run succeeded, and included both changes and failures.
 
-        for node in self.nodes:
-            LOG.info('Connecting OpenVSwitch to controller on node %s'
-                     % node.name)
-            ovs_controller = node.config.get('ovs-controller')
-            if ovs_controller:
-                node.execute('ovs-vsctl set-controller br-int %s'
-                             % ovs_controller, as_root=True)
+    @staticmethod
+    def disconnect_ovs(node):
+        LOG.info('Disconnecting OpenVSwitch from controller on node %s'
+                 % node.name)
+        node.execute('ovs-vsctl del-controller br-int', as_root=True)
+        node.execute('ovs-vsctl del-manager', as_root=True)
+        LOG.info('Deleting Tunnel and Patch interfaces')
+        # Note this is required because ODL fails to reconcile pre-created
+        # ports
+        for br in 'br-int', 'br-ex':
+            LOG.info("Checking for ports on {}".format(br))
+            try:
+                out, _ = node.execute('ovs-vsctl list-ports {} | grep -E '
+                                      '"tun|patch"'.format(br),
+                                      as_root=True, shell=True)
+                ports = out.rstrip().split("\n")
+                for port in ports:
+                    LOG.info('Deleting port: {}'.format(port))
+                    node.execute('ovs-vsctl del-port {} {}'.format(br, port),
+                                 as_root=True)
+            except ProcessExecutionError:
+                LOG.info("No tunnel or patch ports configured")
+
+    @staticmethod
+    def connect_ovs(node):
+        LOG.info('Connecting OpenVSwitch to controller on node %s' % node.name)
+        ovs_manager_str = ' '.join(node.config['ovs-managers'])
+        node.execute('ovs-vsctl set-manager %s' % ovs_manager_str,
+                     as_root=True)
+
+    @staticmethod
+    def validate_ovs(node):
+        LOG.info("Validating OVS configuration for node: {}".format(node.name))
+        # Validate ovs manager is connected
+        out, _ = node.execute('ovs-vsctl show ', as_root=True)
+        mgr_search = \
+            re.search('Manager\s+\"tcp:[0-9.]+:6640\"\n\s*'
+                      'is_connected:\s*true', out)
+        if mgr_search is None:
+            raise ODLReinstallerException("OVS Manager is not connected")
+        else:
+            LOG.info("OVS is connected to OVSDB manager")
+
+        # Validate ovs controller is configured
+        cfg_controller = node.config['ovs-controller']
+        ovs_controller = TripleOIntrospector().get_ovs_controller(node)
+        if cfg_controller == '' or cfg_controller is None:
+            if ovs_controller is None or ovs_controller == '':
+                raise ODLReinstallerException("OVS controller is not set "
+                                              "for node: {}"
+                                              "".format(node.address))
+        elif ovs_controller != cfg_controller:
+            raise ODLReinstallerException("OVS controller is not set to the "
+                                          "correct pod config value on {}. "
+                                          "Config controller: {}, current "
+                                          "controller: {}"
+                                          "".format(node.address,
+                                                    cfg_controller,
+                                                    ovs_controller))
+        LOG.info("OVS Controller set correctly")
+        # Validate ovs controller is connected
+        ctrl_search = \
+            re.search('Controller\s+\"tcp:[0-9\.]+:6653\"\n\s*'
+                      'is_connected:\s*true', out)
+        if ctrl_search is None:
+            raise ODLReinstallerException("OVS Controller is not connected")
+        else:
+            LOG.info("OVS is connected to OpenFlow controller")
 
     def create_cli_parser(self, parser):
-        parser.add_argument('--cloner-info',
-                            help=("Give the path to the clone info"),
+        parser.add_argument('--pod-config',
+                            help="File containing pod configuration",
+                            dest='pod_config',
                             required=True)
         parser.add_argument('--odl-artifact',
-                            help=("Path to Opendaylight tarball"),
+                            help="Path to Opendaylight tarball to use for "
+                                 "upgrade",
+                            dest='odl_artifact',
                             required=True)
         return parser
 
 
+class ODLReinstallerException(Exception):
+    def __init__(self, value):
+        self.value = value
+
+    def __str__(self):
+        return self.value
+
+
 def main():
-    main = ODLReInstaller()
-    main.start()
+    ODLReInstaller().start()
 
 if __name__ == '__main__':
     main()
diff --git a/odl-pipeline/lib/tripleo_manager.sh b/odl-pipeline/lib/tripleo_introspector.sh
similarity index 75%
rename from odl-pipeline/lib/tripleo_manager.sh
rename to odl-pipeline/lib/tripleo_introspector.sh
index 9c999a3..8d1b9de 100755 (executable)
@@ -7,4 +7,4 @@ set -e
 export PYTHONPATH=$PYTHONPATH:$DIR
 mkdir -p $DIR/tmp
 cd $DIR
-python ./tripleo_manager/tripleo_manager.py $@
+python ./tripleo_introspector/tripleo_introspector.py $@
diff --git a/odl-pipeline/lib/tripleo_introspector/tripleo_introspector.py b/odl-pipeline/lib/tripleo_introspector/tripleo_introspector.py
new file mode 100755 (executable)
index 0000000..dd378ed
--- /dev/null
@@ -0,0 +1,116 @@
+import os
+import re
+
+from utils import processutils
+from utils.node import Node
+from utils.utils_log import log_enter_exit, for_all_methods, LOG
+from utils.service import Service
+from utils.shutil import shutil
+from common import constants
+from utils import utils_yaml
+from utils.tripleo_helper import TripleoHelper
+
+
+@for_all_methods(log_enter_exit)
+class TripleOIntrospector(Service):
+
+    def __init__(self):
+        self.overcloud_user = 'heat-admin'
+        self.node_info = {'servers': {}}
+
+    def create_cli_parser(self, parser):
+        parser.add_argument('--out-file',
+                            help="File where pod config will be written to. "
+                                 "Defaults to ./node.yaml",
+                            default=constants.NODE_YAML_PATH,
+                            dest="out_file",
+                            required=False)
+        return parser
+
+    def run(self, sys_args, config):
+        self.gen_node_info()
+        shutil.mkdir_if_not_exist(os.path.dirname(sys_args.out_file))
+        utils_yaml.write_dict_to_yaml(self.node_info, sys_args.out_file)
+
+    def gen_node_info(self):
+        overcloud_ip_list = TripleoHelper.find_overcloud_ips()
+
+        for node_ip in overcloud_ip_list:
+            LOG.info('Introspecting node %s' % node_ip)
+            node = Node('intro-%s' % node_ip, address=node_ip,
+                        user=self.overcloud_user)
+            node_mac = None
+            virsh_domain = None
+            server_name, _ = node.execute('hostname')
+            server_name = server_name.rstrip()
+            if 'overcloud-controller' in server_name:
+                node_type = 'controller'
+            elif 'overcloud-novacompute' in server_name:
+                node_type = 'compute'
+            else:
+                raise TripleOInspectorException('Unknown type '
+                                                '(controller/compute) %s '
+                                                % server_name)
+            try:
+                processutils.execute('ping -c 1 %s' % node_ip)
+                res, _ = processutils.execute('/usr/sbin/arp -a '
+                                              '%s' % node_ip)
+                node_mac = \
+                    re.search('([0-9a-z]+:){5}[0-9a-z]+', res).group(0)
+                virsh_domain = \
+                    TripleoHelper.get_virtual_node_name_from_mac(node_mac)
+            except AttributeError:
+                LOG.warning("Unable to find MAC address for node {"
+                            "}".format(node_ip))
+
+            # find ovs controller and manager
+            ovs_controller = self.get_ovs_controller(node)
+            out, _ = node.execute('ovs-vsctl get-manager', as_root=True)
+            ovs_managers = out.rstrip().split("\n")
+            if all(ovs_manager == '' for ovs_manager in ovs_managers):
+                LOG.warning("OVS managers for node {} is empty!".format(
+                    node_ip))
+            self.node_info['servers'][server_name] = {
+                'address': node_ip,
+                'user': self.overcloud_user,
+                'type': node_type,
+                'orig-ctl-mac': node_mac,
+                'vNode-name': virsh_domain,
+                'ovs-controller': ovs_controller,
+                'ovs-managers': ovs_managers}
+
+    @staticmethod
+    def copy_ssh_id_and_overcloudrc(dest):
+        undercloud = TripleoHelper.get_undercloud()
+        # copy overcloudrc
+        undercloud.copy('from', dest, './overcloudrc')
+
+        # copy ssh id
+        undercloud.copy('from', dest, '.ssh/id_rsa')
+
+    @staticmethod
+    def get_ovs_controller(node):
+        # find ovs controller and manager
+        ovs_controller, _ = node.execute('ovs-vsctl get-controller '
+                                         'br-int', as_root=True)
+        ovs_controller = ovs_controller.rstrip()
+        if ovs_controller == '':
+            LOG.warning("OVS controller for node {} is empty!".format(
+                node.address))
+        else:
+            return ovs_controller
+
+
+class TripleOInspectorException(Exception):
+    def __init__(self, value):
+        self.value = value
+
+    def __str__(self):
+        return self.value
+
+
+def main():
+    TripleOIntrospector().start()
+
+if __name__ == '__main__':
+    main()
diff --git a/odl-pipeline/lib/tripleo_manager/__init__.py b/odl-pipeline/lib/tripleo_manager/__init__.py
deleted file mode 100755 (executable)
index e69de29..0000000
diff --git a/odl-pipeline/lib/tripleo_manager/tripleo_manager.py b/odl-pipeline/lib/tripleo_manager/tripleo_manager.py
deleted file mode 100755 (executable)
index 0a4ec0d..0000000
+++ /dev/null
@@ -1,179 +0,0 @@
-import os
-import yaml
-from novaclient.client import Client as nova
-from novaclient import api_versions
-import ironicclient.client
-from neutronclient.v2_0.client import Client as neutron
-from keystoneauth1 import identity
-from keystoneauth1 import session
-
-from utils.utils_log import log_enter_exit, for_all_methods, LOG
-from utils.service import Service
-from utils.shutil import shutil
-from utils.node_manager import NodeManager
-from common import config as CONFIG
-
-
-@for_all_methods(log_enter_exit)
-class TripleOManager(Service):
-
-    def __init__(self):
-        self.auth = None
-        self.session = None
-        self.novacl = self._get_nova_client()
-        self.ironiccl = self._get_ironic_client()
-        self.neutroncl = self._get_neutron_client()
-
-    def create_cli_parser(self, parser):
-        parser.add_argument('--out', help="where env_info should go to",
-                            required=True)
-        return parser
-
-    def run(self, sys_args, config):
-        self.gen_node_info()
-        self.prepare_for_ci_pipeline()
-        self.gen_env_info(sys_args)
-
-    def prepare_for_ci_pipeline(self):
-        node_manager = NodeManager(config=self.node_info['servers'])
-        for node in node_manager.get_nodes():
-
-            # Check is ODL runs on this node
-            self.node_info['servers'][node.name]['ODL'] = {}
-            rv, _ = node.execute('ps aux |grep -v grep |grep karaf',
-                                 as_root=True, check_exit_code=[0, 1])
-            if 'java' in rv:
-                self.node_info['servers'][node.name]['ODL']['active'] = True
-
-            if (node.is_dir('/opt/opendaylight') or
-                    node.is_file('/opt/opendaylight-was-there')):
-                self.node_info['servers'][node.name]['ODL']['dir_exsist'] = \
-                    True
-                # Remove existing ODL version
-                node.execute('touch /opt/opendaylight-was-there', as_root=True)
-                node.execute('rm -rf /opt/opendaylight', as_root=True)
-
-            # Store ovs controller info
-            rv, _ = node.execute('ovs-vsctl get-controller br-int',
-                                 as_root=True)
-            self.node_info['servers'][node.name]['ovs-controller'] = \
-                rv.replace('\n', '')
-
-            # Disconnect ovs
-            node.execute('ovs-vsctl del-controller br-int', as_root=True)
-
-    def gen_env_info(self, sys_args):
-        shutil.mkdir_if_not_exsist(sys_args.out)
-        self.write_out_yaml_config(self.node_info,
-                                   sys_args.out + CONFIG.NODE_YAML_PATH)
-
-        # copy ssh key
-        shutil.copy('to', '/home/stack/.ssh/id_rsa',
-                    sys_args.out + CONFIG.ID_RSA_PATH)
-        shutil.copy('to', '/home/stack/.ssh/id_rsa.pub',
-                    sys_args.out + CONFIG.ID_RSA_PATH)
-        # copy rc files
-        shutil.copy('to', '/home/stack/stackrc',
-                    sys_args.out)
-        shutil.copy('to', '/home/stack/overcloudrc',
-                    sys_args.out + CONFIG.OVERCLOUDRC_PATH)
-
-    def gen_node_info(self):
-        for network in self.neutroncl.list_networks()['networks']:
-            if network['name'] == 'ctlplane':
-                ctlplane_id = network['id']
-        if hasattr(self, 'node_info') and self.node_info:
-            return self.node_info
-        self.node_info = {'servers': {}}
-        for server in self.novacl.servers.list():
-            if 'overcloud-controller' in server.name:
-                type = 'controller'
-            elif 'overcloud-novacompute' in server.name:
-                type = 'compute'
-            else:
-                raise Exception('Unknown type (controller/compute) %s '
-                                % server.name)
-            ctlplane_mac = None
-            for interface in server.interface_list():
-                if interface.net_id == ctlplane_id:
-                    ctlplane_mac = interface.mac_addr
-            if not ctlplane_mac:
-                raise Exception('Could not find mac address for ctl-plane for '
-                                'server %s' % server.name)
-            self.node_info['servers'][server.name] = {
-                'address': self.get_address_of_node(server=server),
-                'user': 'heat-admin',
-                'type': type,
-                'orig-ctl-mac': ctlplane_mac}
-
-    def write_out_yaml_config(self, config, path):
-        with open(path, 'w') as f:
-            yaml.dump(config, f, default_flow_style=False)
-
-    def _check_credentials(self):
-        for cred in ['OS_USERNAME', 'OS_PASSWORD',
-                     'OS_TENANT_NAME', 'OS_AUTH_URL']:
-            if not os.environ.get(cred):
-                raise Exception('Use export %s=...' % cred)
-
-    def create_auth(self):
-        self._check_credentials()
-        if not self.auth:
-            self.auth = identity.Password(
-                auth_url=os.environ['OS_AUTH_URL'],
-                username=os.environ['OS_USERNAME'],
-                password=os.environ['OS_PASSWORD'],
-                project_name=os.environ['OS_TENANT_NAME'])
-        if not self.session:
-            self.session = session.Session(auth=self.auth)
-
-    def _get_nova_client(self):
-        self.create_auth()
-        return nova(api_versions.APIVersion("2.0"), session=self.session)
-
-    def _get_ironic_client(self):
-        self.create_auth()
-        return ironicclient.client.get_client(1, session=self.session)
-
-    def _get_neutron_client(self):
-        self.create_auth()
-        return neutron(session=self.session)
-
-    def get_node_name_by_ilo_address(self, ilo_address):
-        try:
-            node_name = None
-            for node in self.ironiccl.node.list():
-                nova_uuid = node.instance_uuid
-                if ilo_address == self.ironiccl.node.get_by_instance_uuid(
-                        nova_uuid).driver_info['ilo_address']:
-                    node_name = self.novacl.servers.find(id=nova_uuid).name
-                    break
-            if not node_name:
-                raise Exception('Cannot get nova instance for ilo address %s'
-                                % ilo_address)
-            return node_name
-        except Exception as ex:
-            LOG.error('Unsupported installer platform.')
-            raise ex
-
-    def get_address_of_node(self, server_name=None, server=None):
-        if not (server_name or server):
-            raise Exception('Either server_name or server needs to be given')
-        if server_name:
-            try:
-                for server in self.novacl.servers.list():
-                    if server.name == server_name:
-                        return server.addresses['ctlplane'][0]['addr']
-            except Exception as ex:
-                LOG.error('Unsupported installer platform.')
-                raise ex
-        if server:
-            return server.addresses['ctlplane'][0]['addr']
-
-
-def main():
-    main = TripleOManager()
-    main.start()
-
-if __name__ == '__main__':
-    main()
diff --git a/odl-pipeline/lib/utils/node_manager.py b/odl-pipeline/lib/utils/node_manager.py
index d11065f..8a320ed 100755 (executable)
@@ -17,14 +17,15 @@ class NodeManager(object):
     primary_controller = None
 
     def __init__(self, config=None):
-        if config:
+        if config is not None:
             for (node_name, node_config) in config.iteritems():
                 self.add_node(node_name, node_config)
 
     def add_node(self, node_name, node_config):
         from node import Node
         if not node_config.get('address'):
-            node_config['address'] = self.get_address_of_node(node_name)
+            raise NodeManagerException("IP address missing from node_config:"
+                                       " {}".format(node_config))
         node = Node(node_name, dict=node_config)
         self.env_nodes.append(node)
         self.env_node_dict[node_name] = node
@@ -41,3 +42,8 @@ class NodeManager(object):
         if node not in cls.env_nodes:
             cls.env_nodes.append(node)
         SshUtil.gen_ssh_config(cls.env_nodes)
+
+
+class NodeManagerException(Exception):
+    def __init__(self, value):
+        self.value = value
diff --git a/odl-pipeline/lib/utils/processutils.py b/odl-pipeline/lib/utils/processutils.py
index b5aecb3..2abb88a 100755 (executable)
@@ -226,7 +226,7 @@ def execute(cmd, **kwargs):
                     stderr=sanitized_stderr,
                     cmd=(' '.join(cmd)) if isinstance(cmd, list) else cmd)
             (stdout, stderr) = result
-            return (stdout, (stderr, _returncode))
+            return stdout, (stderr, _returncode)
         except ProcessExecutionError:
             raise
         finally:
diff --git a/odl-pipeline/lib/utils/service.py b/odl-pipeline/lib/utils/service.py
index 39cdce5..cf46872 100755 (executable)
@@ -13,6 +13,7 @@ import argparse
 import traceback
 from utils_log import LOG, LOG_PATH
 from abc import abstractmethod
+from ssh_util import SSH_CONFIG
 
 
 class Service(object):
@@ -31,6 +32,8 @@ class Service(object):
         parser = self._create_cli_parser()
         sys_args = parser.parse_args()
         config = self.read_config(sys_args)
+        if sys_args.ssh_key_file:
+            SSH_CONFIG['ID_RSA_PATH'] = sys_args.ssh_key_file
         self.run(sys_args, config)
 
     @abstractmethod
@@ -49,6 +52,9 @@ class Service(object):
         #                     required=False)
         # parser.add_argument('--boolean', help="",
         #                     required=False, action='store_true')
+        parser.add_argument('--ssh-key-file',
+                            help="SSH private key file to use",
+                            required=False)
         return self.create_cli_parser(parser)
 
     def read_config(self, sys_args):
diff --git a/odl-pipeline/lib/utils/shutil.py b/odl-pipeline/lib/utils/shutil.py
index 40e2aba..5f6d482 100755 (executable)
@@ -17,7 +17,7 @@ class shutil():
     classdocs
     '''
     @staticmethod
-    def mkdir_if_not_exsist(path):
+    def mkdir_if_not_exist(path):
         if not path:
             raise Exception('Path should not be empty.')
         putils.execute(["mkdir", "-p", path])
diff --git a/odl-pipeline/lib/utils/ssh_util.py b/odl-pipeline/lib/utils/ssh_util.py
index e70aed3..635a718 100755 (executable)
@@ -33,4 +33,4 @@ class SshUtil(object):
 
     @staticmethod
     def get_id_rsa():
-        return (SSH_CONFIG['ID_RSA_PATH'])
+        return SSH_CONFIG['ID_RSA_PATH']
diff --git a/odl-pipeline/lib/utils/tripleo_helper.py b/odl-pipeline/lib/utils/tripleo_helper.py
new file mode 100644 (file)
index 0000000..702e811
--- /dev/null
@@ -0,0 +1,53 @@
+import re
+import processutils
+from processutils import execute
+from utils.node import Node
+
+
+class TripleoHelper():
+
+    @staticmethod
+    def find_overcloud_ips():
+        try:
+            res, _ = TripleoHelper.get_undercloud().execute(
+                "'source /home/stack/stackrc; nova list'",
+                shell=True)
+        except processutils.ProcessExecutionError as e:
+            raise TripleOHelperException(
+                "Error unable to issue nova list "
+                "on undercloud.  Please verify "
+                "undercloud is up.  Full error: {"
+                "}".format(e.message))
+        return re.findall('ctlplane=([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)', res)
+
+    @staticmethod
+    def get_virtual_node_name_from_mac(mac):
+        vnode_names, _ = execute('virsh list|awk \'{print '
+                                 '$2}\'', shell=True)
+        for node in vnode_names.split('\n'):
+            if 'baremetal' in node:
+                admin_net_mac, _ = execute(
+                    'virsh domiflist %s |grep admin |awk \'{print $5}\''
+                    % node, shell=True)
+                if admin_net_mac.replace('\n', '') == mac:
+                    return node
+        raise Exception('Could not find corresponding virtual node for MAC: %s'
+                        % mac)
+
+    @staticmethod
+    def get_undercloud_ip():
+        out, _ = execute('virsh domifaddr undercloud', shell=True)
+        return re.findall('([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)', out)[0]
+
+    @staticmethod
+    def get_undercloud():
+        return Node('undercloud', address=TripleoHelper.get_undercloud_ip(),
+                    user='stack')
+
+
+class TripleOHelperException(Exception):
+    def __init__(self, value):
+        self.value = value
+
+    def __str__(self):
+        return self.value
diff --git a/odl-pipeline/lib/utils/utils_log.py b/odl-pipeline/lib/utils/utils_log.py
index e49434c..9d7648f 100755 (executable)
@@ -11,6 +11,7 @@ import logging
 import datetime
 import os
 import sys
+import types
 
 LOG = logging.getLogger(__name__)
 LOG_LEVEL = logging.DEBUG
@@ -35,7 +36,10 @@ def log_enter_exit(func):
                    'args': args,
                    'kwargs': kwargs})
         start = datetime.datetime.now()
-        ret = func(self, *args, **kwargs)
+        if isinstance(func, types.FunctionType):
+            ret = func(*args, **kwargs)
+        else:
+            ret = func(self, *args, **kwargs)
         end = datetime.datetime.now()
         LOG.debug(("Exiting %(cls)s.%(method)s. "
                    "Spent %(duration)s sec. "
diff --git a/odl-pipeline/lib/utils/utils_yaml.py b/odl-pipeline/lib/utils/utils_yaml.py
index f9513b8..b9357f6 100755 (executable)
@@ -12,9 +12,9 @@ import yaml
 
 def write_dict_to_yaml(config, path):
     with open(path, 'w+') as f:
-        yaml.dump(config, f, default_flow_style=False)
+        yaml.safe_dump(config, f, default_flow_style=False)
 
 
 def read_dict_from_yaml(path):
     with open(path, 'r') as f:
-        return yaml.load(f)
+        return yaml.safe_load(f)