adding odl-pipeline 21/25621/7
author Niko Hermanns <nikolas.hermanns@ericsson.com>
Wed, 30 Nov 2016 10:59:44 +0000 (11:59 +0100)
committer Nikolas Hermanns <nikolas.hermanns@ericsson.com>
Tue, 13 Dec 2016 10:26:00 +0000 (11:26 +0100)
Change-Id: I1c08883f0d68a61ce9e10c5596aec1a259eed71f
Signed-off-by: Nikolas Hermanns <nikolas.hermanns@ericsson.com>
35 files changed:
.gitignore
odl-pipeline/lib/deployment_cloner.sh [new file with mode: 0755]
odl-pipeline/lib/deployment_cloner/__init__.py [new file with mode: 0755]
odl-pipeline/lib/deployment_cloner/deployment_cloner.py [new file with mode: 0755]
odl-pipeline/lib/flash-all-bridges.sh [new file with mode: 0644]
odl-pipeline/lib/odl_reinstaller.sh [new file with mode: 0755]
odl-pipeline/lib/odl_reinstaller/__init__.py [new file with mode: 0755]
odl-pipeline/lib/odl_reinstaller/install_odl.pp [new file with mode: 0755]
odl-pipeline/lib/odl_reinstaller/odl_reinstaller.py [new file with mode: 0755]
odl-pipeline/lib/setup_jenkins_networks.sh [new file with mode: 0644]
odl-pipeline/lib/test_environment.sh [new file with mode: 0755]
odl-pipeline/lib/test_environment/__init__.py [new file with mode: 0755]
odl-pipeline/lib/test_environment/test_environment.py [new file with mode: 0755]
odl-pipeline/lib/tripleo_manager.sh [new file with mode: 0755]
odl-pipeline/lib/tripleo_manager/__init__.py [new file with mode: 0755]
odl-pipeline/lib/tripleo_manager/tripleo_manager.py [new file with mode: 0755]
odl-pipeline/lib/utils/__init__.py [new file with mode: 0755]
odl-pipeline/lib/utils/node.py [new file with mode: 0755]
odl-pipeline/lib/utils/node_manager.py [new file with mode: 0755]
odl-pipeline/lib/utils/processutils.py [new file with mode: 0755]
odl-pipeline/lib/utils/service.py [new file with mode: 0755]
odl-pipeline/lib/utils/shutil.py [new file with mode: 0755]
odl-pipeline/lib/utils/ssh_client.py [new file with mode: 0755]
odl-pipeline/lib/utils/ssh_util.py [new file with mode: 0755]
odl-pipeline/lib/utils/utils_log.py [new file with mode: 0755]
odl-pipeline/lib/utils/utils_yaml.py [new file with mode: 0755]
odl-pipeline/templates/ifcfg-enp0s4 [new file with mode: 0644]
odl-pipeline/templates/ifcfg-enp0s6 [new file with mode: 0644]
odl-pipeline/templates/nets/br-admin.xml [new file with mode: 0644]
odl-pipeline/templates/nets/br-private.xml [new file with mode: 0644]
odl-pipeline/templates/nets/br-public.xml [new file with mode: 0644]
odl-pipeline/templates/nets/br-storage.xml [new file with mode: 0644]
odl-pipeline/templates/nodes/baremetalX.xml [new file with mode: 0644]
odl-pipeline/templates/nodes/jenkins_slave.xml [new file with mode: 0644]
odl-pipeline/templates/nodes/undercloud.xml [new file with mode: 0644]

diff --git a/.gitignore b/.gitignore
index 33a0451..332a121 100644 (file)
@@ -1,5 +1,15 @@
+*.project
+*.pydevproject
+*.pyc
 *~
 .*.sw?
 /docs_build/
 /docs_output/
 /releng/
+*.tar.gz
+*.qcow2
+*.img
+odl-pipeline/build/*
+odl-pipeline/trash/*
+odl-pipeline/lib/tmp
+odl-pipeline/disks/*
diff --git a/odl-pipeline/lib/deployment_cloner.sh b/odl-pipeline/lib/deployment_cloner.sh
new file mode 100755 (executable)
index 0000000..9e6afd0
--- /dev/null
@@ -0,0 +1,6 @@
+#!/bin/bash
+DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
+set -e
+export PYTHONPATH=$PYTHONPATH:$DIR
+mkdir -p "$DIR/tmp"
+python "$DIR/deployment_cloner/deployment_cloner.py" "$@"
diff --git a/odl-pipeline/lib/deployment_cloner/__init__.py b/odl-pipeline/lib/deployment_cloner/__init__.py
new file mode 100755 (executable)
index 0000000..e69de29
diff --git a/odl-pipeline/lib/deployment_cloner/deployment_cloner.py b/odl-pipeline/lib/deployment_cloner/deployment_cloner.py
new file mode 100755 (executable)
index 0000000..4ba5ee9
--- /dev/null
@@ -0,0 +1,65 @@
+#!/bin/python
+from utils import utils_yaml
+from utils.utils_log import for_all_methods, log_enter_exit
+from utils.service import Service
+from utils.node_manager import NodeManager
+from utils.processutils import execute
+
+
+@for_all_methods(log_enter_exit)
+class DeploymentCloner(Service):
+
+    undercloud_root_dir = '~/DeploymentCloner/'
+
+    def create_cli_parser(self, parser):
+        parser.add_argument('--undercloud-ip', help="ip of undercloud",
+                            required=True)
+        parser.add_argument('--dest-dir',
+                            help="destination directory for the cloned info",
+                            required=True)
+        return parser
+
+    def undercloud_dict(self, undercloud_ip):
+        return {'address': undercloud_ip,
+                'user': 'stack'}
+
+    def run(self, sys_args, config):
+        dest_dir = sys_args.dest_dir if sys_args.dest_dir[-1:] == '/'\
+            else (sys_args.dest_dir + '/')
+        self.node_manager = NodeManager()
+        undercloud = self.node_manager.add_node(
+            'undercloud',
+            self.undercloud_dict(sys_args.undercloud_ip))
+        # copy all files to undercloud
+        undercloud.copy('to', '.', self.undercloud_root_dir)
+        # generate the undercloud yaml
+        undercloud.execute('cd %s; ./tripleo_manager.sh --out ./cloner-info/'
+                           % self.undercloud_root_dir, log_true=True)
+        undercloud.copy('from', dest_dir,
+                        self.undercloud_root_dir + '/cloner-info/')
+        node_yaml_path = dest_dir + '/cloner-info/node.yaml'
+        node_yaml = utils_yaml.read_dict_from_yaml(node_yaml_path)
+        for name, node in node_yaml['servers'].iteritems():
+            node['vNode-name'] = self.get_virtual_node_name_from_mac(
+                node['orig-ctl-mac'])
+        utils_yaml.write_dict_to_yaml(node_yaml, node_yaml_path)
+        # TODO copy qcow and tar it
+
+    def get_virtual_node_name_from_mac(self, mac):
+        vNode_names, _ = execute('virsh list|awk \'{print $2}\'', shell=True)
+        for node in vNode_names.split('\n'):
+            if 'baremetal' in node:
+                admin_net_mac, _ = execute(
+                    'virsh domiflist %s |grep admin |awk \'{print $5}\''
+                    % node, shell=True)
+                if admin_net_mac.replace('\n', '') == mac:
+                    return node
+        raise Exception('Could not find corresponding virtual node for MAC: %s'
+                        % mac)
+
+
+def main():
+    main = DeploymentCloner()
+    main.start()
+
+if __name__ == '__main__':
+    main()
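
For reference, the wrapper script above is the intended entry point; a typical invocation (the address and directory are placeholders, not values from this change) looks like:

  ./deployment_cloner.sh --undercloud-ip <undercloud-ip> --dest-dir <dest-dir>
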
diff --git a/odl-pipeline/lib/flash-all-bridges.sh b/odl-pipeline/lib/flash-all-bridges.sh
new file mode 100644 (file)
index 0000000..db9d50d
--- /dev/null
@@ -0,0 +1,5 @@
+#!/bin/bash
+export bridges="admin|private|public|storage"
+for br in $(ifconfig |grep -v br-external |grep "^br" |grep -E "$bridges" |awk '{print $1}');do
+  sudo ip addr flush dev "$br";
+done
diff --git a/odl-pipeline/lib/odl_reinstaller.sh b/odl-pipeline/lib/odl_reinstaller.sh
new file mode 100755 (executable)
index 0000000..0c3c8c5
--- /dev/null
@@ -0,0 +1,6 @@
+#!/bin/bash
+DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
+set -e
+export PYTHONPATH=$PYTHONPATH:$DIR
+mkdir -p "$DIR/tmp"
+python "$DIR/odl_reinstaller/odl_reinstaller.py" "$@"
diff --git a/odl-pipeline/lib/odl_reinstaller/__init__.py b/odl-pipeline/lib/odl_reinstaller/__init__.py
new file mode 100755 (executable)
index 0000000..e69de29
diff --git a/odl-pipeline/lib/odl_reinstaller/install_odl.pp b/odl-pipeline/lib/odl_reinstaller/install_odl.pp
new file mode 100755 (executable)
index 0000000..97a00bd
--- /dev/null
@@ -0,0 +1,15 @@
+include ::tripleo::packages
+
+if count(hiera('ntp::servers')) > 0 {
+  include ::ntp
+}
+
+class {"opendaylight":
+  extra_features => any2array(hiera('opendaylight::extra_features', 'odl-netvirt-openstack')),
+  odl_rest_port  => hiera('opendaylight::odl_rest_port'),
+  enable_l3      => hiera('opendaylight::enable_l3', 'no'),
+  #tarball_url    =>  'file:///home/heat-admin/distribution-karaf-0.6.0-SNAPSHOT.tar.gz',
+  #unitfile_url   =>  'file:///home/heat-admin/opendaylight-unitfile.tar.gz' 
+}
+
+
diff --git a/odl-pipeline/lib/odl_reinstaller/odl_reinstaller.py b/odl-pipeline/lib/odl_reinstaller/odl_reinstaller.py
new file mode 100755 (executable)
index 0000000..190abcf
--- /dev/null
@@ -0,0 +1,86 @@
+#!/bin/python
+import os
+from utils.utils_log import LOG, for_all_methods, log_enter_exit
+from utils.service import Service
+from utils.node_manager import NodeManager
+from utils.ssh_util import SSH_CONFIG
+
+
+@for_all_methods(log_enter_exit)
+class ODLReInstaller(Service):
+
+    def run(self, sys_args, config):
+        SSH_CONFIG['ID_RSA_PATH'] = sys_args.id_rsa
+        # copy ODL to all nodes where it needs to be copied
+        self.nodes = NodeManager(config['servers']).get_nodes()
+        for node in self.nodes:
+            LOG.info('Disconnecting OpenVSwitch from controller on node %s'
+                     % node.name)
+            node.execute('ovs-vsctl del-controller br-int', as_root=True)
+
+        for node in self.nodes:
+            if 'ODL' in node.config:
+                tar_tmp_path = '/tmp/odl-artifact/'
+                if node.config['ODL'].get('active'):
+                    tarball_name = os.path.basename(sys_args.odl_artifact)
+                    node.copy('to', sys_args.odl_artifact,
+                              '/tmp/odl-artifact/' + tarball_name)
+                    node.execute('rm -rf /opt/opendaylight/*', as_root=True)
+                    node.execute('mkdir -p /opt/opendaylight', as_root=True)
+                    LOG.info('Extracting %s to /opt/opendaylight/ on node %s'
+                             % (tarball_name, node.name))
+                    node.execute('tar -zxf %s --strip-components=1 -C '
+                                 '/opt/opendaylight/'
+                                 % (tar_tmp_path + tarball_name), as_root=True)
+                    node.execute('chown -R odl:odl /opt/opendaylight',
+                                 as_root=True)
+                    node.execute('rm -rf ' + tar_tmp_path, as_root=True)
+                    LOG.info('Installing and starting OpenDaylight on node %s'
+                             % node.name)
+                    node.copy('to', 'odl_reinstaller/install_odl.pp',
+                              tar_tmp_path)
+                    node.execute('puppet apply --modulepath='
+                                 '/etc/puppet/modules/ %sinstall_odl.pp '
+                                 '--verbose --debug --trace '
+                                 '--detailed-exitcodes'
+                                 % tar_tmp_path, check_exit_code=[2],
+                                 as_root=True)
+        # --detailed-exitcodes: Provide extra information about the run via
+        # exit codes. If enabled, 'puppet apply' will use the following exit
+        # codes:
+        # 0: The run succeeded with no changes or failures; the system was
+        #    already in the desired state.
+        # 1: The run failed.
+        # 2: The run succeeded, and some resources were changed.
+        # 4: The run succeeded, and some resources failed.
+        # 6: The run succeeded, and included both changes and failures.
+
+        for node in self.nodes:
+            LOG.info('Connecting OpenVSwitch to controller on node %s'
+                     % node.name)
+            ovs_controller = node.config.get('ovs-controller')
+            if ovs_controller:
+                node.execute('ovs-vsctl set-controller br-int %s'
+                             % ovs_controller, as_root=True)
+
+    def create_cli_parser(self, parser):
+        parser.add_argument('-c', '--config',
+                            help=("Give the path to the node config file "
+                                  "(node.yaml)"),
+                            required=True)
+        parser.add_argument('--odl-artifact',
+                            help=("Path to Opendaylight tarball"),
+                            required=True)
+        parser.add_argument('--id-rsa',
+                            help=("Path to the identity file which can "
+                                  "be used to connect to the overcloud"),
+                            required=True)
+        return parser
+
+
+def main():
+    main = ODLReInstaller()
+    main.start()
+
+if __name__ == '__main__':
+    main()
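
For reference, the reinstaller is driven through the wrapper script above; a typical invocation (paths are placeholders; node.yaml is the file produced by the deployment cloner, and the key it copies into undercloud_ssh/ is one candidate for --id-rsa) looks like:

  ./odl_reinstaller.sh -c <cloner-info>/node.yaml --odl-artifact <odl-tarball>.tar.gz --id-rsa <cloner-info>/undercloud_ssh/id_rsa
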
diff --git a/odl-pipeline/lib/setup_jenkins_networks.sh b/odl-pipeline/lib/setup_jenkins_networks.sh
new file mode 100644 (file)
index 0000000..d74b62e
--- /dev/null
@@ -0,0 +1,8 @@
+#!/bin/bash
+set -e
+cd "$( dirname "${BASH_SOURCE[0]}" )"
+sudo ifdown enp0s4 >> /dev/null 2>&1 || true
+sudo ifdown enp0s6 >> /dev/null 2>&1 || true
+sudo cp ../templates/ifcfg-* /etc/network/interfaces.d/
+sudo ifup enp0s4
+sudo ifup enp0s6
\ No newline at end of file
diff --git a/odl-pipeline/lib/test_environment.sh b/odl-pipeline/lib/test_environment.sh
new file mode 100755 (executable)
index 0000000..56601f4
--- /dev/null
@@ -0,0 +1,6 @@
+#!/bin/bash
+DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
+set -e
+export PYTHONPATH=$PYTHONPATH:$DIR
+mkdir -p "$DIR/tmp"
+python "$DIR/test_environment/test_environment.py" "$@"
diff --git a/odl-pipeline/lib/test_environment/__init__.py b/odl-pipeline/lib/test_environment/__init__.py
new file mode 100755 (executable)
index 0000000..e69de29
diff --git a/odl-pipeline/lib/test_environment/test_environment.py b/odl-pipeline/lib/test_environment/test_environment.py
new file mode 100755 (executable)
index 0000000..481f38d
--- /dev/null
@@ -0,0 +1,131 @@
+#!/bin/python
+import os
+from utils.utils_log import LOG, for_all_methods, log_enter_exit
+from utils.service import Service
+from utils.processutils import execute
+from utils import utils_yaml
+from utils.shutil import shutil
+MAX_NODES = 5
+
+
+@for_all_methods(log_enter_exit)
+class TestEnvironment(Service):
+
+    NODE_NAME = 'baremetal'
+    TEMPLATES = '../templates'
+    BRIDGES = ['admin', 'private', 'public', 'storage']
+
+    def run(self, sys_args, config):
+        self.BUILD_DIR = '../build/apex-%s' % sys_args.env_number
+        self.env = sys_args.env_number
+        self.cleanup()
+        if sys_args.cleanup:
+            return
+        if not sys_args.cloner_info or not sys_args.snapshot_disks:
+            LOG.error('--cloner-info and --snapshot-disks are required '
+                      'unless --cleanup is given.')
+            exit(1)
+        node_info = utils_yaml.read_dict_from_yaml(sys_args.cloner_info +
+                                                   '/node.yaml')
+        nodes = node_info['servers']
+        number_of_nodes = len(nodes)
+        disk_home = self.BUILD_DIR + '/disks/'
+        shutil.mkdir_if_not_exsist(disk_home)
+        # Create Snapshots
+        for i in range(number_of_nodes):
+            disk_name = '%s%s.qcow2' % (self.NODE_NAME, i)
+            self.create_snapshot('%s/%s' % (sys_args.snapshot_disks,
+                                            disk_name),
+                                 '%s/%s' % (disk_home, disk_name))
+
+        # Create Bridges if not existing
+        for net in self.BRIDGES:
+            bridge_name = '%s-%s' % (net, self.env)
+            if not self.check_if_br_exists(bridge_name):
+                LOG.info('Creating bridge %s' % bridge_name)
+                execute('ovs-vsctl add-br %s' % bridge_name, as_root=True)
+
+        # Create virtual Nodes
+        dom_template = self.TEMPLATES + '/nodes/baremetalX.xml'
+        dom_config = self.BUILD_DIR + '/nodes/baremetalX.xml'
+        shutil.mkdir_if_not_exsist(self.BUILD_DIR + '/nodes/')
+        LOG.info('Creating virtual Nodes')
+        for name, node in nodes.iteritems():
+            orig_node_name = node['vNode-name']
+            node_name = orig_node_name + '-' + self.env
+            LOG.info('Create node %s' % node_name)
+            type = node['type']
+            if type == 'compute':
+                cpu = 4
+                mem = 4
+            elif type == 'controller':
+                cpu = 8
+                mem = 10
+            else:
+                raise Exception('Unknown node type! %s' % type)
+            shutil.copy('to', dom_template, dom_config)
+            shutil.replace_string_in_file(dom_config, 'NaMe', node_name)
+            disk_full_path = os.path.abspath('%s/%s.qcow2' % (disk_home,
+                                                              orig_node_name))
+            shutil.replace_string_in_file(dom_config, 'DiSk', disk_full_path)
+            shutil.replace_string_in_file(dom_config, 'vCpU', str(cpu))
+            shutil.replace_string_in_file(dom_config, 'MeMoRy', str(mem))
+            shutil.replace_string_in_file(dom_config, 'InDeX', self.env)
+
+            execute('virsh define ' + dom_config)
+            execute('virsh start ' + node_name)
+
+            cores_per_environment = 8
+            cores = '%s-%s' % (int(self.env) * 8, int(self.env) * 8 +
+                               cores_per_environment - 1)
+            LOG.info('Pinning vCPUs of node %s to cores %s' % (node_name, cores))
+            for i in range(cpu):
+                execute('virsh vcpupin %(node)s %(nodes_cpu)s %(host_cpu)s' %
+                        {'node': node_name,
+                         'nodes_cpu': i,
+                         'host_cpu': cores})
+
+    def check_if_br_exists(self, bridge):
+        _, (_, rc) = execute('ovs-vsctl br-exists %s' % bridge,
+                             check_exit_code=[0, 2], as_root=True)
+        return True if rc == 0 else False
+
+    def create_snapshot(self, orig, path):
+        LOG.info('Creating snapshot of %s in %s' % (orig, path))
+        execute('qemu-img create -f qcow2 -b %s %s' % (orig, path),
+                as_root=True)
+
+    def cleanup(self):
+        for i in range(MAX_NODES):
+            rv, (_, rc) = execute('virsh destroy %(name)s%(i)s-%(env)s'
+                                  % {'i': i, 'env': self.env,
+                                     'name': self.NODE_NAME},
+                                  check_exit_code=[0, 1])
+            if rc == 0:
+                LOG.info(rv)
+            rv, (_, rc) = execute('virsh undefine %(name)s%(i)s-%(env)s'
+                                  % {'i': i, 'env': self.env,
+                                     'name': self.NODE_NAME},
+                                  check_exit_code=[0, 1])
+            if rc == 0:
+                LOG.info(rv)
+        execute('rm -rf ' + self.BUILD_DIR)
+
+    def create_cli_parser(self, parser):
+        parser.add_argument('--env-number', help="Number of the environment",
+                            required=True)
+        parser.add_argument('--cloner-info', help="Path to the cloner-info",
+                            required=False)
+        parser.add_argument('--snapshot-disks', help="Path to the snapshots",
+                            required=False)
+        parser.add_argument('--cleanup', help="Only Cleanup",
+                            required=False, action='store_true')
+        return parser
+
+
+def main():
+    main = TestEnvironment()
+    main.start()
+
+if __name__ == '__main__':
+    main()
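
For reference, the wrapper script above drives this tool; typical invocations (the environment number and paths are placeholders) look like:

  ./test_environment.sh --env-number <N> --cloner-info <cloner-info-dir> --snapshot-disks <disks-dir>
  ./test_environment.sh --env-number <N> --cleanup
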
diff --git a/odl-pipeline/lib/tripleo_manager.sh b/odl-pipeline/lib/tripleo_manager.sh
new file mode 100755 (executable)
index 0000000..f4f10cf
--- /dev/null
@@ -0,0 +1,9 @@
+#!/bin/bash
+DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
+if [ -e ~/stackrc ];then
+  . ~/stackrc
+fi
+set -e
+export PYTHONPATH=$PYTHONPATH:$DIR
+mkdir -p "$DIR/tmp"
+python "$DIR/tripleo_manager/tripleo_manager.py" "$@"
diff --git a/odl-pipeline/lib/tripleo_manager/__init__.py b/odl-pipeline/lib/tripleo_manager/__init__.py
new file mode 100755 (executable)
index 0000000..e69de29
diff --git a/odl-pipeline/lib/tripleo_manager/tripleo_manager.py b/odl-pipeline/lib/tripleo_manager/tripleo_manager.py
new file mode 100755 (executable)
index 0000000..456564c
--- /dev/null
@@ -0,0 +1,179 @@
+import os
+import yaml
+from novaclient.client import Client as nova
+from novaclient import api_versions
+import ironicclient.client
+from neutronclient.v2_0.client import Client as neutron
+from keystoneauth1 import identity
+from keystoneauth1 import session
+
+from utils.utils_log import log_enter_exit, for_all_methods, LOG
+from utils.service import Service
+from utils.shutil import shutil
+from utils.node_manager import NodeManager
+
+
+@for_all_methods(log_enter_exit)
+class TripleOManager(Service):
+
+    def __init__(self):
+        self.auth = None
+        self.session = None
+        self.novacl = self._get_nova_client()
+        self.ironiccl = self._get_ironic_client()
+        self.neutroncl = self._get_neutron_client()
+
+    def create_cli_parser(self, parser):
+        parser.add_argument('--out', help="output directory for the env info",
+                            required=True)
+        return parser
+
+    def run(self, sys_args, config):
+        self.gen_node_info()
+        self.prepare_for_ci_pipeline()
+        self.gen_env_info(sys_args, config)
+        self.gen_virtual_deployment_info(sys_args, config)
+
+    def gen_virtual_deployment_info(self, sys_args, config):
+        pass
+
+    def prepare_for_ci_pipeline(self):
+        node_manager = NodeManager(config=self.node_info['servers'])
+        for node in node_manager.get_nodes():
+
+            # Check if ODL runs on this node
+            self.node_info['servers'][node.name]['ODL'] = {}
+            rv, _ = node.execute('ps aux |grep -v grep |grep karaf',
+                                 as_root=True, check_exit_code=[0, 1])
+            if 'java' in rv:
+                self.node_info['servers'][node.name]['ODL']['active'] = True
+
+            if (node.is_dir('/opt/opendaylight') or
+                    node.is_file('/opt/opendaylight-was-there')):
+                self.node_info['servers'][node.name]['ODL']['dir_exsist'] = \
+                    True
+                # Remove existing ODL version
+                node.execute('touch /opt/opendaylight-was-there', as_root=True)
+                node.execute('rm -rf /opt/opendaylight', as_root=True)
+
+            # Store ovs controller info
+            rv, _ = node.execute('ovs-vsctl get-controller br-int',
+                                 as_root=True)
+            self.node_info['servers'][node.name]['ovs-controller'] = \
+                rv.replace('\n', '')
+
+            # Disconnect ovs
+            node.execute('ovs-vsctl del-controller br-int', as_root=True)
+
+    def gen_env_info(self, sys_args, config):
+        shutil.mkdir_if_not_exsist(sys_args.out)
+        self.write_out_yaml_config(self.node_info, sys_args.out + '/node.yaml')
+
+        # copy ssh key
+        shutil.copy('to', '/home/stack/.ssh/id_rsa',
+                    sys_args.out + '/undercloud_ssh/')
+        shutil.copy('to', '/home/stack/.ssh/id_rsa.pub',
+                    sys_args.out + '/undercloud_ssh/')
+        # copy rc files
+        shutil.copy('to', '/home/stack/stackrc', sys_args.out)
+        shutil.copy('to', '/home/stack/overcloudrc', sys_args.out)
+
+    def gen_node_info(self):
+        for network in self.neutroncl.list_networks()['networks']:
+            if network['name'] == 'ctlplane':
+                ctlplane_id = network['id']
+        if hasattr(self, 'node_info') and self.node_info:
+            return self.node_info
+        self.node_info = {'servers': {}}
+        for server in self.novacl.servers.list():
+            if 'overcloud-controller' in server.name:
+                type = 'controller'
+            elif 'overcloud-novacompute' in server.name:
+                type = 'compute'
+            else:
+                raise Exception('Unknown type (controller/compute) %s '
+                                % server.name)
+            ctlplane_mac = None
+            for interface in server.interface_list():
+                if interface.net_id == ctlplane_id:
+                    ctlplane_mac = interface.mac_addr
+            if not ctlplane_mac:
+                raise Exception('Could not find mac address for ctl-plane for '
+                                'server %s' % server.name)
+            self.node_info['servers'][server.name] = {
+                'address': self.get_address_of_node(server=server),
+                'user': 'heat-admin',
+                'type': type,
+                'orig-ctl-mac': ctlplane_mac}
+
+    def write_out_yaml_config(self, config, path):
+        with open(path, 'w') as f:
+            yaml.dump(config, f, default_flow_style=False)
+
+    def _check_credentials(self):
+        for cred in ['OS_USERNAME', 'OS_PASSWORD',
+                     'OS_TENANT_NAME', 'OS_AUTH_URL']:
+            if not os.environ.get(cred):
+                raise Exception('Use export %s=...' % cred)
+
+    def create_auth(self):
+        self._check_credentials()
+        if not self.auth:
+            self.auth = identity.Password(
+                auth_url=os.environ['OS_AUTH_URL'],
+                username=os.environ['OS_USERNAME'],
+                password=os.environ['OS_PASSWORD'],
+                project_name=os.environ['OS_TENANT_NAME'])
+        if not self.session:
+            self.session = session.Session(auth=self.auth)
+
+    def _get_nova_client(self):
+        self.create_auth()
+        return nova(api_versions.APIVersion("2.0"), session=self.session)
+
+    def _get_ironic_client(self):
+        self.create_auth()
+        return ironicclient.client.get_client(1, session=self.session)
+
+    def _get_neutron_client(self):
+        self.create_auth()
+        return neutron(session=self.session)
+
+    def get_node_name_by_ilo_address(self, ilo_address):
+        try:
+            node_name = None
+            for node in self.ironiccl.node.list():
+                nova_uuid = node.instance_uuid
+                if ilo_address == self.ironiccl.node.get_by_instance_uuid(
+                        nova_uuid).driver_info['ilo_address']:
+                    node_name = self.novacl.servers.find(id=nova_uuid).name
+                    break
+            if not node_name:
+                raise Exception('Cannot get nova instance for ilo address %s'
+                                % ilo_address)
+            return node_name
+        except Exception as ex:
+            LOG.error('Unsupported installer platform.')
+            raise ex
+
+    def get_address_of_node(self, server_name=None, server=None):
+        if not (server_name or server):
+            raise Exception('Either server_name or server needs to be given')
+        if server_name:
+            try:
+                for server in self.novacl.servers.list():
+                    if server.name == server_name:
+                        return server.addresses['ctlplane'][0]['addr']
+            except Exception as ex:
+                LOG.error('Unsupported installer platform.')
+                raise ex
+        if server:
+            return server.addresses['ctlplane'][0]['addr']
+
+
+def main():
+    main = TripleOManager()
+    main.start()
+
+if __name__ == '__main__':
+    main()
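
For reference, this tool is what the deployment cloner runs on the undercloud (./tripleo_manager.sh --out ./cloner-info/). A sketch of the node_info dict it dumps into node.yaml, with illustrative values only (the real values come from Nova/Neutron and from the nodes themselves; the cloner later adds a vNode-name key per server):

  {'servers': {
      'overcloud-controller-0': {
          'address': '192.0.2.10',                # ctlplane address (placeholder)
          'user': 'heat-admin',
          'type': 'controller',
          'orig-ctl-mac': '00:11:22:33:44:55',    # ctlplane MAC (placeholder)
          'ODL': {'active': True},                # set when a karaf/java process was found
          'ovs-controller': 'tcp:192.0.2.5:6653'  # output of ovs-vsctl get-controller br-int
      }}}
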
diff --git a/odl-pipeline/lib/utils/__init__.py b/odl-pipeline/lib/utils/__init__.py
new file mode 100755 (executable)
index 0000000..e69de29
diff --git a/odl-pipeline/lib/utils/node.py b/odl-pipeline/lib/utils/node.py
new file mode 100755 (executable)
index 0000000..c3c2005
--- /dev/null
@@ -0,0 +1,87 @@
+#
+# Copyright (c) 2015 All rights reserved
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+#
+from ssh_client import SSHClient
+from ssh_util import SshUtil
+from utils_log import log_enter_exit, for_all_methods
+
+
+@for_all_methods(log_enter_exit)
+class Node(object):
+
+    def __init__(self, name, address=None, port=None,
+                 user=None, password=None, jump=None, dict=None):
+        self.name = name
+        self.address = address
+        self.jump = jump
+        self.user = user
+        self.port = port
+        self.password = password
+        if dict:
+            self.read_from_dic(dict)
+        self.sshc = SSHClient(self)
+        self.has_access = False
+        self.config = dict
+
+    def read_from_dic(self, dic):
+        allowed_keys = ['address', 'user', 'jump', 'password', 'port']
+        for (key, value) in dic.iteritems():
+            if key in allowed_keys:
+                setattr(self, key, value)
+
+    def ping(self, ip):
+        self.execute(['ping', '-c', '1', ip])
+
+    def execute(self, cmd, **kwargs):
+        return self.sshc.execute(cmd, **kwargs)
+
+    def chown(self, user, path):
+        self.execute('chown -R %(user)s:%(user)s %(path)s' % {'user': user,
+                                                              'path': path},
+                     as_root=True)
+
+    def is_dir(self, path):
+        rv, _ = self.execute('test -d %s && echo yes' % path,
+                             check_exit_code=[0, 1])
+        if rv == 'yes\n':
+            return True
+        else:
+            return False
+
+    def is_file(self, path):
+        rv, _ = self.execute('test -f %s && echo yes' % path,
+                             check_exit_code=[0, 1])
+        if rv == 'yes\n':
+            return True
+        else:
+            return False
+
+    def reboot(self):
+        self.execute('reboot', as_root=True, check_exit_code=[255])
+
+    def create_path_if_not_exsist(self, path, **kwargs):
+        return self.sshc.execute('mkdir -p %s' % path, **kwargs)
+
+    def copy(self, direction, local_path, remote_path, **kwargs):
+        return self.sshc.copy(direction, local_path, remote_path, **kwargs)
+
+    def to_ssh_config(self):
+        config = ["Host %s" % self.name,
+                  "    Hostname %s" %
+                  (self.address if self.address else self.name)]
+        if self.jump:
+            config.append("    ProxyCommand ssh -F %(config_path)s "
+                          "-W %%h:%%p %(name)s"
+                          % {'config_path': SshUtil.get_config_file_path(),
+                             'name': self.jump.name})
+        if self.user:
+            config.append("    user %s" % self.user)
+        if self.port:
+            config.append("    port %s" % self.port)
+        return '\n'.join(config)
diff --git a/odl-pipeline/lib/utils/node_manager.py b/odl-pipeline/lib/utils/node_manager.py
new file mode 100755 (executable)
index 0000000..d11065f
--- /dev/null
@@ -0,0 +1,43 @@
+#
+# Copyright (c) 2015 All rights reserved
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+#
+from ssh_util import SshUtil
+
+
+class NodeManager(object):
+
+    env_nodes = []
+    env_node_dict = {}
+    primary_controller = None
+
+    def __init__(self, config=None):
+        if config:
+            for (node_name, node_config) in config.iteritems():
+                self.add_node(node_name, node_config)
+
+    def add_node(self, node_name, node_config):
+        from node import Node
+        if not node_config.get('address'):
+            node_config['address'] = self.get_address_of_node(node_name)
+        node = Node(node_name, dict=node_config)
+        self.env_nodes.append(node)
+        self.env_node_dict[node_name] = node
+        return node
+
+    def get_nodes(self):
+        return self.env_nodes
+
+    def get_node(self, name):
+        return self.env_node_dict[name]
+
+    @classmethod
+    def gen_ssh_config(cls, node):
+        if node not in cls.env_nodes:
+            cls.env_nodes.append(node)
+        SshUtil.gen_ssh_config(cls.env_nodes)
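
A short usage sketch mirroring how odl_reinstaller.py consumes this class (the server entry follows the node.yaml format; the address is a placeholder and SSH access to it is assumed, with lib/ on PYTHONPATH and ./tmp present as the wrapper scripts arrange):

  from utils.node_manager import NodeManager

  servers = {'overcloud-controller-0': {'address': '192.0.2.10',
                                        'user': 'heat-admin'}}
  for node in NodeManager(servers).get_nodes():
      out, _ = node.execute('uname -n')
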
diff --git a/odl-pipeline/lib/utils/processutils.py b/odl-pipeline/lib/utils/processutils.py
new file mode 100755 (executable)
index 0000000..b5aecb3
--- /dev/null
@@ -0,0 +1,233 @@
+#
+# Copyright (c) 2015 All rights reserved
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+#
+import utils_log as log
+import os
+import six
+import re
+import signal
+import subprocess
+from time import sleep
+from threading import Thread
+try:
+    from Queue import Queue
+except ImportError:
+    from queue import Queue  # python 3.x
+
+LOG = log.LOG
+LOG_LEVEL = log.LOG_LEVEL
+
+
+def _subprocess_setup():
+    # Python installs a SIGPIPE handler by default. This is usually not what
+    # non-Python subprocesses expect.
+    signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+
+# NOTE(flaper87): The following globals are used by `mask_password`
+_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', 'admin_password']
+
+# NOTE(ldbragst): Let's build a list of regex objects using the list of
+# _SANITIZE_KEYS we already have. This way, we only have to add the new key
+# to the list of _SANITIZE_KEYS and we can generate regular expressions
+# for XML and JSON automatically.
+_SANITIZE_PATTERNS_2 = []
+_SANITIZE_PATTERNS_1 = []
+
+
+def mask_password(message, secret="***"):
+    """Replace password with 'secret' in message.
+
+    :param message: The string which includes security information.
+    :param secret: value with which to replace passwords.
+    :returns: The unicode value of message with the password fields masked.
+
+    For example:
+
+    >>> mask_password("'adminPass' : 'aaaaa'")
+    "'adminPass' : '***'"
+    >>> mask_password("'admin_pass' : 'aaaaa'")
+    "'admin_pass' : '***'"
+    >>> mask_password('"password" : "aaaaa"')
+    '"password" : "***"'
+    >>> mask_password("'original_password' : 'aaaaa'")
+    "'original_password' : '***'"
+    >>> mask_password("u'original_password' :   u'aaaaa'")
+    "u'original_password' :   u'***'"
+    """
+    try:
+        message = six.text_type(message)
+    except UnicodeDecodeError:
+        # NOTE(jecarey): Temporary fix to handle cases where message is a
+        # byte string.   A better solution will be provided in Kilo.
+        pass
+
+    # NOTE(ldbragst): Check to see if anything in message contains any key
+    # specified in _SANITIZE_KEYS, if not then just return the message since
+    # we don't have to mask any passwords.
+    if not any(key in message for key in _SANITIZE_KEYS):
+        return message
+
+    substitute = r'\g<1>' + secret + r'\g<2>'
+    for pattern in _SANITIZE_PATTERNS_2:
+        message = re.sub(pattern, substitute, message)
+
+    substitute = r'\g<1>' + secret
+    for pattern in _SANITIZE_PATTERNS_1:
+        message = re.sub(pattern, substitute, message)
+
+    return message
+
+
+class ProcessExecutionError(Exception):
+    def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None,
+                 description=None):
+        self.exit_code = exit_code
+        self.stderr = stderr
+        self.stdout = stdout
+        self.cmd = cmd
+        self.description = description
+
+        if description is None:
+            description = "Unexpected error while running command."
+        if exit_code is None:
+            exit_code = '-'
+        message = ("%s\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r"
+                   % (description, cmd, exit_code, stdout, stderr))
+        super(ProcessExecutionError, self).__init__(message)
+
+
+def enqueue_output(out, queue):
+    for line in iter(out.readline, b''):
+        queue.put(line)
+    queue.put("##Finished##")
+    out.close()
+
+
+def execute(cmd, **kwargs):
+    """Helper method to shell out and execute a command through subprocess.
+
+    Allows optional retry.
+
+    :param cmd:             Passed to subprocess.Popen.
+    :type cmd:              list - will be converted if needed
+    :param process_input:   Send to opened process.
+    :type process_input:    string
+    :param check_exit_code: Single bool, int, or list of allowed exit
+                            codes.  Defaults to [0].  Raise
+                            :class:`ProcessExecutionError` unless
+                            program exits with one of these code.
+    :type check_exit_code:  boolean, int, or [int]
+    :param delay_on_retry:  True | False. Defaults to True. If set to True,
+                            wait a short amount of time before retrying.
+    :type delay_on_retry:   boolean
+    :param attempts:        How many times to retry cmd.
+    :type attempts:         int
+    :param run_as_root:     True | False. Defaults to False. If set to True
+                            (or if as_root is passed), the command is
+                            prefixed with 'sudo'.
+    :type run_as_root:      boolean
+    :param shell:           whether or not there should be a shell used to
+                            execute this command.
+    :type shell:            boolean
+    :param loglevel:        log level for execute commands.
+    :type loglevel:         int.  (Should be logging.DEBUG or logging.INFO)
+    :param non_blocking:    Execute in background.
+    :type non_blocking:     boolean
+    :returns:               (stdout, (stderr, returncode)) from process
+                            execution
+    :raises:                :class:`UnknownArgumentError` on
+                            receiving unknown arguments
+    :raises:                :class:`ProcessExecutionError`
+    """
+    process_input = kwargs.pop('process_input', None)
+    check_exit_code = kwargs.pop('check_exit_code', [0])
+    ignore_exit_code = False
+    attempts = kwargs.pop('attempts', 1)
+    run_as_root = kwargs.pop('run_as_root', False) or kwargs.pop('as_root',
+                                                                 False)
+    shell = kwargs.pop('shell', False)
+    loglevel = kwargs.pop('loglevel', LOG_LEVEL)
+    non_blocking = kwargs.pop('non_blocking', False)
+
+    if not isinstance(cmd, list):
+        cmd = cmd.split(' ')
+
+    if run_as_root:
+        cmd = ['sudo'] + cmd
+    if shell:
+        cmd = ' '.join(cmd)
+    if isinstance(check_exit_code, bool):
+        ignore_exit_code = not check_exit_code
+        check_exit_code = [0]
+    elif isinstance(check_exit_code, int):
+        check_exit_code = [check_exit_code]
+
+    if kwargs:
+        raise Exception(('Got unknown keyword args '
+                         'to utils.execute: %r') % kwargs)
+
+    while attempts > 0:
+        attempts -= 1
+        try:
+            LOG.log(loglevel, ('Running cmd (subprocess): %s'), cmd)
+            _PIPE = subprocess.PIPE  # pylint: disable=E1101
+
+            if os.name == 'nt':
+                preexec_fn = None
+                close_fds = False
+            else:
+                preexec_fn = _subprocess_setup
+                close_fds = True
+
+            obj = subprocess.Popen(cmd,
+                                   stdin=_PIPE,
+                                   stdout=_PIPE,
+                                   stderr=_PIPE,
+                                   close_fds=close_fds,
+                                   preexec_fn=preexec_fn,
+                                   shell=shell)
+            result = None
+            if process_input is not None:
+                result = obj.communicate(process_input)
+            else:
+                if non_blocking:
+                    queue = Queue()
+                    thread = Thread(target=enqueue_output, args=(obj.stdout,
+                                                                 queue))
+                    thread.daemon = True
+                    thread.start()
+                    # If you want to read this output later:
+                    # try:
+                    #     from Queue import Queue, Empty
+                    # except ImportError:
+                    #     from queue import Queue, Empty  # python 3.x
+                    # try:  line = q.get_nowait() # or q.get(timeout=.1)
+                    # except Empty:
+                    #     print('no output yet')
+                    # else: # got line
+                    # ... do something with line
+                    return queue
+                result = obj.communicate()
+            obj.stdin.close()  # pylint: disable=E1101
+            _returncode = obj.returncode  # pylint: disable=E1101
+            LOG.log(loglevel, ('Result was %s') % _returncode)
+            if not ignore_exit_code and _returncode not in check_exit_code:
+                (stdout, stderr) = result
+                sanitized_stdout = mask_password(stdout)
+                sanitized_stderr = mask_password(stderr)
+                raise ProcessExecutionError(
+                    exit_code=_returncode,
+                    stdout=sanitized_stdout,
+                    stderr=sanitized_stderr,
+                    cmd=(' '.join(cmd)) if isinstance(cmd, list) else cmd)
+            (stdout, stderr) = result
+            return (stdout, (stderr, _returncode))
+        except ProcessExecutionError:
+            raise
+        finally:
+            sleep(0)
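
For reference, a minimal sketch of how the tools above call this helper, mirroring check_if_br_exists in test_environment.py (the bridge name is a placeholder):

  from utils.processutils import execute

  # stdout comes first; stderr and the return code are wrapped in a tuple
  out, (err, rc) = execute('ovs-vsctl br-exists br-admin-1',
                           check_exit_code=[0, 2], as_root=True)
  bridge_exists = (rc == 0)
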
diff --git a/odl-pipeline/lib/utils/service.py b/odl-pipeline/lib/utils/service.py
new file mode 100755 (executable)
index 0000000..39cdce5
--- /dev/null
@@ -0,0 +1,67 @@
+#
+# Copyright (c) 2015 All rights reserved
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+#
+import sys
+import yaml
+import argparse
+import traceback
+from utils_log import LOG, LOG_PATH
+from abc import abstractmethod
+
+
+class Service(object):
+
+    def start(self):
+        try:
+            self._run()
+        except Exception as ex:
+            LOG.error(ex.message)
+            LOG.error(traceback.format_exc())
+            LOG.error("For more logs check: %(log_path)s"
+                      % {'log_path': LOG_PATH})
+            sys.exit(1)
+
+    def _run(self):
+        parser = self._create_cli_parser()
+        sys_args = parser.parse_args()
+        config = self.read_config(sys_args)
+        self.run(sys_args, config)
+
+    @abstractmethod
+    def run(self, sys_args, config):
+        # Do something
+        return
+
+    @abstractmethod
+    def create_cli_parser(self, parser):
+        # Read in own sys args
+        return parser
+
+    def _create_cli_parser(self):
+        parser = argparse.ArgumentParser(description='OVS Debugger')
+        # parser.add_argument('-c', '--config', help="Path to config.yaml",
+        #                     required=False)
+        # parser.add_argument('--boolean', help="",
+        #                     required=False, action='store_true')
+        return self.create_cli_parser(parser)
+
+    def read_config(self, sys_args):
+        if not hasattr(sys_args, 'config'):
+            return None
+        if not sys_args.config:
+            config_path = './etc/config.yaml'
+        else:
+            config_path = sys_args.config
+        try:
+            with open(config_path) as f:
+                return yaml.load(f)
+        except yaml.scanner.ScannerError as ex:
+            LOG.error("Yaml file corrupt. Try putting spaces after the "
+                      "colons.")
+            raise ex
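
For orientation, every tool in this change follows the same Service pattern; a minimal sketch of a new subclass (the class name and flag are illustrative, not part of this change; run with lib/ on PYTHONPATH and ./tmp present, as the wrapper scripts arrange):

  from utils.service import Service
  from utils.utils_log import LOG


  class ExampleTool(Service):

      def create_cli_parser(self, parser):
          parser.add_argument('--message', required=True)
          return parser

      def run(self, sys_args, config):
          LOG.info(sys_args.message)


  if __name__ == '__main__':
      ExampleTool().start()
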
diff --git a/odl-pipeline/lib/utils/shutil.py b/odl-pipeline/lib/utils/shutil.py
new file mode 100755 (executable)
index 0000000..40e2aba
--- /dev/null
@@ -0,0 +1,66 @@
+#
+# Copyright (c) 2015 All rights reserved
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+#
+import os
+import glob
+import processutils as putils
+
+
+class shutil():
+    '''
+    Helpers wrapping local shell commands (mkdir, cp, rm, mv) via
+    processutils.execute.
+    '''
+    @staticmethod
+    def mkdir_if_not_exsist(path):
+        if not path:
+            raise Exception('Path should not be empty.')
+        putils.execute(["mkdir", "-p", path])
+
+    @staticmethod
+    def copy(direction, src, dst, **kwargs):
+        if direction == 'from':
+            dst_tmp = dst
+            dst = src
+            src = dst_tmp
+        if src[-1:] == '*':
+            files = glob.glob(src)
+            for file in files:
+                shutil._copy(file, dst, **kwargs)
+        else:
+            shutil._copy(src, dst, **kwargs)
+
+    @staticmethod
+    def _copy(src, dst, **kwargs):
+        if os.path.isfile(src):
+            if dst[-1:] == '/':
+                shutil.mkdir_if_not_exsist(dst)
+            putils.execute(['cp', src, dst], **kwargs)
+        else:
+            putils.execute(['cp', '-R', src, dst], **kwargs)
+
+    @staticmethod
+    def rm(path, **kwargs):
+        putils.execute(['rm', '-rf', path], **kwargs)
+
+    @staticmethod
+    def mv(src, dst):
+        putils.execute(["mv", src, dst])
+
+    @staticmethod
+    def get_all_files_in_path(path):
+        if os.path.exists(path):
+            return putils.execute(['ls', path])
+
+    @staticmethod
+    def replace_string_in_file(file, str, replace):
+        with open(file, 'r') as f:
+            string = f.read()
+        string = string.replace(str, replace)
+        with open(file, 'w+') as f:
+            f.write(string)
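
A small usage sketch (paths are placeholders): 'to' keeps the argument order, 'from' swaps source and destination, and a trailing '*' on the source expands a glob:

  from utils.shutil import shutil

  shutil.mkdir_if_not_exsist('./build/example/')
  shutil.copy('to', '/home/stack/stackrc', './build/example/')
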
diff --git a/odl-pipeline/lib/utils/ssh_client.py b/odl-pipeline/lib/utils/ssh_client.py
new file mode 100755 (executable)
index 0000000..464a74e
--- /dev/null
@@ -0,0 +1,88 @@
+#
+# Copyright (c) 2015 All rights reserved
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+#
+from processutils import execute
+from ssh_util import SshUtil
+from node_manager import NodeManager
+import os
+from utils_log import LOG
+import glob
+
+
+class SSHClient(object):
+
+    def __init__(self, node):
+        self.node = node
+
+    def execute(self, cmd, **kwargs):
+        if 'log_true' in kwargs:
+            if kwargs['log_true']:
+                LOG.info('Node: %s Executing: %s' % (self.node.name, cmd))
+            kwargs.pop('log_true')
+        NodeManager.gen_ssh_config(self.node)
+        if not isinstance(cmd, str):
+            cmd = ' '.join(cmd)
+        cmd_addition = ['ssh', '-i', SshUtil.get_id_rsa(), '-F',
+                        SshUtil.get_config_file_path(),
+                        self.node.name]
+        if self.node.password:
+            cmd_addition = ['sshpass', '-p', self.node.password] + cmd_addition
+        if 'as_root' in kwargs:
+            kwargs.pop('as_root')
+            cmd = 'sudo ' + cmd
+        cmd_addition.append(cmd)
+        return execute(cmd_addition, **kwargs)
+
+    def copy(self, direction, local_path, remote_path, **kwargs):
+        all_files = None
+        if direction == 'to':
+            msg = ('Copying file %s to %s:%s' % (local_path, self.node.name,
+                                                 remote_path))
+            if self.node.is_dir(remote_path):
+                pass
+            elif remote_path[-1:] == '/':
+                self.node.create_path_if_not_exsist(remote_path)
+            else:
+                # Remove the file
+                self.execute('rm -f %s' % remote_path, as_root=True)
+                self.node.create_path_if_not_exsist(
+                    os.path.dirname(remote_path))
+            if '*' in local_path:
+                all_files = glob.glob(local_path)
+        else:
+            if local_path[-1:] == '/':
+                execute('mkdir -p %s' % local_path)
+            msg = ('Copying file from %s:%s to %s' % (self.node.name,
+                                                      remote_path,
+                                                      local_path))
+        LOG.info(msg)
+        if all_files:
+            ret = None
+            for one_file in all_files:
+                ret = self._copy(direction, one_file, remote_path, **kwargs)
+            return ret
+        else:
+            return self._copy(direction, local_path, remote_path, **kwargs)
+
+    def _copy(self, direction, local_path, remote_path, **kwargs):
+        # TODO: create the remote dir if it does not exist
+        NodeManager.gen_ssh_config(self.node)
+        cmd = ['scp', '-i', SshUtil.get_id_rsa(), '-F',
+               SshUtil.get_config_file_path()]
+        if direction == 'to':
+            if os.path.isdir(local_path):
+                cmd.append('-r')
+            cmd = cmd + [local_path,
+                         ('%s:%s') % (self.node.name, remote_path)]
+        if direction == 'from':
+            if self.node.is_dir(remote_path):
+                cmd.append('-r')
+            cmd = cmd + [('%s:%s') % (self.node.name, remote_path),
+                         local_path]
+        if self.node.password:
+            cmd = ['sshpass', '-p', self.node.password] + cmd
+        return execute(cmd, **kwargs)
diff --git a/odl-pipeline/lib/utils/ssh_util.py b/odl-pipeline/lib/utils/ssh_util.py
new file mode 100755 (executable)
index 0000000..e70aed3
--- /dev/null
@@ -0,0 +1,36 @@
+#
+# Copyright (c) 2015 All rights reserved
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+#
+import os
+home = os.getenv("HOME")
+SSH_CONFIG = {'TMP_SSH_CONFIG': "./tmp/ssh_config",
+              'ID_RSA_PATH': "%s/.ssh/id_rsa" % home}
+
+
+class SshUtil(object):
+
+    @staticmethod
+    def gen_ssh_config(node_list):
+        config = ["UserKnownHostsFile=/dev/null",
+                  "StrictHostKeyChecking=no",
+                  "ForwardAgent yes",
+                  "GSSAPIAuthentication=no",
+                  "LogLevel ERROR"]
+        for node in node_list:
+            config.append(node.to_ssh_config())
+        with open(SSH_CONFIG['TMP_SSH_CONFIG'], 'w') as f:
+            f.write('\n'.join(config))
+
+    @staticmethod
+    def get_config_file_path():
+        return SSH_CONFIG['TMP_SSH_CONFIG']
+
+    @staticmethod
+    def get_id_rsa():
+        return (SSH_CONFIG['ID_RSA_PATH'])
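
For reference, a sketch of what gen_ssh_config writes to ./tmp/ssh_config (node values are placeholders; run from lib/ so ./tmp exists, as the wrapper scripts ensure):

  from utils.node import Node
  from utils.ssh_util import SshUtil

  undercloud = Node('undercloud',
                    dict={'address': '192.0.2.1', 'user': 'stack'})
  SshUtil.gen_ssh_config([undercloud])
  # ./tmp/ssh_config now holds the global options above followed by:
  #   Host undercloud
  #       Hostname 192.0.2.1
  #       user stack
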
diff --git a/odl-pipeline/lib/utils/utils_log.py b/odl-pipeline/lib/utils/utils_log.py
new file mode 100755 (executable)
index 0000000..e49434c
--- /dev/null
@@ -0,0 +1,67 @@
+#
+# Copyright (c) 2015 All rights reserved
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+#
+import logging
+import datetime
+import os
+import sys
+
+LOG = logging.getLogger(__name__)
+LOG_LEVEL = logging.DEBUG
+LOG_PATH = "./tmp/%s.log" % os.path.basename(sys.argv[0])
+logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
+                    filename=LOG_PATH, level=LOG_LEVEL)
+#                    datefmt='%Y-%m-%dT:%H:%M:%s', level=LOG_LEVEL)
+console = logging.StreamHandler()
+console.setLevel(logging.INFO)
+formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
+console.setFormatter(formatter)
+LOG.addHandler(console)
+
+
+def log_enter_exit(func):
+
+    def inner(self, *args, **kwargs):
+        LOG.debug(("Entering %(cls)s.%(method)s "
+                   "args: %(args)s, kwargs: %(kwargs)s") %
+                  {'cls': self.__class__.__name__,
+                   'method': func.__name__,
+                   'args': args,
+                   'kwargs': kwargs})
+        start = datetime.datetime.now()
+        ret = func(self, *args, **kwargs)
+        end = datetime.datetime.now()
+        LOG.debug(("Exiting %(cls)s.%(method)s. "
+                   "Spent %(duration)s sec. "
+                   "Return %(return)s") %
+                  {'cls': self.__class__.__name__,
+                   'duration': end - start,
+                   'method': func.__name__,
+                   'return': ret})
+        return ret
+    return inner
+
+
+def for_all_methods(decorator):
+    # @for_all_methods(log_enter_exit)
+    # class ...
+
+    def decorate(cls):
+        for attr in cls.__dict__:
+            if callable(getattr(cls, attr)):
+                setattr(cls, attr, decorator(getattr(cls, attr)))
+        return cls
+    return decorate
+
+
+def dict_to_nice_string(dict):
+    return_string = []
+    for key, value in dict.iteritems():
+        return_string.append('%s: %s' % (key, value))
+    return ', '.join(return_string)
diff --git a/odl-pipeline/lib/utils/utils_yaml.py b/odl-pipeline/lib/utils/utils_yaml.py
new file mode 100755 (executable)
index 0000000..f9513b8
--- /dev/null
@@ -0,0 +1,20 @@
+#
+# Copyright (c) 2015 All rights reserved
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+#
+import yaml
+
+
+def write_dict_to_yaml(config, path):
+    with open(path, 'w+') as f:
+        yaml.dump(config, f, default_flow_style=False)
+
+
+def read_dict_from_yaml(path):
+    with open(path, 'r') as f:
+        return yaml.load(f)
diff --git a/odl-pipeline/templates/ifcfg-enp0s4 b/odl-pipeline/templates/ifcfg-enp0s4
new file mode 100644 (file)
index 0000000..d7751dd
--- /dev/null
@@ -0,0 +1,5 @@
+# br-admin
+auto enp0s4
+iface enp0s4 inet static
+  address 192.0.2.199
+  netmask 255.255.255.0
\ No newline at end of file
diff --git a/odl-pipeline/templates/ifcfg-enp0s6 b/odl-pipeline/templates/ifcfg-enp0s6
new file mode 100644 (file)
index 0000000..e25dc4e
--- /dev/null
@@ -0,0 +1,5 @@
+# br-public
+auto enp0s6
+iface enp0s6 inet static
+  address 192.168.37.199
+  netmask 255.255.255.0
\ No newline at end of file
diff --git a/odl-pipeline/templates/nets/br-admin.xml b/odl-pipeline/templates/nets/br-admin.xml
new file mode 100644 (file)
index 0000000..9508c46
--- /dev/null
@@ -0,0 +1,7 @@
+<network ipv6='yes'>
+  <name>br-admin-NaMe</name>
+  <forward mode='bridge'/>
+  <bridge name='br-admin-NaMe'/>
+  
+</network>
+
diff --git a/odl-pipeline/templates/nets/br-private.xml b/odl-pipeline/templates/nets/br-private.xml
new file mode 100644 (file)
index 0000000..0fe3ffa
--- /dev/null
@@ -0,0 +1,6 @@
+<network ipv6='yes'>
+  <name>br-private-NaMe</name>
+  <forward mode='bridge'/>
+  <bridge name='br-private-NaMe'/>
+</network>
+
diff --git a/odl-pipeline/templates/nets/br-public.xml b/odl-pipeline/templates/nets/br-public.xml
new file mode 100644 (file)
index 0000000..66b08f1
--- /dev/null
@@ -0,0 +1,6 @@
+<network ipv6='yes'>
+  <name>br-public-NaMe</name>
+  <forward mode='bridge'/>
+  <bridge name='br-public-NaMe'/>
+</network>
+
diff --git a/odl-pipeline/templates/nets/br-storage.xml b/odl-pipeline/templates/nets/br-storage.xml
new file mode 100644 (file)
index 0000000..ce5edbd
--- /dev/null
@@ -0,0 +1,6 @@
+<network ipv6='yes'>
+  <name>br-storage-NaMe</name>
+  <forward mode='bridge'/>
+  <bridge name='br-storage-NaMe'/>
+</network>
+
diff --git a/odl-pipeline/templates/nodes/baremetalX.xml b/odl-pipeline/templates/nodes/baremetalX.xml
new file mode 100644 (file)
index 0000000..3eb3e87
--- /dev/null
@@ -0,0 +1,87 @@
+<domain type='kvm'>
+  <name>NaMe</name>
+  <memory unit='GiB'>MeMoRy</memory>
+  <currentMemory unit='GiB'>MeMoRy</currentMemory>
+  <vcpu placement='static'>vCpU</vcpu>
+  <resource>
+    <partition>/machine</partition>
+  </resource>
+  <os>
+    <type arch='x86_64'>hvm</type>
+    <boot dev='hd'/>
+    <bootmenu enable='no'/>
+  </os>
+  <cpu mode='host-passthrough'/>
+  <clock offset='utc'/>
+  <on_poweroff>destroy</on_poweroff>
+  <on_reboot>restart</on_reboot>
+  <on_crash>restart</on_crash>
+  <devices>
+    <emulator>/usr/bin/kvm</emulator>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='unsafe'/>
+      <source file='DiSk'/>
+      <target dev='sda' bus='sata'/>
+      <alias name='sata0-0-0'/>
+      <address type='drive' controller='0' bus='0' target='0' unit='0'/>
+    </disk>
+    <controller type='scsi' index='0' model='virtio-scsi'>
+      <alias name='scsi0'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
+    </controller>
+    <controller type='usb' index='0'>
+      <alias name='usb'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x2'/>
+    </controller>
+    <controller type='pci' index='0' model='pci-root'>
+      <alias name='pci.0'/>
+    </controller>
+    <controller type='sata' index='0'>
+      <alias name='sata0'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
+    </controller>
+    <interface type='bridge'>
+      <source bridge='admin-InDeX'/>
+      <virtualport type='openvswitch'/>
+      <model type='virtio'/>
+      <alias name='net0'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
+    </interface>
+    <interface type='bridge'>
+      <source bridge='private-InDeX'/>
+      <virtualport type='openvswitch'/>
+      <model type='virtio'/>
+      <alias name='net1'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
+    </interface>
+    <interface type='bridge'>
+      <source bridge='public-InDeX'/>
+      <virtualport type='openvswitch'/>
+      <model type='virtio'/>
+      <alias name='net2'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x08' function='0x0'/>
+    </interface>
+    <interface type='bridge'>
+      <source bridge='storage-InDeX'/>
+      <virtualport type='openvswitch'/>
+      <model type='virtio'/>
+      <alias name='net3'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x09' function='0x0'/>
+    </interface>
+    <input type='mouse' bus='ps2'/>
+    <input type='keyboard' bus='ps2'/>
+    <graphics type='vnc' port='-1' autoport='yes' listen='127.0.0.1'>
+      <listen type='address' address='127.0.0.1'/>
+    </graphics>
+    <video>
+      <model type='cirrus' vram='16384' heads='1'/>
+      <alias name='video0'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
+    </video>
+    <memballoon model='virtio'>
+      <alias name='balloon0'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
+    </memballoon>
+  </devices>
+</domain>
+
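The baremetal node template above exposes five tokens: NaMe, MeMoRy (GiB), vCpU, DiSk (path of the qcow2 image) and InDeX (suffix of the admin/private/public/storage OVS bridges the NICs attach to). A minimal sketch of filling them in and defining the domain; all concrete values below are examples, not values taken from this change:

#!/usr/bin/env python
# Hypothetical sketch: render templates/nodes/baremetalX.xml and define
# the resulting libvirt domain. The placeholder names are the ones used
# in the template; every concrete value below is an example.
import subprocess

BAREMETAL_TEMPLATE = "odl-pipeline/templates/nodes/baremetalX.xml"


def define_baremetal(name, memory_gib, vcpus, disk_path, bridge_index):
    with open(BAREMETAL_TEMPLATE) as f:
        xml = f.read()
    for token, value in (("NaMe", name),
                         ("MeMoRy", str(memory_gib)),
                         ("vCpU", str(vcpus)),
                         ("DiSk", disk_path),
                         ("InDeX", str(bridge_index))):
        xml = xml.replace(token, value)
    out = "/tmp/%s.xml" % name
    with open(out, "w") as f:
        f.write(xml)
    # virsh define creates the persistent domain; it is started separately.
    subprocess.check_call(["virsh", "define", out])


# Example: a 16 GiB / 4 vCPU overcloud node attached to the "0" bridge set.
define_baremetal("baremetal0", 16, 4,
                 "/var/lib/libvirt/images/baremetal0.qcow2", 0)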
diff --git a/odl-pipeline/templates/nodes/jenkins_slave.xml b/odl-pipeline/templates/nodes/jenkins_slave.xml
new file mode 100644 (file)
index 0000000..e295a5e
--- /dev/null
@@ -0,0 +1,89 @@
+<domain type='kvm'>
+  <name>js-pod-NaMe</name>
+  <memory unit='GiB'>2</memory>
+  <currentMemory unit='GiB'>2</currentMemory>
+  <vcpu placement='static'>2</vcpu>
+  <resource>
+    <partition>/machine</partition>
+  </resource>
+  <os>
+    <type arch='x86_64'>hvm</type>
+    <boot dev='hd'/>
+    <boot dev='network'/>
+    <bootmenu enable='no'/>
+    <bios rebootTimeout='30000'/>
+  </os>
+  <clock offset='utc'/>
+  <on_poweroff>destroy</on_poweroff>
+  <on_reboot>restart</on_reboot>
+  <on_crash>restart</on_crash>
+  <devices>
+    <emulator>/usr/bin/kvm</emulator>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2'/>
+      <source file='DiSk/jenkins_slave-NaMe.qcow2'/>
+      <target dev='vda' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
+    </disk>
+    <controller type='usb' index='0'>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x2'/>
+    </controller>
+    <controller type='pci' index='0' model='pci-root'/>
+    <controller type='ide' index='0'>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x1'/>
+    </controller>
+    <interface type='network'>
+      <source network='default'/>
+      <mac address='MaC'/>
+      <model type='e1000'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
+    </interface>
+    <interface type='network'>
+      <source network='br-admin-NaMe'/>
+      <model type='virtio'/>
+      <alias name='net0'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
+    </interface>
+    <interface type='network'>
+      <source network='br-private-NaMe'/>
+      <model type='virtio'/>
+      <alias name='net1'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
+    </interface>
+    <interface type='network'>
+      <source network='br-public-NaMe'/>
+      <model type='virtio'/>
+      <alias name='net2'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x08' function='0x0'/>
+    </interface>
+    <interface type='network'>
+      <source network='br-storage-NaMe'/>
+      <model type='virtio'/>
+      <alias name='net3'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x0a' function='0x0'/>
+    </interface>
+    <serial type='pty'>
+      <target port='0'/>
+    </serial>
+    <console type='pty'>
+      <target type='serial' port='0'/>
+    </console>
+    <input type='mouse' bus='ps2'/>
+    <input type='keyboard' bus='ps2'/>
+    <graphics type='vnc' port='-1' autoport='yes' listen='127.0.0.1' keymap='en-us'>
+      <listen type='address' address='127.0.0.1'/>
+    </graphics>
+    <sound model='ich6'>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
+    </sound>
+    <video>
+      <model type='cirrus' vram='16384' heads='1'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
+    </video>
+    <memballoon model='virtio'>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x09' function='0x0'/>
+    </memballoon>
+  </devices>
+  <seclabel type='dynamic' model='apparmor' relabel='yes'/>
+</domain>
+
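The Jenkins-slave template above adds network boot (boot dev='network') and a MaC token on the NIC attached to the libvirt default network, alongside NaMe and DiSk. A short sketch of one way the MaC slot could be filled, assuming a randomly generated, locally administered address with the conventional QEMU/KVM 52:54:00 prefix (the generation scheme is an assumption, not part of this change):

#!/usr/bin/env python
# Hypothetical sketch: generate a locally administered, unicast MAC for
# the 'MaC' placeholder in templates/nodes/jenkins_slave.xml. Using the
# 52:54:00 prefix (QEMU/KVM's conventional locally administered OUI) is
# an assumption, not something defined by this change.
import random


def random_kvm_mac():
    tail = [random.randint(0x00, 0xff) for _ in range(3)]
    return "52:54:00:" + ":".join("%02x" % b for b in tail)


def render_jenkins_slave(template_xml, name, disk_dir):
    # Fill the domain name, the disk directory and the PXE NIC's MAC.
    xml = template_xml.replace("NaMe", name)
    xml = xml.replace("DiSk", disk_dir)
    return xml.replace("MaC", random_kvm_mac())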
diff --git a/odl-pipeline/templates/nodes/undercloud.xml b/odl-pipeline/templates/nodes/undercloud.xml
new file mode 100644 (file)
index 0000000..d37dcc5
--- /dev/null
@@ -0,0 +1,87 @@
+<domain type='kvm' id='66'>
+  <name>undercloud-NaMe</name>
+  <memory unit='KiB'>12582912</memory>
+  <currentMemory unit='KiB'>12582912</currentMemory>
+  <vcpu placement='static'>4</vcpu>
+  <resource>
+    <partition>/machine</partition>
+  </resource>
+  <os>
+    <type arch='x86_64'>hvm</type>
+    <boot dev='hd'/>
+    <bootmenu enable='no'/>
+  </os>
+  <features>
+    <acpi/>
+    <apic/>
+    <pae/>
+  </features>
+  <cpu mode='host-passthrough'/>
+  <clock offset='utc'/>
+  <on_poweroff>destroy</on_poweroff>
+  <on_reboot>restart</on_reboot>
+  <on_crash>restart</on_crash>
+  <devices>
+    <emulator>/usr/bin/kvm</emulator>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='unsafe'/>
+      <source file='/home/user/odl-pipeline/disks/apex-NaMe/undercloud.qcow2'/>
+      <backingStore/>
+      <target dev='sda' bus='sata'/>
+      <alias name='sata0-0-0'/>
+      <address type='drive' controller='0' bus='0' target='0' unit='0'/>
+    </disk>
+    <controller type='scsi' index='0' model='virtio-scsi'>
+      <alias name='scsi0'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
+    </controller>
+    <controller type='usb' index='0'>
+      <alias name='usb'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x2'/>
+    </controller>
+    <controller type='pci' index='0' model='pci-root'>
+      <alias name='pci.0'/>
+    </controller>
+    <controller type='sata' index='0'>
+      <alias name='sata0'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
+    </controller>
+    <interface type='network'>
+      <mac address='00:9f:11:d7:b1:49'/>
+      <source network='default' bridge='virbr0'/>
+      <target dev='vnet0'/>
+      <model type='virtio'/>
+      <alias name='net0'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
+    </interface>
+    <interface type='bridge'>
+      <mac address='00:9f:11:d7:b1:4b'/>
+      <source network='admin_network' bridge='br-admin-NaMe'/>
+      <model type='virtio'/>
+      <alias name='net1'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
+    </interface>
+    <interface type='bridge'>
+      <mac address='00:9f:11:d7:b1:4d'/>
+      <source network='public_network' bridge='br-public-NaMe'/>
+      <model type='virtio'/>
+      <alias name='net2'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
+    </interface>
+    <input type='mouse' bus='ps2'/>
+    <input type='keyboard' bus='ps2'/>
+    <graphics type='vnc' port='5900' autoport='yes' listen='127.0.0.1'>
+      <listen type='address' address='127.0.0.1'/>
+    </graphics>
+    <video>
+      <model type='cirrus' vram='16384' heads='1'/>
+      <alias name='video0'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
+    </video>
+    <memballoon model='virtio'>
+      <alias name='balloon0'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x08' function='0x0'/>
+    </memballoon>
+  </devices>
+</domain>
+
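The undercloud template above only parameterizes NaMe; the MAC addresses and the qcow2 path under /home/user/odl-pipeline/disks/apex-NaMe/ are fixed in the XML, so the rendered disk path must already exist on the hypervisor. A minimal sketch of defining and starting it (the helper name and output path are illustrative, not part of this change):

#!/usr/bin/env python
# Hypothetical sketch: bring up the undercloud VM from
# templates/nodes/undercloud.xml. Only 'NaMe' is a placeholder; the
# rendered disk path /home/user/odl-pipeline/disks/apex-<name>/ must
# already hold undercloud.qcow2 before the domain is started.
import subprocess

UNDERCLOUD_TEMPLATE = "odl-pipeline/templates/nodes/undercloud.xml"


def start_undercloud(name):
    with open(UNDERCLOUD_TEMPLATE) as f:
        xml = f.read().replace("NaMe", name)
    out = "/tmp/undercloud-%s.xml" % name
    with open(out, "w") as f:
        f.write(xml)
    subprocess.check_call(["virsh", "define", out])
    subprocess.check_call(["virsh", "start", "undercloud-%s" % name])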