set -o nounset
set -o pipefail
-APEX_PKGS="common undercloud opendaylight-sfc onos"
+APEX_PKGS="common undercloud onos"
IPV6_FLAG=False
# log info to console
RPM_INSTALL_PATH=$BUILD_DIRECTORY/noarch
RPM_LIST=$RPM_INSTALL_PATH/$(basename $OPNFV_RPM_URL)
VERSION_EXTENSION=$(echo $(basename $OPNFV_RPM_URL) | sed 's/opnfv-apex-//')
-for pkg in common undercloud opendaylight-sfc onos; do
+for pkg in common undercloud onos; do
RPM_LIST+=" ${RPM_INSTALL_PATH}/opnfv-apex-${pkg}-${VERSION_EXTENSION}"
done
SRPM_INSTALL_PATH=$BUILD_DIRECTORY
SRPM_LIST=$SRPM_INSTALL_PATH/$(basename $OPNFV_SRPM_URL)
VERSION_EXTENSION=$(echo $(basename $OPNFV_SRPM_URL) | sed 's/opnfv-apex-//')
-for pkg in common undercloud opendaylight-sfc onos; do
+for pkg in common undercloud onos; do
SRPM_LIST+=" ${SRPM_INSTALL_PATH}/opnfv-apex-${pkg}-${VERSION_EXTENSION}"
done
}
slave-label: arm-pod2
installer: fuel
<<: *colorado
+ - arm-pod3:
+ slave-label: arm-pod3
+ installer: fuel
+ <<: *colorado
#--------------------------------
# master
#--------------------------------
slave-label: arm-pod2
installer: fuel
<<: *master
+ - arm-pod3:
+ slave-label: arm-pod3
+ installer: fuel
+ <<: *master
#--------------------------------
# scenarios
#--------------------------------
name: 'fuel-os-odl_l2-sfc-noha-arm-pod2-colorado-trigger'
triggers:
- timed: ''
+#----------------------------------------------------------
+# Enea Armband POD 3 Triggers running against master branch
+#----------------------------------------------------------
+- trigger:
+ name: 'fuel-os-odl_l2-nofeature-ha-arm-pod3-master-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-nosdn-nofeature-ha-arm-pod3-master-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l3-nofeature-ha-arm-pod3-master-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l2-bgpvpn-ha-arm-pod3-master-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l2-nofeature-noha-arm-pod3-master-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l2-sfc-ha-arm-pod3-master-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l2-sfc-noha-arm-pod3-master-trigger'
+ triggers:
+ - timed: ''
+#---------------------------------------------------------------
+# Enea Armband POD 3 Triggers running against colorado branch
+#---------------------------------------------------------------
+- trigger:
+ name: 'fuel-os-odl_l2-nofeature-ha-arm-pod3-colorado-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-nosdn-nofeature-ha-arm-pod3-colorado-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l3-nofeature-ha-arm-pod3-colorado-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l2-bgpvpn-ha-arm-pod3-colorado-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l2-nofeature-noha-arm-pod3-colorado-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l2-sfc-ha-arm-pod3-colorado-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'fuel-os-odl_l2-sfc-noha-arm-pod3-colorado-trigger'
+ triggers:
+ - timed: ''
blocking-jobs:
- 'compass-os-.*?-{pod}-daily-.*?'
- 'compass-os-.*?-baremetal-daily-.*?'
- - 'compass-verify-[^-]*'
+ - 'compass-verify-[^-]*-[^-]*'
block-level: 'NODE'
wrappers:
- build-blocker:
use-build-blocker: true
blocking-jobs:
- - 'compass-verify-[^-]*'
+ - 'compass-verify-[^-]*-[^-]*'
- 'compass-os-.*?-virtual-daily-.*?'
block-level: 'NODE'
- project:
name: 'daisy4nfv-verify-jobs'
- project: 'daisy4nfv'
-
- installer: 'daisy4nfv'
+ project: 'daisy'
#####################################
# branch definitions
#####################################
projects:
- name: 'daisy4nfv-verify-basic-{stream}'
current-parameters: false
+ predefined-parameters: |
+ GERRIT_BRANCH=$GERRIT_BRANCH
+ GERRIT_REFSPEC=$GERRIT_REFSPEC
+ GERRIT_CHANGE_NUMBER=$GERRIT_CHANGE_NUMBER
+ GERRIT_CHANGE_COMMIT_MESSAGE=$GERRIT_CHANGE_COMMIT_MESSAGE
node-parameters: false
kill-phase-on: FAILURE
abort-all-job: true
projects:
- name: 'daisy4nfv-verify-build-{stream}'
current-parameters: false
+ predefined-parameters: |
+ GERRIT_BRANCH=$GERRIT_BRANCH
+ GERRIT_REFSPEC=$GERRIT_REFSPEC
+ GERRIT_CHANGE_NUMBER=$GERRIT_CHANGE_NUMBER
+ GERRIT_CHANGE_COMMIT_MESSAGE=$GERRIT_CHANGE_COMMIT_MESSAGE
node-parameters: false
kill-phase-on: FAILURE
abort-all-job: true
projects:
- name: 'daisy4nfv-verify-deploy-virtual-{stream}'
current-parameters: false
+ predefined-parameters: |
+ GERRIT_BRANCH=$GERRIT_BRANCH
+ GERRIT_REFSPEC=$GERRIT_REFSPEC
+ GERRIT_CHANGE_NUMBER=$GERRIT_CHANGE_NUMBER
+ GERRIT_CHANGE_COMMIT_MESSAGE=$GERRIT_CHANGE_COMMIT_MESSAGE
node-parameters: false
kill-phase-on: FAILURE
abort-all-job: true
projects:
- name: 'daisy4nfv-verify-smoke-test-{stream}'
current-parameters: false
+ predefined-parameters: |
+ GERRIT_BRANCH=$GERRIT_BRANCH
+ GERRIT_REFSPEC=$GERRIT_REFSPEC
+ GERRIT_CHANGE_NUMBER=$GERRIT_CHANGE_NUMBER
+ GERRIT_CHANGE_COMMIT_MESSAGE=$GERRIT_CHANGE_COMMIT_MESSAGE
node-parameters: false
kill-phase-on: FAILURE
abort-all-job: true
# builder macros
#####################################
- builder:
- name: 'daisy4nfv-verify-basic-macro'
+ name: 'daisy-verify-basic-macro'
builders:
- shell:
!include-raw: ./daisy4nfv-basic.sh
- builder:
- name: 'daisy4nfv-verify-build-macro'
+ name: 'daisy-verify-build-macro'
builders:
- shell:
!include-raw: ./daisy4nfv-build.sh
- builder:
- name: 'daisy4nfv-verify-deploy-virtual-macro'
+ name: 'daisy-verify-deploy-virtual-macro'
builders:
- shell:
!include-raw: ./daisy4nfv-virtual-deploy.sh
- builder:
- name: 'daisy4nfv-verify-smoke-test-macro'
+ name: 'daisy-verify-smoke-test-macro'
builders:
- shell: |
#!/bin/bash
slave-label: '{pod}'
installer: fuel
<<: *master
+ - arm-pod3:
+ slave-label: '{pod}'
+ installer: fuel
+ <<: *master
- zte-pod1:
slave-label: '{pod}'
installer: fuel
slave-label: '{pod}'
installer: fuel
<<: *colorado
+ - arm-pod3:
+ slave-label: '{pod}'
+ installer: fuel
+ <<: *colorado
# PODs for verify jobs triggered by each patch upload
- ool-virtual1:
slave-label: '{pod}'
name: GS_URL
default: '$GS_BASE{gs-pathname}'
description: "Directory where the build artifact will be located upon the completion of the build."
+ - string:
+ name: GERRIT_REFSPEC
+ default: 'refs/heads/{branch}'
+ description: "JJB configured GERRIT_REFSPEC parameter"
scm:
- gerrit-trigger-scm:
- change-merged-event
- comment-added-contains-event:
comment-contains-value: 'remerge'
+ - comment-added-contains-event:
+ comment-contains-value: 'rebuild docs'
projects:
- project-compare-type: 'ANT'
project-pattern: '*'
name: LAB_CONFIG_URL
default: ssh://jenkins-enea@gerrit.opnfv.org:29418/securedlab
description: 'Base URI to the configuration directory'
+- parameter:
+ name: 'arm-pod3-defaults'
+ parameters:
+ - node:
+ name: SLAVE_NAME
+ description: 'Slave name on Jenkins'
+ allowed-slaves:
+ - arm-pod3
+ default-slaves:
+ - arm-pod3
+ - string:
+ name: GIT_BASE
+ default: https://gerrit.opnfv.org/gerrit/$PROJECT
+ description: 'Git URL to use on this Jenkins Slave'
+ - string:
+ name: LAB_CONFIG_URL
+ default: ssh://jenkins-enea@gerrit.opnfv.org:29418/securedlab
+ description: 'Base URI to the configuration directory'
- parameter:
name: 'intel-virtual6-defaults'
parameters:
installer: fuel
auto-trigger-name: 'qtip-daily-zte-pod2-trigger'
<<: *master
+ - zte-pod3:
+ installer: fuel
+ auto-trigger-name: 'qtip-daily-zte-pod3-trigger'
+ <<: *master
#--------------------------------
jobs:
- trigger:
name: 'qtip-daily-zte-pod2-trigger'
triggers:
- - timed: '0 5 * * *'
+ - timed: '0 7 * * *'
+- trigger:
+ name: 'qtip-daily-zte-pod3-trigger'
+ triggers:
+ - timed: '0 1 * * *'
mkdir -p upload
mv docs_output "$local_path"
- gsutil -m cp -r "$local_path" "gs://$GS_URL"
+ gsutil -m cp -r "$local_path" "gs://$gs_path"
gsutil -m setmeta \
-h "Content-Type:text/html" \
branch: '{stream}'
gs-pathname: ''
disabled: false
+ slave-label: 'opnfv-build-ubuntu'
- colorado:
branch: 'stable/{stream}'
gs-pathname: '/{stream}'
disabled: false
+ slave-label: 'intel-pod3'
- job-template:
project: '{project}'
- gerrit-parameter:
branch: '{branch}'
- - 'opnfv-build-ubuntu-defaults'
+ - '{slave-label}-defaults'
scm:
- gerrit-trigger-scm:
make
# run basic sanity test
make sanity
+ cd ../ci
+ ./build-vsperf.sh verify
- job-template:
name: 'vswitchperf-merge-{stream}'
project: '{project}'
- gerrit-parameter:
branch: '{branch}'
- - 'opnfv-build-ubuntu-defaults'
+ - '{slave-label}-defaults'
scm:
- gerrit-trigger-scm:
cd src
make clobber
make
+ cd ../ci
+ ./build-vsperf.sh merge
installer: fuel
auto-trigger-name: 'daily-trigger-disabled'
<<: *colorado
+ - arm-pod3:
+ slave-label: '{pod}'
+ installer: fuel
+ auto-trigger-name: 'daily-trigger-disabled'
+ <<: *master
+ - arm-pod3:
+ slave-label: '{pod}'
+ installer: fuel
+ auto-trigger-name: 'daily-trigger-disabled'
+ <<: *colorado
- orange-pod2:
slave-label: '{pod}'
installer: joid
parameters:
- string:
name: YARDSTICK_DB_BACKEND
- default: ''
+ default: '-i 104.197.68.199:8086'
description: 'Arguments to use in order to choose the backend DB'
- parameter:
name: 'yardstick-params-armband-baremetal'
parameters:
- string:
name: YARDSTICK_DB_BACKEND
- default: ''
+ default: '-i 104.197.68.199:8086'
description: 'Arguments to use in order to choose the backend DB'
- parameter:
name: 'yardstick-params-joid-baremetal'
parameters:
- string:
name: YARDSTICK_DB_BACKEND
- default: ''
+ default: '-i 104.197.68.199:8086'
description: 'Arguments to use in order to choose the backend DB'
- parameter:
name: 'yardstick-params-intel-pod8'
parameters:
- string:
name: YARDSTICK_DB_BACKEND
- default: ''
+ default: '-i 104.197.68.199:8086'
description: 'Arguments to use in order to choose the backend DB'
- parameter:
name: 'yardstick-params-lf-pod1'
default: '-i 104.197.68.199:8086'
description: 'Arguments to use in order to choose the backend DB'
+- parameter:
+ name: 'yardstick-params-arm-pod3'
+ parameters:
+ - string:
+ name: YARDSTICK_DB_BACKEND
+ default: '-i 104.197.68.199:8086'
+ description: 'Arguments to use in order to choose the backend DB'
+
- parameter:
name: 'yardstick-params-virtual'
parameters:
- string:
name: YARDSTICK_DB_BACKEND
- default: ''
+ default: '-i 104.197.68.199:8086'
description: 'Arguments to use in order to choose the backend DB'
- parameter:
parameters:
- string:
name: YARDSTICK_DB_BACKEND
- default: ''
+ default: '-i 104.197.68.199:8086'
description: 'Arguments to use in order to choose the backend DB'
- parameter:
parameters:
- string:
name: YARDSTICK_DB_BACKEND
- default: ''
+ default: '-i 104.197.68.199:8086'
description: 'Arguments to use in order to choose the backend DB'
- parameter:
parameters:
- string:
name: YARDSTICK_DB_BACKEND
- default: ''
+ default: '-i 104.197.68.199:8086'
description: 'Arguments to use in order to choose the backend DB'
#######################
rm -rf /var/lib/libvirt/images/*.qcow2
echo "restarting services"
-service dnsmasq restart
+service dnsmasq restart || true
service libvirtd restart
service ironic-api restart
service ironic-conductor start
--- /dev/null
+How to deploy Infra Cloud on baremetal
+======================================
+
+Install bifrost controller
+--------------------------
+The first step in deploying Infra Cloud is to install the bifrost controller. This node can be virtualized; it does not need to run on baremetal.
+To achieve that, first create a virtual machine with libvirt, with the proper network setup. This VM needs to share one physical interface (the PXE boot one) with the controller and compute node servers.
+Please follow documentation on: [https://git.openstack.org/cgit/openstack/bifrost/tree/tools/virsh_dev_env/README.md](https://git.openstack.org/cgit/openstack/bifrost/tree/tools/virsh_dev_env/README.md) to get sample templates and instructions for creating the bifrost VM.
+
+Once the **baremetal** VM is up, you can log in by ssh and start installing bifrost there. To proceed, follow these steps (the full sequence is sketched after the list):
+
+ 1. Change to root user, install git
+ 2. Clone releng project (cd /opt, git clone https://gerrit.opnfv.org/gerrit/releng)
+ 3. cd /opt/releng/prototypes/puppet-infracloud
+ 4. Copy hiera to the right folder (cp hiera/common_baremetal.yaml /var/lib/hiera/common.yaml)
+ 5. Ensure hostname is properly set (hostnamectl set-hostname baremetal.opnfvlocal, then verify with hostname -f)
+ 6. Install puppet and modules (./install_puppet.sh, ./install_modules.sh)
+ 7. Apply puppet to install bifrost (puppet apply manifests/site.pp --modulepath=/etc/puppet/modules:/opt/releng/prototypes/puppet-infracloud/modules)
+
+ With these steps you will have a bifrost controller up and running.
+
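+For convenience, here is the same installation as a single shell session. This is a minimal sketch of the steps above; the git install command is an assumption (use your distribution's package manager):
+
+```bash
+sudo -i                                                       # 1. change to root
+apt-get install -y git                                        #    install git (assumed package manager)
+cd /opt && git clone https://gerrit.opnfv.org/gerrit/releng   # 2. clone releng
+cd /opt/releng/prototypes/puppet-infracloud                   # 3.
+cp hiera/common_baremetal.yaml /var/lib/hiera/common.yaml     # 4. hiera data
+hostnamectl set-hostname baremetal.opnfvlocal && hostname -f  # 5. set and verify hostname
+./install_puppet.sh && ./install_modules.sh                   # 6. puppet and modules
+# 7. apply puppet to install bifrost
+puppet apply manifests/site.pp \
+    --modulepath=/etc/puppet/modules:/opt/releng/prototypes/puppet-infracloud/modules
+```
+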
+Deploy baremetal servers
+--------------------------
+Once the bifrost controller is ready, use it to start the deployment of the baremetal servers.
+On the same bifrost VM, follow these steps (collected in a sketch after the list):
+
+ 1. Source bifrost env vars: source /opt/stack/bifrost/env-vars
+ 2. Export the baremetal servers inventory: export BIFROST_INVENTORY_SOURCE=/opt/stack/baremetal.json
+ 3. Enroll the servers: ansible-playbook -vvv -i inventory/bifrost_inventory.py enroll-dynamic.yaml -e @/etc/bifrost/bifrost_global_vars
+ 4. Deploy the servers: ansible-playbook -vvv -i inventory/bifrost_inventory.py deploy-dynamic.yaml -e @/etc/bifrost/bifrost_global_vars
+ 5. Wait until they reach the **active** state; check it with: ironic node-list
+
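+The commands above, collected as one shell session (a sketch; the working directory for the playbooks is an assumption, it is not stated above):
+
+```bash
+source /opt/stack/bifrost/env-vars
+export BIFROST_INVENTORY_SOURCE=/opt/stack/baremetal.json
+cd /opt/stack/bifrost/playbooks   # assumed location of bifrost's playbooks
+ansible-playbook -vvv -i inventory/bifrost_inventory.py enroll-dynamic.yaml \
+    -e @/etc/bifrost/bifrost_global_vars
+ansible-playbook -vvv -i inventory/bifrost_inventory.py deploy-dynamic.yaml \
+    -e @/etc/bifrost/bifrost_global_vars
+ironic node-list                  # repeat until every node reports "active"
+```
+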
+If a server needs to be redeployed, you can reset it and deploy it again with:
+
+ 1. ironic node-set-provision-state <name_of_server> deleted
+ 2. Wait and check with ironic node-list until the server is in the **available** state
+ 3. Redeploy again: ansible-playbook -vvv -i inventory/bifrost_inventory.py deploy-dynamic.yaml -e @/etc/bifrost/bifrost_global_vars
+
+Deploy InfraCloud on the servers
+--------------------------------
+Once all the servers are in the **active** state, they can be accessed by ssh and the InfraCloud manifests can be applied on them to deploy a controller and a compute node.
+On each of them, follow these steps (a consolidated sketch follows the list):
+
+ 1. ssh from the bifrost controller to the node's external IP: ssh root@172.30.13.90
+ 2. cd /opt, clone releng project (git clone https://gerrit.opnfv.org/gerrit/releng)
+ 3. cd /opt/releng/prototypes/puppet-infracloud and copy hiera to the right folder (cp hiera/common_baremetal.yaml /var/lib/hiera/common.yaml)
+ 4. Install modules: ./install_modules.sh
+ 5. Apply puppet: puppet apply manifests/site.pp --modulepath=/etc/puppet/modules:/opt/releng/prototypes/puppet-infracloud/modules
+
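+The same, as a single session on each node (a sketch; the IP is the example above):
+
+```bash
+ssh root@172.30.13.90   # external IP of the controller or compute node
+cd /opt && git clone https://gerrit.opnfv.org/gerrit/releng
+cd /opt/releng/prototypes/puppet-infracloud
+cp hiera/common_baremetal.yaml /var/lib/hiera/common.yaml
+./install_modules.sh
+puppet apply manifests/site.pp \
+    --modulepath=/etc/puppet/modules:/opt/releng/prototypes/puppet-infracloud/modules
+```
+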
+Once this has been done on the controller and the compute node, you will have a working cloud. To start working with it, follow these steps (sketched after the list):
+
+ 1. Ensure that controller00.opnfvlocal resolves properly to the external IP (this is already done in the bifrost controller)
+ 2. Copy releng/prototypes/puppet-infracloud/creds/clouds.yaml to $HOME/.config/openstack/clouds.yaml
+ 3. Install python-openstackclient
+ 4. Specify the cloud you want to use: export OS_CLOUD=opnfvlocal
+ 5. Now you can start operating in your cloud with openstack-client: openstack flavor list
+
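+Sketched as a shell session (the pip install is an assumption; any install method for python-openstackclient works):
+
+```bash
+mkdir -p $HOME/.config/openstack
+cp releng/prototypes/puppet-infracloud/creds/clouds.yaml \
+    $HOME/.config/openstack/clouds.yaml
+pip install python-openstackclient   # assumed install method
+export OS_CLOUD=opnfvlocal
+openstack flavor list                # a listing confirms the cloud is operational
+```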
multiple => true,
}
+  # disable SELinux on RedHat family systems
+ if ($::osfamily == 'RedHat') {
+ class { 'selinux':
+ mode => 'disabled',
+ }
+ }
+
+ # update hosts
create_resources('host', hiera_hash('hosts'))
}
##############################################################################
-from SSHUtils import SSH_Connection
-
-
class ApexAdapter:
def __init__(self, installer_ip):
pass
def get_file_from_controller(self, origin, target, ip=None, options=None):
- pass
\ No newline at end of file
+ pass
##############################################################################
-from SSHUtils import SSH_Connection
-
-
class CompassAdapter:
def __init__(self, installer_ip):
pass
def get_file_from_controller(self, origin, target, ip=None, options=None):
- pass
\ No newline at end of file
+ pass
##############################################################################
# Copyright (c) 2016 Ericsson AB and others.
# Author: Jose Lausuch (jose.lausuch@ericsson.com)
+# George Paraskevopoulos (geopar@intracom-telecom.com)
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-
-from SSHUtils import SSH_Connection
+import SSHUtils as ssh_utils
import RelengLogger as rl
def __init__(self, installer_ip, user="root", password="r00tme"):
self.installer_ip = installer_ip
- self.user = user
- self.password = password
- self.connection = SSH_Connection(
- installer_ip, self.user, self.password, use_system_keys=False)
+ self.installer_user = user
+ self.installer_password = password
+ self.installer_connection = ssh_utils.get_ssh_client(
+ installer_ip,
+ self.installer_user,
+ password=self.installer_password)
self.logger = rl.Logger("Handler").getLogger()
- def runcmd_fuel_nodes(self):
- output, error = self.connection.run_remote_cmd('fuel nodes')
+ def runcmd_fuel_installer(self, cmd):
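+        # Run a command on the installer host over the persistent SSH
+        # connection; return stdout on success, the stderr lines on failure.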
+ _, stdout, stderr = (self
+ .installer_connection
+ .exec_command(cmd))
+ error = stderr.readlines()
if len(error) > 0:
- self.logger.error("error %s" % error)
+ self.logger.error("error %s" % ''.join(error))
return error
+ output = ''.join(stdout.readlines())
return output
+ def runcmd_fuel_nodes(self):
+ return self.runcmd_fuel_installer('fuel nodes')
+
def runcmd_fuel_env(self):
- output, error = self.connection.run_remote_cmd('fuel env')
- if len(error) > 0:
- self.logger.error("error %s" % error)
- return error
- return output
+ return self.runcmd_fuel_installer('fuel env')
def get_clusters(self):
environments = []
def get_file_from_installer(self, remote_path, local_path, options=None):
self.logger.debug("Fetching %s from %s" %
(remote_path, self.installer_ip))
- if self.connection.scp_get(local_path, remote_path) != 0:
- self.logger.error("SCP failed to retrieve the file.")
+ get_file_result = ssh_utils.get_file(self.installer_connection,
+ remote_path,
+ local_path)
+ if get_file_result is None:
+ self.logger.error("SFTP failed to retrieve the file.")
return 1
self.logger.info("%s successfully copied from Fuel to %s" %
(remote_path, local_path))
remote_path,
local_path,
ip=None,
+ user='root',
options=None):
if ip is None:
controllers = self.get_controller_ips(options=options)
else:
target_ip = ip
- fuel_dir = '/root/scp/'
- cmd = 'mkdir -p %s;rsync -Rav %s:%s %s' % (
- fuel_dir, target_ip, remote_path, fuel_dir)
- self.logger.info("Copying %s from %s to Fuel..." %
- (remote_path, target_ip))
- output, error = self.connection.run_remote_cmd(cmd)
- self.logger.debug("Copying files from Fuel to %s..." % local_path)
- self.get_file_from_installer(
- fuel_dir + remote_path, local_path, options)
- cmd = 'rm -r %s' % fuel_dir
- output, error = self.connection.run_remote_cmd(cmd)
+ installer_jumphost = {
+ 'ip': self.installer_ip,
+ 'username': self.installer_user,
+ 'password': self.installer_password
+ }
+ controller_conn = ssh_utils.get_ssh_client(
+ target_ip,
+ user,
+ jumphost=installer_jumphost)
+
+ self.logger.debug("Fetching %s from %s" %
+ (remote_path, target_ip))
+
+ get_file_result = ssh_utils.get_file(controller_conn,
+ remote_path,
+ local_path)
+ if get_file_result is None:
+ self.logger.error("SFTP failed to retrieve the file.")
+ return 1
self.logger.info("%s successfully copied from %s to %s" %
(remote_path, target_ip, local_path))
##############################################################################
-from SSHUtils import SSH_Connection
-
-
class JoidAdapter:
def __init__(self, installer_ip):
pass
def get_file_from_controller(self, origin, target, ip=None, options=None):
- pass
\ No newline at end of file
+ pass
# logger.debug("message to be shown with - DEBUG -")
import logging
-import os
class Logger:
##############################################################################
# Copyright (c) 2015 Ericsson AB and others.
-# Author: Jose Lausuch (jose.lausuch@ericsson.com)
+# Authors: George Paraskevopoulos (geopar@intracom-telecom.com)
+# Jose Lausuch (jose.lausuch@ericsson.com)
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
import paramiko
-from scp import SCPClient
-import time
import RelengLogger as rl
+import os
+logger = rl.Logger('SSHUtils').getLogger()
-class SSH_Connection:
-
- def __init__(self,
- host,
- user,
- password,
- use_system_keys=True,
- private_key=None,
- use_proxy=False,
- proxy_host=None,
- proxy_user=None,
- proxy_password=None,
- timeout=10):
- self.host = host
- self.user = user
- self.password = password
- self.use_system_keys = use_system_keys
- self.private_key = private_key
- self.use_proxy = use_proxy
- self.proxy_host = proxy_host
- self.proxy_user = proxy_user
- self.proxy_password = proxy_password
- self.timeout = timeout
- paramiko.util.log_to_file("paramiko.log")
- self.logger = rl.Logger("SSHUtils").getLogger()
-
- def connect(self):
- client = paramiko.SSHClient()
- if self.use_system_keys:
- client.load_system_host_keys()
- elif self.private_key:
- client.load_host_keys(self.private_key)
+
+def get_ssh_client(hostname, username, password=None, jumphost=None):
+ client = None
+ try:
+ if jumphost is None:
+ client = paramiko.SSHClient()
else:
- client.load_host_keys('/dev/null')
+ client = JumpHostHopClient()
+ client.configure_jump_host(jumphost['ip'],
+ jumphost['username'],
+ jumphost['password'])
+
+ if client is None:
+ raise Exception('Could not connect to client')
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+ client.connect(hostname,
+ username=username,
+ password=password)
+ return client
+ except Exception, e:
+ logger.error(e)
+ return None
- t = self.timeout
- proxy = None
- if self.use_proxy:
- proxy_command = 'ssh -o UserKnownHostsFile=/dev/null '
- '-o StrictHostKeyChecking=no %s@%s -W %s:%s' % (self.proxy_user,
- self.proxy_host,
- self.host, 22)
- proxy = paramiko.ProxyCommand(proxy_command)
- self.logger.debug("Proxy command: %s" % proxy_command)
- while t > 0:
- try:
- self.logger.debug(
- "Trying to stablish ssh connection to %s..." % self.host)
- client.connect(self.host,
- username=self.user,
- password=self.password,
- look_for_keys=True,
- sock=proxy,
- pkey=self.private_key,
- timeout=self.timeout)
- self.logger.debug("Successfully connected to %s!" % self.host)
- return client
- except:
- time.sleep(1)
- t -= 1
-
- if t == 0:
- return None
-
- def scp_put(self, local_path, remote_path):
- client = self.connect()
- if client:
- scp = SCPClient(client.get_transport())
- try:
- scp.put(local_path, remote_path)
- client.close()
- return 0
- except Exception, e:
- self.logger.error(e)
- client.close()
- return 1
- else:
- self.logger.error("Cannot stablish ssh connection.")
-
- def scp_get(self, local_path, remote_path):
- client = self.connect()
- if client:
- scp = SCPClient(client.get_transport())
- try:
- scp.get(remote_path, local_path)
- client.close()
- return 0
- except Exception, e:
- self.logger.error(e)
- client.close()
- return 1
- else:
- self.logger.error("Cannot stablish ssh connection.")
- return 1
-
- def run_remote_cmd(self, command):
- client = self.connect()
- if client:
- try:
- stdin, stdout, stderr = client.exec_command(command)
- out = ''
- for line in stdout.readlines():
- out += line
- err = stderr.readlines()
- client.close()
- return out, err
- except:
- client.close()
- return 1
- else:
- self.logger.error("Cannot stablish ssh connection.")
- return 1
+
+def get_file(ssh_conn, src, dest):
+ try:
+ sftp = ssh_conn.open_sftp()
+ sftp.get(src, dest)
+ return True
+ except Exception, e:
+ logger.error("Error [get_file(ssh_conn, '%s', '%s']: %s" %
+ (src, dest, e))
+ return None
+
+
+def put_file(ssh_conn, src, dest):
+ try:
+ sftp = ssh_conn.open_sftp()
+ sftp.put(src, dest)
+ return True
+ except Exception, e:
+ logger.error("Error [put_file(ssh_conn, '%s', '%s']: %s" %
+ (src, dest, e))
+ return None
+
+
+class JumpHostHopClient(paramiko.SSHClient):
+ '''
+ Connect to a remote server using a jumphost hop
+ '''
+ def __init__(self, *args, **kwargs):
+ self.logger = rl.Logger("JumpHostHopClient").getLogger()
+ self.jumphost_ssh = None
+ self.jumphost_transport = None
+ self.jumphost_channel = None
+ self.jumphost_ip = None
+ self.jumphost_ssh_key = None
+ self.local_ssh_key = os.path.join(os.getcwd(), 'id_rsa')
+ super(JumpHostHopClient, self).__init__(*args, **kwargs)
+
+ def configure_jump_host(self, jh_ip, jh_user, jh_pass,
+ jh_ssh_key='/root/.ssh/id_rsa'):
+ self.jumphost_ip = jh_ip
+ self.jumphost_ssh_key = jh_ssh_key
+ self.jumphost_ssh = paramiko.SSHClient()
+ self.jumphost_ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+ self.jumphost_ssh.connect(jh_ip,
+ username=jh_user,
+ password=jh_pass)
+ self.jumphost_transport = self.jumphost_ssh.get_transport()
+
+ def connect(self, hostname, port=22, username='root', password=None,
+ pkey=None, key_filename=None, timeout=None, allow_agent=True,
+ look_for_keys=True, compress=False, sock=None, gss_auth=False,
+ gss_kex=False, gss_deleg_creds=True, gss_host=None,
+ banner_timeout=None):
+ try:
+ if self.jumphost_ssh is None:
+ raise Exception('You must configure the jump '
+ 'host before calling connect')
+
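+            # Copy the jump host's private key to a local temp file so the
+            # onward connection can authenticate with it (the target is
+            # assumed to trust that key).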
+            get_file_res = get_file(self.jumphost_ssh,
+                                    self.jumphost_ssh_key,
+                                    self.local_ssh_key)
+ if get_file_res is None:
+                raise Exception('Couldn\'t fetch SSH key from jump host')
+ jumphost_key = (paramiko.RSAKey
+ .from_private_key_file(self.local_ssh_key))
+
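+            # Open a direct-tcpip channel through the jump host; the onward
+            # SSH connection is tunnelled over it.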
+ self.jumphost_channel = self.jumphost_transport.open_channel(
+ "direct-tcpip",
+                (hostname, port),
+ (self.jumphost_ip, 22))
+
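+            # Connect to the target through the tunnelled channel, then
+            # remove the temporary local copy of the key.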
+ self.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+ super(JumpHostHopClient, self).connect(hostname,
+ username=username,
+ pkey=jumphost_key,
+ sock=self.jumphost_channel)
+ os.remove(self.local_ssh_key)
+ except Exception, e:
+ self.logger.error(e)