Merge "Trigger test unit execution on verify + Published for jenkins"
authorMorgan Richomme <morgan.richomme@orange.com>
Fri, 28 Oct 2016 07:23:05 +0000 (07:23 +0000)
committerGerrit Code Review <gerrit@opnfv.org>
Fri, 28 Oct 2016 07:23:05 +0000 (07:23 +0000)
22 files changed:
jjb/apex/apex-deploy.sh
jjb/apex/apex-upload-artifact.sh
jjb/armband/armband-ci-jobs.yml
jjb/compass4nfv/compass-ci-jobs.yml
jjb/compass4nfv/compass-project-jobs.yml
jjb/daisy4nfv/daisy4nfv-verify-jobs.yml
jjb/functest/functest-ci-jobs.yml
jjb/opnfv/opnfv-docs.yml
jjb/opnfv/slave-params.yml
jjb/qtip/qtip-ci-jobs.yml
jjb/releng-macros.yaml
jjb/vswitchperf/vswitchperf.yml
jjb/yardstick/yardstick-ci-jobs.yml
prototypes/bifrost/scripts/destroy-env.sh
prototypes/puppet-infracloud/deploy_on_baremetal.md [new file with mode: 0644]
prototypes/puppet-infracloud/modules/opnfv/manifests/server.pp
utils/installer-adapter/ApexAdapter.py
utils/installer-adapter/CompassAdapter.py
utils/installer-adapter/FuelAdapter.py
utils/installer-adapter/JoidAdapter.py
utils/installer-adapter/RelengLogger.py
utils/installer-adapter/SSHUtils.py

index 72fa6f6..e21387a 100755 (executable)
@@ -3,7 +3,7 @@ set -o errexit
 set -o nounset
 set -o pipefail
 
-APEX_PKGS="common undercloud opendaylight-sfc onos"
+APEX_PKGS="common undercloud onos"
 IPV6_FLAG=False
 
 # log info to console
index 0dd112b..f54e4c5 100755 (executable)
@@ -49,13 +49,13 @@ echo "ISO Upload Complete!"
 RPM_INSTALL_PATH=$BUILD_DIRECTORY/noarch
 RPM_LIST=$RPM_INSTALL_PATH/$(basename $OPNFV_RPM_URL)
 VERSION_EXTENSION=$(echo $(basename $OPNFV_RPM_URL) | sed 's/opnfv-apex-//')
-for pkg in common undercloud opendaylight-sfc onos; do
+for pkg in common undercloud onos; do
     RPM_LIST+=" ${RPM_INSTALL_PATH}/opnfv-apex-${pkg}-${VERSION_EXTENSION}"
 done
 SRPM_INSTALL_PATH=$BUILD_DIRECTORY
 SRPM_LIST=$SRPM_INSTALL_PATH/$(basename $OPNFV_SRPM_URL)
 VERSION_EXTENSION=$(echo $(basename $OPNFV_SRPM_URL) | sed 's/opnfv-apex-//')
-for pkg in common undercloud opendaylight-sfc onos; do
+for pkg in common undercloud onos; do
     SRPM_LIST+=" ${SRPM_INSTALL_PATH}/opnfv-apex-${pkg}-${VERSION_EXTENSION}"
 done
 }
index d4fa5da..2122959 100644 (file)
             slave-label: arm-pod2
             installer: fuel
             <<: *colorado
+        - arm-pod3:
+            slave-label: arm-pod3
+            installer: fuel
+            <<: *colorado
 #--------------------------------
 #        master
 #--------------------------------
             slave-label: arm-pod2
             installer: fuel
             <<: *master
+        - arm-pod3:
+            slave-label: arm-pod3
+            installer: fuel
+            <<: *master
 #--------------------------------
 #       scenarios
 #--------------------------------
     name: 'fuel-os-odl_l2-sfc-noha-arm-pod2-colorado-trigger'
     triggers:
         - timed: ''
+#----------------------------------------------------------
+# Enea Armband POD 3 Triggers running against master branch
+#----------------------------------------------------------
+- trigger:
+    name: 'fuel-os-odl_l2-nofeature-ha-arm-pod3-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-nofeature-ha-arm-pod3-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l3-nofeature-ha-arm-pod3-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-bgpvpn-ha-arm-pod3-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-nofeature-noha-arm-pod3-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-sfc-ha-arm-pod3-master-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-sfc-noha-arm-pod3-master-trigger'
+    triggers:
+        - timed: ''
+#---------------------------------------------------------------
+# Enea Armband POD 3 Triggers running against colorado branch
+#---------------------------------------------------------------
+- trigger:
+    name: 'fuel-os-odl_l2-nofeature-ha-arm-pod3-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-nosdn-nofeature-ha-arm-pod3-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l3-nofeature-ha-arm-pod3-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-bgpvpn-ha-arm-pod3-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-nofeature-noha-arm-pod3-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-sfc-ha-arm-pod3-colorado-trigger'
+    triggers:
+        - timed: ''
+- trigger:
+    name: 'fuel-os-odl_l2-sfc-noha-arm-pod3-colorado-trigger'
+    triggers:
+        - timed: ''
index da882cd..eb91131 100644 (file)
             blocking-jobs:
                 - 'compass-os-.*?-{pod}-daily-.*?'
                 - 'compass-os-.*?-baremetal-daily-.*?'
-                - 'compass-verify-[^-]*'
+                - 'compass-verify-[^-]*-[^-]*'
             block-level: 'NODE'
 
     wrappers:
index 4d799af..5ce9064 100644 (file)
@@ -56,7 +56,7 @@
         - build-blocker:
             use-build-blocker: true
             blocking-jobs:
-                - 'compass-verify-[^-]*'
+                - 'compass-verify-[^-]*-[^-]*'
                 - 'compass-os-.*?-virtual-daily-.*?'
             block-level: 'NODE'
 
index e81e300..d2adafd 100644 (file)
@@ -1,9 +1,7 @@
 - project:
     name: 'daisy4nfv-verify-jobs'
 
-    project: 'daisy4nfv'
-
-    installer: 'daisy4nfv'
+    project: 'daisy'
 #####################################
 # branch definitions
 #####################################
             projects:
                 - name: 'daisy4nfv-verify-basic-{stream}'
                   current-parameters: false
+                  predefined-parameters: |
+                    GERRIT_BRANCH=$GERRIT_BRANCH
+                    GERRIT_REFSPEC=$GERRIT_REFSPEC
+                    GERRIT_CHANGE_NUMBER=$GERRIT_CHANGE_NUMBER
+                    GERRIT_CHANGE_COMMIT_MESSAGE=$GERRIT_CHANGE_COMMIT_MESSAGE
                   node-parameters: false
                   kill-phase-on: FAILURE
                   abort-all-job: true
             projects:
                 - name: 'daisy4nfv-verify-build-{stream}'
                   current-parameters: false
+                  predefined-parameters: |
+                    GERRIT_BRANCH=$GERRIT_BRANCH
+                    GERRIT_REFSPEC=$GERRIT_REFSPEC
+                    GERRIT_CHANGE_NUMBER=$GERRIT_CHANGE_NUMBER
+                    GERRIT_CHANGE_COMMIT_MESSAGE=$GERRIT_CHANGE_COMMIT_MESSAGE
                   node-parameters: false
                   kill-phase-on: FAILURE
                   abort-all-job: true
             projects:
                 - name: 'daisy4nfv-verify-deploy-virtual-{stream}'
                   current-parameters: false
+                  predefined-parameters: |
+                    GERRIT_BRANCH=$GERRIT_BRANCH
+                    GERRIT_REFSPEC=$GERRIT_REFSPEC
+                    GERRIT_CHANGE_NUMBER=$GERRIT_CHANGE_NUMBER
+                    GERRIT_CHANGE_COMMIT_MESSAGE=$GERRIT_CHANGE_COMMIT_MESSAGE
                   node-parameters: false
                   kill-phase-on: FAILURE
                   abort-all-job: true
             projects:
                 - name: 'daisy4nfv-verify-smoke-test-{stream}'
                   current-parameters: false
+                  predefined-parameters: |
+                    GERRIT_BRANCH=$GERRIT_BRANCH
+                    GERRIT_REFSPEC=$GERRIT_REFSPEC
+                    GERRIT_CHANGE_NUMBER=$GERRIT_CHANGE_NUMBER
+                    GERRIT_CHANGE_COMMIT_MESSAGE=$GERRIT_CHANGE_COMMIT_MESSAGE
                   node-parameters: false
                   kill-phase-on: FAILURE
                   abort-all-job: true
 # builder macros
 #####################################
 - builder:
-    name: 'daisy4nfv-verify-basic-macro'
+    name: 'daisy-verify-basic-macro'
     builders:
         - shell:
             !include-raw: ./daisy4nfv-basic.sh
 
 - builder:
-    name: 'daisy4nfv-verify-build-macro'
+    name: 'daisy-verify-build-macro'
     builders:
         - shell:
             !include-raw: ./daisy4nfv-build.sh
 
 - builder:
-    name: 'daisy4nfv-verify-deploy-virtual-macro'
+    name: 'daisy-verify-deploy-virtual-macro'
     builders:
         - shell:
             !include-raw: ./daisy4nfv-virtual-deploy.sh
 
 - builder:
-    name: 'daisy4nfv-verify-smoke-test-macro'
+    name: 'daisy-verify-smoke-test-macro'
     builders:
         - shell: |
             #!/bin/bash
index 3487793..afeb1f9 100644 (file)
             slave-label: '{pod}'
             installer: fuel
             <<: *master
+        - arm-pod3:
+            slave-label: '{pod}'
+            installer: fuel
+            <<: *master
         - zte-pod1:
             slave-label: '{pod}'
             installer: fuel
             slave-label: '{pod}'
             installer: fuel
             <<: *colorado
+        - arm-pod3:
+            slave-label: '{pod}'
+            installer: fuel
+            <<: *colorado
 # PODs for verify jobs triggered by each patch upload
         - ool-virtual1:
             slave-label: '{pod}'
index 0ac8aa7..307c1db 100644 (file)
             name: GS_URL
             default: '$GS_BASE{gs-pathname}'
             description: "Directory where the build artifact will be located upon the completion of the build."
+        - string:
+            name: GERRIT_REFSPEC
+            default: 'refs/heads/{branch}'
+            description: "JJB configured GERRIT_REFSPEC parameter"
 
     scm:
         - gerrit-trigger-scm:
                 - change-merged-event
                 - comment-added-contains-event:
                     comment-contains-value: 'remerge'
+                - comment-added-contains-event:
+                    comment-contains-value: 'rebuild docs'
             projects:
                 - project-compare-type: 'ANT'
                   project-pattern: '*'
index 7eca41a..b46960f 100644 (file)
             name: LAB_CONFIG_URL
             default: ssh://jenkins-enea@gerrit.opnfv.org:29418/securedlab
             description: 'Base URI to the configuration directory'
+- parameter:
+    name: 'arm-pod3-defaults'
+    parameters:
+        - node:
+            name: SLAVE_NAME
+            description: 'Slave name on Jenkins'
+            allowed-slaves:
+                - arm-pod3
+            default-slaves:
+                - arm-pod3
+        - string:
+            name: GIT_BASE
+            default: https://gerrit.opnfv.org/gerrit/$PROJECT
+            description: 'Git URL to use on this Jenkins Slave'
+        - string:
+            name: LAB_CONFIG_URL
+            default: ssh://jenkins-enea@gerrit.opnfv.org:29418/securedlab
+            description: 'Base URI to the configuration directory'
 - parameter:
     name: 'intel-virtual6-defaults'
     parameters:
index d0d6b47..cca8cee 100644 (file)
             installer: fuel
             auto-trigger-name: 'qtip-daily-zte-pod2-trigger'
             <<: *master
+        - zte-pod3:
+            installer: fuel
+            auto-trigger-name: 'qtip-daily-zte-pod3-trigger'
+            <<: *master
 
 #--------------------------------
     jobs:
 - trigger:
     name: 'qtip-daily-zte-pod2-trigger'
     triggers:
-        - timed: '0 5 * * *'
+        - timed: '0 7 * * *'
 
+- trigger:
+    name: 'qtip-daily-zte-pod3-trigger'
+    triggers:
+        - timed: '0 1 * * *'
index d2dc1d1..2ebd775 100644 (file)
 
             mkdir -p upload
             mv docs_output "$local_path"
-            gsutil -m cp -r "$local_path" "gs://$GS_URL"
+            gsutil -m cp -r "$local_path" "gs://$gs_path"
 
             gsutil -m setmeta \
                 -h "Content-Type:text/html" \
index 363423d..3f7f6bf 100644 (file)
             branch: '{stream}'
             gs-pathname: ''
             disabled: false
+            slave-label: 'opnfv-build-ubuntu'
         - colorado:
             branch: 'stable/{stream}'
             gs-pathname: '/{stream}'
             disabled: false
+            slave-label: 'intel-pod3'
 
 - job-template:
 
@@ -72,7 +74,7 @@
             project: '{project}'
         - gerrit-parameter:
             branch: '{branch}'
-        - 'opnfv-build-ubuntu-defaults'
+        - '{slave-label}-defaults'
 
     scm:
         - gerrit-trigger-scm:
             make
             # run basic sanity test
             make sanity
+            cd ../ci
+            ./build-vsperf.sh verify
 
 - job-template:
     name: 'vswitchperf-merge-{stream}'
             project: '{project}'
         - gerrit-parameter:
             branch: '{branch}'
-        - 'opnfv-build-ubuntu-defaults'
+        - '{slave-label}-defaults'
 
     scm:
         - gerrit-trigger-scm:
             cd src
             make clobber
             make
+            cd ../ci
+            ./build-vsperf.sh merge
index 962ea47..9d80e42 100644 (file)
             installer: fuel
             auto-trigger-name: 'daily-trigger-disabled'
             <<: *colorado
+        - arm-pod3:
+            slave-label: '{pod}'
+            installer: fuel
+            auto-trigger-name: 'daily-trigger-disabled'
+            <<: *master
+        - arm-pod3:
+            slave-label: '{pod}'
+            installer: fuel
+            auto-trigger-name: 'daily-trigger-disabled'
+            <<: *colorado
         - orange-pod2:
             slave-label: '{pod}'
             installer: joid
     parameters:
         - string:
             name: YARDSTICK_DB_BACKEND
-            default: ''
+            default: '-i 104.197.68.199:8086'
             description: 'Arguments to use in order to choose the backend DB'
 - parameter:
     name: 'yardstick-params-armband-baremetal'
     parameters:
         - string:
             name: YARDSTICK_DB_BACKEND
-            default: ''
+            default: '-i 104.197.68.199:8086'
             description: 'Arguments to use in order to choose the backend DB'
 - parameter:
     name: 'yardstick-params-joid-baremetal'
     parameters:
         - string:
             name: YARDSTICK_DB_BACKEND
-            default: ''
+            default: '-i 104.197.68.199:8086'
             description: 'Arguments to use in order to choose the backend DB'
 - parameter:
     name: 'yardstick-params-intel-pod8'
     parameters:
         - string:
             name: YARDSTICK_DB_BACKEND
-            default: ''
+            default: '-i 104.197.68.199:8086'
             description: 'Arguments to use in order to choose the backend DB'
 - parameter:
     name: 'yardstick-params-lf-pod1'
             default: '-i 104.197.68.199:8086'
             description: 'Arguments to use in order to choose the backend DB'
 
+- parameter:
+    name: 'yardstick-params-arm-pod3'
+    parameters:
+        - string:
+            name: YARDSTICK_DB_BACKEND
+            default: '-i 104.197.68.199:8086'
+            description: 'Arguments to use in order to choose the backend DB'
+
 - parameter:
     name: 'yardstick-params-virtual'
     parameters:
         - string:
             name: YARDSTICK_DB_BACKEND
-            default: ''
+            default: '-i 104.197.68.199:8086'
             description: 'Arguments to use in order to choose the backend DB'
 
 - parameter:
     parameters:
         - string:
             name: YARDSTICK_DB_BACKEND
-            default: ''
+            default: '-i 104.197.68.199:8086'
             description: 'Arguments to use in order to choose the backend DB'
 
 - parameter:
     parameters:
         - string:
             name: YARDSTICK_DB_BACKEND
-            default: ''
+            default: '-i 104.197.68.199:8086'
             description: 'Arguments to use in order to choose the backend DB'
 
 - parameter:
     parameters:
         - string:
             name: YARDSTICK_DB_BACKEND
-            default: ''
+            default: '-i 104.197.68.199:8086'
             description: 'Arguments to use in order to choose the backend DB'
 
 #######################
index 6746457..cdc55df 100755 (executable)
@@ -46,7 +46,7 @@ fi
 rm -rf /var/lib/libvirt/images/*.qcow2
 
 echo "restarting services"
-service dnsmasq restart
+service dnsmasq restart || true
 service libvirtd restart
 service ironic-api restart
 service ironic-conductor start
diff --git a/prototypes/puppet-infracloud/deploy_on_baremetal.md b/prototypes/puppet-infracloud/deploy_on_baremetal.md
new file mode 100644 (file)
index 0000000..334dff4
--- /dev/null
@@ -0,0 +1,57 @@
+How to deploy Infra Cloud on baremetal
+==================================
+
+Install bifrost controller
+--------------------------
+First step for deploying Infra Cloud is to install the bifrost controller. This can be virtualized, doesn't need to be on baremetal.
+To achieve that, first we can create a virtual machine with libvirt, with the proper network setup. This VM needs to share one physical interface (the PXE boot one) with the servers used for the controller and compute nodes.
+Please follow documentation on: [https://git.openstack.org/cgit/openstack/bifrost/tree/tools/virsh_dev_env/README.md](https://git.openstack.org/cgit/openstack/bifrost/tree/tools/virsh_dev_env/README.md) to get sample templates and instructions for creating the bifrost VM.
+
+Once the **baremetal** VM is finished, you can log in via ssh and start installing bifrost there. To proceed, follow these steps:
+
+ 1. Change to root user, install git
+ 2. Clone releng project (cd /opt, git clone https://gerrit.opnfv.org/gerrit/releng)
+ 3. cd /opt/releng/prototypes/puppet-infracloud
+ 4. Copy hiera to the right folder (cp hiera/common_baremetal.yaml /var/lib/hiera/common.yaml)
+ 5. Ensure hostname is properly set ( hostnamectl set-hostname baremetal.opnfvlocal , hostname -f )
+ 6. Install puppet and modules ( ./install_puppet.sh , ./install_modules.sh )
+ 7. Apply puppet to install bifrost (puppet apply manifests/site.pp --modulepath=/etc/puppet/modules:/opt/releng/prototypes/puppet-infracloud/modules)
+
+ With these steps you will have a bifrost controller up and running.
+
+Deploy baremetal servers
+--------------------------
+Once you have bifrost controller ready, you need to use it to start deployment of the baremetal servers.
+On the same bifrost VM, follow these steps:
+
+ 1. Source bifrost env vars: source /opt/stack/bifrost/env-vars
+ 2. Export baremetal servers inventory: export BIFROST_INVENTORY_SOURCE=/opt/stack/baremetal.json
+ 3. Enroll the servers: ansible-playbook -vvv -i inventory/bifrost_inventory.py enroll-dynamic.yaml -e @/etc/bifrost/bifrost_global_vars
+ 4. Deploy the servers:  ansible-playbook -vvv -i inventory/bifrost_inventory.py deploy-dynamic.yaml -e @/etc/bifrost/bifrost_global_vars
+ 5. Wait until they are on **active** state, check it with: ironic node-list
+
+If a server needs to be redeployed, you can reset it and redeploy it again with:
+
+ 1. ironic node-set-provision-state <name_of_server> deleted
+ 2. Wait and check with ironic node-list until the server is on **available** state
+ 3. Redeploy again: ansible-playbook -vvv -i inventory/bifrost_inventory.py deploy-dynamic.yaml -e @/etc/bifrost/bifrost_global_vars
+
+Deploy InfraCloud on the servers
+--------------------------
+Once all the servers are on **active** state, they can be accessed by ssh and InfraCloud manifests can be deployed on them, to properly deploy a controller and a compute.
+On each of them, follow these steps:
+
+ 1. ssh from the bifrost controller to their external ips: ssh root@172.30.13.90
+ 2. cd /opt, clone releng project (git clone https://gerrit.opnfv.org/gerrit/releng)
+ 3. Copy hiera to the right folder ( cp hiera/common_baremetal.yaml /var/lib/hiera/common.yaml)
+ 4. Install modules: ./install_modules.sh
+ 5. Apply puppet: puppet apply manifests/site.pp --modulepath=/etc/puppet/modules:/opt/releng/prototypes/puppet-infracloud/modules
+
+Once this has been done on the controller and compute, you will have a working cloud. To start working with it, follow these steps:
+
+ 1. Ensure that controller00.opnfvlocal resolves properly to the external IP (this is already done in the bifrost controller)
+ 2. Copy releng/prototypes/puppet-infracloud/creds/clouds.yaml to $HOME/.config/openstack/clouds.yaml
+ 3. Install python-openstackclient
+ 4. Specify the cloud you want to use: export OS_CLOUD=opnfvlocal
+ 5. Now you can start operating in your cloud with openstack-client: openstack flavor list
+
index a1e7d5d..6b608a7 100644 (file)
@@ -239,5 +239,13 @@ class opnfv::server (
     multiple => true,
   }
 
+  # disable selinux in case of RHEL
+  if ($::osfamily == 'RedHat') {
+    class { 'selinux':
+      mode => 'disabled',
+    }
+  }
+
+  # update hosts
   create_resources('host', hiera_hash('hosts'))
 }
index bf451f3..17a27b1 100644 (file)
@@ -8,9 +8,6 @@
 ##############################################################################
 
 
-from SSHUtils import SSH_Connection
-
-
 class ApexAdapter:
 
     def __init__(self, installer_ip):
@@ -32,4 +29,4 @@ class ApexAdapter:
         pass
 
     def get_file_from_controller(self, origin, target, ip=None, options=None):
-        pass
\ No newline at end of file
+        pass
index b40a8d7..47cbc64 100644 (file)
@@ -8,9 +8,6 @@
 ##############################################################################
 
 
-from SSHUtils import SSH_Connection
-
-
 class CompassAdapter:
 
     def __init__(self, installer_ip):
@@ -32,4 +29,4 @@ class CompassAdapter:
         pass
 
     def get_file_from_controller(self, origin, target, ip=None, options=None):
-        pass
\ No newline at end of file
+        pass
index 15f0e92..672fd51 100644 (file)
@@ -1,14 +1,14 @@
 ##############################################################################
 # Copyright (c) 2016 Ericsson AB and others.
 # Author: Jose Lausuch (jose.lausuch@ericsson.com)
+#         George Paraskevopoulos (geopar@intracom-telecom.com)
 # All rights reserved. This program and the accompanying materials
 # are made available under the terms of the Apache License, Version 2.0
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 
-
-from SSHUtils import SSH_Connection
+import SSHUtils as ssh_utils
 import RelengLogger as rl
 
 
@@ -16,25 +16,30 @@ class FuelAdapter:
 
     def __init__(self, installer_ip, user="root", password="r00tme"):
         self.installer_ip = installer_ip
-        self.user = user
-        self.password = password
-        self.connection = SSH_Connection(
-            installer_ip, self.user, self.password, use_system_keys=False)
+        self.installer_user = user
+        self.installer_password = password
+        self.installer_connection = ssh_utils.get_ssh_client(
+            installer_ip,
+            self.installer_user,
+            password=self.installer_password)
         self.logger = rl.Logger("Handler").getLogger()
 
-    def runcmd_fuel_nodes(self):
-        output, error = self.connection.run_remote_cmd('fuel nodes')
+    def runcmd_fuel_installer(self, cmd):
+        _, stdout, stderr = (self
+                             .installer_connection
+                             .exec_command(cmd))
+        error = stderr.readlines()
         if len(error) > 0:
-            self.logger.error("error %s" % error)
+            self.logger.error("error %s" % ''.join(error))
             return error
+        output = ''.join(stdout.readlines())
         return output
 
+    def runcmd_fuel_nodes(self):
+        return self.runcmd_fuel_installer('fuel nodes')
+
     def runcmd_fuel_env(self):
-        output, error = self.connection.run_remote_cmd('fuel env')
-        if len(error) > 0:
-            self.logger.error("error %s" % error)
-            return error
-        return output
+        return self.runcmd_fuel_installer('fuel env')
 
     def get_clusters(self):
         environments = []
@@ -183,8 +188,11 @@ class FuelAdapter:
     def get_file_from_installer(self, remote_path, local_path, options=None):
         self.logger.debug("Fetching %s from %s" %
                           (remote_path, self.installer_ip))
-        if self.connection.scp_get(local_path, remote_path) != 0:
-            self.logger.error("SCP failed to retrieve the file.")
+        get_file_result = ssh_utils.get_file(self.installer_connection,
+                                             remote_path,
+                                             local_path)
+        if get_file_result is None:
+            self.logger.error("SFTP failed to retrieve the file.")
             return 1
         self.logger.info("%s successfully copied from Fuel to %s" %
                          (remote_path, local_path))
@@ -193,6 +201,7 @@ class FuelAdapter:
                                  remote_path,
                                  local_path,
                                  ip=None,
+                                 user='root',
                                  options=None):
         if ip is None:
             controllers = self.get_controller_ips(options=options)
@@ -204,16 +213,24 @@ class FuelAdapter:
         else:
             target_ip = ip
 
-        fuel_dir = '/root/scp/'
-        cmd = 'mkdir -p %s;rsync -Rav %s:%s %s' % (
-            fuel_dir, target_ip, remote_path, fuel_dir)
-        self.logger.info("Copying %s from %s to Fuel..." %
-                         (remote_path, target_ip))
-        output, error = self.connection.run_remote_cmd(cmd)
-        self.logger.debug("Copying files from Fuel to %s..." % local_path)
-        self.get_file_from_installer(
-            fuel_dir + remote_path, local_path, options)
-        cmd = 'rm -r %s' % fuel_dir
-        output, error = self.connection.run_remote_cmd(cmd)
+        installer_jumphost = {
+            'ip': self.installer_ip,
+            'username': self.installer_user,
+            'password': self.installer_password
+        }
+        controller_conn = ssh_utils.get_ssh_client(
+            target_ip,
+            user,
+            jumphost=installer_jumphost)
+
+        self.logger.debug("Fetching %s from %s" %
+                          (remote_path, target_ip))
+
+        get_file_result = ssh_utils.get_file(controller_conn,
+                                             remote_path,
+                                             local_path)
+        if get_file_result is None:
+            self.logger.error("SFTP failed to retrieve the file.")
+            return 1
         self.logger.info("%s successfully copied from %s to %s" %
                          (remote_path, target_ip, local_path))
index e78ca0f..be8c2eb 100644 (file)
@@ -8,9 +8,6 @@
 ##############################################################################
 
 
-from SSHUtils import SSH_Connection
-
-
 class JoidAdapter:
 
     def __init__(self, installer_ip):
@@ -32,4 +29,4 @@ class JoidAdapter:
         pass
 
     def get_file_from_controller(self, origin, target, ip=None, options=None):
-        pass
\ No newline at end of file
+        pass
index b38e780..6fa4ef2 100644 (file)
@@ -22,7 +22,6 @@
 #  logger.debug("message to be shown with - DEBUG -")
 
 import logging
-import os
 
 
 class Logger:
index 9c92a3b..c938886 100644 (file)
@@ -1,6 +1,7 @@
 ##############################################################################
 # Copyright (c) 2015 Ericsson AB and others.
-# Author: Jose Lausuch (jose.lausuch@ericsson.com)
+# Authors: George Paraskevopoulos (geopar@intracom-telecom.com)
+#          Jose Lausuch (jose.lausuch@ericsson.com)
 # All rights reserved. This program and the accompanying materials
 # are made available under the terms of the Apache License, Version 2.0
 # which accompanies this distribution, and is available at
 
 
 import paramiko
-from scp import SCPClient
-import time
 import RelengLogger as rl
+import os
 
+logger = rl.Logger('SSHUtils').getLogger()
 
-class SSH_Connection:
-
-    def __init__(self,
-                 host,
-                 user,
-                 password,
-                 use_system_keys=True,
-                 private_key=None,
-                 use_proxy=False,
-                 proxy_host=None,
-                 proxy_user=None,
-                 proxy_password=None,
-                 timeout=10):
-        self.host = host
-        self.user = user
-        self.password = password
-        self.use_system_keys = use_system_keys
-        self.private_key = private_key
-        self.use_proxy = use_proxy
-        self.proxy_host = proxy_host
-        self.proxy_user = proxy_user
-        self.proxy_password = proxy_password
-        self.timeout = timeout
-        paramiko.util.log_to_file("paramiko.log")
-        self.logger = rl.Logger("SSHUtils").getLogger()
-
-    def connect(self):
-        client = paramiko.SSHClient()
-        if self.use_system_keys:
-            client.load_system_host_keys()
-        elif self.private_key:
-            client.load_host_keys(self.private_key)
+
def get_ssh_client(hostname, username, password=None, jumphost=None):
    """Return a connected paramiko SSH client for *hostname*, or None.

    If *jumphost* is given (a dict with 'ip', 'username' and 'password'
    keys), the connection is tunnelled through that host using
    JumpHostHopClient; otherwise a direct paramiko.SSHClient is used.
    Errors are logged and reported as a None return rather than raised
    (best-effort contract shared by this module's helpers).
    """
    client = None
    try:
        if jumphost is None:
            client = paramiko.SSHClient()
        else:
            client = JumpHostHopClient()
            client.configure_jump_host(jumphost['ip'],
                                       jumphost['username'],
                                       jumphost['password'])

        # Defensive guard kept from the original implementation.
        if client is None:
            raise Exception('Could not connect to client')

        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        client.connect(hostname,
                       username=username,
                       password=password)
        return client
    except Exception as e:  # 'as' form works on Python 2.6+ and 3.x
        logger.error(e)
        return None
 
-        t = self.timeout
-        proxy = None
-        if self.use_proxy:
-            proxy_command = 'ssh -o UserKnownHostsFile=/dev/null '
-            '-o StrictHostKeyChecking=no %s@%s -W %s:%s' % (self.proxy_user,
-                                                            self.proxy_host,
-                                                            self.host, 22)
-            proxy = paramiko.ProxyCommand(proxy_command)
-            self.logger.debug("Proxy command: %s" % proxy_command)
-        while t > 0:
-            try:
-                self.logger.debug(
-                    "Trying to stablish ssh connection to %s..." % self.host)
-                client.connect(self.host,
-                               username=self.user,
-                               password=self.password,
-                               look_for_keys=True,
-                               sock=proxy,
-                               pkey=self.private_key,
-                               timeout=self.timeout)
-                self.logger.debug("Successfully connected to %s!" % self.host)
-                return client
-            except:
-                time.sleep(1)
-                t -= 1
-
-        if t == 0:
-            return None
-
-    def scp_put(self, local_path, remote_path):
-        client = self.connect()
-        if client:
-            scp = SCPClient(client.get_transport())
-            try:
-                scp.put(local_path, remote_path)
-                client.close()
-                return 0
-            except Exception, e:
-                self.logger.error(e)
-                client.close()
-                return 1
-        else:
-            self.logger.error("Cannot stablish ssh connection.")
-
-    def scp_get(self, local_path, remote_path):
-        client = self.connect()
-        if client:
-            scp = SCPClient(client.get_transport())
-            try:
-                scp.get(remote_path, local_path)
-                client.close()
-                return 0
-            except Exception, e:
-                self.logger.error(e)
-                client.close()
-                return 1
-        else:
-            self.logger.error("Cannot stablish ssh connection.")
-            return 1
-
-    def run_remote_cmd(self, command):
-        client = self.connect()
-        if client:
-            try:
-                stdin, stdout, stderr = client.exec_command(command)
-                out = ''
-                for line in stdout.readlines():
-                    out += line
-                err = stderr.readlines()
-                client.close()
-                return out, err
-            except:
-                client.close()
-                return 1
-        else:
-            self.logger.error("Cannot stablish ssh connection.")
-            return 1
+
def get_file(ssh_conn, src, dest):
    """Download remote *src* to local *dest* over *ssh_conn* via SFTP.

    Returns True on success; on failure the error is logged and None is
    returned (best-effort contract). The SFTP channel is always closed,
    which the original version leaked.
    """
    sftp = None
    try:
        sftp = ssh_conn.open_sftp()
        sftp.get(src, dest)
        return True
    except Exception as e:  # py2/py3-compatible except syntax
        # Fixed mismatched bracket in the original format string.
        logger.error("Error [get_file(ssh_conn, '%s', '%s')]: %s" %
                     (src, dest, e))
        return None
    finally:
        if sftp is not None:
            sftp.close()
+
+
def put_file(ssh_conn, src, dest):
    """Upload local *src* to remote *dest* over *ssh_conn* via SFTP.

    Returns True on success; on failure the error is logged and None is
    returned (best-effort contract). The SFTP channel is always closed,
    which the original version leaked.
    """
    sftp = None
    try:
        sftp = ssh_conn.open_sftp()
        sftp.put(src, dest)
        return True
    except Exception as e:  # py2/py3-compatible except syntax
        # Fixed mismatched bracket in the original format string.
        logger.error("Error [put_file(ssh_conn, '%s', '%s')]: %s" %
                     (src, dest, e))
        return None
    finally:
        if sftp is not None:
            sftp.close()
+
+
class JumpHostHopClient(paramiko.SSHClient):
    '''
    SSHClient that reaches a target host through a jumphost hop:
    it first connects to the jumphost with username/password, copies the
    jumphost's private SSH key to a local temp file, then opens a
    direct-tcpip channel through the jumphost which is used as the
    socket for the final, key-authenticated hop.
    '''
    def __init__(self, *args, **kwargs):
        self.logger = rl.Logger("JumpHostHopClient").getLogger()
        self.jumphost_ssh = None
        self.jumphost_transport = None
        self.jumphost_channel = None
        self.jumphost_ip = None
        self.jumphost_ssh_key = None
        # Temporary local copy of the jumphost's private key; removed
        # again after a successful connect().
        self.local_ssh_key = os.path.join(os.getcwd(), 'id_rsa')
        super(JumpHostHopClient, self).__init__(*args, **kwargs)

    def configure_jump_host(self, jh_ip, jh_user, jh_pass,
                            jh_ssh_key='/root/.ssh/id_rsa'):
        """Open the password-authenticated session to the jumphost.

        Must be called before connect(); *jh_ssh_key* is the path, on
        the jumphost, of the private key used for the final hop.
        """
        self.jumphost_ip = jh_ip
        self.jumphost_ssh_key = jh_ssh_key
        self.jumphost_ssh = paramiko.SSHClient()
        self.jumphost_ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        self.jumphost_ssh.connect(jh_ip,
                                  username=jh_user,
                                  password=jh_pass)
        self.jumphost_transport = self.jumphost_ssh.get_transport()

    def connect(self, hostname, port=22, username='root', password=None,
                pkey=None, key_filename=None, timeout=None, allow_agent=True,
                look_for_keys=True, compress=False, sock=None, gss_auth=False,
                gss_kex=False, gss_deleg_creds=True, gss_host=None,
                banner_timeout=None):
        """Connect to *hostname* through the configured jumphost.

        The final hop authenticates with the jumphost's RSA key, fetched
        over SFTP first. Failures are logged rather than raised,
        matching get_ssh_client's best-effort contract.
        """
        try:
            if self.jumphost_ssh is None:
                raise Exception('You must configure the jump '
                                'host before calling connect')

            # Pull the jumphost's private key down so paramiko can load it.
            get_file_res = get_file(self.jumphost_ssh,
                                    self.jumphost_ssh_key,
                                    self.local_ssh_key)
            if get_file_res is None:
                # Typo fixed in the original message ("Could't").
                raise Exception("Couldn't fetch SSH key from jump host")
            jumphost_key = (paramiko.RSAKey
                            .from_private_key_file(self.local_ssh_key))

            # Tunnel: a direct-tcpip channel through the jumphost acts as
            # the socket for the final SSH session.
            self.jumphost_channel = self.jumphost_transport.open_channel(
                "direct-tcpip",
                (hostname, 22),
                (self.jumphost_ip, 22))

            self.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            super(JumpHostHopClient, self).connect(hostname,
                                                   username=username,
                                                   pkey=jumphost_key,
                                                   sock=self.jumphost_channel)
            os.remove(self.local_ssh_key)
        except Exception as e:  # py2/py3-compatible except syntax
            self.logger.error(e)