Merge "Disable notebook preview job"
author    Ryota Mibu <r-mibu@cq.jp.nec.com>
          Tue, 9 Jan 2018 14:13:35 +0000 (14:13 +0000)
committer Gerrit Code Review <gerrit@opnfv.org>
          Tue, 9 Jan 2018 14:13:35 +0000 (14:13 +0000)
INFO.yaml [new file with mode: 0644]
jjb/container4nfv/container4nfv-project.yml
jjb/dovetail/dovetail-run.sh
jjb/releng/opnfv-docker.yml
modules/opnfv/deployment/daisy/__init__.py [new file with mode: 0644]
modules/opnfv/deployment/daisy/adapter.py [new file with mode: 0644]
modules/opnfv/deployment/factory.py
modules/opnfv/utils/ssh_utils.py
utils/create_pod_file.py

diff --git a/INFO.yaml b/INFO.yaml
new file mode 100644 (file)
index 0000000..3bb3cbe
--- /dev/null
+++ b/INFO.yaml
@@ -0,0 +1,125 @@
+---
+project: 'Release Engineering (Releng)'
+project_creation_date: '2015-06-14'
+project_category: 'Integration & Testing'
+lifecycle_state: 'Incubation'
+project_lead: &opnfv_releng_ptl
+    name: 'Fatih Degirmenci'
+    email: 'fatih.degirmenci@ericsson.com'
+    id: 'fdegir'
+    company: 'Ericsson'
+    timezone: 'Europe/Stockholm'
+primary_contact: *opnfv_releng_ptl
+issue_tracking:
+    type: 'jira'
+    url: 'https://jira.opnfv.org/projects/RELENG'
+    key: 'RELENG'
+mailing_list:
+    type: 'mailman2'
+    url: 'opnfv-tech-discuss@lists.opnfv.org'
+    tag: '[releng]'
+realtime_discussion:
+    type: 'irc'
+    server: 'freenode.net'
+    channel: '#lf-releng'
+meetings:
+    - type: 'gotomeeting+irc'
+      agenda: 'https://wiki.opnfv.org/display/INF/Infra+Working+Group'
+      url: 'https://global.gotomeeting.com/join/819733085'
+      server: 'freenode.net'
+      channel: '#opnfv-meeting'
+      repeats: 'weekly'
+      time: '16:00 UTC'
+repositories:
+    - 'releng'
+    - 'releng-anteater'
+    - 'releng-testresults'
+    - 'releng-utils'
+    - 'releng-xci'
+committers:
+    - <<: *opnfv_releng_ptl
+    - name: 'Aric Gardner'
+      email: 'agardner@linuxfoundation.org'
+      company: 'The Linux Foundation'
+      id: 'agardner'
+      timezone: 'Canada/Atlantic'
+    - name: 'Tim Rozet'
+      email: 'trozet@redhat.com'
+      company: 'Red Hat'
+      id: 'trozet'
+      timezone: 'America/New_York'
+    - name: 'Morgan Richomme'
+      email: 'morgan.richomme@orange.com'
+      company: 'Orange'
+      id: 'mrichomme'
+      timezone: 'Europe/Paris'
+    - name: 'Jose Lausuch'
+      company: 'SUSE'
+      email: 'jose.lausuch@ericsson.com'
+      id: 'jose.lausuch'
+      timezone: 'Europe/Madrid'
+    - name: 'Ryota Mibu'
+      company: 'NEC'
+      email: 'r-mibu@cq.jp.nec.com'
+      id: 'r-mibu'
+      timezone: 'Asia/Tokyo'
+    - name: 'Mei Mei'
+      company: 'Huawei'
+      email: 'meimei@huawei.com'
+      id: 'm00133142'
+      timezone: 'Asia/Shanghai'
+    - name: 'Trevor Bramwell'
+      company: 'The Linux Foundation'
+      email: 'tbramwell@linuxfoundation.org'
+      id: 'bramwelt'
+      timezone: 'America/Los_Angeles'
+    - name: 'Serena Feng'
+      company: 'ZTE'
+      email: 'feng.xiaowei@zte.com.cn'
+      id: 'SerenaFeng'
+      timezone: 'Asia/Shanghai'
+    - name: 'Yolanda Robla Mota'
+      company: 'Red Hat'
+      email: 'yroblamo@redhat.com'
+      id: 'yrobla'
+      timezone: 'America/New_York'
+    - name: 'Markos Chandras'
+      company: 'SUSE'
+      email: 'mchandras@suse.de'
+      id: 'mchandras'
+      timezone: 'Europe/Berlin'
+    - name: 'Luke Hinds'
+      company: 'Red Hat'
+      email: 'lhinds@redhat.com'
+      id: 'lukehinds'
+      timezone: 'Europe/London'
+tsc:
+    approval: 'http://ircbot.wl.linuxfoundation.org/meetings/opnfv-meeting/2015/opnfv-meeting.2015-07-14-14.00.html'
+    changes:
+        - type: 'removal'
+          name: 'Guy Rodrigue Koffi'
+          link: ''
+        - type: 'removal'
+          name: 'Victor Laza'
+          link: 'http://meetbot.opnfv.org/meetings/opnfv-meeting/2016/opnfv-meeting.2016-02-16-14.59.html'
+        - type: 'promotion'
+          name: 'Mei Mei'
+          link: 'http://lists.opnfv.org/pipermail/opnfv-tsc/2016-March/002228.html'
+        - type: 'removal'
+          name: 'Peter Banzi'
+          link: ''
+        - type: 'promotion'
+          name: 'Trevor Bramwell'
+          link: 'http://lists.opnfv.org/pipermail/opnfv-tech-discuss/2016-July/011659.html'
+        - type: 'promotion'
+          name: 'Serena Feng'
+          link: ''
+        - type: 'promotion'
+          name: 'Yolanda Robla Mota'
+          link: ''
+        - type: 'promotion'
+          name: 'Markos Chandras'
+          link: ''
+        - type: 'promotion'
+          name: 'Luke Hinds'
+          link: ''
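
The new INFO.yaml relies on YAML anchors and merge keys: &opnfv_releng_ptl names the project-lead mapping, *opnfv_releng_ptl aliases it for primary_contact, and "<<: *opnfv_releng_ptl" merges the same fields into the first committers entry. A minimal sketch of how this resolves, assuming PyYAML (any YAML 1.1 loader with merge-key support behaves the same way):

# Illustrative only: shows how the anchor/alias/merge-key pattern above resolves.
import yaml

snippet = """
project_lead: &ptl
    name: 'Fatih Degirmenci'
    id: 'fdegir'
primary_contact: *ptl
committers:
    - <<: *ptl
"""

data = yaml.safe_load(snippet)
assert data['primary_contact'] == data['project_lead']  # alias points at the same mapping
assert data['committers'][0]['id'] == 'fdegir'           # merge key copies the PTL fields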
diff --git a/jjb/container4nfv/container4nfv-project.yml b/jjb/container4nfv/container4nfv-project.yml
index 58070e1..03bbb65 100644 (file)
@@ -10,7 +10,8 @@
 
     jobs:
       - 'container4nfv-verify-{stream}'
-      - 'container4nfv-daily-{stream}'
+      - 'container4nfv-daily-upload-{stream}'
+      - 'container4nfv-daily-deploy-{stream}'
 
     stream:
       - master:
           cd $WORKSPACE/ci
           ./build.sh
 
+- job-template:
+    name: 'container4nfv-daily-upload-{stream}'
+
+    disabled: '{obj:disabled}'
+
+    concurrent: false
+
+    scm:
+      - git-scm
+
+    wrappers:
+      - fix-workspace-permissions
+
+    parameters:
+      - project-parameter:
+          project: '{project}'
+          branch: '{branch}'
+      - 'opnfv-build-ubuntu-defaults'
+      - 'container4nfv-defaults':
+          gs-pathname: '{gs-pathname}'
+
+    builders:
+      - shell: |
+          cd $WORKSPACE/ci
+          ./upload.sh
 
 - job-template:
-    name: 'container4nfv-daily-{stream}'
+    name: 'container4nfv-daily-deploy-{stream}'
 
     project-type: freestyle
 
       - shell: |
           cd $WORKSPACE/ci
           ./deploy.sh
+
+###################
+# parameter macros
+###################
+- parameter:
+    name: 'container4nfv-defaults'
+    parameters:
+      - string:
+          name: GS_URL
+          default: artifacts.opnfv.org/$PROJECT{gs-pathname}
+          description: "URL to Google Storage."
diff --git a/jjb/dovetail/dovetail-run.sh b/jjb/dovetail/dovetail-run.sh
index 6ffcd31..e50242b 100755 (executable)
@@ -131,6 +131,8 @@ if [[ ! -f ${DOVETAIL_CONFIG}/pod.yaml ]]; then
         options="-u root -p r00tme"
     elif [[ ${INSTALLER_TYPE} == apex ]]; then
         options="-u stack -k /root/.ssh/id_rsa"
+    elif [[ ${INSTALLER_TYPE} == daisy ]]; then
+        options="-u root -p r00tme"
     else
         echo "Don't support to generate pod.yaml on ${INSTALLER_TYPE} currently."
         echo "HA test cases may not run properly."
@@ -171,6 +173,12 @@ if [ "$INSTALLER_TYPE" == "apex" ]; then
     sudo scp $ssh_options stack@${INSTALLER_IP}:~/.ssh/id_rsa ${DOVETAIL_CONFIG}/id_rsa
 fi
 
+if [ "$INSTALLER_TYPE" == "daisy" ]; then
+    echo "Fetching id_dsa file from jump_server $INSTALLER_IP..."
+    sshpass -p r00tme sudo scp $ssh_options root@${INSTALLER_IP}:~/.ssh/id_dsa ${DOVETAIL_CONFIG}/id_rsa
+fi
+
+
 image_path=${HOME}/opnfv/dovetail/images
 if [[ ! -d ${image_path} ]]; then
     mkdir -p ${image_path}
diff --git a/jjb/releng/opnfv-docker.yml b/jjb/releng/opnfv-docker.yml
index f23594e..3351a5e 100644 (file)
@@ -62,7 +62,7 @@
       - 'qtip':
           project: 'qtip'
           dockerdir: '.'
-          dockerfile: 'docker/Dockerfile'
+          dockerfile: 'docker/Dockerfile.local'
           <<: *master
           <<: *other-receivers
       - 'storperf-master':
diff --git a/modules/opnfv/deployment/daisy/__init__.py b/modules/opnfv/deployment/daisy/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/modules/opnfv/deployment/daisy/adapter.py b/modules/opnfv/deployment/daisy/adapter.py
new file mode 100644 (file)
index 0000000..5634e24
--- /dev/null
+++ b/modules/opnfv/deployment/daisy/adapter.py
@@ -0,0 +1,202 @@
+##############################################################################
+# Copyright (c) 2017 ZTE Corporation and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+
+from opnfv.deployment import manager
+from opnfv.utils import opnfv_logger as logger
+from opnfv.utils import ssh_utils
+
+logger = logger.Logger(__name__).getLogger()
+
+
+class DaisyAdapter(manager.DeploymentHandler):
+
+    def __init__(self, installer_ip, installer_user, installer_pwd):
+        super(DaisyAdapter, self).__init__(installer='daisy',
+                                           installer_ip=installer_ip,
+                                           installer_user=installer_user,
+                                           installer_pwd=installer_pwd,
+                                           pkey_file=None)
+
+    def _get_clusters(self):
+        clusters = []
+        cmd = 'source /root/daisyrc_admin; daisy cluster-list | grep -v "+--"'
+        output = self.installer_node.run_cmd(cmd)
+        lines = output.rsplit('\n')
+        if len(lines) < 2:
+            logger.info("No environments found in the deployment.")
+            return None
+        else:
+            fields = lines[0].rsplit('|')
+
+            index_id = -1
+            index_status = -1
+            index_name = -1
+            index_nodes = -1
+
+            for i in range(len(fields)):
+                if "ID" in fields[i]:
+                    index_id = i
+                elif "Status" in fields[i]:
+                    index_status = i
+                elif "Name" in fields[i]:
+                    index_name = i
+                elif "Nodes" in fields[i]:
+                    index_nodes = i
+
+            # order env info
+            for i in range(1, len(lines)):
+                fields = lines[i].rsplit('|')
+                dict = {"id": fields[index_id].strip(),
+                        "status": fields[index_status].strip(),
+                        "name": fields[index_name].strip(),
+                        "nodes": fields[index_nodes].strip()}
+                clusters.append(dict)
+
+        return clusters
+
+    def get_nodes(self, options=None):
+        if hasattr(self, 'nodes') and len(self.nodes) > 0:
+            if options and 'cluster' in options and options['cluster']:
+                nodes = []
+                for node in self.nodes:
+                    if str(node.info['cluster']) == str(options['cluster']):
+                        nodes.append(node)
+                return nodes
+            else:
+                return self.nodes
+
+        clusters = self._get_clusters()
+        nodes = []
+        for cluster in clusters:
+            if options and 'cluster' in options and options['cluster']:
+                if cluster["id"] != options['cluster']:
+                    continue
+            cmd = 'source /root/daisyrc_admin; daisy host-list ' \
+                  '--cluster-id {} | grep -v "+--"'.format(cluster["id"])
+            output = self.installer_node.run_cmd(cmd)
+            lines = output.rsplit('\n')
+            if len(lines) < 2:
+                logger.info("No nodes found in the cluster {}".format(
+                    cluster["id"]))
+                continue
+
+            fields = lines[0].rsplit('|')
+            index_id = -1
+            index_status = -1
+            index_name = -1
+
+            for i in range(len(fields)):
+                if "ID" in fields[i]:
+                    index_id = i
+                elif "Role_status" in fields[i]:
+                    index_status = i
+                elif "Name" in fields[i]:
+                    index_name = i
+
+            for i in range(1, len(lines)):
+                fields = lines[i].rsplit('|')
+                id = fields[index_id].strip().encode()
+                status_node = fields[index_status].strip().encode().lower()
+                name = fields[index_name].strip().encode()
+                ip = ".".join(name.split("-")[1:])
+
+                cmd_role = 'source /root/daisyrc_admin; ' \
+                           'daisy host-detail {} | grep "^| role"'.format(id)
+                output_role = self.installer_node.run_cmd(cmd_role)
+                role_all = output_role.rsplit('|')[2].strip().encode()
+                roles = []
+                if 'COMPUTER' in role_all:
+                    roles.append(manager.Role.COMPUTE)
+                if 'CONTROLLER_LB' in role_all or 'CONTROLLER_HA' in role_all:
+                    roles.append(manager.Role.CONTROLLER)
+
+                ssh_client = None
+                if status_node == 'active':
+                    status = manager.NodeStatus.STATUS_OK
+                    proxy = {'ip': self.installer_ip,
+                             'username': self.installer_user,
+                             'password': self.installer_pwd,
+                             'pkey_file': '/root/.ssh/id_dsa'}
+                    ssh_client = ssh_utils.get_ssh_client(hostname=ip,
+                                                          username='root',
+                                                          proxy=proxy)
+                else:
+                    status = manager.NodeStatus.STATUS_INACTIVE
+
+                node = DaisyNode(id, ip, name, status, roles, ssh_client)
+                nodes.append(node)
+        return nodes
+
+    def get_openstack_version(self):
+        cmd = 'docker exec nova_api nova-manage version 2>/dev/null'
+        version = None
+        for node in self.nodes:
+            if node.is_controller() and node.is_active():
+                version = node.run_cmd(cmd)
+                break
+        return version
+
+    def get_sdn_version(self):
+        version = None
+        for node in self.nodes:
+            if manager.Role.CONTROLLER in node.roles and node.is_active():
+                cmd = 'docker inspect --format=\'{{.Name}}\' `docker ps -q`'
+                output = node.run_cmd(cmd)
+                if '/opendaylight' in output.rsplit('\n'):
+                    cmd2 = 'docker exec opendaylight ' \
+                           'sudo yum info opendaylight 2>/dev/null ' \
+                           '| grep Version | tail -1'
+                    odl_ver = node.run_cmd(cmd2)
+                    if odl_ver:
+                        version = 'OpenDaylight: ' + odl_ver.split(' ')[-1]
+                    break
+        return version
+
+    def get_deployment_status(self):
+        clusters = self._get_clusters()
+        if clusters is None or len(clusters) == 0:
+            return 'unknown'
+        else:
+            return clusters[0]['status']
+
+
+class DaisyNode(manager.Node):
+
+    def __init__(self,
+                 id,
+                 ip,
+                 name,
+                 status,
+                 roles=None,
+                 ssh_client=None,
+                 info=None):
+        super(DaisyNode, self).__init__(id, ip, name, status,
+                                        roles, ssh_client, info)
+
+    def is_odl(self):
+        '''
+        Returns True if the node is running OpenDaylight
+        '''
+        if manager.Role.CONTROLLER in self.roles and self.is_active():
+            cmd = 'docker inspect --format=\'{{.Name}}\' `docker ps -q`'
+            output = self.run_cmd(cmd)
+            if '/opendaylight' in output.rsplit('\n'):
+                return True
+        return False
+
+    def get_ovs_info(self):
+        '''
+        Returns the ovs version installed
+        '''
+        if self.is_active():
+            cmd = 'docker exec openvswitch_vswitchd ' \
+                  'ovs-vsctl --version | head -1 | awk \'{print $NF}\''
+            return self.run_cmd(cmd)
+        return None
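
The _get_clusters() and get_nodes() helpers above parse the ASCII tables printed by the daisy CLI: they split rows on '|' and locate columns by header name. A standalone sketch of that parsing logic against a made-up table (only the layout is meaningful; the ID, name and counts are hypothetical):

# Hypothetical 'daisy cluster-list' output with the '+--' border rows already
# stripped (the adapter pipes the command through grep -v "+--").
sample = ("| ID       | Name     | Nodes | Status |\n"
          "| 1d2e3f4a | cluster1 | 5     | active |")

lines = sample.rsplit('\n')
header = lines[0].rsplit('|')
col = {}
for i, field in enumerate(header):
    for key in ("ID", "Status", "Name", "Nodes"):
        if key in field:
            col[key] = i

clusters = []
for line in lines[1:]:
    row = line.rsplit('|')
    clusters.append({"id": row[col["ID"]].strip(),
                     "status": row[col["Status"]].strip(),
                     "name": row[col["Name"]].strip(),
                     "nodes": row[col["Nodes"]].strip()})

print(clusters)  # [{'id': '1d2e3f4a', 'status': 'active', 'name': 'cluster1', 'nodes': '5'}]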
diff --git a/modules/opnfv/deployment/factory.py b/modules/opnfv/deployment/factory.py
index e14783f..2788e5e 100644 (file)
@@ -12,6 +12,7 @@ from opnfv.deployment.apex import adapter as apex_adapter
 from opnfv.deployment.compass import adapter as compass_adapter
 from opnfv.deployment.fuel import adapter as fuel_adapter
 from opnfv.deployment.osa import adapter as osa_adapter
+from opnfv.deployment.daisy import adapter as daisy_adapter
 from opnfv.utils import opnfv_logger as logger
 
 logger = logger.Logger(__name__).getLogger()
@@ -51,6 +52,10 @@ class Factory(object):
             return osa_adapter.OSAAdapter(installer_ip=installer_ip,
                                           installer_user=installer_user,
                                           pkey_file=pkey_file)
+        elif installer.lower() == "daisy":
+            return daisy_adapter.DaisyAdapter(installer_ip=installer_ip,
+                                              installer_user=installer_user,
+                                              installer_pwd=installer_pwd)
         else:
             raise Exception("Installer adapter is not implemented for "
                             "the given installer.")
diff --git a/modules/opnfv/utils/ssh_utils.py b/modules/opnfv/utils/ssh_utils.py
index 4c5ff5c..175a380 100644 (file)
@@ -49,9 +49,11 @@ def get_ssh_client(hostname,
             client = paramiko.SSHClient()
         else:
             client = ProxyHopClient()
+            proxy_pkey_file = proxy.get('pkey_file', '/root/.ssh/id_rsa')
             client.configure_jump_host(proxy['ip'],
                                        proxy['username'],
-                                       proxy['password'])
+                                       proxy['password'],
+                                       proxy_pkey_file)
         if client is None:
             raise Exception('Could not connect to client')
 
@@ -115,6 +117,8 @@ class ProxyHopClient(paramiko.SSHClient):
                             jh_ssh_key='/root/.ssh/id_rsa'):
         self.proxy_ip = jh_ip
         self.proxy_ssh_key = jh_ssh_key
+        self.local_ssh_key = os.path.join(os.getcwd(),
+                                          jh_ssh_key.split('/')[-1])
         self.proxy_ssh = paramiko.SSHClient()
         self.proxy_ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
         self.proxy_ssh.connect(jh_ip,
@@ -138,8 +142,12 @@ class ProxyHopClient(paramiko.SSHClient):
                                     self.local_ssh_key)
             if get_file_res is None:
                raise Exception('Couldn\'t fetch SSH key from jump host')
-            proxy_key = (paramiko.RSAKey
-                         .from_private_key_file(self.local_ssh_key))
+            if self.proxy_ssh_key.split('/')[-1] == 'id_dsa':
+                proxy_key = (paramiko.DSSKey
+                             .from_private_key_file(self.local_ssh_key))
+            else:
+                proxy_key = (paramiko.RSAKey
+                             .from_private_key_file(self.local_ssh_key))
 
             self.proxy_channel = self.proxy_transport.open_channel(
                 "direct-tcpip",
diff --git a/utils/create_pod_file.py b/utils/create_pod_file.py
index def5ecc..a60ece4 100644 (file)
@@ -92,6 +92,9 @@ def create_file(handler, INSTALLER_TYPE):
     if args.INSTALLER_TYPE == 'compass':
         for item in node_list:
             item['password'] = 'root'
+    elif args.INSTALLER_TYPE == 'daisy':
+        for item in node_list:
+            item['key_filename'] = '/root/.ssh/id_dsa'
     else:
         for item in node_list:
             item['key_filename'] = args.sshkey
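
For daisy the generated pod.yaml therefore points each node at the DSA key fetched earlier by dovetail-run.sh instead of a password. Roughly, each node entry ends up shaped like the dict below; only key_filename comes from the new branch, the remaining fields are illustrative:

# Illustrative shape of one node entry after create_file() runs with
# INSTALLER_TYPE=daisy; the name, role and ip values are hypothetical.
node = {'name': 'node1',
        'role': 'Controller',
        'ip': '10.20.11.4',
        'user': 'root',
        'key_filename': '/root/.ssh/id_dsa'}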