<<: *master
- 'os-odl-bgpvpn-ha':
<<: *master
+ - 'os-ovn-nofeature-noha':
+ <<: *master
platform:
- 'baremetal'
abort-all-job: true
git-revision: false
+ - name: 'apex-os-ovn-nofeature-noha-baremetal-master'
+ node-parameters: false
+ current-parameters: false
+ predefined-parameters: |
+ OPNFV_CLEAN=yes
+ kill-phase-on: NEVER
+ abort-all-job: true
+ git-revision: false
+
# snapshot create
- 'os-odl-nofeature-ha'
- 'os-odl-nofeature-noha'
- 'os-odl-bgpvpn-ha'
+ - 'os-ovn-nofeature-noha'
danube:
- 'os-nosdn-nofeature-noha'
- 'os-nosdn-nofeature-ha'
fi
fi
-if [[ "$NODE_NAME" =~ "virtual" ]]; then
- POD_NAME="virtual_kvm"
-fi
-
# releng wants us to use nothing else but opnfv.iso for now. We comply.
ISO_FILE=$WORKSPACE/opnfv.iso
options="-u root -p root"
elif [[ ${INSTALLER_TYPE} == fuel ]]; then
options="-u root -p r00tme"
+elif [[ ${INSTALLER_TYPE} == apex ]]; then
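+ # Apex uses key-based auth as the stack user; no password is required.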
+ options="-u stack -k /root/.ssh/id_rsa"
else
echo "Don't support to generate pod.yaml on ${INSTALLER_TYPE} currently."
echo "HA test cases may not run properly."
sshpass -p r00tme sudo scp $ssh_options root@${INSTALLER_IP}:~/.ssh/id_rsa ${DOVETAIL_CONFIG}/id_rsa
fi
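+# Apex keeps the overcloud SSH key in the stack user's home on the jump host;
+# fetch it so the HA test cases can log in to the deployed nodes.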
+if [ "$INSTALLER_TYPE" == "apex" ]; then
+ echo "Fetching id_rsa file from jump_server $INSTALLER_IP..."
+ scp $ssh_options stack@${INSTALLER_IP}:~/.ssh/id_rsa ${DOVETAIL_CONFIG}/id_rsa
+fi
+
# the sdnvpn test case needs this image downloaded before it runs
echo "Downloading image ubuntu-16.04-server-cloudimg-amd64-disk1.img ..."
wget -q -nc http://artifacts.opnfv.org/sdnvpn/ubuntu-16.04-server-cloudimg-amd64-disk1.img -P ${DOVETAIL_CONFIG}
envs=${env}" -e OPENO_MSB_ENDPOINT=${openo_msb_endpoint}"
fi
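+# the cacert volume lets the containerized tests verify SSL-enabled OpenStack endpoints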
-volumes="${images_vol} ${results_vol} ${sshkey_vol} ${rc_file_vol}"
+volumes="${images_vol} ${results_vol} ${sshkey_vol} ${rc_file_vol} ${cacert_file_vol}"
tiers=(healthcheck smoke)
--- /dev/null
+###################################
+# job configuration for orchestra
+###################################
+- project:
+ name: 'orchestra-daily-jobs'
+
+ project: 'orchestra'
+
+#--------------------------------
+# BRANCH ANCHORS
+#--------------------------------
+ master: &master
+ stream: master
+ branch: '{stream}'
+ gs-pathname: ''
+ disabled: false
+
+#-------------------------------------------------------
+# POD, INSTALLER, AND BRANCH MAPPING
+#-------------------------------------------------------
+ pod:
+ - virtual:
+ slave-label: 'joid-virtual'
+ os-version: 'xenial'
+ <<: *master
+
+ jobs:
+ - 'orchestra-{pod}-daily-{stream}'
+
+################################
+# job template
+################################
+- job-template:
+ name: 'orchestra-{pod}-daily-{stream}'
+
+ project-type: multijob
+
+ disabled: '{obj:disabled}'
+
+ concurrent: false
+
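+ # serialize runs: at most one orchestra daily job at a time, cluster-wide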
+ properties:
+ - logrotate-default
+ - throttle:
+ enabled: true
+ max-total: 1
+ max-per-node: 1
+ option: 'project'
+
+ scm:
+ - git-scm
+
+ wrappers:
+ - ssh-agent-wrapper
+
+ - timeout:
+ timeout: 240
+ fail: true
+
+ triggers:
+ - timed: '@daily'
+
+ parameters:
+ - project-parameter:
+ project: '{project}'
+ branch: '{branch}'
+ - string:
+ name: DEPLOY_SCENARIO
+ default: os-nosdn-openbaton-ha
+ - '{slave-label}-defaults'
+
+ builders:
+ - description-setter:
+ description: "Built on $NODE_NAME"
+ - multijob:
+ name: deploy
+ condition: SUCCESSFUL
+ projects:
+ - name: 'joid-deploy-{pod}-daily-{stream}'
+ current-parameters: false
+ predefined-parameters: |
+ DEPLOY_SCENARIO=os-nosdn-openbaton-ha
+ COMPASS_OS_VERSION=xenial
+ node-parameters: true
+ kill-phase-on: FAILURE
+ abort-all-job: true
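+ # once the deploy phase succeeds, run the orchestra_ims suite via Functest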
+ - multijob:
+ name: functest
+ condition: SUCCESSFUL
+ projects:
+ - name: 'functest-joid-{pod}-daily-{stream}'
+ current-parameters: false
+ predefined-parameters: |
+ DEPLOY_SCENARIO=os-nosdn-openbaton-ha
+ FUNCTEST_SUITE_NAME=orchestra_ims
+ node-parameters: true
+ kill-phase-on: NEVER
+ abort-all-job: true
--- /dev/null
+- project:
+
+ name: orchestra-project
+
+ project: 'orchestra'
+
+ stream:
+ - master:
+ branch: '{stream}'
+ gs-pathname: ''
+
+ jobs:
+ - 'orchestra-build-{stream}'
+
+- job-template:
+ name: 'orchestra-build-{stream}'
+
+ concurrent: true
+
+ properties:
+ - logrotate-default
+ - throttle:
+ enabled: true
+ max-total: 1
+ max-per-node: 1
+ option: 'project'
+
+ parameters:
+ - project-parameter:
+ project: '{project}'
+ branch: '{branch}'
+
+ scm:
+ - git-scm
+
+ triggers:
+ - timed: 'H 23 * * *'
+
+ builders:
+ - 'orchestra-build-macro'
+
+- builder:
+ name: 'orchestra-build-macro'
+ builders:
+ - shell: |
+ #!/bin/bash
+
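+ # placeholder build step; replace with the real Orchestra build once it is defined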
+ echo "Hello world!"
+
+
echo "Document link(s):" >> gerrit_comment.txt
find "$local_path" | grep -e 'ipynb$' | \
- sed -e "s|^$local_path| https://nbviewer.jupyter.org/urls/$gs_path|" >> gerrit_comment.txt
+ sed -e "s|^$local_path| https://nbviewer.jupyter.org/url/$gs_path|" >> gerrit_comment.txt
args.user, installer_pwd=args.password)
-def create_file(handler):
+def create_file(handler, installer_type):
"""
Create the yaml file of nodes info.
As Yardstick requires, node names must be node1, node2, ... and node1 must
nodes = handler.nodes
node_list = []
index = 1
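+ # Apex (TripleO) images allow SSH to overcloud nodes as 'heat-admin', not root.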
+ user = 'root'
+ if installer_type == 'apex':
+ user = 'heat-admin'
for node in nodes:
try:
if node.roles[0].lower() == "controller":
node_info = {'name': "node%s" % index, 'role': node.roles[0],
- 'ip': node.ip, 'user': 'root'}
+ 'ip': node.ip, 'user': user}
node_list.append(node_info)
index += 1
except Exception:
node_info = {'name': node.name, 'role': 'unknown', 'ip': node.ip,
- 'user': 'root'}
+ 'user': user}
node_list.append(node_info)
for node in nodes:
try:
if node.roles[0].lower() == "compute":
node_info = {'name': "node%s" % index, 'role': node.roles[0],
- 'ip': node.ip, 'user': 'root'}
+ 'ip': node.ip, 'user': user}
node_list.append(node_info)
index += 1
except Exception:
node_info = {'name': node.name, 'role': 'unknown', 'ip': node.ip,
- 'user': 'root'}
+ 'user': user}
node_list.append(node_info)
if args.INSTALLER_TYPE == 'compass':
for item in node_list:
if not handler:
print("Error: failed to get the node's handler.")
return 1
- create_file(handler)
+ create_file(handler, args.INSTALLER_TYPE)
if __name__ == '__main__':