* morgan.richomme@orange.com
* vlaza@cloudbasesolutions.com
* matthew.lijun@huawei.com
-* pbandzi@cisco.com
+* meimei@huawei.com
* jose.lausuch@ericsson.com
* koffirodrigue@gmail.com
* r-mibu@cq.jp.nec.com
+Or add the group releng-contributors
+
Or just email a request for submission to opnfv-helpdesk@rt.linuxfoundation.org
The current merge and verify jobs for Jenkins Job Builder can be found
- throttle:
max-per-node: 1
max-total: 10
+ option: 'project'
builders:
- 'apex-unit-test'
- throttle:
max-per-node: 1
max-total: 10
+ option: 'project'
builders:
- 'apex-build'
- throttle:
max-per-node: 1
max-total: 10
+ option: 'project'
builders:
- 'apex-deploy'
build-step-failure-threshold: 'never'
failure-threshold: 'never'
unstable-threshold: 'FAILURE'
+ - trigger-builds:
+ - project: 'apex-deploy-baremetal-os-onos-nofeature-ha-{stream1}'
+ predefined-parameters: |
+ BUILD_DIRECTORY=apex-build-{stream1}/build
+ OPNFV_CLEAN=yes
+ git-revision: true
+ same-node: true
+ block: true
+ - trigger-builds:
+ - project: 'functest-apex-{daily-slave}-daily-{stream1}'
+ predefined-parameters:
+ DEPLOY_SCENARIO=os-onos-nofeature-ha
+ block: true
+ same-node: true
+ block-thresholds:
+ build-step-failure-threshold: 'never'
+ failure-threshold: 'never'
+ unstable-threshold: 'FAILURE'
+ - trigger-builds:
+ - project: 'yardstick-apex-{slave}-daily-{stream1}'
+ predefined-parameters:
+ DEPLOY_SCENARIO=os-onos-nofeature-ha
+ block: true
+ same-node: true
+ block-thresholds:
+ build-step-failure-threshold: 'never'
+ failure-threshold: 'never'
+ unstable-threshold: 'FAILURE'
- job-template:
name: 'apex-gs-clean-{stream}'
enabled: true
max-total: 1
max-per-node: 1
+ option: 'project'
- build-blocker:
use-build-blocker: true
blocking-jobs:
enabled: true
max-total: 1
max-per-node: 1
+ option: 'project'
- build-blocker:
use-build-blocker: true
blocking-jobs:
enabled: true
max-total: 1
max-per-node: 1
+ option: 'project'
parameters:
- project-parameter:
enabled: true
max-total: 1
max-per-node: 1
+ option: 'project'
parameters:
- project-parameter:
- throttle:
enabled: true
max-per-node: 1
+ option: 'project'
- build-blocker:
use-build-blocker: true
blocking-jobs:
- throttle:
enabled: true
max-per-node: 1
+ option: 'project'
- build-blocker:
use-build-blocker: true
blocking-jobs:
- throttle:
enabled: true
max-per-node: 1
+ option: 'project'
- build-blocker:
use-build-blocker: true
blocking-jobs:
- throttle:
enabled: true
max-per-node: 1
+ option: 'project'
- build-blocker:
use-build-blocker: true
blocking-jobs:
enabled: true
max-total: 1
max-per-node: 1
+ option: 'project'
parameters:
- project-parameter:
enabled: true
max-total: 1
max-per-node: 1
+ option: 'project'
parameters:
- project-parameter:
- throttle:
enabled: true
max-per-node: 1
+ option: 'project'
wrappers:
- build-name:
enabled: true
max-total: 3
max-per-node: 2
+ option: 'project'
parameters:
- project-parameter:
enabled: true
max-total: 3
max-per-node: 2
+ option: 'project'
parameters:
- project-parameter:
auto-trigger-name: 'fuel-{scenario}-{pod}-daily-{stream}-trigger'
- 'os-onos-sfc-ha':
auto-trigger-name: 'fuel-{scenario}-{pod}-daily-{stream}-trigger'
+ - 'os-onos-nofeature-ha':
+ auto-trigger-name: 'fuel-{scenario}-{pod}-daily-{stream}-trigger'
- 'os-odl_l2-sfc-ha':
auto-trigger-name: 'fuel-{scenario}-{pod}-daily-{stream}-trigger'
- 'os-odl_l2-bgpvpn-ha':
enabled: true
max-total: 4
max-per-node: 1
+ option: 'project'
- build-blocker:
use-build-blocker: true
blocking-jobs:
enabled: true
max-total: 4
max-per-node: 1
+ option: 'project'
- build-blocker:
use-build-blocker: true
blocking-jobs:
- trigger:
name: 'fuel-os-nosdn-nofeature-ha-baremetal-daily-master-trigger'
triggers:
- - timed: '0 19 * * *'
+ - timed: '0 20 * * *'
- trigger:
name: 'fuel-os-odl_l2-nofeature-ha-baremetal-daily-master-trigger'
triggers:
- trigger:
name: 'fuel-os-odl_l3-nofeature-ha-baremetal-daily-master-trigger'
triggers:
- - timed: '0 5 * * *'
+ - timed: '0 4 * * *'
- trigger:
name: 'fuel-os-onos-sfc-ha-baremetal-daily-master-trigger'
triggers:
- - timed: '0 10 * * *'
+ - timed: '0 8 * * *'
+- trigger:
+ name: 'fuel-os-onos-nofeature-ha-baremetal-daily-master-trigger'
+ triggers:
+ - timed: '0 12 * * *'
- trigger:
name: 'fuel-os-odl_l2-sfc-ha-baremetal-daily-master-trigger'
triggers:
- - timed: '0 15 * * *'
+ - timed: '0 16 * * *'
- trigger:
name: 'fuel-os-odl_l2-bgpvpn-ha-baremetal-daily-master-trigger'
triggers:
name: 'fuel-os-onos-sfc-ha-virtual-daily-master-trigger'
triggers:
- timed: ''
+- trigger:
+ name: 'fuel-os-onos-nofeature-ha-virtual-daily-master-trigger'
+ triggers:
+ - timed: ''
- trigger:
name: 'fuel-os-odl_l2-bgpvpn-ha-virtual-daily-master-trigger'
triggers:
name: 'fuel-os-onos-sfc-ha-zte-pod1-daily-master-trigger'
triggers:
- timed: ''
+- trigger:
+ name: 'fuel-os-onos-nofeature-ha-zte-pod1-daily-master-trigger'
+ triggers:
+ - timed: ''
- trigger:
name: 'fuel-os-odl_l2-bgpvpn-ha-zte-pod1-daily-master-trigger'
triggers:
enabled: true
max-total: 1
max-per-node: 1
+ option: 'project'
parameters:
- project-parameter:
enabled: true
max-total: 2
max-per-node: 1
+ option: 'project'
- build-blocker:
use-build-blocker: true
blocking-jobs:
- throttle:
enabled: true
max-per-node: 1
+ option: 'project'
- build-blocker:
use-build-blocker: true
blocking-jobs:
- throttle:
enabled: true
max-per-node: 1
+ option: 'project'
wrappers:
- build-name:
- choice:
name: FUNCTEST_SUITE_NAME
choices:
+ - 'healthcheck'
- 'tempest'
- 'rally'
- 'odl'
# labconfig is used only for joid
labconfig=""
if [[ ${INSTALLER_TYPE} == 'apex' ]]; then
+ ssh_options="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
if sudo virsh list | grep instack; then
instack_mac=$(sudo virsh domiflist instack | grep default | \
grep -Eo "[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+")
fi
INSTALLER_IP=$(/usr/sbin/arp -e | grep ${instack_mac} | awk {'print $1'})
sshkey="-v /root/.ssh/id_rsa:/root/.ssh/id_rsa"
- sudo scp root@${INSTALLER_IP}:/home/stack/stackrc .
- stackrc="-v ./stackrc:/home/opnfv/functest/conf/stackrc"
+ sudo scp $ssh_options root@${INSTALLER_IP}:/home/stack/stackrc ${HOME}/stackrc
+ stackrc="-v ${HOME}/stackrc:/home/opnfv/functest/conf/stackrc"
if sudo iptables -C FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable 2> ${redirect}; then
sudo iptables -D FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable
auto-trigger-name: 'daily-trigger-disabled'
- 'os-nosdn-nofeature-ha':
auto-trigger-name: 'joid-{scenario}-{pod}-{stream}-trigger'
+ - 'os-nosdn-lxd-ha':
+ auto-trigger-name: 'daily-trigger-disabled'
+ - 'os-nosdn-lxd-noha':
+ auto-trigger-name: 'joid-{scenario}-{pod}-{stream}-trigger'
- 'os-odl_l2-nofeature-ha':
auto-trigger-name: 'joid-{scenario}-{pod}-{stream}-trigger'
- 'os-onos-nofeature-ha':
enabled: true
max-total: 4
max-per-node: 1
+ option: 'project'
- build-blocker:
use-build-blocker: true
blocking-jobs:
same-node: true
block: true
- trigger-builds:
- - project: 'functest-joid-{pod}-daily-{stream}'
+ - project: 'yardstick-joid-{pod}-daily-{stream}'
current-parameters: false
predefined-parameters:
DEPLOY_SCENARIO={scenario}
- same-node: true
block: true
+ same-node: true
block-thresholds:
build-step-failure-threshold: 'never'
failure-threshold: 'never'
unstable-threshold: 'FAILURE'
- trigger-builds:
- - project: 'yardstick-joid-{pod}-daily-{stream}'
+ - project: 'functest-joid-{pod}-daily-{stream}'
current-parameters: false
predefined-parameters:
DEPLOY_SCENARIO={scenario}
enabled: true
max-total: 4
max-per-node: 1
+ option: 'project'
- build-blocker:
use-build-blocker: true
blocking-jobs:
name: 'joid-os-onos-nofeature-ha-juniper-pod1-brahmaputra-trigger'
triggers:
- timed: ''
+
+# os-nosdn-lxd-noha trigger - branch: master
+- trigger:
+ name: 'joid-os-nosdn-lxd-noha-baremetal-master-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'joid-os-nosdn-lxd-noha-virtual-master-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'joid-os-nosdn-lxd-noha-orange-pod2-master-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'joid-os-nosdn-lxd-noha-juniper-pod1-master-trigger'
+ triggers:
+ - timed: ''
+
+# os-nosdn-lxd-noha trigger - branch: stable/brahmaputra
+- trigger:
+ name: 'joid-os-nosdn-lxd-noha-baremetal-brahmaputra-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'joid-os-nosdn-lxd-noha-virtual-brahmaputra-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'joid-os-nosdn-lxd-noha-orange-pod2-brahmaputra-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'joid-os-nosdn-lxd-noha-juniper-pod1-brahmaputra-trigger'
+ triggers:
+ - timed: ''
+
+# os-nosdn-lxd-ha trigger - branch: master
+- trigger:
+ name: 'joid-os-nosdn-lxd-ha-baremetal-master-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'joid-os-nosdn-lxd-ha-virtual-master-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'joid-os-nosdn-lxd-ha-orange-pod2-master-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'joid-os-nosdn-lxd-ha-juniper-pod1-master-trigger'
+ triggers:
+ - timed: ''
+
+# os-nosdn-lxd-ha trigger - branch: stable/brahmaputra
+- trigger:
+ name: 'joid-os-nosdn-lxd-ha-baremetal-brahmaputra-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'joid-os-nosdn-lxd-ha-virtual-brahmaputra-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'joid-os-nosdn-lxd-ha-orange-pod2-brahmaputra-trigger'
+ triggers:
+ - timed: ''
+- trigger:
+ name: 'joid-os-nosdn-lxd-ha-juniper-pod1-brahmaputra-trigger'
+ triggers:
+ - timed: ''
##
echo "------ Deploy with juju ------"
-echo "Execute: ./deploy.sh -t $HA_MODE -o $OS_RELEASE -s $SDN_CONTROLLER -l $POD_NAME -d $UBUNTU_DISTRO"
+echo "Execute: ./deploy.sh -t $HA_MODE -o $OS_RELEASE -s $SDN_CONTROLLER -l $POD_NAME -d $UBUNTU_DISTRO -f $NFV_FEATURES"
-./deploy.sh -t $HA_MODE -o $OS_RELEASE -s $SDN_CONTROLLER -l $POD_NAME -d $UBUNTU_DISTRO
+./deploy.sh -t $HA_MODE -o $OS_RELEASE -s $SDN_CONTROLLER -l $POD_NAME -d $UBUNTU_DISTRO -f $NFV_FEATURES
exit_on_error $? "Main deploy FAILED"
##
# get Keystone ip
case "$HA_MODE" in
"ha")
- KEYSTONE=$(cat bundles.yaml |shyaml get-value openstack-phase2.services.keystone.options.vip)
+ KEYSTONE=$(cat bundles.yaml |shyaml get-value openstack-phase1.services.keystone.options.vip)
;;
*)
KEYSTONE=$(juju status keystone |grep public-address|sed -- 's/.*\: //')
enabled: true
max-total: 3
max-per-node: 2
+ option: 'project'
parameters:
- project-parameter:
enabled: true
max-total: 3
max-per-node: 2
+ option: 'project'
parameters:
- project-parameter:
- throttle:
enabled: true
max-per-node: 1
+ option: 'project'
wrappers:
- build-name:
# Check if controller is alive (online='True')
controller_ip=$(sshpass -p r00tme ssh 2>/dev/null $ssh_options root@${installer_ip} \
- 'fuel node | grep controller | grep "True\| 1" | awk "{print \$10}" | tail -1') &> /dev/null
+ 'fuel node | grep controller | grep "True\| 1" | awk -F\| "{print \$5}" | tail -1' | \
+ sed 's/ //g') &> /dev/null
if [ -z $controller_ip ]; then
error "The controller $controller_ip is not up. Please check that the POD is correctly deployed."
print 'No kittez. Got an error code:', e
test_results = results['results']
+ test_results.reverse()
scenario_results = {}
for r in test_results:
- if not r['version'] in scenario_results.keys():
- scenario_results[r['version']] = []
- scenario_results[r['version']].append(r)
+ if not r['scenario'] in scenario_results.keys():
+ scenario_results[r['scenario']] = []
+ scenario_results[r['scenario']].append(r)
for s, s_result in scenario_results.items():
scenario_results[s] = s_result[0:5]
from opnfv_testapi.tornado_swagger import swagger\r
\r
\r
+@swagger.model()\r
class CreateResponse(object):\r
def __init__(self, href=''):\r
self.href = href\r
@param body: pod to be created
@type body: L{PodCreateRequest}
@in body: body
- @rtype: L{Pod}
+ @rtype: L{CreateResponse}
@return 200: pod is created.
@raise 403: pod already exists
@raise 400: body or name not provided
@param body: project to be created
@type body: L{ProjectCreateRequest}
@in body: body
- @rtype: L{Project}
+ @rtype: L{CreateResponse}
@return 200: project is created.
@raise 403: project already exists
@raise 400: body or name not provided
@param body: result to be created
@type body: L{ResultCreateRequest}
@in body: body
- @rtype: L{TestResult}
+ @rtype: L{CreateResponse}
@return 200: result is created.
@raise 404: pod/project/testcase not exist
@raise 400: body/pod_name/project_name/case_name not provided
t.case_name = a_dict.get('case_name')
t.pod_name = a_dict.get('pod_name')
t.project_name = a_dict.get('project_name')
- t.description = a_dict.get('description')
t.start_date = str(a_dict.get('start_date'))
t.stop_date = str(a_dict.get('stop_date'))
t.details = a_dict.get('details')
"case_name": self.case_name,
"project_name": self.project_name,
"pod_name": self.pod_name,
- "description": self.description,
"start_date": str(self.start_date),
"stop_date": str(self.stop_date),
"version": self.version,
"case_name": self.case_name,
"project_name": self.project_name,
"pod_name": self.pod_name,
- "description": self.description,
"start_date": str(self.start_date),
"stop_date": str(self.stop_date),
"version": self.version,
@param body: testcase to be created
@type body: L{TestcaseCreateRequest}
@in body: body
- @rtype: L{Testcase}
+ @rtype: L{CreateResponse}
@return 200: testcase is created in this project.
@raise 403: project not exist
or testcase already exists in this project
logger = logging.getLogger('create_kibana_dashboards')
logger.setLevel(logging.DEBUG)
-file_handler = logging.FileHandler('/var/log/{}.log'.format(__name__))
+file_handler = logging.FileHandler('/var/log/{}.log'.format('create_kibana_dashboards'))
file_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s'))
logger.addHandler(file_handler)
# see class VisualizationState for details on format
_testcases = [
- ('functest', 'Tempest',
+ ('functest', 'tempest_smoke_serial',
[
{
"metrics": [
],
"type": "line",
"metadata": {
- "label": "Tempest duration",
+ "label": "tempest_smoke_serial duration",
"test_family": "VIM"
}
},
],
"type": "histogram",
"metadata": {
- "label": "Tempest nr of tests/failures",
+ "label": "tempest_smoke_serial nr of tests/failures",
"test_family": "VIM"
}
},
],
"type": "line",
"metadata": {
- "label": "Tempest success percentage",
+ "label": "tempest_smoke_serial success percentage",
"test_family": "VIM"
}
}
]
),
- ('functest', 'Rally',
+ ('functest', 'rally_sanity',
[
{
"metrics": [
],
"type": "line",
"metadata": {
- "label": "Rally duration",
+ "label": "rally_sanity duration",
"test_family": "VIM"
}
},
],
"type": "histogram",
"metadata": {
- "label": "Rally nr of tests",
+ "label": "rally_sanity nr of tests",
"test_family": "VIM"
}
},
],
"type": "line",
"metadata": {
- "label": "Rally success percentage",
+ "label": "rally_sanity success percentage",
"test_family": "VIM"
}
}
]
),
- ('functest', 'vPing',
+ ('functest', 'vping_ssh',
[
{
"metrics": [
]
),
- ('functest', 'vPing_userdata',
+ ('functest', 'vping_userdata',
[
{
"metrics": [
]
),
- ('functest', 'ODL',
+ ('functest', 'odl',
[
{
"metrics": [
]
),
- ('functest', 'ONOS',
+ ('functest', 'onos',
[
{
"metrics": [
]
),
- ('functest', 'vIMS',
+ ('functest', 'vims',
[
{
"metrics": [
class KibanaDashboard(dict):
- def __init__(self, project_name, case_name, installer, pod, versions, visualization_detail):
+ def __init__(self, project_name, case_name, installer, pod, scenarios, visualization_detail):
super(KibanaDashboard, self).__init__()
self.project_name = project_name
self.case_name = case_name
self.installer = installer
self.pod = pod
- self.versions = versions
+ self.scenarios = scenarios
self.visualization_detail = visualization_detail
self._visualization_title = None
self._kibana_visualizations = []
self._create()
def _create_visualizations(self):
- for version in self.versions:
+ for scenario in self.scenarios:
self._kibana_visualizations.append(KibanaVisualization(self.project_name,
self.case_name,
self.installer,
self.pod,
- version,
+ scenario,
self.visualization_detail))
self._visualization_title = self._kibana_visualizations[0].vis_state_title
for visualization in self._kibana_visualizations:
url = urlparse.urljoin(base_elastic_url, '/.kibana/visualization/{}'.format(visualization.id))
logger.debug("publishing visualization '{}'".format(url))
- shared_utils.publish_json(visualization, es_user, es_passwd, url)
+ shared_utils.publish_json(visualization, es_creds, url)
def _construct_panels(self):
size_x = 6
},
separators=(',', ':'))
self['uiStateJSON'] = "{}"
- self['version'] = 1
+ self['scenario'] = 1
self['timeRestore'] = False
self['kibanaSavedObjectMeta'] = {
'searchSourceJSON': json.dumps({
def _publish(self):
url = urlparse.urljoin(base_elastic_url, '/.kibana/dashboard/{}'.format(self.id))
logger.debug("publishing dashboard '{}'".format(url))
- shared_utils.publish_json(self, es_user, es_passwd, url)
+ shared_utils.publish_json(self, es_creds, url)
def publish(self):
self._publish_visualizations()
]
"""
- def __init__(self, project_name, case_name, installer, pod, version):
+ def __init__(self, project_name, case_name, installer, pod, scenario):
super(KibanaSearchSourceJSON, self).__init__()
self["filter"] = [
{"match": {"project_name": {"query": project_name, "type": "phrase"}}},
{"match": {"case_name": {"query": case_name, "type": "phrase"}}},
{"match": {"installer": {"query": installer, "type": "phrase"}}},
- {"match": {"version": {"query": version, "type": "phrase"}}}
+ {"match": {"scenario": {"query": scenario, "type": "phrase"}}}
]
if pod != 'all':
self["filter"].append({"match": {"pod_name": {"query": pod, "type": "phrase"}}})
{
"type": type, # default date_histogram
"params": {
- "field": field # default creation_date
+ "field": field # default start_date
},
{segment2}
],
"type": type, # default area
"mode": mode, # default grouped for type 'histogram', stacked for other types
"metadata": {
- "label": "Tempest duration",# mandatory, no default
+ "label": "tempest_smoke_serial duration",# mandatory, no default
"test_family": "VIM" # mandatory, no default
}
}
"type": 'date_histogram' if 'type' not in segment else segment['type'],
"schema": "metric",
"params": {
- "field": "creation_date" if ('params' not in segment or 'field' not in segment['params'])
+ "field": "start_date" if ('params' not in segment or 'field' not in segment['params'])
else segment['params']['field'],
"interval": "auto",
"customInterval": "2h",
"type": 'date_histogram',
"schema": "segment",
"params": {
- "field": "creation_date",
+ "field": "start_date",
"interval": "auto",
"customInterval": "2h",
"min_doc_count": 1,
class KibanaVisualization(dict):
- def __init__(self, project_name, case_name, installer, pod, version, detail):
+ def __init__(self, project_name, case_name, installer, pod, scenario, detail):
"""
We need two things
1. filter created from
case_name
installer
pod
- version
+ scenario
2. visualization state
field for y axis (metric) with type (avg, sum, etc.)
field for x axis (segment) with type (date_histogram)
self.vis_state_title,
installer,
pod,
- version)
+ scenario)
self.id = self['title'].replace(' ', '-').replace('/', '-')
self['visState'] = json.dumps(vis_state, separators=(',', ':'))
self['uiStateJSON'] = "{}"
self['description'] = "Kibana visualization for project_name '{}', case_name '{}', data '{}', installer '{}'," \
- " pod '{}' and version '{}'".format(project_name,
+ " pod '{}' and scenario '{}'".format(project_name,
case_name,
self.vis_state_title,
installer,
pod,
- version)
- self['version'] = 1
+ scenario)
+ self['scenario'] = 1
self['kibanaSavedObjectMeta'] = {"searchSourceJSON": json.dumps(KibanaSearchSourceJSON(project_name,
case_name,
installer,
pod,
- version),
+ scenario),
separators=(',', ':'))}
-def _get_pods_and_versions(project_name, case_name, installer):
+def _get_pods_and_scenarios(project_name, case_name, installer):
query_json = json.JSONEncoder().encode({
"query": {
"bool": {
})
elastic_data = shared_utils.get_elastic_data(urlparse.urljoin(base_elastic_url, '/test_results/mongo2elastic'),
- es_user, es_passwd, query_json)
+ es_creds, query_json)
- pods_and_versions = {}
+ pods_and_scenarios = {}
for data in elastic_data:
pod = data['pod_name']
- if pod in pods_and_versions:
- pods_and_versions[pod].add(data['version'])
+ if pod in pods_and_scenarios:
+ pods_and_scenarios[pod].add(data['scenario'])
else:
- pods_and_versions[pod] = {data['version']}
+ pods_and_scenarios[pod] = {data['scenario']}
- if 'all' in pods_and_versions:
- pods_and_versions['all'].add(data['version'])
+ if 'all' in pods_and_scenarios:
+ pods_and_scenarios['all'].add(data['scenario'])
else:
- pods_and_versions['all'] = {data['version']}
+ pods_and_scenarios['all'] = {data['scenario']}
- return pods_and_versions
+ return pods_and_scenarios
def construct_dashboards():
"""
iterate over testcase and installer
1. get available pods for each testcase/installer pair
- 2. get available version for each testcase/installer/pod tuple
+ 2. get available scenario for each testcase/installer/pod tuple
3. construct KibanaInput and append
:return: list of KibanaDashboards
kibana_dashboards = []
for project_name, case_name, visualization_details in _testcases:
for installer in _installers:
- pods_and_versions = _get_pods_and_versions(project_name, case_name, installer)
+ pods_and_scenarios = _get_pods_and_scenarios(project_name, case_name, installer)
for visualization_detail in visualization_details:
- for pod, versions in pods_and_versions.iteritems():
- kibana_dashboards.append(KibanaDashboard(project_name, case_name, installer, pod, versions,
+ for pod, scenarios in pods_and_scenarios.iteritems():
+ kibana_dashboards.append(KibanaDashboard(project_name, case_name, installer, pod, scenarios,
visualization_detail))
return kibana_dashboards
parser = argparse.ArgumentParser(description='Create Kibana dashboards from data in elasticsearch')
parser.add_argument('-e', '--elasticsearch-url', default='http://localhost:9200',
help='the url of elasticsearch, defaults to http://localhost:9200')
+
parser.add_argument('-js', '--generate_js_inputs', action='store_true',
help='Use this argument to generate javascript inputs for kibana landing page')
+
parser.add_argument('--js_path', default='/usr/share/nginx/html/kibana_dashboards/conf.js',
help='Path of javascript file with inputs for kibana landing page')
+
parser.add_argument('-k', '--kibana_url', default='https://testresults.opnfv.org/kibana/app/kibana',
help='The url of kibana for javascript inputs')
- parser.add_argument('-u', '--elasticsearch-username',
- help='the username for elasticsearch')
-
- parser.add_argument('-p', '--elasticsearch-password',
- help='the password for elasticsearch')
+ parser.add_argument('-u', '--elasticsearch-username', default=None,
+ help='The username with password for elasticsearch in format username:password')
args = parser.parse_args()
base_elastic_url = args.elasticsearch_url
generate_inputs = args.generate_js_inputs
input_file_path = args.js_path
kibana_url = args.kibana_url
- es_user = args.elasticsearch_username
- es_passwd = args.elasticsearch_password
+ es_creds = args.elasticsearch_username
dashboards = construct_dashboards()
if generate_inputs:
generate_js_inputs(input_file_path, kibana_url, dashboards)
+
logger = logging.getLogger('clear_kibana')
logger.setLevel(logging.DEBUG)
-file_handler = logging.FileHandler('/var/log/{}.log'.format(__name__))
+file_handler = logging.FileHandler('/var/log/{}.log'.format('clear_kibana'))
file_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s'))
logger.addHandler(file_handler)
-def delete_all(url, es_user, es_passwd):
- ids = shared_utils.get_elastic_data(url, es_user, es_passwd, body=None, field='_id')
+def delete_all(url, es_creds):
+ ids = shared_utils.get_elastic_data(url, es_creds, body=None, field='_id')
for id in ids:
del_url = '/'.join([url, id])
- shared_utils.delete_request(del_url, es_user, es_passwd)
+ shared_utils.delete_request(del_url, es_creds)
if __name__ == '__main__':
parser.add_argument('-e', '--elasticsearch-url', default='http://localhost:9200',
help='the url of elasticsearch, defaults to http://localhost:9200')
- parser.add_argument('-u', '--elasticsearch-username',
- help='the username for elasticsearch')
-
- parser.add_argument('-p', '--elasticsearch-password',
- help='the password for elasticsearch')
+ parser.add_argument('-u', '--elasticsearch-username', default=None,
+ help='The username with password for elasticsearch in format username:password')
args = parser.parse_args()
base_elastic_url = args.elasticsearch_url
- es_user = args.elasticsearch_username
- es_passwd = args.elasticsearch_password
+ es_creds = args.elasticsearch_username
urls = (urlparse.urljoin(base_elastic_url, '/.kibana/visualization'),
urlparse.urljoin(base_elastic_url, '/.kibana/dashboard'),
urlparse.urljoin(base_elastic_url, '/.kibana/search'))
for url in urls:
- delete_all(url, es_user, es_passwd)
+ delete_all(url, es_creds)
+
logger = logging.getLogger('mongo_to_elasticsearch')
logger.setLevel(logging.DEBUG)
-file_handler = logging.FileHandler('/var/log/{}.log'.format(__name__))
+file_handler = logging.FileHandler('/var/log/{}.log'.format('mongo_to_elasticsearch'))
file_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s'))
logger.addHandler(file_handler)
for dictionary in dict_list:
# iterate over dictionaries in input list
if not isinstance(dictionary, dict):
- logger.info("Skipping non-dict details testcase [{}]".format(testcase))
+ logger.info("Skipping non-dict details testcase '{}'".format(testcase))
continue
if keys == set(dictionary.keys()):
# check the dictionary structure
"""
testcase_details = testcase['details']
+ if 'FUNCvirNet' not in testcase_details:
+ return modify_default_entry(testcase)
+
funcvirnet_details = testcase_details['FUNCvirNet']['status']
funcvirnet_statuses = _get_dicts_from_list(testcase, funcvirnet_details, {'Case result', 'Case name:'})
'failures': failed_tests,
'success_percentage': 100 * passed_tests / float(all_tests)
}
+ logger.debug("Modified odl testcase: '{}'".format(json.dumps(testcase, indent=2)))
return True
'case_name',
'project_name',
'details']
- mandatory_fields_to_modify = {'creation_date': _fix_date}
+ mandatory_fields_to_modify = {'start_date': _fix_date}
+ fields_to_swap_or_add = {'scenario': 'version'}
if '_id' in testcase:
mongo_id = testcase['_id']
else:
else:
testcase[key] = mandatory_fields_to_modify[key](value)
del mandatory_fields_to_modify[key]
+ elif key in fields_to_swap_or_add:
+ if value is None:
+ swapped_key = fields_to_swap_or_add[key]
+ swapped_value = testcase[swapped_key]
+ logger.info("Swapping field '{}' with value None for '{}' with value '{}'.".format(key, swapped_key, swapped_value))
+ testcase[key] = swapped_value
+ del fields_to_swap_or_add[key]
+ else:
+ del fields_to_swap_or_add[key]
elif key in optional_fields:
if value is None:
# empty optional field, remove
logger.info("Skipping testcase with mongo _id '{}' because the testcase was missing"
" mandatory field(s) '{}'".format(mongo_id, mandatory_fields))
return False
+ elif len(mandatory_fields_to_modify) > 0:
+ # some mandatory fields are missing
+ logger.info("Skipping testcase with mongo _id '{}' because the testcase was missing"
+ " mandatory field(s) '{}'".format(mongo_id, mandatory_fields_to_modify.keys()))
+ return False
else:
+ if len(fields_to_swap_or_add) > 0:
+ for key, swap_key in fields_to_swap_or_add.iteritems():
+ testcase[key] = testcase[swap_key]
+
return True
if verify_mongo_entry(testcase):
project = testcase['project_name']
case_name = testcase['case_name']
+ logger.info("Processing mongo test case '{}'".format(case_name))
if project == 'functest':
- if case_name == 'Rally':
+ if case_name == 'rally_sanity':
return modify_functest_rally(testcase)
- elif case_name == 'ODL':
+ elif case_name.lower() == 'odl':
return modify_functest_odl(testcase)
- elif case_name == 'ONOS':
+ elif case_name.lower() == 'onos':
return modify_functest_onos(testcase)
- elif case_name == 'vIMS':
+ elif case_name.lower() == 'vims':
return modify_functest_vims(testcase)
- elif case_name == 'Tempest':
+ elif case_name == 'tempest_smoke_serial':
return modify_functest_tempest(testcase)
return modify_default_entry(testcase)
else:
for mongo_json_line in fobj:
test_result = json.loads(mongo_json_line)
if modify_mongo_entry(test_result):
- shared_utils.publish_json(test_result, es_user, es_passwd, output_destination)
+ status, data = shared_utils.publish_json(test_result, es_creds, output_destination)
+ if status > 300:
+ project = test_result['project_name']
+ case_name = test_result['case_name']
+ logger.info('project {} case {} publish failed, due to [{}]'
+ .format(project, case_name, json.loads(data)['error']['reason']))
finally:
if os.path.exists(tmp_filename):
os.remove(tmp_filename)
def get_mongo_data(days):
past_time = datetime.datetime.today() - datetime.timedelta(days=days)
mongo_json_lines = subprocess.check_output(['mongoexport', '--db', 'test_results_collection', '-c', 'results',
- '--query', '{{"creation_date":{{$gt:"{}"}}}}'
+ '--query', '{{"start_date":{{$gt:"{}"}}}}'
.format(past_time)]).splitlines()
mongo_data = []
return mongo_data
-def publish_difference(mongo_data, elastic_data, output_destination, es_user, es_passwd):
+def publish_difference(mongo_data, elastic_data, output_destination, es_creds):
for elastic_entry in elastic_data:
if elastic_entry in mongo_data:
mongo_data.remove(elastic_entry)
logger.info('number of parsed test results: {}'.format(len(mongo_data)))
for parsed_test_result in mongo_data:
- shared_utils.publish_json(parsed_test_result, es_user, es_passwd, output_destination)
+ shared_utils.publish_json(parsed_test_result, es_creds, output_destination)
if __name__ == '__main__':
parser.add_argument('-e', '--elasticsearch-url', default='http://localhost:9200',
help='the url of elasticsearch, defaults to http://localhost:9200')
- parser.add_argument('-u', '--elasticsearch-username',
- help='the username for elasticsearch')
-
- parser.add_argument('-p', '--elasticsearch-password',
- help='the password for elasticsearch')
-
- parser.add_argument('-m', '--mongodb-url', default='http://localhost:8082',
- help='the url of mongodb, defaults to http://localhost:8082')
+ parser.add_argument('-u', '--elasticsearch-username', default=None,
+ help='The username with password for elasticsearch in format username:password')
args = parser.parse_args()
- base_elastic_url = urlparse.urljoin(args.elasticsearch_url, '/results/mongo2elastic')
+ base_elastic_url = urlparse.urljoin(args.elasticsearch_url, '/test_results/mongo2elastic')
output_destination = args.output_destination
days = args.merge_latest
- es_user = args.elasticsearch_username
- es_passwd = args.elasticsearch_password
+ es_creds = args.elasticsearch_username
if output_destination == 'elasticsearch':
output_destination = base_elastic_url
# parsed_test_results will be printed/sent to elasticsearch
if days == 0:
- # TODO get everything from mongo
publish_mongo_data(output_destination)
elif days > 0:
body = '''{{
"query" : {{
"range" : {{
- "creation_date" : {{
+ "start_date" : {{
"gte" : "now-{}d"
}}
}}
}}
}}'''.format(days)
- elastic_data = shared_utils.get_elastic_data(base_elastic_url, es_user, es_passwd, body)
+ elastic_data = shared_utils.get_elastic_data(base_elastic_url, es_creds, body)
logger.info('number of hits in elasticsearch for now-{}d: {}'.format(days, len(elastic_data)))
mongo_data = get_mongo_data(days)
- publish_difference(mongo_data, elastic_data, output_destination, es_user, es_passwd)
+ publish_difference(mongo_data, elastic_data, output_destination, es_creds)
else:
raise Exception('Update must be non-negative')
+
http = urllib3.PoolManager()
-def delete_request(url, username, password, body=None):
- headers = urllib3.util.make_headers(basic_auth=':'.join([username, password]))
+def delete_request(url, creds, body=None):
+ headers = urllib3.make_headers(basic_auth=creds)
http.request('DELETE', url, headers=headers, body=body)
-def publish_json(json_ojb, username, password, output_destination):
+def publish_json(json_ojb, creds, output_destination):
json_dump = json.dumps(json_ojb)
if output_destination == 'stdout':
print json_dump
+ return 200, None
else:
- headers = urllib3.util.make_headers(basic_auth=':'.join([username, password]))
- http.request('POST', output_destination, headers=headers, body=json_dump)
+ headers = urllib3.make_headers(basic_auth=creds)
+ result = http.request('POST', output_destination, headers=headers, body=json_dump)
+ return result.status, result.data
def _get_nr_of_hits(elastic_json):
return elastic_json['hits']['total']
-def get_elastic_data(elastic_url, username, password, body, field='_source'):
+def get_elastic_data(elastic_url, creds, body, field='_source'):
# 1. get the number of results
- headers = urllib3.util.make_headers(basic_auth=':'.join([username, password]))
+ headers = urllib3.make_headers(basic_auth=creds)
elastic_json = json.loads(http.request('GET', elastic_url + '/_search?size=0', headers=headers, body=body).data)
nr_of_hits = _get_nr_of_hits(elastic_json)
for hit in elastic_json['hits']['hits']:
elastic_data.append(hit[field])
return elastic_data
+