Run ODL CSIT on Apex from Cperf container (Gerrit change 67/58467/15)
author: Jamo Luhrsen <jluhrsen@redhat.com>
        Wed, 13 Jun 2018 00:00:04 +0000 (17:00 -0700)
committer: Tim Rozet <trozet@redhat.com>
        Mon, 16 Jul 2018 19:49:21 +0000 (15:49 -0400)
This patch adds the ability to deploy a cperf container and execute ODL
csit against an OPNFV deployment with ODL. The cperf job to execute csit
requires an RC file, SSH key to access OPNFV nodes, and a yaml
descriptor file that contains per node information about each
control/compute node.

This patch also adds triggering the cperf CSIT job inside of the 3rd
party ODL netvirt verification suite. That job uses Apex snapshots to
bring up deployment (no undercloud).

Additionally this patch includes some changes to allow multi version
snapshots to work. Multiple snapshots are now being created for
different OpenStack branches along with noha or HA type topologies. This
patch includes the ability to detect the desired scenario as triggered
by ODL Netvirt gerrit. Now in ODL netvirt gerrit a user may provide:
"check-opnfv <OS version>-<noha|ha>" style syntax to initiate 3rd Party
OPNFV CI on a particular OS version/HA setup.

Change-Id: I51a27545c985ce74c1c72fe0933eb451939a8c05
Signed-off-by: Jamo Luhrsen <jluhrsen@redhat.com>
Signed-off-by: Tim Rozet <trozet@redhat.com>
Signed-off-by: Jamo Luhrsen <jluhrsen@redhat.com>
jjb/3rd_party_ci/detect-snapshot.sh [new file with mode: 0755]
jjb/3rd_party_ci/install-netvirt.sh
jjb/3rd_party_ci/odl-netvirt.yaml
jjb/apex/apex-snapshot-deploy.sh
jjb/cperf/cperf-ci-jobs.yaml
jjb/cperf/cperf-prepare-robot.sh [new file with mode: 0755]
jjb/cperf/cperf-robot-netvirt-csit.sh [new file with mode: 0755]
jjb/cperf/parse-node-yaml.py [new file with mode: 0644]

diff --git a/jjb/3rd_party_ci/detect-snapshot.sh b/jjb/3rd_party_ci/detect-snapshot.sh
new file mode 100755 (executable)
index 0000000..46d4dfa
--- /dev/null
@@ -0,0 +1,31 @@
#!/bin/bash
# Detect the requested OpenStack branch and topology type (ha/noha) from the
# triggering gerrit comment, falling back to master/noha, and export the
# result via a properties file ("detected_snapshot") for downstream jobs.
set -o errexit
set -o nounset
set -o pipefail

echo "Detecting requested OpenStack branch and topology type in gerrit comment"
# Strip the leading "opnfv-check" keyword and trailing whitespace; whatever
# remains (if anything) must look like "<os-version>-<ha|noha>".
parsed_comment=$(echo $GERRIT_EVENT_COMMENT_TEXT | sed -n 's/^opnfv-check\s*//p')
parsed_comment=$(echo $parsed_comment | sed -n 's/\s*$//p')
if [ ! -z "$parsed_comment" ]; then
  if echo $parsed_comment | grep -E '^[a-z]+-(no)?ha'; then
    IFS='-' read -r -a array <<< "$parsed_comment"
    os_version=${array[0]}
    topo=${array[1]}
    echo "OS version detected in gerrit comment: ${os_version}"
    echo "Topology type detected in gerrit comment: ${topo}"
  else
    echo "Invalid format given for scenario in gerrit comment: ${parsed_comment}...aborting"
    exit 1
  fi
else
  echo "No scenario given in gerrit comment, will use default (master OpenStack, noha)"
  os_version='master'
  topo='noha'
fi

echo "Writing variables to file"
# NOTE: must be 'cat', not 'echo' -- 'echo > file << EOI' ignores the heredoc
# on stdin and would leave detected_snapshot effectively empty.
cat > detected_snapshot << EOI
OS_VERSION=$os_version
TOPOLOGY=$topo
SNAP_CACHE=$HOME/snap_cache/$os_version/$topo
EOI
index 07bbe77..232d60e 100755 (executable)
@@ -3,7 +3,7 @@ set -o errexit
 set -o nounset
 set -o pipefail
 
-SNAP_CACHE=$HOME/snap_cache
+SNAP_CACHE=$HOME/snap_cache/$OS_VERSION/$TOPOLOGY
 # clone opnfv sdnvpn repo
 git clone https://gerrit.opnfv.org/gerrit/p/sdnvpn.git $WORKSPACE/sdnvpn
 
index c78de9b..77263d0 100644 (file)
     builders:
       - description-setter:
           description: "Built on $NODE_NAME"
+      - detect-opnfv-snapshot
+      - inject:
+          properties-file: detected_snapshot
       - multijob:
           name: create-apex-vms
           condition: SUCCESSFUL
                 NETVIRT_ARTIFACT=$NETVIRT_ARTIFACT
                 APEX_ENV_NUMBER=$APEX_ENV_NUMBER
                 GERRIT_EVENT_COMMENT_TEXT=$GERRIT_EVENT_COMMENT_TEXT
+                TOPOLOGY=$TOPOLOGY
+                OS_VERSION=$OS_VERSION
               node-parameters: true
               kill-phase-on: FAILURE
               abort-all-job: true
             - name: 'odl-netvirt-verify-virtual-install-netvirt-{stream}'
               current-parameters: false
               predefined-parameters: |
-                ODL_BRANCH={branch}
+                ODL_BRANCH=$BRANCH
                 BRANCH=$BRANCH
                 GERRIT_REFSPEC=$GERRIT_REFSPEC
                 GERRIT_CHANGE_NUMBER=$GERRIT_CHANGE_NUMBER
                 GERRIT_PATCHSET_NUMBER=$GERRIT_PATCHSET_NUMBER
                 GERRIT_PATCHSET_REVISION=$GERRIT_PATCHSET_REVISION
                 NETVIRT_ARTIFACT=$NETVIRT_ARTIFACT
+                TOPOLOGY=$TOPOLOGY
+                OS_VERSION=$OS_VERSION
               node-parameters: true
               kill-phase-on: FAILURE
               abort-all-job: true
           projects:
             - name: 'functest-netvirt-virtual-suite-master'
               predefined-parameters: |
-                DEPLOY_SCENARIO=os-odl-nofeature-ha
+                DEPLOY_SCENARIO=os-odl-nofeature-$TOPOLOGY
                 FUNCTEST_MODE=testcase
                 FUNCTEST_SUITE_NAME=tempest_smoke
                 RC_FILE_PATH=$HOME/cloner-info/overcloudrc
               node-parameters: true
               kill-phase-on: FAILURE
               abort-all-job: false
+      - multijob:
+          name: csit
+          condition: ALWAYS
+          projects:
+            - name: cperf-apex-csit-{stream}
+              predefined-parameters: |
+                ODL_BRANCH=$BRANCH
+                RC_FILE_PATH=$SNAP_CACHE/overcloudrc
+                NODE_FILE_PATH=$SNAP_CACHE/node.yaml
+                SSH_KEY_PATH=$SNAP_CACHE/id_rsa
+                ODL_CONTAINERIZED=false
+                OS_VERSION=$OS_VERSION
+              node-parameters: true
+              kill-phase-on: NEVER
+              abort-all-job: false
       - multijob:
           name: postprocess
           condition: ALWAYS
     builders:
       - shell:
           !include-raw: ./postprocess-netvirt.sh
+
+- builder:
+    name: 'detect-opnfv-snapshot'
+    builders:
+      - shell:
+          !include-raw-escape: ./detect-snapshot.sh
index 0a47506..9738ecb 100644 (file)
@@ -25,27 +25,7 @@ pushd ci > /dev/null
 sudo opnfv-clean
 popd > /dev/null
 
-echo "Detecting requested OpenStack branch and topology type in gerrit comment"
-parsed_comment=$(echo $GERRIT_EVENT_COMMENT_TEXT | sed -n 's/^opnfv-check\s*//p')
-parsed_comment=$(echo $parsed_comment | sed -n 's/\s*$//p')
-if [ ! -z "$parsed_comment" ]; then
-  if echo $parsed_comment | grep -E '^[a-z]+-(no)?ha'; then
-    IFS='-' read -r -a array <<< "$parsed_comment"
-    os_version=${array[0]}
-    topo=${array[1]}
-    echo "OS version detected in gerrit comment: ${os_version}"
-    echo "Topology type detected in gerrit comment: ${topo}"
-  else
-    echo "Invalid format given for scenario in gerrit comment: ${parsed_comment}...aborting"
-    exit 1
-  fi
-else
-  echo "No scenario given in gerrit comment, will use default (master OpenStack, noha)"
-  os_version='master'
-  topo='noha'
-fi
-
-full_snap_url=http://$GS_URL/${os_version}/${topo}
+full_snap_url=http://$GS_URL/${OS_VERSION}/${TOPOLOGY}
 
 echo "Downloading latest snapshot properties file"
 if ! wget -O $WORKSPACE/opnfv.properties ${full_snap_url}/snapshot.properties; then
@@ -61,7 +41,7 @@ if [ -z "$latest_snap_checksum" ]; then
 fi
 
 local_snap_checksum=""
-SNAP_CACHE=${SNAP_CACHE}/${os_version}/${topo}
+SNAP_CACHE=${SNAP_CACHE}/${OS_VERSION}/${TOPOLOGY}
 
 # check snap cache directory exists
 # if snapshot cache exists, find the checksum
index fdd3509..59afb89 100644 (file)
@@ -9,47 +9,29 @@
     # -------------------------------
     # BRANCH ANCHORS
     # -------------------------------
-    master: &master
-      stream: master
-      branch: '{stream}'
-      gs-pathname: ''
-      docker-tag: 'latest'
-    danube: &danube
-      stream: danube
-      branch: 'stable/{stream}'
-      gs-pathname: '/{stream}'
-      docker-tag: 'stable'
+    stream: master
+    branch: '{stream}'
+    gs-pathname: ''
+    docker-tag: 'latest'
 
-    # -------------------------------
-    # POD, INSTALLER, AND BRANCH MAPPING
-    # -------------------------------
-    pod:
-      # -------------------------------
-      #        master
-      # -------------------------------
-      - intel-pod2:
-          installer: apex
-          <<: *master
-      - intel-pod2:
-          installer: apex
-          <<: *danube
+    installer: apex
 
     testsuite:
-      - 'daily'
+      - csit
+      - cbench
 
     jobs:
-      - 'cperf-{installer}-{pod}-{testsuite}-{stream}'
+      - 'cperf-{installer}-{testsuite}-{stream}'
 
 ################################
 # job template
 ################################
 - job-template:
-    name: 'cperf-{installer}-{pod}-{testsuite}-{stream}'
+    name: 'cperf-{installer}-{testsuite}-{stream}'
 
     concurrent: true
 
     properties:
-      - logrotate-default
       - throttle:
           enabled: true
           max-per-node: 1
 
     wrappers:
       - build-name:
-          name: '$BUILD_NUMBER Suite: $CPERF_SUITE_NAME Scenario: $DEPLOY_SCENARIO'
+          name: '$BUILD_NUMBER Suite: $CPERF_SUITE_NAME ODL BRANCH: $ODL_BRANCH'
       - timeout:
           timeout: 400
           abort: true
 
     parameters:
-      - project-parameter:
-          project: '{project}'
-          branch: '{branch}'
-      - '{pod}-defaults'
-      - '{installer}-defaults'
       - cperf-parameter:
           testsuite: '{testsuite}'
           gs-pathname: '{gs-pathname}'
           docker-tag: '{docker-tag}'
-
-    scm:
-      - git-scm
+          stream: '{stream}'
 
     builders:
       - 'cperf-{testsuite}-builder'
           name: CPERF_SUITE_NAME
           default: '{testsuite}'
           description: "Suite name to run"
+      - string:
+          name: ODL_BRANCH
+          default: 'master'
+          description: "Branch that OpenDaylight is running"
+      - string:
+          name: OS_VERSION
+          default: 'master'
+          description: "OpenStack version (short name, no stable/ prefix)"
       - string:
           name: GS_PATHNAME
           default: '{gs-pathname}'
           name: DOCKER_TAG
           default: '{docker-tag}'
           description: 'Tag to pull docker image'
+      - string:
+          name: RC_FILE_PATH
+          default: ''
+          description: "Path to the OS credentials file if given"
+      - string:
+          name: SSH_KEY_PATH
+          default: ''
+          description: "Path to the private SSH key to access OPNFV nodes"
+      - string:
+          name: NODE_FILE_PATH
+          default: ''
+          description: "Path to the yaml file describing overcloud nodes"
+      - string:
+          name: ODL_CONTAINERIZED
+          default: 'true'
+          description: "boolean set true if ODL on overcloud is a container"
 
 ########################
 # trigger macros
 # builder macros
 ########################
 - builder:
-    name: cperf-daily-builder
+    name: cperf-csit-builder
+    builders:
+      - 'cperf-cleanup'
+      - 'cperf-prepare-robot'
+      - 'cperf-robot-netvirt-csit'
+
+- builder:
+    name: cperf-cbench-builder
     builders:
       - 'cperf-cleanup'
+      - 'cperf-prepare-robot'
       - 'cperf-robot-cbench'
 
+- builder:
+    name: cperf-prepare-robot
+    builders:
+      - shell:
+          !include-raw: ./cperf-prepare-robot.sh
+
 - builder:
     name: cperf-robot-cbench
     builders:
           set -o errexit
           set -o nounset
           set -o pipefail
-          undercloud_mac=$(sudo virsh domiflist undercloud | grep default | \
-                            grep -Eo "[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+")
-          INSTALLER_IP=$(/usr/sbin/arp -e | grep ${undercloud_mac} | awk {'print $1'})
-
-          sudo scp -o StrictHostKeyChecking=no root@$INSTALLER_IP:/home/stack/overcloudrc /tmp/overcloudrc
-          sudo chmod 755 /tmp/overcloudrc
-          source /tmp/overcloudrc
-
-          # robot suites need the ssh key to log in to controller nodes, so throwing it
-          # in tmp, and mounting /tmp as $HOME as far as robot is concerned
-          sudo rm -rf /tmp/.ssh
-          sudo mkdir /tmp/.ssh
-          sudo chmod 0700 /tmp/.ssh
-          sudo scp -o StrictHostKeyChecking=no root@$INSTALLER_IP:/home/stack/.ssh/id_rsa /tmp/.ssh/
-          sudo chown -R jenkins-ci:jenkins-ci /tmp/.ssh
-          # done with sudo. jenkins-ci is the user from this point
-          chmod 0600 /tmp/.ssh/id_rsa
 
           # cbench requires the openflow drop test feature to be installed.
           sshpass -p karaf ssh -o StrictHostKeyChecking=no \
                                -p 8101 karaf@$SDN_CONTROLLER_IP \
                                 feature:install odl-openflowplugin-flow-services-ui odl-openflowplugin-drop-test
 
-          docker pull opnfv/cperf:$DOCKER_TAG
-
           robot_cmd="pybot -e exclude -L TRACE -d /tmp \
                       -v ODL_SYSTEM_1_IP:${SDN_CONTROLLER_IP} \
                       -v ODL_SYSTEM_IP:${SDN_CONTROLLER_IP} \
 
           docker run -i -v /tmp:/tmp opnfv/cperf:$DOCKER_TAG ${robot_cmd} ${robot_suite}
 
+- builder:
+    name: cperf-robot-netvirt-csit
+    builders:
+      - shell:
+          !include-raw: ./cperf-robot-netvirt-csit.sh
+
 - builder:
     name: cperf-cleanup
     builders:
diff --git a/jjb/cperf/cperf-prepare-robot.sh b/jjb/cperf/cperf-prepare-robot.sh
new file mode 100755 (executable)
index 0000000..d88c6d5
--- /dev/null
@@ -0,0 +1,32 @@
#!/usr/bin/env bash
# Stage everything the cperf robot runs need: overcloud credentials
# (overcloudrc), the SSH key used to reach overcloud nodes, the cperf docker
# image, and the results directory.
set -o errexit
set -o nounset
set -o pipefail

# Use the RC file handed in by the job when provided; otherwise pull the
# overcloudrc from the undercloud VM (full Apex deployments with undercloud).
# The jjb parameter defaults to '', so test for empty-or-unset, not just unset.
if [ -z "${RC_FILE_PATH:-}" ]; then
  undercloud_mac=$(sudo virsh domiflist undercloud | grep default | \
                   grep -Eo "[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+")
  INSTALLER_IP=$(/usr/sbin/arp -e | grep ${undercloud_mac} | awk {'print $1'})
  # copy straight into the workspace -- the chmod/source below expect it there
  sudo scp -o StrictHostKeyChecking=no root@$INSTALLER_IP:/home/stack/overcloudrc ${WORKSPACE}/overcloudrc
else
  cp -f $RC_FILE_PATH ${WORKSPACE}/overcloudrc
fi

sudo chmod 755 ${WORKSPACE}/overcloudrc
source ${WORKSPACE}/overcloudrc

# copy ssh key for robot

if [ -z "${SSH_KEY_PATH:-}" ]; then
  sudo scp -o StrictHostKeyChecking=no root@$INSTALLER_IP:/home/stack/.ssh/id_rsa ${WORKSPACE}/
  sudo chown -R jenkins-ci:jenkins-ci ${WORKSPACE}/
  # done with sudo. jenkins-ci is the user from this point
  chmod 0600 ${WORKSPACE}/id_rsa
else
  cp -f ${SSH_KEY_PATH} ${WORKSPACE}/
fi

docker pull opnfv/cperf:$DOCKER_TAG

sudo mkdir -p /tmp/robot_results
diff --git a/jjb/cperf/cperf-robot-netvirt-csit.sh b/jjb/cperf/cperf-robot-netvirt-csit.sh
new file mode 100755 (executable)
index 0000000..3ef7471
--- /dev/null
@@ -0,0 +1,105 @@
#!/usr/bin/env bash
# Run ODL NetVirt CSIT (robot) against an OPNFV Apex deployment from the
# cperf docker container.  Expects cperf-prepare-robot.sh to have staged
# ${WORKSPACE}/overcloudrc and ${WORKSPACE}/id_rsa beforehand.
set -o errexit
set -o nounset
set -o pipefail

source ${WORKSPACE}/overcloudrc
# note SDN_CONTROLLER_IP is set in overcloudrc, which is the VIP
# for admin/public network (since we are running single network deployment)

if [ "$OS_VERSION" == 'master' ]; then
  FULL_OS_VER='master'
else
  FULL_OS_VER="stable/${OS_VERSION}"
fi

if [ "$ODL_BRANCH" == 'master' ]; then
  # ODL's master branch currently tracks the Fluorine release stream
  ODL_STREAM='fluorine'
else
  ODL_STREAM=${ODL_BRANCH}
fi

NUM_CONTROL_NODES=$(python ./parse-node-yaml.py num_nodes --file $NODE_FILE_PATH)
NUM_COMPUTE_NODES=$(python ./parse-node-yaml.py num_nodes --node-type compute --file $NODE_FILE_PATH)

EXTRA_ROBOT_ARGS=""
for idx in `seq 1 $NUM_CONTROL_NODES`; do
  CONTROLLER_IP=$(python ./parse-node-yaml.py get_value -k address --node-number ${idx} --file $NODE_FILE_PATH)
  # one ODL_SYSTEM_<n>_IP per controller (was passed twice before)
  EXTRA_ROBOT_ARGS+=" -v ODL_SYSTEM_${idx}_IP:${CONTROLLER_IP} \
                      -v OS_CONTROL_NODE_${idx}_IP:${CONTROLLER_IP} \
                      -v HA_PROXY_${idx}_IP:${SDN_CONTROLLER_IP}"
done

for idx in `seq 1 $NUM_COMPUTE_NODES`; do
  COMPUTE_IP=$(python ./parse-node-yaml.py get_value -k address --node-type compute --node-number ${idx} --file $NODE_FILE_PATH)
  EXTRA_ROBOT_ARGS+=" -v OS_COMPUTE_${idx}_IP:${COMPUTE_IP}"
done

CONTROLLER_1_IP=$(python ./parse-node-yaml.py get_value -k address --node-number 1 --file $NODE_FILE_PATH)

# CSIT start/stop/kill commands differ depending on whether ODL runs as a
# systemd service or inside a docker container on the overcloud.
if [ "$ODL_CONTAINERIZED" == 'false' ]; then
  EXTRA_ROBOT_ARGS+=" -v NODE_KARAF_COUNT_COMMAND:'ps axf | grep org.apache.karaf | grep -v grep | wc -l || echo 0' \
                      -v NODE_START_COMMAND:'sudo systemctl start opendaylight_api' \
                      -v NODE_KILL_COMMAND:'sudo systemctl stop opendaylight_api' \
                      -v NODE_STOP_COMMAND:'sudo systemctl stop opendaylight_api' \
                      -v NODE_FREEZE_COMMAND:'sudo systemctl stop opendaylight_api' "
else
  EXTRA_ROBOT_ARGS+=" -v NODE_KARAF_COUNT_COMMAND:\"sudo docker exec opendaylight_api /bin/bash -c 'ps axf | \
                                grep org.apache.karaf | grep -v grep | wc -l' || echo 0\" \
                      -v NODE_START_COMMAND:\"sudo docker start opendaylight_api\" \
                      -v NODE_KILL_COMMAND:\"sudo docker stop opendaylight_api\" \
                      -v NODE_STOP_COMMAND:\"sudo docker stop opendaylight_api\" \
                      -v NODE_FREEZE_COMMAND:\"sudo docker stop opendaylight_api\" "
fi

# NOTE(review): NUM_OS_SYSTEM is set to the control-node count only; if CSIT
# expects the total OpenStack node count this should be
# $((NUM_CONTROL_NODES + NUM_COMPUTE_NODES)) -- confirm against the suites.
robot_cmd="pybot \
  --removekeywords wuks \
  --xunit robotxunit.xml \
  -c critical \
  -e exclude \
  -d /tmp/robot_results \
  -v BUNDLEFOLDER:/opt/opendaylight \
  -v CONTROLLER_USER:heat-admin \
  -v DEFAULT_LINUX_PROMPT:\$ \
  -v DEFAULT_LINUX_PROMPT_STRICT:]\$ \
  -v DEFAULT_USER:heat-admin \
  -v DEVSTACK_DEPLOY_PATH:/tmp \
  -v HA_PROXY_IP:$SDN_CONTROLLER_IP \
  -v NUM_ODL_SYSTEM:$NUM_CONTROL_NODES \
  -v NUM_OS_SYSTEM:$NUM_CONTROL_NODES \
  -v NUM_TOOLS_SYSTEM:0 \
  -v ODL_SNAT_MODE:conntrack \
  -v ODL_STREAM:$ODL_STREAM \
  -v ODL_SYSTEM_IP:$CONTROLLER_1_IP \
  -v OS_CONTROL_NODE_IP:$CONTROLLER_1_IP \
  -v OPENSTACK_BRANCH:$FULL_OS_VER \
  -v OS_USER:heat-admin \
  -v ODL_ENABLE_L3_FWD:yes \
  -v ODL_SYSTEM_USER:heat-admin \
  -v ODL_SYSTEM_PROMPT:\$ \
  -v PRE_CLEAN_OPENSTACK_ALL:True \
  -v PUBLIC_PHYSICAL_NETWORK:datacentre \
  -v RESTCONFPORT:8081 \
  -v ODL_RESTCONF_USER:admin \
  -v ODL_RESTCONF_PASSWORD:admin \
  -v KARAF_PROMPT_LOGIN:'opendaylight-user' \
  -v KARAF_PROMPT:'opendaylight-user.*root.*>' \
  -v SECURITY_GROUP_MODE:stateful \
  -v USER:heat-admin \
  -v USER_HOME:\$HOME \
  -v TOOLS_SYSTEM_IP:'' \
  -v NODE_ROLE_INDEX_START:0 \
  -v WORKSPACE:/tmp  \
  $EXTRA_ROBOT_ARGS \
  -v of_port:6653 "

docker run -i --net=host \
  -v ${WORKSPACE}/id_rsa:/tmp/id_rsa \
  -v ${WORKSPACE}/overcloudrc:/tmp/overcloudrc \
  opnfv/cperf:$DOCKER_TAG \
  /bin/bash -c "source /tmp/overcloudrc; mkdir -p \$HOME/.ssh; cp /tmp/id_rsa \$HOME/.ssh; \
  $robot_cmd /home/opnfv/repos/odl_test/csit/suites/openstack/connectivity/l2.robot;"
diff --git a/jjb/cperf/parse-node-yaml.py b/jjb/cperf/parse-node-yaml.py
new file mode 100644 (file)
index 0000000..5a75755
--- /dev/null
@@ -0,0 +1,71 @@
+##############################################################################
+# Copyright (c) 2018 Tim Rozet (trozet@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import argparse
+import sys
+import yaml
+
+
def get_node_data_by_number(node_type, node_number):
    """Return ``(name, data)`` of the node_number-th node of ``node_type``.

    Nodes are scanned from the module-level ``data['servers']`` mapping in
    insertion order; ``node_number`` is 1-based.  Returns ``(None, None)``
    when fewer than ``node_number`` nodes of that type exist, so callers
    can always unpack the result.
    """
    node_idx = 1
    for node_name, node_data in data['servers'].items():
        if node_type == node_data['type']:
            if node_idx == node_number:
                return node_name, node_data
            else:
                node_idx += 1
    # Explicit miss value: the previous implicit None made callers that
    # unpack the result raise TypeError.
    return None, None
+
+
def get_node_value(node_type, node_number, key):
    """Look up ``key`` for the node_number-th node of ``node_type``.

    When ``key`` is falsy the node's name is returned instead.  Returns
    None when the node does not exist or the key is absent.
    """
    found = get_node_data_by_number(node_type, node_number)
    # Guard the miss case before unpacking so a too-large --node-number
    # yields None instead of "TypeError: cannot unpack non-iterable".
    if found is None:
        return None
    node_name, node_data = found
    if not key and node_name is not None:
        return node_name
    elif node_data and isinstance(node_data, dict) and key in node_data:
        return node_data[key]
+
+
def get_number_of_nodes(node_type):
    """Count servers in the global ``data`` whose type is ``node_type``."""
    return sum(1 for server in data['servers'].values()
               if server['type'] == node_type)
+
+
# Map each CLI sub-command to its handler and the parsed-argument names
# that handler expects, in positional order.
FUNCTION_MAP = {
    'num_nodes': {'func': get_number_of_nodes,
                  'args': ['node_type']},
    'get_value': {'func': get_node_value,
                  'args': ['node_type', 'node_number', 'key']},
}

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('command', choices=FUNCTION_MAP.keys())
    parser.add_argument('-f', '--file', dest='node_file', required=True)
    parser.add_argument('--node-type', default='controller', required=False)
    parser.add_argument('--node-number', default=1, type=int, required=False)
    parser.add_argument('-k', '--key', required=False)
    cli = parser.parse_args(sys.argv[1:])

    # 'data' stays a module-level name: the handler functions read it as a
    # global.
    with open(cli.node_file, 'r') as fh:
        data = yaml.safe_load(fh)
    assert 'servers' in data

    dispatch = FUNCTION_MAP[cli.command]
    print(dispatch['func'](*[getattr(cli, name) for name in dispatch['args']]))