Merge "Add Storperf API usage in bottlenecks"
author Ace Lee <liyin11@huawei.com>
Thu, 21 Dec 2017 06:41:53 +0000 (06:41 +0000)
committer Gerrit Code Review <gerrit@opnfv.org>
Thu, 21 Dec 2017 06:41:53 +0000 (06:41 +0000)
23 files changed:
docs/release/release-notes/release_notes.rst
docs/testing/user/userguide/overview.rst
docs/testing/user/userguide/posca_guide.rst
monitor/automate_cadvisor_client.py [new file with mode: 0644]
monitor/automate_collectd_client.py [new file with mode: 0644]
monitor/cadvisor_install.sh [new file with mode: 0644]
monitor/config/collectd-client.conf [new file with mode: 0644]
monitor/config/collectd.conf
monitor/config/prometheus.yaml
monitor/install-collectd-client.sh [new file with mode: 0644]
monitor/monitoring.sh
testsuites/posca/testcase_cfg/posca_feature_moon_tenants.yaml [new file with mode: 0644]
testsuites/posca/testcase_cfg/posca_feature_vnf_scale_out.yaml [moved from testsuites/posca/testcase_cfg/posca_factor_vnf_scale_out.yaml with 53% similarity]
testsuites/posca/testcase_dashboard/posca_stress_ping.py
testsuites/posca/testcase_dashboard/posca_vnf_scale_out.py [new file with mode: 0755]
testsuites/posca/testcase_dashboard/system_bandwidth.py
testsuites/posca/testcase_script/posca_factor_vnf_scale_out.py [deleted file]
testsuites/posca/testcase_script/posca_feature_moon_tenants.py [new file with mode: 0644]
testsuites/posca/testcase_script/posca_feature_vnf_scale_out.py [new file with mode: 0644]
testsuites/run_testsuite.py
utils/env_prepare/quota_prepare.py
utils/env_prepare/stack_prepare.py
utils/infra_setup/runner/docker_env.py

index d8d143a..b52830e 100644 (file)
@@ -56,6 +56,9 @@ Version History
 | Sept 15th, 2017|  1.6               | Bottlenecks Euphrates release 5.0 |
 |                |                    |                                   |
 +----------------+--------------------+-----------------------------------+
+| Nov 15th, 2017 |  1.6.1             | Bottlenecks Euphrates release 5.1 |
+|                |                    |                                   |
++----------------+--------------------+-----------------------------------+
 
 Summary
 =======
@@ -81,6 +84,30 @@ Release Data
 Euphrates Release Data
 -----------------------
 
++--------------------------------------+--------------------------------+
+| **Project**                          | Bottlenecks                    |
+|                                      |                                |
++--------------------------------------+--------------------------------+
+| **Repo/tag**                         | * Bottlenecks/5.1.0            |
+|                                      |                                |
+|                                      |                                |
++--------------------------------------+--------------------------------+
+| **Bottlenecks Docker image tag**     | * 5.1.0                        |
+|                                      |                                |
+|                                      |                                |
++--------------------------------------+--------------------------------+
+| **Release designation**              | * 5.1.0                        |
+|                                      |                                |
+|                                      |                                |
++--------------------------------------+--------------------------------+
+| **Release date**                     | * Nov 15th 2017                |
+|                                      |                                |
+|                                      |                                |
++--------------------------------------+--------------------------------+
+| **Purpose of the delivery**          | Euphrates stable release       |
+|                                      |                                |
++--------------------------------------+--------------------------------+
+
 +--------------------------------------+--------------------------------+
 | **Project**                          | Bottlenecks                    |
 |                                      |                                |
@@ -238,7 +265,7 @@ Known restrictions/issues
  * Sometimes, Yardstick will return empty test results with the test flag indicating the test is executed successfully.
  This may be due to an environment issue or a poor internet connection preventing the testing tools from being installed successfully.
 
-* Sometimes, a process will go to sleep state. In this case when a running tool go to sleep state, we try to call it twice. Normally, it will response.
+* Sometimes, a process will go into a sleep state. In this case, when a running tool goes to sleep, we try to call it twice. Normally, it will then respond. This applies to the traffic generator, i.e., netperf.
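A minimal sketch of this retry-twice workaround, assuming a hypothetical run_tool wrapper around the traffic generator (nothing below is part of this change):

.. code-block:: python

    import subprocess
    import time

    def run_tool(cmd, retries=2, timeout=60):
        # Invoke a traffic generator (e.g. netperf); call it a second
        # time if the first run goes to sleep and never finishes.
        for attempt in range(retries):
            proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
            deadline = time.time() + timeout
            while proc.poll() is None and time.time() < deadline:
                time.sleep(1)
            if proc.poll() is not None:
                return proc.communicate()[0]
            proc.kill()  # stuck in a sleep state; kill and retry
        raise RuntimeError("tool did not respond after %d attempts" % retries)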
 
 
 Test results
index 5d76c76..746f24c 100644 (file)
@@ -58,17 +58,17 @@ Integration Description
 
 Test suite & Test case Description
 ==================================
-+--------+-------------------------------+
-|POSCA   | posca_factor_ping             |
-|        +-------------------------------+
-|        | posca_factor_system_bandwidth |
-|        +-------------------------------+
-|        | posca_facotor_througputs      |
-|        +-------------------------------+
-|        | posca_feature_scaleup         |
-|        +-------------------------------+
-|        | posca_feature_scaleout        |
-+--------+-------------------------------+
++--------+-------------------------------------+
+|POSCA   | posca_factor_ping                   |
+|        +-------------------------------------+
+|        | posca_factor_system_bandwidth       |
+|        +-------------------------------------+
+|        | posca_factor_soak_throughputs       |
+|        +-------------------------------------+
+|        | posca_feature_vnf_scale_up          |
+|        +-------------------------------------+
+|        | posca_feature_vnf_scale_out         |
++--------+-------------------------------------+
 
As for the test suites deprecated in previous Bottlenecks releases, please
refer to http://docs.opnfv.org/en/stable-danube/submodules/bottlenecks/docs/testing/user/userguide/deprecated.html.
index 009aedb..ec623d0 100644 (file)
@@ -30,7 +30,7 @@ Scopes of the POSCA testsuite:
  b) Parameters choosing and Algorithms.
 
 Test stories of POSCA testsuite:
- a) Factor test (Stress test): base test cases that Feature test and Optimization will be dependant on.
+ a) Factor test (stress test): base test cases that the feature and optimization tests depend on, or stress tests to validate the system.
  b) Feature test: test cases for features/scenarios.
  c) Optimization test: test to tune the system parameter.
 
@@ -45,6 +45,8 @@ Preinstall Packages
 * Docker: https://docs.docker.com/engine/installation/
     * For Ubuntu, please refer to https://docs.docker.com/engine/installation/linux/ubuntu/
 
+[Since the Euphrates release, the docker-compose package is no longer required.]
+
 * Docker-Compose: https://docs.docker.com/compose/
 
 .. code-block:: bash
@@ -97,12 +99,14 @@ Edit admin_rc.sh and add the following line
 
     export OS_CACERT=/tmp/os_cacert
 
-If you are using compass, fuel, apex or joid to deploy your openstack
-environment, you could use the following command to get the required files.
+If you have deployed your openstack environment by compass,
+you could use the following command to get the required files. For the Fuel, Apex and JOID installers, we currently provide only
+limited support for retrieving the configuration/description files. If the following command does not retrieve them, you should put the
+required files in /tmp manually.
 
 .. code-block:: bash
 
-    bash /utils/env_prepare/config_prepare.sh -i <installer> [--debug]
+    bash ./utils/env_prepare/config_prepare.sh -i <installer> [--debug]
 
Note that if we execute the command above, admin_rc.sh and pod.yml get created automatically in the /tmp folder, along with the line `export OS_CACERT=/tmp/os_cacert` added to the admin_rc.sh file.
 
@@ -116,9 +120,9 @@ Executing Specified Testcase
 
     bottlenecks testcase|teststory run <testname>
 
-    For the *testcase* command, testname should be as the same name of the test case configuration file located in testsuites/posca/testcase_cfg.
-    For stress tests in Danube/Euphrates, *testcase* should be replaced by either *posca_factor_ping* or *posca_factor_system_bandwidth*.
-    For the *teststory* command, a user can specify the test cases to be executed by defining it in a teststory configuration file located in testsuites/posca/testsuite_story. There is also an example there named *posca_factor_test*.
+For the *testcase* command, testname should be the same as the name of the test case configuration file located in testsuites/posca/testcase_cfg.
+For stress tests in Danube/Euphrates, *testcase* should be replaced by either *posca_factor_ping* or *posca_factor_system_bandwidth*.
+For the *teststory* command, a user can specify the test cases to be executed by defining them in a teststory configuration file located in testsuites/posca/testsuite_story. There is also an example there named *posca_factor_test*.
 
2. There are also two other ways to run test cases and test stories.
 
diff --git a/monitor/automate_cadvisor_client.py b/monitor/automate_cadvisor_client.py
new file mode 100644 (file)
index 0000000..95b98e9
--- /dev/null
@@ -0,0 +1,37 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import logging
+import sys
+import yaml
+sys.path.insert(0, '/home/opnfv/bottlenecks/utils/infra_setup/passwordless_SSH/')
+import ssh
+
+logger = logging.getLogger(__name__)
+with open('/tmp/pod.yaml') as f:
+    dataMap = yaml.safe_load(f)
+    for x in dataMap:
+        for y in dataMap[x]:
+            if y['role'] == 'Controller' or y['role'] == 'Compute':
+                ip = str(y['ip'])
+                user = str(y['user'])
+                pwd = str(y['password'])
+                ssh_d = ssh.SSH(user, host=ip, password=pwd)
+                status, stdout, stderr = ssh_d.execute("cd /etc && mkdir cadvisor-config")
+                if status:
+                    logger.info(stdout.splitlines())
+                    raise Exception("Command failed with non-zero status.")
+                with open("/home/opnfv/bottlenecks/monitor/cadvisor_install.sh") as stdin_file:
+                    ssh_d.run("cat > /etc/cadvisor-config/install.sh", stdin=stdin_file)
+                status, stdout, stderr = ssh_d.execute("sudo apt-get install -y docker.io")
+                if status:
+                    logger.info(stdout.splitlines())
+                    raise Exception("Command for installing docker failed.")
+                ssh_d.run("cd /etc/cadvisor-config/ && bash ./install.sh")
+
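For reference, the loop in automate_cadvisor_client.py assumes /tmp/pod.yaml maps a group name to a list of node entries carrying role/ip/user/password keys. An illustrative shape, with a placeholder top-level key and credentials (the 10.1.0.x addresses echo the prometheus.yaml example targets):

.. code-block:: python

    import yaml

    # Illustrative /tmp/pod.yaml content as consumed by yaml.safe_load above;
    # the top-level key and all values are placeholders.
    pod = yaml.safe_load("""
    nodes:
      - role: Controller
        ip: 10.1.0.50
        user: root
        password: root
      - role: Compute
        ip: 10.1.0.51
        user: root
        password: root
    """)
    assert pod["nodes"][0]["role"] == "Controller"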
diff --git a/monitor/automate_collectd_client.py b/monitor/automate_collectd_client.py
new file mode 100644 (file)
index 0000000..6dd7067
--- /dev/null
@@ -0,0 +1,38 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import logging
+import sys
+import yaml
+sys.path.insert(0, '/home/opnfv/bottlenecks/utils/infra_setup/passwordless_SSH/')
+import ssh
+
+logger = logging.getLogger(__name__)
+with open('/tmp/pod.yaml') as f:
+    dataMap = yaml.safe_load(f)
+    for x in dataMap:
+        for y in dataMap[x]:
+            if y['role'] == 'Controller' or y['role'] == 'Compute':
+                ip = str(y['ip'])
+                user = str(y['user'])
+                pwd = str(y['password'])
+                ssh_d = ssh.SSH(user, host=ip, password=pwd)
+                status, stdout, stderr = ssh_d.execute("cd /etc && mkdir collectd-config")
+                if status:
+                    logger.info(stdout.splitlines())
+                    raise Exception("Command failed with non-zero status.")
+                with open("/home/opnfv/bottlenecks/monitor/install-collectd-client.sh") as stdin_file:
+                    ssh_d.run("cat > /etc/collectd-config/install.sh", stdin=stdin_file)
+                with open("/home/opnfv/bottlenecks/monitor/config/collectd-client.conf") as stdin_file:
+                    ssh_d.run("cat > /etc/collectd-config/collectd.conf", stdin=stdin_file)
+                status, stdout, stderr = ssh_d.execute("sudo apt-get install -y docker.io")
+                if status:
+                    logger.info(stdout.splitlines())
+                    raise Exception("Command for installing docker failed.")
+                ssh_d.run("cd /etc/collectd-config/ && bash ./install.sh")
diff --git a/monitor/cadvisor_install.sh b/monitor/cadvisor_install.sh
new file mode 100644 (file)
index 0000000..524e24d
--- /dev/null
@@ -0,0 +1,10 @@
+sudo docker run \
+  --volume=/:/rootfs:ro \
+  --volume=/var/run:/var/run:rw \
+  --volume=/sys:/sys:ro \
+  --volume=/var/lib/docker/:/var/lib/docker:ro \
+  --volume=/dev/disk/:/dev/disk:ro \
+  --publish=8080:8080 \
+  --detach=true \
+  --name=cadvisor \
+  google/cadvisor:v0.25.0 \
+  -storage_driver=Prometheus
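Once this container is up, cAdvisor serves Prometheus-format metrics on port 8080. A quick hedged probe (Python 2, host IP borrowed from the prometheus.yaml example targets):

.. code-block:: python

    import urllib2

    # Sanity-check that cAdvisor is exporting metrics on :8080.
    # 192.168.121.2 is the example target IP, not a fixed address.
    resp = urllib2.urlopen("http://192.168.121.2:8080/metrics", timeout=5)
    assert resp.getcode() == 200
    print(resp.read()[:200])  # first few metric lines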
diff --git a/monitor/config/collectd-client.conf b/monitor/config/collectd-client.conf
new file mode 100644 (file)
index 0000000..96a2a69
--- /dev/null
@@ -0,0 +1,125 @@
+#
+# Config file for collectd(1).
+# Please read collectd.conf(5) for a list of options.
+# http://collectd.org/
+#
+
+##############################################################################
+# Global                                                                     #
+#----------------------------------------------------------------------------#
+# Global settings for the daemon.                                            #
+##############################################################################
+
+#Hostname    "localhost"
+#FQDNLookup   true
+#BaseDir     "${prefix}/var/lib/collectd"
+#PIDFile     "${prefix}/var/run/collectd.pid"
+#PluginDir   "${exec_prefix}/lib/collectd"
+#TypesDB     "/usr/share/collectd/types.db"
+
+#----------------------------------------------------------------------------#
+# When enabled, plugins are loaded automatically with the default options    #
+# when an appropriate <Plugin ...> block is encountered.                     #
+# Disabled by default.                                                       #
+#----------------------------------------------------------------------------#
+#AutoLoadPlugin false
+
+#----------------------------------------------------------------------------#
+# When enabled, internal statistics are collected, using "collectd" as the   #
+# plugin name.                                                               #
+# Disabled by default.                                                       #
+#----------------------------------------------------------------------------#
+#CollectInternalStats false
+
+#----------------------------------------------------------------------------#
+# Interval at which to query values. This may be overwritten on a per-plugin #
+# base by using the 'Interval' option of the LoadPlugin block:               #
+#   <LoadPlugin foo>                                                         #
+#       Interval 60                                                          #
+#   </LoadPlugin>                                                            #
+#----------------------------------------------------------------------------#
+#Interval     10
+
+#MaxReadInterval 86400
+#Timeout         2
+#ReadThreads     5
+#WriteThreads    5
+
+# Limit the size of the write queue. Default is no limit. Setting up a limit is
+# recommended for servers handling a high volume of traffic.
+#WriteQueueLimitHigh 1000000
+#WriteQueueLimitLow   800000
+
+##############################################################################
+# Logging                                                                    #
+#----------------------------------------------------------------------------#
+# Plugins which provide logging functions should be loaded first, so log     #
+# messages generated when loading or configuring other plugins can be        #
+# accessed.                                                                  #
+##############################################################################
+
+LoadPlugin syslog
+#<Plugin syslog>
+#       LogLevel info
+#</Plugin>
+
+##############################################################################
+# LoadPlugin section                                                         #
+#----------------------------------------------------------------------------#
+# Lines beginning with a single `#' belong to plugins which have been built  #
+# but are disabled by default.                                               #
+#                                                                            #
+# Lines beginning with `##' belong to plugins which have not been built due   #
+# to missing dependencies or because they have been deactivated explicitly.  #
+##############################################################################
+
+LoadPlugin cpu
+LoadPlugin interface
+LoadPlugin memory
+LoadPlugin network
+LoadPlugin rrdtool
+LoadPlugin write_http
+
+##############################################################################
+# Plugin configuration                                                       #
+#----------------------------------------------------------------------------#
+# In this section configuration stubs for each plugin are provided. A desc-  #
+# ription of those options is available in the collectd.conf(5) manual page. #
+##############################################################################
+
+#<Plugin cpu>
+#  ReportByCpu true
+#  ReportByState true
+#  ValuesPercentage false
+#</Plugin>
+
+#<Plugin interface>
+#       Interface "eth0"
+#       IgnoreSelected false
+#</Plugin>
+
+#<Plugin memory>
+#       ValuesAbsolute true
+#       ValuesPercentage false
+#</Plugin>
+
+<Plugin network>
+  Server "192.168.121.2" "25826"
+</Plugin>
+
+#<Plugin rrdtool>
+#       DataDir "${prefix}/var/lib/collectd/rrd"
+#       CreateFilesAsync false
+#       CacheTimeout 120
+#       CacheFlush   900
+#       WritesPerSecond 50
+#</Plugin>
+
+#<Plugin write_http>
+#  <Node "collectd_exporter">
+#    URL "http://192.168.121.2:9103/collectd-post"
+#    Format "JSON"
+#    StoreRates false
+#  </Node>
+#</Plugin>
+
index 62be9fb..6be610e 100644 (file)
@@ -751,7 +751,7 @@ LoadPlugin write_http
 #</Plugin>
 
 <Plugin network>
-  Server "192.168.104.2" "25826"
+  Listen "192.168.121.2" "25826"
 </Plugin>
 
 #<Plugin network>
@@ -1021,13 +1021,13 @@ LoadPlugin write_http
 #       CollectStatistics true
 #</Plugin>
 
-#<Plugin rrdtool>
-#       DataDir "${prefix}/var/lib/collectd/rrd"
-#       CreateFilesAsync false
-#       CacheTimeout 120
-#       CacheFlush   900
-#       WritesPerSecond 50
-#</Plugin>
+<Plugin rrdtool>
+       DataDir "/var/lib/collectd/rrd"
+       CreateFilesAsync false
+       CacheTimeout 120
+       CacheFlush   900
+       WritesPerSecond 50
+</Plugin>
 
 #<Plugin sensors>
 #       SensorConfigFile "/etc/sensors.conf"
@@ -1325,7 +1325,7 @@ LoadPlugin write_http
 
 <Plugin write_http>
   <Node "collectd_exporter">
-    URL "http://192.168.104.2:9103/collectd-post"
+    URL "http://192.168.121.2:9103/collectd-post"
     Format "JSON"
     StoreRates false
   </Node>
index 35bf040..3736d8e 100644 (file)
@@ -25,7 +25,15 @@ scrape_configs:
     scrape_interval: 5s
 
     static_configs:
-      - targets: ['192.168.104.2:9090']
+      - targets: ['192.168.121.2:9090']
+
+  - job_name: 'cadvisor'
+
+    # Override the global default and scrape targets from this job every 5 seconds.
+    scrape_interval: 5s
+
+    static_configs:
+      - targets: ['192.168.121.2:8080','10.1.0.50:8080','10.1.0.51:8080']
 
   - job_name: 'collectd'
 
@@ -33,7 +41,7 @@ scrape_configs:
     scrape_interval: 5s
 
     static_configs:
-      - targets: ['192.168.104.2:9103']
+      - targets: ['192.168.121.2:9103']
 
   - job_name: 'node'
 
@@ -41,4 +49,4 @@ scrape_configs:
     scrape_interval: 5s
 
     static_configs:
-      - targets: ['192.168.104.2:9100']
\ No newline at end of file
+      - targets: ['192.168.121.2:9100']
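One way to confirm the new cadvisor job is actually being scraped is the targets endpoint exposed by the pinned Prometheus (v1.7.1); a hedged sketch:

.. code-block:: python

    import json
    import urllib2

    # List the active scrape jobs; 'cadvisor' should appear alongside
    # 'collectd' and 'node' once the config above loads.
    resp = urllib2.urlopen("http://192.168.121.2:9090/api/v1/targets",
                           timeout=5)
    targets = json.load(resp)["data"]["activeTargets"]
    print(sorted(set(t["labels"]["job"] for t in targets)))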
diff --git a/monitor/install-collectd-client.sh b/monitor/install-collectd-client.sh
new file mode 100644 (file)
index 0000000..00fa4c8
--- /dev/null
@@ -0,0 +1,8 @@
+MONITOR_CONFIG="/etc/collectd-config"
+
+# Collectd
+sudo docker run --name bottlenecks-automated-collectd -d \
+  --privileged \
+  -v ${MONITOR_CONFIG}:/etc/collectd:ro \
+  -v /proc:/mnt/proc:ro \
+  fr3nd/collectd:5.5.0-1
index a4d404c..16cb308 100644 (file)
@@ -18,7 +18,7 @@ sudo docker run --name bottlenecks-node-exporter \
   -v "/sys:/host/sys:ro" \
   -v "/:/rootfs:ro" \
   --net="host" \
-  quay.io/prometheus/node-exporter \
+  quay.io/prometheus/node-exporter:v0.14.0 \
     -collector.procfs /host/proc \
     -collector.sysfs /host/sys \
     -collector.filesystem.ignored-mount-points "^/(sys|proc|dev|host|etc)($|/)"
@@ -28,22 +28,43 @@ sudo docker run --name bottlenecks-collectd -d \
   --privileged \
   -v ${MONITOR_CONFIG}:/etc/collectd:ro \
   -v /proc:/mnt/proc:ro \
-  fr3nd/collectd
+  fr3nd/collectd:5.5.0-1
 
 # Collectd-Exporter
 sudo docker run --name bottlenecks-collectd-exporter \
   -d -p 9103:9103 \
-  -p 25826:25826/udp prom/collectd-exporter \
+  -p 25826:25826/udp prom/collectd-exporter:0.3.1 \
   -collectd.listen-address=":25826"
 
 # Prometheus
 sudo docker run --name bottlenecks-prometheus \
   -d -p 9090:9090 \
   -v ${MONITOR_CONFIG}/prometheus.yaml:/etc/prometheus/prometheus.yml \
-  prom/prometheus
+  prom/prometheus:v1.7.1
 
 # Grafana
 sudo  docker run --name bottlenecks-grafana \
   -d -p 3000:3000 \
   -v ${GRAFANA}/config/grafana.ini:/etc/grafana/grafana.ini \
-  grafana/grafana
+  grafana/grafana:4.5.0
+
+# Cadvisor
+sudo docker run \
+  --volume=/:/rootfs:ro \
+  --volume=/var/run:/var/run:rw \
+  --volume=/sys:/sys:ro \
+  --volume=/var/lib/docker/:/var/lib/docker:ro \
+  --volume=/dev/disk/:/dev/disk:ro \
+  --publish=8080:8080 \
+  --detach=true \
+  --name=cadvisor \
+  google/cadvisor:v0.25.0 \
+  -storage_driver=Prometheus
+
+# Automate Collectd Client
+python automate_collectd_client.py
+
+# Automate Cadvisor Client
+python automate_cadvisor_client.py
+
+# Automate Prometheus Datasource and Grafana Dashboard creation
+python automated-dashboard-datasource.py
diff --git a/testsuites/posca/testcase_cfg/posca_feature_moon_tenants.yaml b/testsuites/posca/testcase_cfg/posca_feature_moon_tenants.yaml
new file mode 100644 (file)
index 0000000..3b621a9
--- /dev/null
@@ -0,0 +1,35 @@
+##############################################################################
+# Copyright (c) 2017 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+load_manager:
+  scenarios:
+    tool: https request
+    # info: parameters controlling the tenant ramp-up for the Moon stress test.
+    resources: 20
+    initial: 0
+    threshhold: 5
+    timeout: 30
+    SLA: 5
+
+
+  runners:
+    stack_create: yardstick
+    Debug: False
+    yardstick_test_dir: "samples"
+    yardstick_testcase: "bottlenecks_moon_tenants"
+
+  runner_exta:
+    # info: this section is for yardstick to do some extra env preparation.
+    installation_method: yardstick
+    installation_type: testpmd
+
+contexts:
+  # info: if the dashboard has data, we will create the data dashboard.
+  dashboard: "Bottlenecks-ELK"
+  yardstick: "Bottlenecks-yardstick"
\ No newline at end of file
@@ -7,12 +7,19 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 
-test_config:
-  num_vnfs: [1, 40]
-runner_config:
-  dashboard: "y"
-  dashboard_ip:
-  stack_create: yardstick
-  yardstick_test_ip:
-  yardstick_test_dir: "samples/vnf_samples/nsut/acl"
-  yardstick_testcase: "tc_heat_rfc2544_ipv4_1rule_1flow_64B_packetsize_scale_out.yaml"
+load_manager:
+  scenarios:
+    number_vnfs: 1, 2, 4
+    iterations: 10
+    interval: 35
+
+  runners:
+    stack_create: yardstick
+    flavor:
+    yardstick_test_dir: "samples/vnf_samples/nsut/acl"
+    yardstick_testcase: "tc_heat_rfc2544_ipv4_1rule_1flow_64B_trex_correlated_traffic_scale_out"
+
+contexts:
+  dashboard: "Bottlenecks-ELK"
+  yardstick: "Bottlenecks_yardstick"
+  yardstick_envpre: False
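The number_vnfs string above is turned into individual VNF counts by conf_parser.str_to_list in posca_feature_vnf_scale_out.py; a sketch of the expected behaviour, assuming str_to_list splits on commas:

.. code-block:: python

    # Hedged sketch of how the scenario block above is consumed;
    # str_to_list is assumed to split the string on ','.
    number_vnfs = "1, 2, 4"
    num_vnfs = [v.strip() for v in number_vnfs.split(",")]
    for n in num_vnfs:
        case_config = {"num_vnfs": int(n), "iterations": 10, "interval": 35}
        print(case_config)  # each case_config is handed to do_test() in turn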
index 7a5a8fb..64ce383 100644 (file)
@@ -32,7 +32,7 @@ def dashboard_send_data(runner_config, test_data):
                    doc_type=test_data["testcase"],
                    body=test_data["data_body"])
     if res['created'] == "False":
-        LOG.error("date send to kibana have errors ", test_data["data_body"])
+        LOG.error("date send to kibana have errors %s", test_data["data_body"])
 
 
 def posca_stress_ping(runner_config):
diff --git a/testsuites/posca/testcase_dashboard/posca_vnf_scale_out.py b/testsuites/posca/testcase_dashboard/posca_vnf_scale_out.py
new file mode 100755 (executable)
index 0000000..6720b7f
--- /dev/null
@@ -0,0 +1,35 @@
+#!/usr/bin/python
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import ConfigParser
+from elasticsearch import Elasticsearch
+import os
+import utils.logger as log
+from utils.parser import Parser as conf_parser
+
+LOG = log.Logger(__name__).getLogger()
+config = ConfigParser.ConfigParser()
+es = Elasticsearch()
+dashboard_path = os.path.join(conf_parser.test_dir,
+                              "posca",
+                              "testcase_dashboard")
+dashboard_dir = dashboard_path + "/"
+
+
+def dashboard_send_data(runner_config, test_data):
+    global es
+    # es_ip = runner_config['dashboard_ip'].split(':')
+    es = Elasticsearch([{'host': "172.17.0.5"}])
+    for i in test_data:
+        res = es.index(index="bottlenecks",
+                       doc_type="vnf_scale_out",
+                       body=i)
+        if not res['created']:
+            LOG.error("data sent to kibana has errors %s", i)
index 4501dee..5479b67 100755 (executable)
@@ -31,7 +31,7 @@ def dashboard_send_data(runner_config, test_data):
                    doc_type=test_data["testcase"],
                    body=test_data["data_body"])
     if res['created'] == "False":
-        LOG.error("date send to kibana have errors ", test_data["data_body"])
+        LOG.error("date send to kibana have errors %s", test_data["data_body"])
 
 
 def dashboard_system_bandwidth(runner_config):
diff --git a/testsuites/posca/testcase_script/posca_factor_vnf_scale_out.py b/testsuites/posca/testcase_script/posca_factor_vnf_scale_out.py
deleted file mode 100644 (file)
index 2241d02..0000000
+++ /dev/null
@@ -1,125 +0,0 @@
-#!/usr/bin/env python
-##############################################################################
-# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-"""This file realize the function of run systembandwidth script.
-for example this contain two part first run_script,
-second is algorithm, this part is about how to judge the bottlenecks.
-This test is using yardstick as a tool to begin test."""
-
-import os
-import time
-import utils.logger as log
-import utils.infra_setup.runner.yardstick as Runner
-from utils.parser import Parser as conf_parser
-import testsuites.posca.testcase_dashboard.system_bandwidth as DashBoard
-# --------------------------------------------------
-# logging configuration
-# --------------------------------------------------
-LOG = log.Logger(__name__).getLogger()
-
-testfile = os.path.basename(__file__)
-testcase, file_format = os.path.splitext(testfile)
-
-
-def env_pre(con_dic):
-    Runner.Create_Incluxdb(con_dic['runner_config'])
-
-
-def config_to_result(test_config, test_result):
-    testdata = {}
-    test_result["throughput"] = float(test_result["throughput"])
-    test_result.update(test_config)
-    testdata["data_body"] = test_result
-    testdata["testcase"] = testcase
-    return testdata
-
-
-def do_test(test_config, con_dic):
-    test_case = con_dic['runner_config']['yardstick_testcase']
-    test_dict = {
-        "action": "runTestCase",
-        "args": {
-            "opts": {
-                "task-args": test_config
-            },
-            "testcase": test_case
-        }
-    }
-    Task_id = Runner.Send_Data(test_dict, con_dic['runner_config'])
-    time.sleep(con_dic['test_config']['test_time'])
-    Data_Reply = Runner.Get_Reply(con_dic['runner_config'], Task_id)
-    try:
-        test_date =\
-            Data_Reply[con_dic['runner_config']['yardstick_testcase']][0]
-    except IndexError:
-        test_date = do_test(test_config, con_dic)
-
-    save_data = config_to_result(test_config, test_date)
-    if con_dic['runner_config']['dashboard'] == 'y':
-        DashBoard.dashboard_send_data(con_dic['runner_config'], save_data)
-
-    return save_data["data_body"]
-
-
-def run(con_dic):
-    # can we specify these ranges from command line?
-    low, high = con_dic['test_config']['num_vnfs']
-    data = {
-        "num_vnfs": range(low, high)
-    }
-    con_dic["result_file"] = os.path.dirname(
-        os.path.abspath(__file__)) + "/test_case/result"
-    pre_role_result = 1
-    data_return = {}
-    data_max = {}
-    data_return["throughput"] = 1
-
-    if con_dic["runner_config"]["yardstick_test_ip"] is None:
-        con_dic["runner_config"]["yardstick_test_ip"] =\
-            conf_parser.ip_parser("yardstick_test_ip")
-
-    env_pre(con_dic)
-
-    if con_dic["runner_config"]["dashboard"] == 'y':
-        if con_dic["runner_config"]["dashboard_ip"] is None:
-            con_dic["runner_config"]["dashboard_ip"] =\
-                conf_parser.ip_parser("dashboard")
-        LOG.info("Create Dashboard data")
-        DashBoard.dashboard_system_bandwidth(con_dic["runner_config"])
-
-    bandwidth_tmp = 1
-    # vcpus and mem are scaled together
-    for num_vnfs in data["scale_up_values"]:
-        data_max["throughput"] = 1
-        test_config = {
-            "num_vnfs": num_vnfs,
-            "test_time": con_dic['test_config']['test_time']
-        }
-        data_reply = do_test(test_config, con_dic)
-        conf_parser.result_to_file(data_reply, con_dic["out_file"])
-        # TODO: figure out which KPI to use
-        bandwidth = data_reply["throughput"]
-        if data_max["throughput"] < bandwidth:
-            data_max = data_reply
-        if abs(bandwidth_tmp - bandwidth) / float(bandwidth_tmp) < 0.025:
-            LOG.info("this group of data has reached top output")
-            break
-        else:
-            pre_reply = data_reply
-            bandwidth_tmp = bandwidth
-        cur_role_result = float(pre_reply["throughput"])
-        if (abs(pre_role_result - cur_role_result) /
-                float(pre_role_result) < 0.025):
-            LOG.info("The performance increases slowly")
-        if data_return["throughput"] < data_max["throughput"]:
-            data_return = data_max
-        pre_role_result = cur_role_result
-    LOG.info("Find bottlenecks of this config")
-    LOG.info("The max data is %d", data_return["throughput"])
-    return data_return
diff --git a/testsuites/posca/testcase_script/posca_feature_moon_tenants.py b/testsuites/posca/testcase_script/posca_feature_moon_tenants.py
new file mode 100644 (file)
index 0000000..8f4061d
--- /dev/null
@@ -0,0 +1,166 @@
+#!/usr/bin/env python
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+'''This file realizes the function of running the Moon tenants stress test.
+It contains two parts: the first is run_script,
+the second is the algorithm, which judges where the bottleneck is.
+This test uses yardstick as the driving tool.'''
+
+import os
+import time
+import uuid
+import json
+import Queue
+import multiprocessing
+import utils.logger as log
+from utils.parser import Parser as conf_parser
+import utils.env_prepare.stack_prepare as stack_prepare
+import utils.infra_setup.runner.docker_env as docker_env
+import utils.infra_setup.runner.yardstick as yardstick_task
+
+# --------------------------------------------------
+# logging configuration
+# --------------------------------------------------
+LOG = log.Logger(__name__).getLogger()
+
+testfile = os.path.basename(__file__)
+testcase, file_format = os.path.splitext(testfile)
+# cidr = "/home/opnfv/repos/yardstick/samples/pvp_throughput_bottlenecks.yaml"
+runner_switch = True
+runner_DEBUG = True
+
+
+def env_pre(con_dic):
+    LOG.info("yardstick environment prepare!")
+    stack_prepare._prepare_env_daemon(True)
+
+
+def config_to_result(test_config, test_result):
+    final_data = []
+    print(test_result)
+    out_data = test_result["result"]["testcases"]
+    test_data = out_data["pvp_throughput_bottlenecks"]["tc_data"]
+    for result in test_data:
+        testdata = {}
+        testdata["vcpu"] = test_config["vcpu"]
+        testdata["memory"] = test_config["memory"]
+        testdata["nrFlows"] = result["data"]["nrFlows"]
+        testdata["packet_size"] = result["data"]["packet_size"]
+        testdata["throughput"] = result["data"]["throughput_rx_mbps"]
+        final_data.append(testdata)
+    return final_data
+
+
+def testcase_parser(runner_conf, out_file="yardstick.out", **parameter_info):
+    cidr = "/home/opnfv/repos/yardstick/" + \
+           runner_conf["yardstick_test_dir"] + \
+           runner_conf["yardstick_testcase"]
+    cmd = yardstick_task.yardstick_command_parser(debug=runner_DEBUG,
+                                                  cidr=cidr,
+                                                  outfile=out_file,
+                                                  parameter=parameter_info)
+    return cmd
+
+
+def do_test(runner_conf, test_config, Use_Dashboard, context_conf):
+    yardstick_container = docker_env.yardstick_info['container']
+    out_file = ("/tmp/yardstick_" + str(uuid.uuid4()) + ".out")
+    cmd = testcase_parser(runner_conf, out_file=out_file, **test_config)
+    print(cmd)
+    stdout = docker_env.docker_exec_cmd(yardstick_container, cmd)
+    LOG.info(stdout)
+    loop_value = 0
+    while loop_value < 60:
+        time.sleep(2)
+        loop_value = loop_value + 1
+        with open(out_file) as f:
+            data = json.load(f)
+            if data["status"] == 1:
+                LOG.info("yardstick run success")
+                break
+            elif data["status"] == 2:
+                LOG.error("yardstick error exit")
+                exit()
+    # data = json.load(output)
+
+    save_data = config_to_result(test_config, data)
+    if Use_Dashboard is True:
+        print("use dashboard")
+        # DashBoard.dashboard_send_data(context_conf, save_data)
+
+    # return save_data["data_body"]
+    return save_data
+
+
+def run(test_config):
+    load_config = test_config["load_manager"]
+    scenarios_conf = load_config["scenarios"]
+    runner_conf = load_config["runners"]
+    Use_Dashboard = False
+
+    env_pre(None)
+    if test_config["contexts"]["yardstick_ip"] is None:
+        load_config["contexts"]["yardstick_ip"] =\
+            conf_parser.ip_parser("yardstick_test_ip")
+
+    if "dashboard" in test_config["contexts"].keys():
+        if test_config["contexts"]["dashboard_ip"] is None:
+            test_config["contexts"]["dashboard_ip"] =\
+                conf_parser.ip_parser("dashboard")
+        LOG.info("Create Dashboard data")
+        Use_Dashboard = True
+        # DashBoard.dashboard_system_bandwidth(test_config["contexts"])
+
+    resources = conf_parser.str_to_list(scenarios_conf["resources"])
+    initial = conf_parser.str_to_list(scenarios_conf["initial"])
+    threshhold = conf_parser.str_to_list(scenarios_conf["threshhold"])
+    timeout = conf_parser.str_to_list(scenarios_conf["timeout"])
+    SLA = conf_parser.str_to_list(scenarios_conf["SLA"])
+    case_config = {"SLA": SLA,
+                   "resources": resources}
+
+    process_queue = Queue.Queue()
+
+    load_config["result_file"] = os.path.dirname(
+        os.path.abspath(__file__)) + "/test_case/result"
+
+    result = 0
+
+    if initial == 0:
+        tenant_number = threshhold
+    else:
+        tenant_number = initial
+
+    while runner_switch is True:
+        for tenant in range(0, tenant_number):
+            process = multiprocessing.Process(target=do_test,
+                                              args=(runner_conf,
+                                                    case_config,
+                                                    Use_Dashboard,
+                                                    test_config["contexts"],
+                                                    ))
+            process.start()
+            process_queue.put(process)
+
+        result = result + tenant_number
+        tenant_number = threshhold
+        time.sleep(timeout)
+
+    while process_queue.qsize():
+        process = process_queue.get()
+        process.terminate()
+
+    if result == initial:
+        result = 0
+    else:
+        result = result - threshhold
+
+    LOG.info("Finished bottlenecks testcase")
+    LOG.info("The result data is %s", result)
+    return result
diff --git a/testsuites/posca/testcase_script/posca_feature_vnf_scale_out.py b/testsuites/posca/testcase_script/posca_feature_vnf_scale_out.py
new file mode 100644 (file)
index 0000000..6d53515
--- /dev/null
@@ -0,0 +1,151 @@
+#!/usr/bin/env python
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+"""This file realize the function of run systembandwidth script.
+for example this contain two part first run_script,
+second is algorithm, this part is about how to judge the bottlenecks.
+This test is using yardstick as a tool to begin test."""
+
+import utils.logger as log
+import uuid
+import json
+import os
+import time
+from utils.parser import Parser as conf_parser
+import utils.env_prepare.quota_prepare as quota_prepare
+import utils.env_prepare.stack_prepare as stack_prepare
+
+import utils.infra_setup.runner.docker_env as docker_env
+import utils.infra_setup.runner.yardstick as yardstick_task
+# --------------------------------------------------
+# logging configuration
+# --------------------------------------------------
+LOG = log.Logger(__name__).getLogger()
+
+
+testcase_name = ("tc_heat_rfc2544_ipv4_1rule_"
+                 "1flow_64B_trex_correlated_traffic_scale_out")
+testfile = os.path.basename(__file__)
+testcase, file_format = os.path.splitext(testfile)
+cidr = ("/home/opnfv/repos/yardstick/samples/vnf_samples/nsut/acl/"
+        "tc_heat_rfc2544_ipv4_1rule_1flow_64B_trex_correlated_"
+        "traffic_scale_out.yaml")
+runner_DEBUG = True
+
+
+def env_pre(test_config):
+    test_yardstick = False
+    if "yardstick" in test_config["contexts"].keys():
+        test_yardstick = True
+    print(test_yardstick)
+    stack_prepare._prepare_env_daemon(test_yardstick)
+    quota_prepare.quota_env_prepare()
+    cmd = ('yardstick env prepare')
+    LOG.info("yardstick environment prepare!")
+    print(docker_env.yardstick_info['container'])
+    if(test_config["contexts"]['yardstick_envpre']):
+        yardstick_container = docker_env.yardstick_info['container']
+        stdout = docker_env.docker_exec_cmd(yardstick_container, cmd)
+        LOG.debug(stdout)
+
+
+def config_to_result(test_config, test_result):
+    final_data = []
+    print(test_result)
+    out_data = test_result["result"]["testcases"]
+    test_data = out_data[testcase_name]["tc_data"]
+    for result in test_data:
+        testdata = {}
+        testdata["sequence"] = result["sequence"]
+        traffic_result = result["data"]["tg__0"]
+        if traffic_result:
+            testdata["RxThroughput"] = traffic_result["RxThroughput"]
+            testdata["TxThroughput"] = traffic_result["TxThroughput"]
+            testdata["DropPercentage"] = traffic_result["DropPercentage"]
+        final_data.append(testdata)
+    return final_data
+
+
+def testcase_parser(out_file="yardstick.out", **parameter_info):
+    cmd = yardstick_task.yardstick_command_parser(debug=runner_DEBUG,
+                                                  cidr=cidr,
+                                                  outfile=out_file,
+                                                  parameter=parameter_info)
+    return cmd
+
+
+def do_test(test_config, Use_Dashboard, context_conf):
+    yardstick_container = docker_env.yardstick_info['container']
+    out_file = ("/tmp/yardstick_" + str(uuid.uuid4()) + ".out")
+    cmd = testcase_parser(out_file=out_file, **test_config)
+    print(cmd)
+    stdout = docker_env.docker_exec_cmd(yardstick_container, cmd)
+    LOG.info(stdout)
+    loop_value = 0
+    while loop_value < 60:
+        time.sleep(2)
+        loop_value = loop_value + 1
+        with open(out_file) as f:
+            data = json.load(f)
+            if data["status"] == 1:
+                LOG.info("yardstick run success")
+                break
+            elif data["status"] == 2:
+                LOG.error("yardstick error exit")
+                exit()
+    # data = json.load(output)
+
+    save_data = config_to_result(test_config, data)
+    print("^^^^^^^^^^^^^^^^^^^^^^^^^")
+    print save_data
+    if Use_Dashboard is True:
+        print("use dashboard")
+        # DashBoard.dashboard_send_data(context_conf, save_data)
+
+    # return save_data["data_body"]
+    return save_data
+
+
+def run(test_config):
+    print(test_config)
+    load_config = test_config["load_manager"]
+    scenarios_conf = load_config["scenarios"]
+    Use_Dashboard = False
+    env_pre(test_config)
+    if test_config["contexts"]["yardstick_ip"] is None:
+        load_config["contexts"]["yardstick_ip"] =\
+            conf_parser.ip_parser("yardstick_test_ip")
+
+    if "dashboard" in test_config["contexts"].keys():
+        if test_config["contexts"]["dashboard_ip"] is None:
+            test_config["contexts"]["dashboard_ip"] =\
+                conf_parser.ip_parser("dashboard")
+        LOG.info("Create Dashboard data")
+        Use_Dashboard = True
+
+    num_vnfs = conf_parser.str_to_list(scenarios_conf["number_vnfs"])
+    iterations = scenarios_conf["iterations"]
+    interval = scenarios_conf["interval"]
+    load_config["result_file"] = os.path.dirname(
+        os.path.abspath(__file__)) + "/test_case/result"
+
+    result = []
+
+    for i in range(0, len(num_vnfs)):
+        print(i)
+        case_config = {"num_vnfs": int(num_vnfs[i]),
+                       "iterations": iterations,
+                       "interval": interval}
+        data_reply = do_test(case_config, Use_Dashboard,
+                             test_config["contexts"])
+        result.append(data_reply)
+
+    LOG.info("Finished bottlenecks testcase")
+    LOG.info("The result data is %s", result)
+    return result
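For clarity, config_to_result above walks a yardstick report of the following shape; the key names are taken from the code, while the numeric values are placeholders:

.. code-block:: python

    # Illustrative yardstick output consumed by config_to_result;
    # values are placeholders, not real results.
    test_result = {
        "result": {"testcases": {
            "tc_heat_rfc2544_ipv4_1rule_1flow_64B_trex_"
            "correlated_traffic_scale_out": {
                "tc_data": [
                    {"sequence": 1,
                     "data": {"tg__0": {"RxThroughput": 0.0,
                                        "TxThroughput": 0.0,
                                        "DropPercentage": 100.0}}},
                ]}}}
    }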
index e727668..2e82b20 100644 (file)
@@ -110,7 +110,7 @@ def testsuite_run(test_level, test_name, REPORT="False"):
         try:
             posca_testcase_run(tester_parser[0], testcase, config[testcase])
         except Exception, e:
-            LOG.warning('e.message:\t', e.message)
+            LOG.warning('e.message:\t%s', e.message)
         stop_date = datetime.datetime.now()
         LOG.info("End of %s testcase in POSCA testsuite", testcase)
         criteria = "FAIL"
index 2401595..267e70a 100644 (file)
@@ -22,7 +22,9 @@ neutron_quota = {"subnet": -1,
                  "subnetpool": -1,
                  "router": -1,
                  "port": -1,
-                 "security_group": -1}
+                 "security_group": -1,
+                 "security_group_rule": -1,
+                 "rbac_policy": -1}
 
 nova_quota = {"ram": -1,
               "cores": -1,
index 5de6218..25c2a29 100644 (file)
@@ -37,16 +37,6 @@ def _prepare_env_daemon(test_yardstick):
                             config.bottlenecks_config["yardstick_rc_dir"])
         docker_env.docker_exec_cmd(yardstick_contain,
                                    cmd)
-        file_orig = ("/home/opnfv/repos/yardstick/etc"
-                     "/yardstick/yardstick.conf.sample")
-        file_after = "/etc/yardstick/yardstick.conf"
-        cmd = "cp %s %s" % (file_orig,
-                            file_after)
-        docker_env.docker_exec_cmd(yardstick_contain,
-                                   cmd)
-        cmd = "sed -i '13s/http/file/g' /etc/yardstick/yardstick.conf"
-        docker_env.docker_exec_cmd(yardstick_contain,
-                                   cmd)
 
     # update the external_network
     _source_file(rc_file)
index 64d049b..438d3d1 100644 (file)
@@ -45,13 +45,13 @@ def env_yardstick(docker_name):
     volume = get_self_volume()
     yardstick_tag = os.getenv("Yardstick_TAG")
     if yardstick_tag is None:
-        yardstick_tag = "danube.3.1"
+        yardstick_tag = "latest"
     env_docker = client.containers.run(image="opnfv/yardstick:%s"
                                              % yardstick_tag,
                                        privileged=True,
                                        tty=True,
                                        detach=True,
-                                       ports={'5000': '8888'},
+                                       ports={'5000': '8888'},
                                        volumes=volume,
                                        name=docker_name)
     yardstick_info["container"] = env_docker