Bug fix: Bottlenecks can't use CLI 17/37617/1
author    liyin <liyin11@huawei.com>
Mon, 17 Jul 2017 09:45:38 +0000 (17:45 +0800)
committer liyin <liyin11@huawei.com>
Mon, 17 Jul 2017 09:45:38 +0000 (17:45 +0800)
JIRA: BOTTLENECK-171

Bug fix: Bottlenecks can't use the CLI to start a test.
Some docker-compose functions were deleted to fit the ELK docker support.
After this patch you can run "bottlenecks testcase run posca_factor_ping"
to start this test.
You also need to put the rc file into the Bottlenecks docker container.
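
For example, a minimal sketch of the workflow (the rc file name, its
destination path and the container name below are illustrative assumptions):

    # copy the credentials (rc) file into the Bottlenecks container
    # (file name, destination path and container name are examples)
    docker cp ./admin-openrc.sh bottlenecks:/tmp/admin_rc.sh
    # start the posca_factor_ping test case from the CLI
    docker exec bottlenecks bottlenecks testcase run posca_factor_ping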

Change-Id: I340e31185a499c81f9595a902df89d8f0ddf0cd3
Signed-off-by: liyin <liyin11@huawei.com>
docker/bottleneck-compose/bottlenecks/Dockerfile [deleted file]
docker/bottleneck-compose/docker-compose.yml [deleted file]
docker/bottleneck-compose/kibana/Dockerfile [deleted file]
docker/bottleneck-compose/kibana/config/kibana.yml [deleted file]
docker/bottleneck-compose/kibana/entrypoint.sh [deleted file]
run_tests.sh
testsuites/posca/testcase_script/posca_factor_ping.py
testsuites/run_testsuite.py
utils/env_prepare/quota_prepare.py

diff --git a/docker/bottleneck-compose/bottlenecks/Dockerfile b/docker/bottleneck-compose/bottlenecks/Dockerfile
deleted file mode 100644 (file)
index 8e30511..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-FROM opnfv/bottlenecks:latest
-
-USER root
-
-RUN apt-get update && apt-get install -y \
-    openssh-server
-
-RUN mkdir /var/run/sshd
-EXPOSE 22
-CMD ["/usr/sbin/sshd", "-D"]
diff --git a/docker/bottleneck-compose/docker-compose.yml b/docker/bottleneck-compose/docker-compose.yml
deleted file mode 100644 (file)
index a0f3df4..0000000
+++ /dev/null
@@ -1,46 +0,0 @@
-elasticsearch:
-  image: elasticsearch:2.4.0
-  command: elasticsearch -Des.network.host=0.0.0.0
-  privileged: true
-  ports:
-    - "9200:9200"
-    - "9300:9300"
-
-kibana:
-  build: kibana/
-  volumes:
-    - ./kibana/config/:/opt/kibana/config/
-  privileged: true
-  ports:
-    - "5601:5601"
-  links:
-    - elasticsearch
-
-yardstick:
-  restart: always
-  image: opnfv/yardstick:latest
-  volumes:
-    - /var/run/docker.sock:/var/run/docker.sock
-    - /tmp/:/tmp/
-  ports:
-    - "8888:5000"
-  privileged: true
-  environment:
-    - INSTALLER_IP=192.168.200.2
-    - INSTALLER_TYPE=compass
-
-bottlenecks:
-  restart: always
-  build: bottlenecks/
-  volumes:
-    - /var/run/docker.sock:/var/run/docker.sock
-    - /tmp/:/tmp/
-  privileged: true
-  environment:
-    - INSTALLER_IP=192.168.200.2
-    - INSTALLER_TYPE=compass
-    - DEBUG=true
-    - NODE_NAME=${NODE_NAME}
-    - DEPLOY_SCENARIO=${DEPLOY_SCENARIO}
-    - BUILD_TAG=${BUILD_TAG}
-    - BRANCH=${BRANCH}
diff --git a/docker/bottleneck-compose/kibana/Dockerfile b/docker/bottleneck-compose/kibana/Dockerfile
deleted file mode 100644 (file)
index 50c72bb..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-FROM kibana:4.6.1
-
-RUN apt-get update && apt-get install -y netcat
-
-COPY entrypoint.sh /tmp/entrypoint.sh
-RUN chmod +x /tmp/entrypoint.sh
-
-RUN kibana plugin --install elastic/sense
-
-CMD ["/tmp/entrypoint.sh"]
diff --git a/docker/bottleneck-compose/kibana/config/kibana.yml b/docker/bottleneck-compose/kibana/config/kibana.yml
deleted file mode 100644 (file)
index 820c776..0000000
+++ /dev/null
@@ -1,76 +0,0 @@
-# Kibana is served by a back end server. This controls which port to use.
-port: 5601
-
-# The host to bind the server to.
-host: "0.0.0.0"
-
-# The Elasticsearch instance to use for all your queries.
-elasticsearch_url: "http://elasticsearch:9200"
-
-# preserve_elasticsearch_host true will send the hostname specified in `elasticsearch`. If you set it to false,
-# then the host you use to connect to *this* Kibana instance will be sent.
-elasticsearch_preserve_host: true
-
-# Kibana uses an index in Elasticsearch to store saved searches, visualizations
-# and dashboards. It will create a new index if it doesn't already exist.
-kibana_index: ".kibana"
-
-# If your Elasticsearch is protected with basic auth, this is the user credentials
-# used by the Kibana server to perform maintence on the kibana_index at statup. Your Kibana
-# users will still need to authenticate with Elasticsearch (which is proxied thorugh
-# the Kibana server)
-# kibana_elasticsearch_username: user
-# kibana_elasticsearch_password: pass
-
-# If your Elasticsearch requires client certificate and key
-# kibana_elasticsearch_client_crt: /path/to/your/client.crt
-# kibana_elasticsearch_client_key: /path/to/your/client.key
-
-# If you need to provide a CA certificate for your Elasticsarech instance, put
-# the path of the pem file here.
-# ca: /path/to/your/CA.pem
-
-# The default application to load.
-default_app_id: "discover"
-
-# Time in milliseconds to wait for elasticsearch to respond to pings, defaults to
-# request_timeout setting
-# ping_timeout: 1500
-
-# Time in milliseconds to wait for responses from the back end or elasticsearch.
-# This must be > 0
-request_timeout: 300000
-
-# Time in milliseconds for Elasticsearch to wait for responses from shards.
-# Set to 0 to disable.
-shard_timeout: 0
-
-# Time in milliseconds to wait for Elasticsearch at Kibana startup before retrying
-# startup_timeout: 5000
-
-# Set to false to have a complete disregard for the validity of the SSL
-# certificate.
-verify_ssl: true
-
-# SSL for outgoing requests from the Kibana Server (PEM formatted)
-# ssl_key_file: /path/to/your/server.key
-# ssl_cert_file: /path/to/your/server.crt
-
-# Set the path to where you would like the process id file to be created.
-# pid_file: /var/run/kibana.pid
-
-# If you would like to send the log output to a file you can set the path below.
-# This will also turn off the STDOUT log output.
-# log_file: ./kibana.log
-# Plugins that are included in the build, and no longer found in the plugins/ folder
-bundled_plugin_ids:
- - plugins/dashboard/index
- - plugins/discover/index
- - plugins/doc/index
- - plugins/kibana/index
- - plugins/markdown_vis/index
- - plugins/metric_vis/index
- - plugins/settings/index
- - plugins/table_vis/index
- - plugins/vis_types/index
- - plugins/visualize/index
diff --git a/docker/bottleneck-compose/kibana/entrypoint.sh b/docker/bottleneck-compose/kibana/entrypoint.sh
deleted file mode 100644 (file)
index c08d70a..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/usr/bin/env bash
-
-# Wait for the Elasticsearch container to be ready before starting Kibana.
-echo "Stalling for Elasticsearch"
-while true; do
-    nc -q 1 elasticsearch 9200 2>/dev/null && break
-done
-
-echo "Starting Kibana"
-exec kibana
diff --git a/run_tests.sh b/run_tests.sh
index b92e9f2..9901269 100755 (executable)
@@ -103,13 +103,8 @@ function run_test(){
                   If you want to run VSTF, please refer to earlier releases."
         ;;
         *)
-            info "Composing up dockers"
-            docker-compose -f /home/opnfv/bottlenecks/docker/bottleneck-compose/docker-compose.yml up -d
-            info "Pulling tutum/influxdb for yardstick"
-            docker pull tutum/influxdb:0.13
-            sleep 5
             info "Running posca $test_level: $test_exec"
-            docker exec bottleneckcompose_bottlenecks_1 python ${POSCA_SUITE}/../run_testsuite.py $test_level $test_exec $REPORT
+            python ${POSCA_SUITE}/../run_testsuite.py $test_level $test_exec $REPORT
         ;;
     esac
 }
diff --git a/testsuites/posca/testcase_script/posca_factor_ping.py b/testsuites/posca/testcase_script/posca_factor_ping.py
index 5f56cda..e46a919 100644 (file)
@@ -67,8 +67,8 @@ def do_test(test_config, con_dic):
     loop_walue = 0
     while loop_walue < 150:
         time.sleep(2)
+        loop_walue = loop_walue + 1
         with open(out_file) as f:
-            loop_walue = loop_walue + 1
             data = json.load(f)
             if data["status"] == 1:
                 if data["result"]["criteria"] == "PASS":
diff --git a/testsuites/run_testsuite.py b/testsuites/run_testsuite.py
index 7ec67ef..1e8d895 100644 (file)
@@ -77,6 +77,7 @@ def report(testcase, start_date, stop_date, criteria, details_doc):
 
 
 def docker_env_prepare(config):
+    LOG.info("Begin to prepare docker environment")
     if 'contexts' in config.keys() and config["contexts"] is not None:
         context_config = config["contexts"]
         if 'yardstick' in context_config.keys() and \
@@ -89,6 +90,7 @@ def docker_env_prepare(config):
             conf_parser.Parser.convert_docker_env(config, "dashboard")
             LOG.debug('Waiting for ELK init')
             time.sleep(15)
+    LOG.info("Docker environment have prepared")
     return
 
 
diff --git a/utils/env_prepare/quota_prepare.py b/utils/env_prepare/quota_prepare.py
index 850a46a..2401595 100644 (file)
@@ -48,7 +48,7 @@ def quota_env_prepare():
 
     result = commands.getstatusoutput(cmd)
     if result[0] == 0:
-        LOG.info(result[1])
+        LOG.info("Get %s project id is %s" % (tenant_name, result[1]))
     else:
         LOG.error("can't get openstack project id")
         return 1