Tools: Jupyter Notebook Bug-fixes and Update. 55/69555/1
author    opensource-tnbt <sridhar.rao@spirent.com>
Wed, 15 Jan 2020 09:22:05 +0000 (14:52 +0530)
committer opensource-tnbt <sridhar.rao@spirent.com>
Wed, 15 Jan 2020 09:24:20 +0000 (14:54 +0530)
A separate volume for Jupyter data is created.
Results will be stored in that volume.
The notebook is updated to use the volume.

Signed-off-by: Sridhar K. N. Rao <sridhar.rao@spirent.com>
Change-Id: I2a359d64cf3d4281686d4d3c3d6f3ee6587c1e13

tools/docker/results/docker-compose.yml
tools/docker/results/jupyter/Dockerfile
tools/docker/results/notebooks/testresult-analysis.ipynb

diff --git a/tools/docker/results/docker-compose.yml b/tools/docker/results/docker-compose.yml
index 5c3ab1d..87ba7fc 100644 (file)
@@ -4,6 +4,7 @@ volumes:
     influx-data:
     grafana-data:
     mongo-data:
+    jupyter-data:
     testapi-logs:
 services:
   influxdb:
@@ -69,7 +70,7 @@ services:
     volumes:
       - ./notebooks:/notebooks
       - ./notebooks/testresult-analysis.ipynb:/notebooks/testresult-analysis.ipynb
-      - ./data:/data
+      - jupyter-data:/data
   postgres:
     image: postgres
     restart: always
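The compose change above swaps the ./data bind mount for the Docker-managed volume jupyter-data, so downloaded results persist across container re-creation. A quick sanity check from inside the jupyter container (a sketch; it assumes the volume is mounted at /data as configured above):

    import os, shutil

    # /data is backed by the named volume jupyter-data, so it shows up
    # as a mount point inside the container and survives rebuilds.
    print(os.path.ismount('/data'))
    print(shutil.disk_usage('/data'))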
diff --git a/tools/docker/results/jupyter/Dockerfile b/tools/docker/results/jupyter/Dockerfile
index d281695..94f9bd3 100644 (file)
@@ -10,4 +10,7 @@ RUN pip install -U graphviz paramiko
 RUN echo "c.NotebookApp.token=''" >> $HOME/.jupyter/jupyter_notebook_config.py 
 
 VOLUME /notebooks
+RUN mkdir /data/results
+VOLUME /data
+
 WORKDIR /notebooks
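In a Dockerfile, build steps that modify a path after it has been declared as a VOLUME are discarded, so the mkdir above is ordered before VOLUME /data. The notebook can also create the directory defensively at run time; a minimal sketch using the same path:

    import os

    # No-op when the image already ships /data/results; otherwise make
    # sure the SFTP downloads below have somewhere to land.
    os.makedirs('/data/results', exist_ok=True)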
diff --git a/tools/docker/results/notebooks/testresult-analysis.ipynb b/tools/docker/results/notebooks/testresult-analysis.ipynb
index a7e9335..4f12ed6 100644 (file)
@@ -86,7 +86,7 @@
     "stderr_data = []\n",
     "if directory_to_download:\n",
     "    # zip the collectd results to make the download faster\n",
-    "    zip_command = 'sudo -S tar -czvf '+ directory_to_download + '/collectd.tar.gz -C ' + directory_to_download + '/csv .'\n",
+    "    zip_command = 'sudo -S tar -czvf '+ directory_to_download + '/collectd.tar.gz -C ' + '/tmp/csv .'\n",
     "    session = client.open_channel(kind='session')\n",
     "    session.get_pty()\n",
     "    session.exec_command(zip_command)\n",
     "    for path,files  in sftp_walk(directory_to_download):\n",
     "        for file in files:\n",
     "            remote = os.path.join(path,file).replace(\"\\\\\",\"/\")\n",
-    "            local = os.path.join('./results', file).replace(\"\\/\",\"/\")\n",
+    "            local = os.path.join('/data/results', file).replace(\"\\/\",\"/\")\n",
     "            sftp.get(remote, local)\n",
     "# Untar the collectd results if we got it.\n",
-    "path = os.path.join('./results', 'collectd.tar.gz')\n",
+    "path = os.path.join('/data/results', 'collectd.tar.gz')\n",
     "if os.path.exists(path):\n",
     "    tar = tarfile.open(path)\n",
     "    tar.extractall()\n",
     "strings = ('* OS:', '* Kernel Version:', '* Board:', '* CPU:', '* CPU cores:',\n",
     "           '* Memory:', '* Virtual Switch Set-up:',\n",
     "           '* Traffic Generator:','* vSwitch:', '* DPDK Version:', '* VNF:')\n",
-    "filename = os.path.basename(glob.glob('./results/result*.rst')[0])\n",
+    "filename = os.path.basename(glob.glob('/data/results/result*.rst')[0])\n",
     "info_dict = {}\n",
-    "with open(os.path.join('./results', filename), 'r') as file:\n",
+    "with open(os.path.join('/data/results', filename), 'r') as file:\n",
     "    for line in file:\n",
     "        if any(s in line for s in strings):\n",
     "            info_dict[line.split(':', 1)[0]] = line.split(':', 1)[1].rstrip()\n",
    "metadata": {},
    "outputs": [],
    "source": [
-    "filename = os.path.basename(glob.glob('./results/vsperf*.conf')[0])\n",
-    "file = os.path.join('./results', filename)\n",
+    "filename = os.path.basename(glob.glob('/data/results/vsperf*.conf')[0])\n",
+    "file = os.path.join('/data/results', filename)\n",
     "with open(file, 'r') as f:\n",
     "    for line in f:\n",
     "        if line.startswith('TRAFFICGEN_DURATION'):\n",
     "            value = value.rstrip()\n",
     "            value = value.lstrip()\n",
     "            traffic_duration = int(value)\n",
+    "            print(traffic_duration)\n",
     "        elif line.startswith('VSWITCH_PMD_CPU_MASK'):\n",
     "            value = line.split('=')[1]\n",
     "            value = value.rstrip()\n",
     "            pmd_cores_mask = value.lstrip()\n",
+    "            print(pmd_cores_mask)\n",
     "        elif line.startswith('GUEST_CORE_BINDING'):\n",
     "            value = line.split('=')[1]\n",
     "            value = value.rstrip()\n",
     "            value = value.lstrip()\n",
     "            guest_cores = value[1:-2]\n",
+    "            print(guest_cores)"
     "\n",
-    "print(traffic_duration)\n",
-    "print(pmd_cores_mask)\n",
-    "print(guest_cores)"
    ]
   },
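The VSWITCH_PMD_CPU_MASK read above is a hex bitmask of the cores OVS pins its PMD threads to; a hedged helper (not part of the notebook) for expanding it into core ids:

    def mask_to_cores(mask):
        # Each set bit in the mask selects one CPU core, e.g. '0x30' -> [4, 5].
        value = int(mask, 16)
        return [bit for bit in range(value.bit_length()) if (value >> bit) & 1]

    print(mask_to_cores('0x30'))   # [4, 5]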
   {
     "portcores = collections.OrderedDict()\n",
     "chunks = []\n",
     "current_chunk = []\n",
-    "file = os.path.join('./results', 'ovs-cores.log')\n",
+    "file = os.path.join('/data/results', 'ovs-cores.log')\n",
     "with open(file, 'r') as f:\n",
     "    for line in f:\n",
     "        if line.startswith('pmd') and current_chunk:\n",
     "portcores = collections.OrderedDict()\n",
     "chunks = []\n",
     "current_chunk = []\n",
-    "file = os.path.join('./results', 'ovs-cores.log')\n",
+    "file = os.path.join('/data/results', 'ovs-cores.log')\n",
     "with open(file, 'r') as f:\n",
     "    for line in f:\n",
     "        if line.startswith('pmd') and current_chunk:\n",
    "source": [
     "lines_seen = set() # holds lines already seen\n",
     "outfile = open('./counts.dat', \"w\")\n",
-    "file = os.path.join('./results', 'trex-liveresults-counts.dat')\n",
+    "file = os.path.join('/data/results', 'trex-liveresults-counts.dat')\n",
     "for line in open(file, \"r\"):\n",
     "    if line not in lines_seen: # not a duplicate\n",
     "        outfile.write(line)\n",
    },
    "outputs": [],
    "source": [
-    "file = os.path.join('./results', 'RUNirq.irq.log')\n",
+    "file = os.path.join('/data/results', 'RUNirq.irq.log')\n",
     "tdf = pd.read_csv(file)\n",
     "tdf.columns\n",
     "exclude = ['          <1', '         < 5', '        < 10','        < 50', '       < 100', '       < 500', '      < 1000']\n",
    "outputs": [],
    "source": [
     "from datetime import datetime\n",
-    "filename = os.path.basename(glob.glob('./results/vsperf-overall*.log')[0])\n",
-    "logfile = os.path.join('./results', filename)\n",
+    "filename = os.path.basename(glob.glob('/data/results/vsperf-overall*.log')[0])\n",
+    "logfile = os.path.join('/data/results', filename)\n",
     "linecnt = 0\n",
     "times = {}\n",
     "with open(logfile) as f:\n",
    "outputs": [],
    "source": [
     "import glob\n",
-    "filename = os.path.basename(glob.glob('./results/result*.csv')[0])\n",
+    "filename = os.path.basename(glob.glob('/data/results/result*.csv')[0])\n",
     "filename\n",
-    "tdf = pd.read_csv(os.path.join('./results', filename))\n",
+    "tdf = pd.read_csv(os.path.join('/data/results', filename))\n",
     "pkts = ['tx_frames', 'rx_frames']\n",
     "fps =  ['tx_rate_fps', 'throughput_rx_fps']\n",
     "mbps = ['tx_rate_mbps', 'throughput_rx_mbps']\n",