Merge "Tools: Deployment and TestControl Containers"
[vswitchperf.git] / tools / docker / results / notebooks / testresult-analysis.ipynb
1 {
2  "cells": [
3   {
4    "cell_type": "markdown",
5    "metadata": {
6     "hide_input": true
7    },
8    "source": [
9     "# OPNFV VSPERF\n",
10     "# Beyond Performance Metrics: Towards Causation Analysis"
11    ]
12   },
13   {
14    "cell_type": "markdown",
15    "metadata": {},
16    "source": [
17     "### sridhar.rao@spirent.com and acm@research.att.com"
18    ]
19   },
20   {
21    "cell_type": "code",
22    "execution_count": null,
23    "metadata": {},
24    "outputs": [],
25    "source": [
26     "# Import packages\n",
27     "import numpy as np\n",
28     "import pandas as pd\n",
29     "import matplotlib.pyplot as plt\n",
30     "import seaborn as sns\n",
31     "from graphviz import Digraph\n",
32     "import collections\n",
33     "import glob\n",
34     "import os"
35    ]
36   },
37   {
38    "cell_type": "markdown",
39    "metadata": {},
40    "source": [
41     "## Get the results to analyze:\n",
42     "Getting the latest one, if ``directory_to_download`` is empty"
43    ]
44   },
45   {
46    "cell_type": "code",
47    "execution_count": null,
48    "metadata": {
49     "hide_input": true
50    },
51    "outputs": [],
52    "source": [
53     "import paramiko\n",
54     "import tarfile\n",
55     "import os\n",
56     "from stat import S_ISDIR\n",
57     "RECV_BYTES = 4096\n",
58     "hostname = '10.10.120.24'\n",
59     "port = 22\n",
60     "uname='opnfv'\n",
61     "pwd='opnfv' \n",
62     "stdout_data = []\n",
63     "stderr_data = []\n",
64     "client = paramiko.Transport((hostname, port))\n",
65     "client.connect(username=uname, password=pwd)\n",
66     "session = client.open_channel(kind='session')\n",
67     "directory_to_download = ''\n",
68     "\n",
69     "session.exec_command('ls /tmp | grep results')\n",
70     "if not directory_to_download:\n",
71     "    while True:\n",
72     "        if session.recv_ready():\n",
73     "            stdout_data.append(session.recv(RECV_BYTES))\n",
74     "        if session.recv_stderr_ready():\n",
75     "            stderr_data.append(session.recv_stderr(RECV_BYTES))\n",
76     "        if session.exit_status_ready():\n",
77     "            break\n",
78     "    if stdout_data:\n",
79     "        line = stdout_data[0]\n",
80     "        filenames = line.decode(\"utf-8\").rstrip('\\n').split('\\n')\n",
81     "        filenames = sorted(filenames)\n",
82     "        latest = filenames[-1]\n",
83     "        directory_to_download = os.path.join('/tmp', latest).replace(\"\\\\\",\"/\")\n",
84     "        print(directory_to_download)\n",
85     "stdout_data = []\n",
86     "stderr_data = []\n",
87     "if directory_to_download:\n",
88     "    # zip the collectd results to make the download faster\n",
89     "    zip_command = 'sudo -S tar -czvf '+ directory_to_download + '/collectd.tar.gz -C ' + directory_to_download + '/csv .'\n",
90     "    session = client.open_channel(kind='session')\n",
91     "    session.get_pty()\n",
92     "    session.exec_command(zip_command)\n",
93     "    while True:\n",
94     "        if session.recv_ready():\n",
95     "            stdout_data.append(session.recv(RECV_BYTES))\n",
96     "        if session.recv_stderr_ready():\n",
97     "            stderr_data.append(session.recv_stderr(RECV_BYTES))\n",
98     "        if session.exit_status_ready():\n",
99     "            break\n",
100     "    if stderr_data:\n",
101     "        print(stderr_data[0])\n",
102     "    if stdout_data:\n",
103     "        print(stdout_data[0])\n",
104     "\n",
105     "    # Begin the actual download\n",
106     "    sftp = paramiko.SFTPClient.from_transport(client)\n",
107     "    def sftp_walk(remotepath):\n",
108     "        path=remotepath\n",
109     "        files=[]\n",
110     "        folders=[]\n",
111     "        for f in sftp.listdir_attr(remotepath):\n",
112     "            if S_ISDIR(f.st_mode):\n",
113     "                folders.append(f.filename)\n",
114     "            else:\n",
115     "                files.append(f.filename)\n",
116     "        if files:\n",
117     "            yield path, files\n",
118     "    # File-by-file download happens here\n",
119     "    for path,files  in sftp_walk(directory_to_download):\n",
120     "        for file in files:\n",
121     "            remote = os.path.join(path,file).replace(\"\\\\\",\"/\")\n",
122     "            local = os.path.join('./results', file).replace(\"\\/\",\"/\")\n",
123     "            sftp.get(remote, local)\n",
124     "# Untar the collectd results if we got them.\n",
125     "path = os.path.join('./results', 'collectd.tar.gz')\n",
126     "if os.path.exists(path):\n",
127     "    tar = tarfile.open(path)\n",
128     "    tar.extractall()\n",
129     "    tar.close()\n",
130     "# Ready to work with downloaded data, close the session and client.\n",
131     "session.close()\n",
132     "client.close()"
133    ]
134   },
135   {
136    "cell_type": "code",
137    "execution_count": null,
138    "metadata": {
139     "hide_input": true
140    },
141    "outputs": [],
142    "source": [
143     "strings = ('* OS:', '* Kernel Version:', '* Board:', '* CPU:', '* CPU cores:',\n",
144     "           '* Memory:', '* Virtual Switch Set-up:',\n",
145     "           '* Traffic Generator:','* vSwitch:', '* DPDK Version:', '* VNF:')\n",
146     "filename = os.path.basename(glob.glob('./results/result*.rst')[0])\n",
147     "info_dict = {}\n",
148     "with open(os.path.join('./results', filename), 'r') as file:\n",
149     "    for line in file:\n",
150     "        if any(s in line for s in strings):\n",
151     "            info_dict[line.split(':', 1)[0]] = line.split(':', 1)[1].rstrip()\n",
152     "df = pd.DataFrame.from_dict(info_dict, orient='index', columns=['Value'])\n",
153     "df"
154    ]
155   },
156   {
157    "cell_type": "markdown",
158    "metadata": {},
159    "source": [
160     "## Understand the configuration used for the test."
161    ]
162   },
163   {
164    "cell_type": "code",
165    "execution_count": null,
166    "metadata": {},
167    "outputs": [],
168    "source": [
169     "filename = os.path.basename(glob.glob('./results/vsperf*.conf')[0])\n",
170     "file = os.path.join('./results', filename)\n",
171     "with open(file, 'r') as f:\n",
172     "    for line in f:\n",
173     "        if line.startswith('TRAFFICGEN_DURATION'):\n",
174     "            value = line.split('=')[1]\n",
175     "            value = value.rstrip()\n",
176     "            value = value.lstrip()\n",
177     "            traffic_duration = int(value)\n",
178     "        elif line.startswith('VSWITCH_PMD_CPU_MASK'):\n",
179     "            value = line.split('=')[1]\n",
180     "            value = value.rstrip()\n",
181     "            pmd_cores_mask = value.lstrip()\n",
182     "        elif line.startswith('GUEST_CORE_BINDING'):\n",
183     "            value = line.split('=')[1]\n",
184     "            value = value.rstrip()\n",
185     "            value = value.lstrip()\n",
186     "            guest_cores = value[1:-2]\n",
187     "\n",
188     "print(traffic_duration)\n",
189     "print(pmd_cores_mask)\n",
190     "print(guest_cores)"
191    ]
192   },
193   {
194    "cell_type": "markdown",
195    "metadata": {},
196    "source": [
197     "## OVS-Ports and Cores"
198    ]
199   },
200   {
201    "cell_type": "code",
202    "execution_count": null,
203    "metadata": {
204     "hide_input": true
205    },
206    "outputs": [],
207    "source": [
208     "import collections\n",
209     "portcores = collections.OrderedDict()\n",
210     "chunks = []\n",
211     "current_chunk = []\n",
212     "file = os.path.join('./results', 'ovs-cores.log')\n",
213     "with open(file, 'r') as f:\n",
214     "    for line in f:\n",
215     "        if line.startswith('pmd') and current_chunk:\n",
216     "            # if line starts with token and the current chunk is not empty\n",
217     "            chunks.append(current_chunk[:]) #  add non-empty chunk to chunks\n",
218     "            current_chunk = [] #  make current chunk blank\n",
219     "        # just append a line to the current chunk on each iteration\n",
220     "        if \"port:\" in line or 'pmd' in line:\n",
221     "            current_chunk.append(line)\n",
222     "    chunks.append(current_chunk)  #  append the last chunk outside the loop\n",
223     "\n",
224     "core_ids = []\n",
225     "for ch in chunks:\n",
226     "    port_id = ''\n",
227     "    core_id = ''\n",
228     "    for line in ch:\n",
229     "        if 'pmd' in line:\n",
230     "            core_id = line.split()[-1][:-1]\n",
231     "            if core_id not in core_ids:\n",
232     "                core_ids.append(core_id)\n",
233     "        elif 'port:' in line:\n",
234     "            port_id = line.split()[1]\n",
235     "        if port_id and core_id:\n",
236     "            if port_id not in portcores:\n",
237     "                portcores[port_id] = core_id\n",
238     "\n",
239     "# import graphviz\n",
240     "from graphviz import Digraph\n",
241     "ps = Digraph(name='ovs-ports-cores', node_attr={'shape': 'box'}, edge_attr={'arrowhead':\"none\"})\n",
242     "with ps.subgraph(name=\"cluster_0\") as c:\n",
243     "    c.node_attr.update(style='filled', color='green')\n",
244     "    c.node('t0', 'TGen-Port-0')\n",
245     "    c.node('t1', 'TGen-Port-1')\n",
246     "    c.attr(label='TGEN')\n",
247     "    c.attr(color='blue')\n",
248     "with ps.subgraph(name=\"cluster_1\") as c:\n",
249     "    c.node_attr.update(style='filled', color='yellow')\n",
250     "    c.node('v0', 'VNF-Port-0')\n",
251     "    c.node('v1', 'VNF-Port-1')\n",
252     "    c.attr(label='VNF')\n",
253     "    c.attr(color='blue')\n",
254     "    \n",
255     "with ps.subgraph(name='cluster_2') as c: \n",
256     "    c.attr(label='OVS-DPDK')\n",
257     "    c.attr(color='blue')\n",
258     "    count = 0\n",
259     "    for port, core in portcores.items():\n",
260     "        id = 'o'+str(count)\n",
261     "        c.node(id, port+'\\nCore-ID:'+ core)\n",
262     "        count += 1\n",
263     "        num = port[-1]\n",
264     "        if 'dpdkvhost' in port:\n",
265     "            ps.edge(id, 'v'+num)\n",
266     "        else:\n",
267     "            ps.edge(id, 't'+num)\n",
268     "\n",
269     "ps"
270    ]
271   },
272   {
273    "cell_type": "markdown",
274    "metadata": {},
275    "source": [
276     "## Dropped Packets"
277    ]
278   },
279   {
280    "cell_type": "code",
281    "execution_count": null,
282    "metadata": {
283     "hide_input": true
284    },
285    "outputs": [],
286    "source": [
287     "portcores = collections.OrderedDict()\n",
288     "chunks = []\n",
289     "current_chunk = []\n",
290     "file = os.path.join('./results', 'ovs-cores.log')\n",
291     "with open(file, 'r') as f:\n",
292     "    for line in f:\n",
293     "        if line.startswith('pmd') and current_chunk:\n",
294     "            # if line starts with token and the current chunk is not empty\n",
295     "            chunks.append(current_chunk[:]) #  add non-empty chunk to chunks\n",
296     "            current_chunk = [] #  make current chunk blank\n",
297     "        # just append a line to the current chunk on each iteration\n",
298     "        if \"port:\" in line or 'pmd' in line:\n",
299     "            current_chunk.append(line)\n",
300     "    chunks.append(current_chunk)  #  append the last chunk outside the loop\n",
301     "\n",
302     "core_ids = []\n",
303     "for ch in chunks:\n",
304     "    port_id = ''\n",
305     "    core_id = ''\n",
306     "    for line in ch:\n",
307     "        if 'pmd' in line:\n",
308     "            core_id = line.split()[-1][:-1]\n",
309     "            if core_id not in core_ids:\n",
310     "                core_ids.append(core_id)\n",
311     "        elif 'port:' in line:\n",
312     "            port_id = line.split()[1]\n",
313     "        if port_id and core_id:\n",
314     "            if port_id not in portcores:\n",
315     "                portcores[port_id] = core_id\n",
316     "\n",
317     "ps = Digraph(name='ovs-dropped', node_attr={'shape': 'box'}, edge_attr={'arrowhead':\"none\"})\n",
318     "\n",
319     "def get_dropped(port_id):\n",
320     "    # port_id = 'dpdk0'\n",
321     "    if glob.glob('./pod12-node4/*'+port_id):\n",
322     "        dirname = os.path.basename(glob.glob('./pod12-node4/*'+port_id)[0])\n",
323     "        if dirname:\n",
324     "            if glob.glob('./pod12-node4/'+dirname+ '/*dropped*'):\n",
325     "                filename = os.path.basename(glob.glob('./pod12-node4/'+dirname+ '/*dropped*')[0])\n",
326     "                if filename:\n",
327     "                    with open(os.path.join('./pod12-node4', dirname, filename), 'r') as f:\n",
328     "                        line = f.readlines()[-1]\n",
329     "                        fields = line.split(',')\n",
330     "                        return fields[1], fields[2]\n",
331     "    return 'NA','NA'\n",
332     "\n",
333     "with ps.subgraph(name=\"cluster_0\") as c:\n",
334     "    c.node_attr.update(style='filled', color='pink')\n",
335     "    c.attr(label='OVS-DPDK')\n",
336     "    c.attr(color='blue')\n",
337     "    count = 0\n",
338     "    for port, core in portcores.items():\n",
339     "        id = 'o'+str(count)\n",
340     "        rx,tx = get_dropped(port)\n",
341     "        c.node(id, port+'\\nRX-Dropped:'+ rx + '\\nTX-Dropped:' + tx)\n",
342     "        count += 1\n",
343     "        num = port[-1]\n",
344     "ps"
345    ]
346   },
347   {
348    "cell_type": "markdown",
349    "metadata": {},
350    "source": [
351     "## Plotting Live Results - T-Rex"
352    ]
353   },
354   {
355    "cell_type": "code",
356    "execution_count": null,
357    "metadata": {
358     "hide_input": true
359    },
360    "outputs": [],
361    "source": [
362     "lines_seen = set() # holds lines already seen\n",
363     "outfile = open('./counts.dat', \"w\")\n",
364     "file = os.path.join('./results', 'trex-liveresults-counts.dat')\n",
365     "for line in open(file, \"r\"):\n",
366     "    if line not in lines_seen: # not a duplicate\n",
367     "        outfile.write(line)\n",
368     "        lines_seen.add(line)\n",
369     "outfile.close()\n",
370     "tdf = pd.read_csv('./counts.dat')\n",
371     "print(tdf.columns)\n",
372     "ax = tdf.loc[(tdf.rx_port == 1)].plot(y='rx_pkts')\n",
373     "def highlight(indices,ax):\n",
374     "    i=0\n",
375     "    while i<len(indices):\n",
376     "        ax.axvspan(indices[i][0], indices[i][1], facecolor='RED', edgecolor='BLUE', alpha=.2)\n",
377     "        i+=1\n",
378     "\n",
379     "ind = 0\n",
380     "indv = tdf.ts[0]\n",
381     "ax.set_xlabel(\"Index\")\n",
382     "ax.set_ylabel('Count')\n",
383     "for i in range(len(tdf.ts)):\n",
384     "    if tdf.ts[i] - indv > int(traffic_duration):\n",
385     "        highlight([(ind, i)], ax)\n",
386     "        ind = i\n",
387     "        indv = tdf.ts[i]\n",
388     "highlight([(ind,i)], ax)"
389    ]
390   },
391   {
392    "cell_type": "markdown",
393    "metadata": {},
394    "source": [
395     "## IRQ Latency Histogram"
396    ]
397   },
398   {
399    "cell_type": "code",
400    "execution_count": null,
401    "metadata": {
402     "hide_input": true
403    },
404    "outputs": [],
405    "source": [
406     "file = os.path.join('./results', 'RUNirq.irq.log')\n",
407     "tdf = pd.read_csv(file)\n",
408     "tdf.columns\n",
409     "exclude = ['          <1', '         < 5', '        < 10','        < 50', '       < 100', '       < 500', '      < 1000']\n",
410     "ax = tdf.loc[:, tdf.columns.difference(exclude)].plot(x='      number', xticks=tdf['      number'], figsize=(20,10))\n",
411     "ax.set_xlabel('Core #')\n",
412     "ax.set_ylabel('Count')\n",
413     "#tdf.plot(x='number')"
414    ]
415   },
416   {
417    "cell_type": "markdown",
418    "metadata": {},
419    "source": [
420     "## Sample Collectd Metric Display - L3 Cache Occupancy in Bytes"
421    ]
422   },
423   {
424    "cell_type": "code",
425    "execution_count": null,
426    "metadata": {},
427    "outputs": [],
428    "source": [
429     "import math\n",
430     "def cpumask2coreids(mask):\n",
431     "    intmask = int(mask, 16)\n",
432     "    i = 1\n",
433     "    coreids = []\n",
434     "    while (i < intmask):\n",
435     "        if (i & intmask):\n",
436     "            coreids.append(str(math.frexp(i)[-1]-1))\n",
437     "        i = i << 1\n",
438     "    return (coreids)\n",
439     "\n",
440     "vswitch_cpus = \"['2']\"\n",
441     "ps = Digraph(name='cpu-map', node_attr={'shape': 'box'}, edge_attr={'arrowhead':\"none\"})\n",
442     "with ps.subgraph(name=\"cluster_0\") as c:\n",
443     "    c.node_attr.update(style='filled', color='pink')\n",
444     "    c.attr(label='CPU-MAPPINGS')\n",
445     "    c.attr(color='blue')\n",
446     "    c.node('vscpus', 'vSwitch: \\n' + vswitch_cpus)\n",
447     "    # vnf_cpus = cpumask2coreids(guest_cores)\n",
448     "    c.node('vncpus', 'VNF: \\n' + guest_cores)\n",
449     "    pmd_cpus = cpumask2coreids(pmd_cores_mask[1:-1])\n",
450     "    c.node('pmcpus', 'PMDs: \\n' + str(pmd_cpus))\n",
451     "\n",
452     "ps"
453    ]
454   },
455   {
456    "cell_type": "code",
457    "execution_count": null,
458    "metadata": {
459     "hide_input": true
460    },
461    "outputs": [],
462    "source": [
463     "# Path where collectd results are stored.\n",
464     "mypath = \"./pod12-node4\"\n",
465     "file_count = 0\n",
466     "cpu_names = []\n",
467     "for level1 in os.listdir(mypath):\n",
468     "    if \"intel_rdt\" in level1:\n",
469     "        l2path = os.path.join(mypath, level1)\n",
470     "        for level2 in os.listdir(l2path):\n",
471     "            if \"bytes\" in level2:\n",
472     "                l3path = os.path.join(l2path, level2)\n",
473     "                if file_count == 0:\n",
474     "                    file_count += 1\n",
475     "                    df = pd.read_csv(l3path)\n",
476     "                    nn = 'cpu-'+ level1[len('intel_rdt-'):]\n",
477     "                    # nn = 'cpu-'+ level1.split('-')[1]\n",
478     "                    cpu_names.append(nn)\n",
479     "                    # print(nn)\n",
480     "                    df.rename(columns={'value': nn}, inplace=True)\n",
481     "                else:\n",
482     "                    file_count += 1\n",
483     "                    tdf = pd.read_csv(l3path)\n",
484     "                    nn = 'cpu-'+ level1[len('intel_rdt-'):]\n",
485     "                    cpu_names.append(nn)\n",
486     "                    tdf.rename(columns={'value': nn}, inplace=True)\n",
487     "                    df[nn] = tdf[nn]            \n",
488     "\n",
489     "ax = df.plot(x='epoch', y=cpu_names)\n",
490     "ax.set_ylabel(\"MBytes\")\n",
491     "ax.set_xlabel('Time')\n",
492     "\n",
493     "\n",
494     "        \n",
495     "#    df = pd.read_csv()"
496    ]
497   },
498   {
499    "cell_type": "markdown",
500    "metadata": {},
501    "source": [
502     "## Events "
503    ]
504   },
505   {
506    "cell_type": "code",
507    "execution_count": null,
508    "metadata": {
509     "hide_input": true
510    },
511    "outputs": [],
512    "source": [
513     "from datetime import datetime\n",
514     "filename = os.path.basename(glob.glob('./results/vsperf-overall*.log')[0])\n",
515     "logfile = os.path.join('./results', filename)\n",
516     "linecnt = 0\n",
517     "times = {}\n",
518     "with open(logfile) as f:\n",
519     "    for line in f:\n",
520     "        line = line.strip('\\n')\n",
521     "        if linecnt == 0:\n",
522     "            times['Start-Test'] = line.split(\" : \")[0]\n",
523     "            linecnt += 1\n",
524     "        if 'Binding NICs' in line:\n",
525     "            times['Binding-NICs'] = line.split(\" : \")[0]\n",
526     "        if 'Starting traffic at' in line:\n",
527     "            sline = line.split(\" : \")[1]\n",
528     "            time = line.split(\" : \")[0]\n",
529     "            speed = sline.split('at',1)[1]\n",
530     "            times[speed] = time \n",
531     "        elif 'Starting vswitchd' in line:\n",
532     "            times['vSwitch-Start'] = line.split(\" : \")[0]\n",
533     "        elif 'Starting ovs-vswitchd' in line:\n",
534     "            times['ovsvswitch-start'] = line.split(\" : \")[0]\n",
535     "        elif 'Adding Ports' in line:\n",
536     "            times['Ports-Added'] = line.split(\" : \")[0]\n",
537     "        elif 'Flows Added' in line:\n",
538     "            times['Flows-Added'] = line.split(\" : \")[0]\n",
539     "        elif 'send_traffic with' in line:\n",
540     "            times['Traffic Start']  = line.split(\" : \")[0]\n",
541     "        elif 'l2 framesize 1280' in line:\n",
542     "            times['Traffic-Start-1280'] = line.split(\" : \")[0]\n",
543     "        elif 'Starting qemu' in line:\n",
544     "            times['VNF-Start'] = line.split(\" : \")[0]\n",
545     "        elif 'l2 framesize 64' in line:\n",
546     "            times['Traffic-Start-64'] = line.split(\" : \")[0]\n",
547     "        elif 'l2 framesize 128' in line:\n",
548     "            times['Traffic-Start-128'] = line.split(\" : \")[0]\n",
549     "        elif 'l2 framesize 256' in line:\n",
550     "            times['Traffic-Start-256'] = line.split(\" : \")[0]\n",
551     "        elif 'l2 framesize 512' in line:\n",
552     "            times['Traffic-Start-512'] = line.split(\" : \")[0]\n",
553     "        elif 'l2 framesize 1024' in line:\n",
554     "            times['Traffic-Start-1024'] = line.split(\" : \")[0]\n",
555     "        elif 'l2 framesize 1518' in line:\n",
556     "            times['Traffic-Start-1518'] = line.split(\" : \")[0]\n",
557     "        elif 'dump flows' in line:\n",
558     "            times['Traffic-End'] = line.split(\" : \")[0]\n",
559     "        elif 'Wait for QEMU' in line:\n",
560     "            times['VNF-Stop'] = line.split(\" : \")[0]\n",
561     "        elif 'delete flow' in line:\n",
562     "            times['flow-removed'] = line.split(\" : \")[0]\n",
563     "        elif 'delete port' in line:\n",
564     "            times['port-removed'] = line.split(\" : \")[0]\n",
565     "        elif 'Killing ovs-vswitchd' in line:\n",
566     "            times['vSwitch-Stop'] = line.split(\" : \")[0]\n",
567     "\n",
568     "times['Test-Stop'] = line.split(\" : \")[0]\n",
569     "#print(times)\n",
570     "ddf = pd.DataFrame.from_dict(times, orient='index', columns=['timestamp'])\n",
571     "names = ddf.index.values\n",
572     "dates = ddf['timestamp'].tolist()\n",
573     "datefmt=\"%Y-%m-%d %H:%M:%S,%f\"\n",
574     "dates = [datetime.strptime(ii, datefmt) for ii in dates]\n",
575     "# print(names)\n",
576     "# print(dates)"
577    ]
578   },
579   {
580    "cell_type": "code",
581    "execution_count": null,
582    "metadata": {
583     "hide_input": true
584    },
585    "outputs": [],
586    "source": [
587     "import matplotlib.dates as mdates\n",
588     "from matplotlib import ticker\n",
589     "\n",
590     "levels = np.array([-5, 5, -3, 3, -1, 1])\n",
591     "fig, ax = plt.subplots(figsize=(40, 5))\n",
592     "\n",
593     "# Create the base line\n",
594     "start = min(dates)\n",
595     "stop = max(dates)\n",
596     "ax.plot((start, stop), (0, 0), 'k', alpha=.5)\n",
597     "\n",
598     "pos_list = np.arange(len(dates))\n",
599     "\n",
600     "# Iterate through releases annotating each one\n",
601     "for ii, (iname, idate) in enumerate(zip(names, dates)):\n",
602     "    level = levels[ii % 6]\n",
603     "    vert = 'top' if level < 0 else 'bottom'\n",
604     "    ax.scatter(idate, 0, s=100, facecolor='w', edgecolor='k', zorder=9999)\n",
605     "    # Plot a line up to the text\n",
606     "    ax.plot((idate, idate), (0, level), c='r', alpha=.7)\n",
607     "    # Give the text a faint background and align it properly\n",
608     "    ax.text(idate, level, iname,\n",
609     "            horizontalalignment='right', verticalalignment=vert, fontsize=14,\n",
610     "            backgroundcolor=(1., 1., 1., .3))\n",
611     "ax.set(title=\"VSPERF Main Events\")\n",
612     "# Set the xticks formatting\n",
613     "ax.get_xaxis().set_major_locator(mdates.SecondLocator(interval=30))\n",
614     "ax.get_xaxis().set_major_formatter(mdates.DateFormatter(\"%M %S\"))\n",
615     "fig.autofmt_xdate()\n",
616     "plt.setp((ax.get_yticklabels() + ax.get_yticklines() +\n",
617     "          list(ax.spines.values())), visible=False)\n",
618     "plt.show()"
619    ]
620   },
621   {
622    "cell_type": "markdown",
623    "metadata": {},
624    "source": [
625     "## Comparing Current and Previous Results"
626    ]
627   },
628   {
629    "cell_type": "markdown",
630    "metadata": {},
631    "source": [
632     "## Current Result"
633    ]
634   },
635   {
636    "cell_type": "code",
637    "execution_count": null,
638    "metadata": {
639     "hide_input": true
640    },
641    "outputs": [],
642    "source": [
643     "import glob\n",
644     "filename = os.path.basename(glob.glob('./results/result*.csv')[0])\n",
645     "filename\n",
646     "tdf = pd.read_csv(os.path.join('./results', filename))\n",
647     "pkts = ['tx_frames', 'rx_frames']\n",
648     "fps =  ['tx_rate_fps', 'throughput_rx_fps']\n",
649     "mbps = ['tx_rate_mbps', 'throughput_rx_mbps']\n",
650     "pcents = ['tx_rate_percent', 'throughput_rx_percent', 'frame_loss_percent']\n",
651     "fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(14, 12))\n",
652     "tdf.plot.bar(y= pkts,ax=axes[0,0])\n",
653     "tdf.plot.bar(y= fps,ax=axes[0,1])\n",
654     "tdf.plot.bar(y= mbps,ax=axes[1,0])\n",
655     "tdf.plot.bar(y= pcents,ax=axes[1,1])\n",
656     "current_pkt_size = str(tdf['packet_size'].iloc[-1])\n",
657     "current_rx_fps = str(tdf['throughput_rx_fps'].iloc[-1])\n",
658     "print(current_rx_fps)"
659    ]
660   },
661   {
662    "cell_type": "markdown",
663    "metadata": {},
664    "source": [
665     "## How Current Result compares to Previous ones?"
666    ]
667   },
668   {
669    "cell_type": "code",
670    "execution_count": null,
671    "metadata": {
672     "hide_input": true
673    },
674    "outputs": [],
675    "source": [
676     "import urllib\n",
677     "import json\n",
678     "import requests\n",
679     "#json_data = requests.get('http://testresults.opnfv.org/test/api/v1/results?project=vsperf').json()\n",
680     "json_data = requests.get('http://10.10.120.22:8000/api/v1/results?project=vsperf').json()\n",
681     "res = json_data['results']\n",
682     "df1 = pd.DataFrame(res)\n",
683     "sort_by_date = df1.sort_values('start_date')\n",
684     "details = df1['details'].apply(pd.Series)\n",
685     "details[current_pkt_size] = pd.to_numeric(pd.Series(details[current_pkt_size]))\n",
686     "# details.plot.bar(y = current_pkt_size)\n",
687     "details_cur_pkt = details[[current_pkt_size]].copy()\n",
688     "details_cur_pkt.loc[-1]= float(current_rx_fps)\n",
689     "details_cur_pkt.index = details_cur_pkt.index + 1  # shifting index\n",
690     "details_cur_pkt.sort_index(inplace=True) \n",
691     "ax = details_cur_pkt.plot.bar()\n",
692     "ax.set_ylabel(\"Frames per sec\")\n",
693     "ax.set_xlabel(\"Run Number\")\n",
694     "def highlight(indices,ax):\n",
695     "    i=0\n",
696     "    while i<len(indices):\n",
697     "        ax.axvspan(indices[i]-0.5, indices[i]+0.5, facecolor='RED', edgecolor='none', alpha=.2)\n",
698     "        i+=1\n",
699     "highlight([0], ax)"
700    ]
701   },
702   {
703    "cell_type": "markdown",
704    "metadata": {},
705    "source": [
706     "## Heatmaps"
707    ]
708   },
709   {
710    "cell_type": "code",
711    "execution_count": null,
712    "metadata": {
713     "hide_input": true
714    },
715    "outputs": [],
716    "source": [
717     "array_of_dfs = []\n",
718     "for dirs in glob.glob('./pod12-node4/ovs_stats-vsperf*'):\n",
719     "    dirname = os.path.basename(dirs)\n",
720     "    if dirname:\n",
721     "        port = dirname.split('.')[1]\n",
722     "        if glob.glob('./pod12-node4/'+dirname+ '/*dropped*'):\n",
723     "            full_path = glob.glob('./pod12-node4/'+dirname+ '/*dropped*')[0]\n",
724     "            filename = os.path.basename(full_path)\n",
725     "            if filename:\n",
726     "                df = pd.read_csv(full_path)\n",
727     "                df.rename(index=str, columns={\"rx\": port+\"-rx\" , \"tx\": port+\"-tx\"}, inplace=True)\n",
728     "                df = df.drop(columns=['epoch'])\n",
729     "                array_of_dfs.append(df)\n",
730     "master_df = pd.concat(array_of_dfs, axis=1, sort=True)\n",
731     "master_df.columns\n",
732     "\n",
733     "# get the correlation coefficient between the different columns\n",
734     "corr = master_df.iloc[:, 0:].corr()\n",
735     "arr_corr = corr.values\n",
736     "# mask out the top triangle\n",
737     "arr_corr[np.triu_indices_from(arr_corr)] = np.nan\n",
738     "fig, ax = plt.subplots(figsize=(18, 12))\n",
739     "sns.set(font_scale=3.0)\n",
740     "hm = sns.heatmap(arr_corr, cbar=True, vmin=-0.5, vmax=0.5,\n",
741     "                 fmt='.2f', annot_kws={'size': 20}, annot=True, \n",
742     "                 square=True, cmap=plt.cm.Reds)\n",
743     "ticks = np.arange(corr.shape[0]) + 0.5\n",
744     "ax.set_xticks(ticks)\n",
745     "ax.set_xticklabels(corr.columns, rotation=90, fontsize=20)\n",
746     "ax.set_yticks(ticks)\n",
747     "ax.set_yticklabels(corr.index, rotation=360, fontsize=20)\n",
748     "\n",
749     "ax.set_title('Heatmap')\n",
750     "plt.tight_layout()\n",
751     "plt.show()"
752    ]
753   }
754  ],
755  "metadata": {
756   "author": {
757    "@type": "Person",
758    "name": "Sridhar K. N. Rao",
759    "worksFor": {
760     "@type": "Organization",
761     "name": "Spirent Communications"
762    }
763   },
764   "kernelspec": {
765    "display_name": "Python 3",
766    "language": "python",
767    "name": "python3"
768   },
769   "language_info": {
770    "codemirror_mode": {
771     "name": "ipython",
772     "version": 3
773    },
774    "file_extension": ".py",
775    "mimetype": "text/x-python",
776    "name": "python",
777    "nbconvert_exporter": "python",
778    "pygments_lexer": "ipython3",
779    "version": "3.7.1"
780   }
781  },
782  "nbformat": 4,
783  "nbformat_minor": 2
784 }