4 "cell_type": "markdown",
10 "# Beyond Performance Metrics: Towards Causation Analysis"
14 "cell_type": "markdown",
17 "### sridhar.rao@spirent.com and acm@research.att.com"
22 "execution_count": null,
26 "# Import packages\n",
27 "import numpy as np\n",
28 "import pandas as pd\n",
29 "import matplotlib.pyplot as plt\n",
30 "import seaborn as sns\n",
31 "from graphviz import Digraph\n",
32 "import collections\n",
38 "cell_type": "markdown",
41 "## Get the results to analyze: \n",
42 "Getting the latest one, if ``directory_to_download`` is empty"
47 "execution_count": null,
56 "from stat import S_ISDIR\n",
57 "RECV_BYTES = 4096\n",
58 "hostname = '10.10.120.24'\n",
64 "client = paramiko.Transport((hostname, port))\n",
65 "client.connect(username=uname, password=pwd)\n",
66 "session = client.open_channel(kind='session')\n",
67 "directory_to_download = ''\n",
69 "session.exec_command('ls /tmp | grep results')\n",
70 "if not directory_to_download:\n",
72 " if session.recv_ready():\n",
73 " stdout_data.append(session.recv(RECV_BYTES))\n",
74 " if session.recv_stderr_ready():\n",
75 " stderr_data.append(session.recv_stderr(RECV_BYTES))\n",
76 " if session.exit_status_ready():\n",
79 " line = stdout_data[0]\n",
80 " filenames = line.decode(\"utf-8\").rstrip('\\n').split('\\n')\n",
81 " filenames = sorted(filenames)\n",
82 " latest = filenames[-1]\n",
83 " directory_to_download = os.path.join('/tmp', latest).replace(\"\\\\\",\"/\")\n",
84 " print(directory_to_download)\n",
87 "if directory_to_download:\n",
88 " # zip the collectd results to make the download faster\n",
89 " zip_command = 'sudo -S tar -czvf '+ directory_to_download + '/collectd.tar.gz -C ' + directory_to_download + '/csv .'\n",
90 " session = client.open_channel(kind='session')\n",
91 " session.get_pty()\n",
92 " session.exec_command(zip_command)\n",
94 " if session.recv_ready():\n",
95 " stdout_data.append(session.recv(RECV_BYTES))\n",
96 " if session.recv_stderr_ready():\n",
97 " stderr_data.append(session.recv_stderr(RECV_BYTES))\n",
98 " if session.exit_status_ready():\n",
100 " if stderr_data:\n",
101 " print(stderr_data[0])\n",
102 " if stdout_data:\n",
103 " print(stdout_data[0])\n",
105 "    # Begin the actual download\n",
106 " sftp = paramiko.SFTPClient.from_transport(client)\n",
107 " def sftp_walk(remotepath):\n",
108 " path=remotepath\n",
111 " for f in sftp.listdir_attr(remotepath):\n",
112 " if S_ISDIR(f.st_mode):\n",
113 " folders.append(f.filename)\n",
115 " files.append(f.filename)\n",
117 " yield path, files\n",
118 " # Filewise download happens here\n",
119 " for path,files in sftp_walk(directory_to_download):\n",
120 " for file in files:\n",
121 " remote = os.path.join(path,file).replace(\"\\\\\",\"/\")\n",
122 " local = os.path.join('./results', file).replace(\"\\/\",\"/\")\n",
123 " sftp.get(remote, local)\n",
124 "# Untar the collectd results, if they were downloaded.\n",
125 "path = os.path.join('./results', 'collectd.tar.gz')\n",
126 "if os.path.exists(path):\n",
127 " tar = tarfile.open(path)\n",
128 " tar.extractall()\n",
130 "# Ready to work with downloaded data, close the session and client.\n",
137 "execution_count": null,
143 "strings = ('* OS:', '* Kernel Version:', '* Board:', '* CPU:', '* CPU cores:',\n",
144 " '* Memory:', '* Virtual Switch Set-up:',\n",
145 " '* Traffic Generator:','* vSwitch:', '* DPDK Version:', '* VNF:')\n",
146 "filename = os.path.basename(glob.glob('./results/result*.rst')[0])\n",
148 "with open(os.path.join('./results', filename), 'r') as file:\n",
149 " for line in file:\n",
150 " if any(s in line for s in strings):\n",
151 " info_dict[line.split(':', 1)[0]] = line.split(':', 1)[1].rstrip()\n",
152 "df = pd.DataFrame.from_dict(info_dict, orient='index', columns=['Value'])\n",
157 "cell_type": "markdown",
160 "## Understand the configuration used for the test."
165 "execution_count": null,
169 "filename = os.path.basename(glob.glob('./results/vsperf*.conf')[0])\n",
170 "file = os.path.join('./results', filename)\n",
171 "with open(file, 'r') as f:\n",
173 " if line.startswith('TRAFFICGEN_DURATION'):\n",
174 " value = line.split('=')[1]\n",
175 " value = value.rstrip()\n",
176 " value = value.lstrip()\n",
177 " traffic_duration = int(value)\n",
178 " elif line.startswith('VSWITCH_PMD_CPU_MASK'):\n",
179 " value = line.split('=')[1]\n",
180 " value = value.rstrip()\n",
181 " pmd_cores_mask = value.lstrip()\n",
182 " elif line.startswith('GUEST_CORE_BINDING'):\n",
183 " value = line.split('=')[1]\n",
184 " value = value.rstrip()\n",
185 " value = value.lstrip()\n",
186 " guest_cores = value[1:-2]\n",
188 "print(traffic_duration)\n",
189 "print(pmd_cores_mask)\n",
194 "cell_type": "markdown",
197 "## OVS-Ports and Cores"
202 "execution_count": null,
208 "import collections\n",
209 "portcores = collections.OrderedDict()\n",
211 "current_chunk = []\n",
212 "file = os.path.join('./results', 'ovs-cores.log')\n",
213 "with open(file, 'r') as f:\n",
215 " if line.startswith('pmd') and current_chunk:\n",
216 " # if line starts with token and the current chunk is not empty\n",
217 " chunks.append(current_chunk[:]) # add not empty chunk to chunks\n",
218 " current_chunk = [] # make current chunk blank\n",
219 " # just append a line to the current chunk on each iteration\n",
220 " if \"port:\" in line or 'pmd' in line:\n",
221 " current_chunk.append(line)\n",
222 " chunks.append(current_chunk) # append the last chunk outside the loop\n",
225 "for ch in chunks:\n",
228 " for line in ch:\n",
229 " if 'pmd' in line:\n",
230 " core_id = line.split()[-1][:-1]\n",
231 " if core_id not in core_ids:\n",
232 " core_ids.append(core_id)\n",
233 " elif 'port:' in line:\n",
234 " port_id = line.split()[1]\n",
235 " if port_id and core_id:\n",
236 " if port_id not in portcores:\n",
237 " portcores[port_id] = core_id\n",
239 "# import graphviz\n",
240 "from graphviz import Digraph\n",
241 "ps = Digraph(name='ovs-ports-cores', node_attr={'shape': 'box'}, edge_attr={'arrowhead':\"none\"})\n",
242 "with ps.subgraph(name=\"cluster_0\") as c:\n",
243 " c.node_attr.update(style='filled', color='green')\n",
244 " c.node('t0', 'TGen-Port-0')\n",
245 " c.node('t1', 'TGen-Port-1')\n",
246 " c.attr(label='TGEN')\n",
247 " c.attr(color='blue')\n",
248 "with ps.subgraph(name=\"cluster_1\") as c:\n",
249 " c.node_attr.update(style='filled', color='yellow')\n",
250 " c.node('v0', 'VNF-Port-0')\n",
251 " c.node('v1', 'VNF-Port-1')\n",
252 " c.attr(label='VNF')\n",
253 " c.attr(color='blue')\n",
255 "with ps.subgraph(name='cluster_2') as c: \n",
256 " c.attr(label='OVS-DPDK')\n",
257 " c.attr(color='blue')\n",
259 " for port, core in portcores.items():\n",
260 " id = 'o'+str(count)\n",
261 " c.node(id, port+'\\nCore-ID:'+ core)\n",
264 " if 'dpdkvhost' in port:\n",
265 " ps.edge(id, 'v'+num)\n",
267 " ps.edge(id, 't'+num)\n",
273 "cell_type": "markdown",
281 "execution_count": null,
287 "portcores = collections.OrderedDict()\n",
289 "current_chunk = []\n",
290 "file = os.path.join('./results', 'ovs-cores.log')\n",
291 "with open(file, 'r') as f:\n",
293 " if line.startswith('pmd') and current_chunk:\n",
294 " # if line starts with token and the current chunk is not empty\n",
295 " chunks.append(current_chunk[:]) # add not empty chunk to chunks\n",
296 " current_chunk = [] # make current chunk blank\n",
297 " # just append a line to the current chunk on each iteration\n",
298 " if \"port:\" in line or 'pmd' in line:\n",
299 " current_chunk.append(line)\n",
300 " chunks.append(current_chunk) # append the last chunk outside the loop\n",
303 "for ch in chunks:\n",
306 " for line in ch:\n",
307 " if 'pmd' in line:\n",
308 " core_id = line.split()[-1][:-1]\n",
309 " if core_id not in core_ids:\n",
310 " core_ids.append(core_id)\n",
311 " elif 'port:' in line:\n",
312 " port_id = line.split()[1]\n",
313 " if port_id and core_id:\n",
314 " if port_id not in portcores:\n",
315 " portcores[port_id] = core_id\n",
317 "ps = Digraph(name='ovs-dropped', node_attr={'shape': 'box'}, edge_attr={'arrowhead':\"none\"})\n",
319 "def get_dropped(port_id):\n",
320 " # port_id = 'dpdk0'\n",
321 " if glob.glob('./pod12-node4/*'+port_id):\n",
322 " dirname = os.path.basename(glob.glob('./pod12-node4/*'+port_id)[0])\n",
324 " if glob.glob('./pod12-node4/'+dirname+ '/*dropped*'):\n",
325 " filename = os.path.basename(glob.glob('./pod12-node4/'+dirname+ '/*dropped*')[0])\n",
327 " with open(os.path.join('./pod12-node4', dirname, filename), 'r') as f:\n",
328 " line = f.readlines()[-1]\n",
329 " fields = line.split(',')\n",
330 " return fields[1], fields[2]\n",
331 " return 'NA','NA'\n",
333 "with ps.subgraph(name=\"cluster_0\") as c:\n",
334 " c.node_attr.update(style='filled', color='pink')\n",
335 " c.attr(label='OVS-DPDK')\n",
336 " c.attr(color='blue')\n",
338 " for port, core in portcores.items():\n",
339 " id = 'o'+str(count)\n",
340 " rx,tx = get_dropped(port)\n",
341 " c.node(id, port+'\\nRX-Dropped:'+ rx + '\\nTX-Dropped:' + tx)\n",
348 "cell_type": "markdown",
351 "## Plotting Live Results - T-Rex"
356 "execution_count": null,
362 "lines_seen = set() # holds lines already seen\n",
363 "outfile = open('./counts.dat', \"w\")\n",
364 "file = os.path.join('./results', 'trex-liveresults-counts.dat')\n",
365 "for line in open(file, \"r\"):\n",
366 " if line not in lines_seen: # not a duplicate\n",
367 " outfile.write(line)\n",
368 " lines_seen.add(line)\n",
370 "tdf = pd.read_csv('./counts.dat')\n",
371 "print(tdf.columns)\n",
372 "ax = tdf.loc[(tdf.rx_port == 1)].plot(y='rx_pkts')\n",
373 "def highlight(indices,ax):\n",
375 " while i<len(indices):\n",
376 " ax.axvspan(indices[i][0], indices[i][1], facecolor='RED', edgecolor='BLUE', alpha=.2)\n",
380 "indv = tdf.ts[0]\n",
381 "ax.set_xlabel(\"Index\")\n",
382 "ax.set_ylabel('Count')\n",
383 "for i in range(len(tdf.ts)):\n",
384 " if tdf.ts[i] - indv > int(traffic_duration):\n",
385 " highlight([(ind, i)], ax)\n",
387 " indv = tdf.ts[i]\n",
388 "highlight([(ind,i)], ax)"
392 "cell_type": "markdown",
395 "## IRQ Latency Histogram"
400 "execution_count": null,
406 "file = os.path.join('./results', 'RUNirq.irq.log')\n",
407 "tdf = pd.read_csv(file)\n",
409 "exclude = [' <1', ' < 5', ' < 10',' < 50', ' < 100', ' < 500', ' < 1000']\n",
410 "ax = tdf.loc[:, tdf.columns.difference(exclude)].plot(x=' number', xticks=tdf[' number'], figsize=(20,10))\n",
411 "ax.set_xlabel('Core #')\n",
412 "ax.set_ylabel('Count')\n",
413 "#tdf.plot(x='number')"
417 "cell_type": "markdown",
420 "## Sample Collectd Metric Display - L3 Cache Occupancy in Bytes"
425 "execution_count": null,
430 "def cpumask2coreids(mask):\n",
431 " intmask = int(mask, 16)\n",
434 " while (i < intmask):\n",
435 " if (i & intmask):\n",
436 " coreids.append(str(math.frexp(i)[-1]-1))\n",
438 " return (coreids)\n",
440 "vswitch_cpus = \"['2']\"\n",
441 "ps = Digraph(name='cpu-map', node_attr={'shape': 'box'}, edge_attr={'arrowhead':\"none\"})\n",
442 "with ps.subgraph(name=\"cluster_0\") as c:\n",
443 " c.node_attr.update(style='filled', color='pink')\n",
444 " c.attr(label='CPU-MAPPINGS')\n",
445 " c.attr(color='blue')\n",
446 " c.node('vscpus', 'vSwitch: \\n' + vswitch_cpus)\n",
447 " # vnf_cpus = cpumask2coreids(guest_cores)\n",
448 " c.node('vncpus', 'VNF: \\n' + guest_cores)\n",
449 " pmd_cpus = cpumask2coreids(pmd_cores_mask[1:-1])\n",
450 " c.node('pmcpus', 'PMDs: \\n' + str(pmd_cpus))\n",
457 "execution_count": null,
463 "# Path where collectd results are stored.\n",
464 "mypath = \"./pod12-node4\"\n",
467 "for level1 in os.listdir(mypath):\n",
468 " if \"intel_rdt\" in level1:\n",
469 " l2path = os.path.join(mypath, level1)\n",
470 " for level2 in os.listdir(l2path):\n",
471 " if \"bytes\" in level2:\n",
472 " l3path = os.path.join(l2path, level2)\n",
473 " if file_count == 0:\n",
474 " file_count += 1\n",
475 " df = pd.read_csv(l3path)\n",
476 " nn = 'cpu-'+ level1[len('intel_rdt-'):]\n",
477 " # nn = 'cpu-'+ level1.split('-')[1]\n",
478 " cpu_names.append(nn)\n",
480 " df.rename(columns={'value': nn}, inplace=True)\n",
482 " file_count += 1\n",
483 " tdf = pd.read_csv(l3path)\n",
484 " nn = 'cpu-'+ level1[len('intel_rdt-'):]\n",
485 " cpu_names.append(nn)\n",
486 " tdf.rename(columns={'value': nn}, inplace=True)\n",
487 " df[nn] = tdf[nn] \n",
489 "ax = df.plot(x='epoch', y=cpu_names)\n",
490 "ax.set_ylabel(\"MBytes\")\n",
491 "ax.set_xlabel('Time')\n",
495 "# df = pd.read_csv()"
499 "cell_type": "markdown",
507 "execution_count": null,
513 "from datetime import datetime\n",
514 "filename = os.path.basename(glob.glob('./results/vsperf-overall*.log')[0])\n",
515 "logfile = os.path.join('./results', filename)\n",
518 "with open(logfile) as f:\n",
520 " line = line.strip('\\n')\n",
521 " if linecnt == 0:\n",
522 " times['Start-Test'] = line.split(\" : \")[0]\n",
524 " if 'Binding NICs' in line:\n",
525 " times['Binding-NICs'] = line.split(\" : \")[0]\n",
526 " if 'Starting traffic at' in line:\n",
527 " sline = line.split(\" : \")[1]\n",
528 " time = line.split(\" : \")[0]\n",
529 " speed = sline.split('at',1)[1]\n",
530 " times[speed] = time \n",
531 " elif 'Starting vswitchd' in line:\n",
532 " times['vSwitch-Start'] = line.split(\" : \")[0]\n",
533 " elif 'Starting ovs-vswitchd' in line:\n",
534 " times['ovsvswitch-start'] = line.split(\" : \")[0]\n",
535 " elif 'Adding Ports' in line:\n",
536 " times['Ports-Added'] = line.split(\" : \")[0]\n",
537 " elif 'Flows Added' in line:\n",
538 " times['Flows-Added'] = line.split(\" : \")[0]\n",
539 " elif 'send_traffic with' in line:\n",
540 " times['Traffic Start'] = line.split(\" : \")[0]\n",
541 " elif 'l2 framesize 1280' in line:\n",
542 " times['Traffic-Start-1280'] = line.split(\" : \")[0]\n",
543 " elif 'Starting qemu' in line:\n",
544 " times['VNF-Start'] = line.split(\" : \")[0]\n",
545 " elif 'l2 framesize 64' in line:\n",
546 " times['Traffic-Start-64'] = line.split(\" : \")[0]\n",
547 " elif 'l2 framesize 128' in line:\n",
548 " times['Traffic-Start-128'] = line.split(\" : \")[0]\n",
549 " elif 'l2 framesize 256' in line:\n",
550 " times['Traffic-Start-256'] = line.split(\" : \")[0]\n",
551 " elif 'l2 framesize 512' in line:\n",
552 " times['Traffic-Start-512'] = line.split(\" : \")[0]\n",
553 " elif 'l2 framesize 1024' in line:\n",
554 " times['Traffic-Start-1024'] = line.split(\" : \")[0]\n",
555 " elif 'l2 framesize 1518' in line:\n",
556 " times['Traffic-Start-1518'] = line.split(\" : \")[0]\n",
557 " elif 'dump flows' in line:\n",
558 " times['Traffic-End'] = line.split(\" : \")[0]\n",
559 " elif 'Wait for QEMU' in line:\n",
560 " times['VNF-Stop'] = line.split(\" : \")[0]\n",
561 " elif 'delete flow' in line:\n",
562 " times['flow-removed'] = line.split(\" : \")[0]\n",
563 " elif 'delete port' in line:\n",
564 " times['port-removed'] = line.split(\" : \")[0]\n",
565 " elif 'Killing ovs-vswitchd' in line:\n",
566 " times['vSwitch-Stop'] = line.split(\" : \")[0]\n",
568 "times['Test-Stop'] = line.split(\" : \")[0]\n",
570 "ddf = pd.DataFrame.from_dict(times, orient='index', columns=['timestamp'])\n",
571 "names = ddf.index.values\n",
572 "dates = ddf['timestamp'].tolist()\n",
573 "datefmt=\"%Y-%m-%d %H:%M:%S,%f\"\n",
574 "dates = [datetime.strptime(ii, datefmt) for ii in dates]\n",
581 "execution_count": null,
587 "import matplotlib.dates as mdates\n",
588 "from matplotlib import ticker\n",
590 "levels = np.array([-5, 5, -3, 3, -1, 1])\n",
591 "fig, ax = plt.subplots(figsize=(40, 5))\n",
593 "# Create the base line\n",
594 "start = min(dates)\n",
595 "stop = max(dates)\n",
596 "ax.plot((start, stop), (0, 0), 'k', alpha=.5)\n",
598 "pos_list = np.arange(len(dates))\n",
600 "# Iterate through releases annotating each one\n",
601 "for ii, (iname, idate) in enumerate(zip(names, dates)):\n",
602 " level = levels[ii % 6]\n",
603 " vert = 'top' if level < 0 else 'bottom'\n",
604 " ax.scatter(idate, 0, s=100, facecolor='w', edgecolor='k', zorder=9999)\n",
605 " # Plot a line up to the text\n",
606 " ax.plot((idate, idate), (0, level), c='r', alpha=.7)\n",
607 " # Give the text a faint background and align it properly\n",
608 " ax.text(idate, level, iname,\n",
609 " horizontalalignment='right', verticalalignment=vert, fontsize=14,\n",
610 " backgroundcolor=(1., 1., 1., .3))\n",
611 "ax.set(title=\"VSPERF Main Events\")\n",
612 "# Set the xticks formatting\n",
613 "ax.get_xaxis().set_major_locator(mdates.SecondLocator(interval=30))\n",
614 "ax.get_xaxis().set_major_formatter(mdates.DateFormatter(\"%M %S\"))\n",
615 "fig.autofmt_xdate()\n",
616 "plt.setp((ax.get_yticklabels() + ax.get_yticklines() +\n",
617 " list(ax.spines.values())), visible=False)\n",
622 "cell_type": "markdown",
625 "## Current and old."
629 "cell_type": "markdown",
637 "execution_count": null,
644 "filename = os.path.basename(glob.glob('./results/result*.csv')[0])\n",
646 "tdf = pd.read_csv(os.path.join('./results', filename))\n",
647 "pkts = ['tx_frames', 'rx_frames']\n",
648 "fps = ['tx_rate_fps', 'throughput_rx_fps']\n",
649 "mbps = ['tx_rate_mbps', 'throughput_rx_mbps']\n",
650 "pcents = ['tx_rate_percent', 'throughput_rx_percent', 'frame_loss_percent']\n",
651 "fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(14, 12))\n",
652 "tdf.plot.bar(y= pkts,ax=axes[0,0])\n",
653 "tdf.plot.bar(y= fps,ax=axes[0,1])\n",
654 "tdf.plot.bar(y= mbps,ax=axes[1,0])\n",
655 "tdf.plot.bar(y= pcents,ax=axes[1,1])\n",
656 "current_pkt_size = str(tdf['packet_size'].iloc[-1])\n",
657 "current_rx_fps = str(tdf['throughput_rx_fps'].iloc[-1])\n",
658 "print(current_rx_fps)"
662 "cell_type": "markdown",
665 "## How Does the Current Result Compare to Previous Ones?"
670 "execution_count": null,
679 "#json_data = requests.get('http://testresults.opnfv.org/test/api/v1/results?project=vsperf').json()\n",
680 "json_data = requests.get('http://10.10.120.22:8000/api/v1/results?project=vsperf').json()\n",
681 "res = json_data['results']\n",
682 "df1 = pd.DataFrame(res)\n",
683 "sort_by_date = df1.sort_values('start_date')\n",
684 "details = df1['details'].apply(pd.Series)\n",
685 "details[current_pkt_size] = pd.to_numeric(pd.Series(details[current_pkt_size]))\n",
686 "# details.plot.bar(y = current_pkt_size)\n",
687 "details_cur_pkt = details[[current_pkt_size]].copy()\n",
688 "details_cur_pkt.loc[-1]= float(current_rx_fps)\n",
689 "details_cur_pkt.index = details_cur_pkt.index + 1 # shifting index\n",
690 "details_cur_pkt.sort_index(inplace=True) \n",
691 "ax = details_cur_pkt.plot.bar()\n",
692 "ax.set_ylabel(\"Frames per sec\")\n",
693 "ax.set_xlabel(\"Run Number\")\n",
694 "def highlight(indices,ax):\n",
696 " while i<len(indices):\n",
697 " ax.axvspan(indices[i]-0.5, indices[i]+0.5, facecolor='RED', edgecolor='none', alpha=.2)\n",
703 "cell_type": "markdown",
711 "execution_count": null,
717 "array_of_dfs = []\n",
718 "for dirs in glob.glob('./pod12-node4/ovs_stats-vsperf*'):\n",
719 " dirname = os.path.basename(dirs)\n",
721 " port = dirname.split('.')[1]\n",
722 " if glob.glob('./pod12-node4/'+dirname+ '/*dropped*'):\n",
723 " full_path = glob.glob('./pod12-node4/'+dirname+ '/*dropped*')[0]\n",
724 " filename = os.path.basename(full_path)\n",
726 " df = pd.read_csv(full_path)\n",
727 " df.rename(index=str, columns={\"rx\": port+\"-rx\" , \"tx\": port+\"-tx\"}, inplace=True)\n",
728 " df = df.drop(columns=['epoch'])\n",
729 " array_of_dfs.append(df)\n",
730 "master_df = pd.concat(array_of_dfs, axis=1, sort=True)\n",
731 "master_df.columns\n",
733 "# get the correlation coefficient between the different columns\n",
734 "corr = master_df.iloc[:, 0:].corr()\n",
735 "arr_corr = corr.values\n",
736 "# mask out the top triangle\n",
737 "arr_corr[np.triu_indices_from(arr_corr)] = np.nan\n",
738 "fig, ax = plt.subplots(figsize=(18, 12))\n",
739 "sns.set(font_scale=3.0)\n",
740 "hm = sns.heatmap(arr_corr, cbar=True, vmin=-0.5, vmax=0.5,\n",
741 " fmt='.2f', annot_kws={'size': 20}, annot=True, \n",
742 " square=True, cmap=plt.cm.Reds)\n",
743 "ticks = np.arange(corr.shape[0]) + 0.5\n",
744 "ax.set_xticks(ticks)\n",
745 "ax.set_xticklabels(corr.columns, rotation=90, fontsize=20)\n",
746 "ax.set_yticks(ticks)\n",
747 "ax.set_yticklabels(corr.index, rotation=360, fontsize=20)\n",
749 "ax.set_title('Heatmap')\n",
750 "plt.tight_layout()\n",
758 "name": "Sridhar K. N. Rao",
760 "@type": "Organization",
761 "name": "Spirent Communications"
765 "display_name": "Python 3",
766 "language": "python",
774 "file_extension": ".py",
775 "mimetype": "text/x-python",
777 "nbconvert_exporter": "python",
778 "pygments_lexer": "ipython3",