2 # Copyright 2016 Cisco Systems, Inc. All rights reserved.
4 # Licensed under the Apache License, Version 2.0 (the "License"); you may
5 # not use this file except in compliance with the License. You may obtain
6 # a copy of the License at
8 # http://www.apache.org/licenses/LICENSE-2.0
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 # License for the specific language governing permissions and limitations
17 from contextlib import contextmanager
18 from datetime import datetime
23 from tabulate import tabulate
25 def _annotate_chain_stats(chain_stats, nodrop_marker='=>'):
26 """Transform a plain chain stats into an annotated one.
30 0: {'packets': [2000054, 1999996, 1999996, 1999996],
39 0: {'packets': [2000054, -58 (-0.034%), '=>', 1999996],
47 In the case of shared net, some columns in packets array can have ''.
48 Some columns cab also be None which means the data is not available.
50 for stats in list(chain_stats.values()):
51 packets = stats['packets']
54 # keep the first counter
55 annotated_packets = [packets[0]]
56 # modify all remaining counters
57 prev_count = packets[0]
58 for index in range(1, count):
59 cur_count = packets[index]
61 # an empty string indicates an unknown counter for a shared interface
62 # do not annotate those
64 elif cur_count is None:
66 annotated_value = 'n/a'
68 drop = cur_count - prev_count
70 dr = (drop * 100.0) / prev_count if prev_count else 0
71 annotated_value = '{:+,} ({:+.4f}%)'.format(drop, dr)
74 # if last column we display the value
75 annotated_value = cur_count if index == count - 1 else nodrop_marker
76 prev_count = cur_count
77 annotated_packets.append(annotated_value)
79 stats['packets'] = annotated_packets
class Formatter(object):
    """Collection of string formatter methods."""

    @staticmethod
    def fixed(data):
        """Identity formatter: return the value unchanged."""
        return data

    @staticmethod
    def int(data):
        """Format an integer with thousands separators."""
        return '{:,}'.format(data)

    @staticmethod
    def float(decimal):
        """Return a formatter rendering floats with <decimal> decimal places."""
        return lambda data: '%.{}f'.format(decimal) % (data)

    @staticmethod
    def standard(data):
        """Format data according to its type (int, float or passthrough)."""
        if isinstance(data, int):
            return Formatter.int(data)
        if isinstance(data, float):
            return Formatter.float(4)(data)
        return Formatter.fixed(data)

    @staticmethod
    def suffix(suffix_str):
        """Return a formatter that appends suffix_str to the standard format."""
        return lambda data: Formatter.standard(data) + suffix_str

    @staticmethod
    def bits(data):
        """Format a bit rate with the best SI prefix (bps, kbps, Mbps...)."""
        # By default, `best_prefix` returns a value in byte format, this hack (multiply by 8.0)
        # will convert it into bit format.
        bit = 8.0 * bitmath.Bit(float(data))
        bit = bit.best_prefix(bitmath.SI)
        # map the byte unit chosen by best_prefix to the matching bit class
        byte_to_bit_classes = {
            'kB': bitmath.kb,
            'MB': bitmath.Mb,
            'GB': bitmath.Gb,
            'TB': bitmath.Tb,
            'PB': bitmath.Pb,
            'EB': bitmath.Eb,
            'ZB': bitmath.Zb,
            'YB': bitmath.Yb
        }
        bps = byte_to_bit_classes.get(bit.unit, bitmath.Bit).from_other(bit) / 8.0
        if bps.unit != 'Bit':
            return bps.format("{value:.4f} {unit}ps")
        return bps.format("{value:.4f} bps")

    @staticmethod
    def percentage(data):
        """Format a percentage; '' for None and '-' for NaN."""
        import math  # local import keeps this block self-contained
        if data is None:
            return ''
        if math.isnan(data):
            return '-'
        return Formatter.suffix('%')(Formatter.float(4)(data))
139 """ASCII readable table class."""
141 def __init__(self, header):
142 header_row, self.formatters = list(zip(*header))
143 self.data = [header_row]
144 self.columns = len(header_row)
146 def add_row(self, row):
147 assert self.columns == len(row)
149 for entry, formatter in zip(row, self.formatters):
150 formatted_row.append(formatter(entry))
151 self.data.append(formatted_row)
153 def get_string(self, indent=0):
154 spaces = ' ' * indent
155 table = tabulate(self.data,
160 return table.replace('\n', '\n' + spaces)
class Summarizer(object):
    """Generic summarizer class."""

    # number of spaces added per nesting level
    indent_per_level = 2

    def __init__(self):
        """Create a summarizer with no indentation and an empty output buffer."""
        self.indent_size = 0
        self.marker_stack = [False]
        self.str = ''

    def __indent(self, marker):
        # push one indentation level; marker selects the '> ' bullet style
        self.indent_size += self.indent_per_level
        self.marker_stack.append(marker)

    def __unindent(self):
        assert self.indent_size >= self.indent_per_level
        self.indent_size -= self.indent_per_level
        self.marker_stack.pop()

    def __get_indent_string(self):
        current_str = ' ' * self.indent_size
        if self.marker_stack[-1]:
            # replace the last 2 spaces with the block marker
            current_str = current_str[:-2] + '> '
        return current_str

    def _put(self, *args):
        """Append one indented line; a trailing dict argument is expanded recursively."""
        self.str += self.__get_indent_string()
        if args and isinstance(args[-1], dict):
            self.str += ' '.join(map(str, args[:-1])) + '\n'
            self._put_dict(args[-1])
        else:
            self.str += ' '.join(map(str, args)) + '\n'

    def _put_dict(self, data):
        """Append a dict as an indented 'key: value' block, recursing into sub-dicts."""
        with self._create_block(False):
            for key, value in list(data.items()):
                if isinstance(value, dict):
                    self._put(key + ':')
                    self._put_dict(value)
                else:
                    self._put(key + ':', value)

    def _put_table(self, table):
        """Append a Table rendered at the current indentation."""
        self.str += self.__get_indent_string()
        self.str += table.get_string(self.indent_size) + '\n'

    def __str__(self):
        return self.str

    @contextmanager
    def _create_block(self, marker=True):
        """Context manager: indent on entry, unindent on exit."""
        self.__indent(marker)
        yield
        self.__unindent()
class NFVBenchSummarizer(Summarizer):
    """Summarize nfvbench json result."""

    # Parallel lists: each key in direction_keys is rendered with the matching
    # label from direction_names (they are zipped together when building the
    # per-direction run config table).
    direction_keys = ['direction-forward', 'direction-reverse', 'direction-total']
    direction_names = ['Forward', 'Reverse', 'Total']
225 def __init__(self, result, sender):
226 """Create a summarizer instance."""
227 Summarizer.__init__(self)
229 self.config = self.result['config']
230 self.record_header = None
231 self.record_data = None
234 self.ndr_pdr_header = [
235 ('-', Formatter.fixed),
236 ('L2 Frame Size', Formatter.standard),
237 ('Rate (fwd+rev)', Formatter.bits),
238 ('Rate (fwd+rev)', Formatter.suffix(' pps')),
239 ('Avg Drop Rate', Formatter.suffix('%')),
240 ('Avg Latency (usec)', Formatter.standard),
241 ('Min Latency (usec)', Formatter.standard),
242 ('Max Latency (usec)', Formatter.standard)
245 self.single_run_header = [
246 ('L2 Frame Size', Formatter.standard),
247 ('Drop Rate', Formatter.suffix('%')),
248 ('Avg Latency (usec)', Formatter.standard),
249 ('Min Latency (usec)', Formatter.standard),
250 ('Max Latency (usec)', Formatter.standard)
253 self.config_header = [
254 ('Direction', Formatter.standard),
255 ('Requested TX Rate (bps)', Formatter.bits),
256 ('Actual TX Rate (bps)', Formatter.bits),
257 ('RX Rate (bps)', Formatter.bits),
258 ('Requested TX Rate (pps)', Formatter.suffix(' pps')),
259 ('Actual TX Rate (pps)', Formatter.suffix(' pps')),
260 ('RX Rate (pps)', Formatter.suffix(' pps'))
263 # add percentiles headers if hdrh enabled
264 if not self.config.disable_hdrh:
265 for percentile in self.config.lat_percentiles:
266 # 'append' expects a single parameter => double parentheses
267 self.ndr_pdr_header.append((str(percentile) + ' %ile lat.', Formatter.standard))
268 self.single_run_header.append((str(percentile) + ' %ile lat.', Formatter.standard))
269 # if sender is available initialize record
274 def __get_openstack_spec(self, property):
276 return self.result['openstack_spec'][property]
280 def __summarize(self):
282 self._put('========== NFVBench Summary ==========')
283 self._put('Date:', self.result['date'])
284 self._put('NFVBench version', self.result['nfvbench_version'])
285 self._put('Openstack Neutron:', {
286 'vSwitch': self.__get_openstack_spec('vswitch'),
287 'Encapsulation': self.__get_openstack_spec('encaps')
289 self.__record_header_put('version', self.result['nfvbench_version'])
290 self.__record_header_put('vSwitch', self.__get_openstack_spec('vswitch'))
291 self.__record_header_put('Encapsulation', self.__get_openstack_spec('encaps'))
292 self._put('Benchmarks:')
293 with self._create_block():
294 self._put('Networks:')
295 with self._create_block():
296 network_benchmark = self.result['benchmarks']['network']
298 self._put('Components:')
299 with self._create_block():
300 self._put('Traffic Generator:')
301 with self._create_block(False):
302 self._put('Profile:', self.config['tg-name'])
303 self._put('Tool:', self.config['tg-tool'])
304 if network_benchmark['versions']:
305 self._put('Versions:')
306 with self._create_block():
307 for component, version in list(network_benchmark['versions'].items()):
308 self._put(component + ':', version)
310 if self.config['ndr_run'] or self.config['pdr_run']:
311 self._put('Measurement Parameters:')
312 with self._create_block(False):
313 if self.config['ndr_run']:
314 self._put('NDR:', self.config['measurement']['NDR'])
315 if self.config['pdr_run']:
316 self._put('PDR:', self.config['measurement']['PDR'])
317 self._put('Service chain:')
318 for result in list(network_benchmark['service_chain'].items()):
319 with self._create_block():
320 self.__chain_summarize(*result)
    def __chain_summarize(self, chain_name, chain_benchmark):
        """Emit the summary block for one service chain."""
        self._put(chain_name + ':')
        # record the chain name so recorded data can be correlated to it
        self.__record_header_put('service_chain', chain_name)
        with self._create_block():
            self._put('Traffic:')
            with self._create_block(False):
                self.__traffic_summarize(chain_benchmark['result'])
330 def __traffic_summarize(self, traffic_benchmark):
331 self._put('Profile:', traffic_benchmark['profile'])
332 self._put('Bidirectional:', traffic_benchmark['bidirectional'])
333 self._put('Flow count:', traffic_benchmark['flow_count'])
334 self._put('Service chains count:', traffic_benchmark['service_chain_count'])
335 self._put('Compute nodes:', list(traffic_benchmark['compute_nodes'].keys()))
337 self.__record_header_put('profile', traffic_benchmark['profile'])
338 self.__record_header_put('bidirectional', traffic_benchmark['bidirectional'])
339 self.__record_header_put('flow_count', traffic_benchmark['flow_count'])
340 self.__record_header_put('sc_count', traffic_benchmark['service_chain_count'])
341 self.__record_header_put('compute_nodes', list(traffic_benchmark['compute_nodes'].keys()))
342 with self._create_block(False):
344 if not self.config['no_traffic']:
345 self._put('Run Summary:')
347 with self._create_block(False):
348 self._put_table(self.__get_summary_table(traffic_benchmark['result']))
351 self._put(traffic_benchmark['result']['warning'])
355 for entry in list(traffic_benchmark['result'].items()):
356 if 'warning' in entry:
358 self.__chain_analysis_summarize(*entry)
361 def __chain_analysis_summarize(self, frame_size, analysis):
363 self._put('L2 frame size:', frame_size)
364 if self.config['ndr_run']:
365 self._put('NDR search duration:', Formatter.float(0)(analysis['ndr']['time_taken_sec']),
367 self.__record_data_put(frame_size, {'ndr_search_duration': Formatter.float(0)(
368 analysis['ndr']['time_taken_sec'])})
369 if self.config['pdr_run']:
370 self._put('PDR search duration:', Formatter.float(0)(analysis['pdr']['time_taken_sec']),
372 self.__record_data_put(frame_size, {'pdr_search_duration': Formatter.float(0)(
373 analysis['pdr']['time_taken_sec'])})
376 if not self.config['no_traffic'] and self.config['single_run']:
377 self._put('Run Config:')
379 with self._create_block(False):
380 self._put_table(self.__get_config_table(analysis['run_config'], frame_size))
381 if 'warning' in analysis['run_config'] and analysis['run_config']['warning']:
383 self._put(analysis['run_config']['warning'])
386 if 'packet_path_stats' in analysis:
387 for dir in ['Forward', 'Reverse']:
388 self._put(dir + ' Chain Packet Counters and Latency:')
390 with self._create_block(False):
391 self._put_table(self._get_chain_table(analysis['packet_path_stats'][dir]))
394 def __get_summary_table(self, traffic_result):
395 if self.config['single_run']:
396 summary_table = Table(self.single_run_header)
398 summary_table = Table(self.ndr_pdr_header)
400 if self.config['ndr_run']:
401 for frame_size, analysis in list(traffic_result.items()):
402 if frame_size == 'warning':
408 analysis['ndr']['rate_bps'],
409 analysis['ndr']['rate_pps'],
410 analysis['ndr']['stats']['overall']['drop_percentage'],
411 analysis['ndr']['stats']['overall']['avg_delay_usec'],
412 analysis['ndr']['stats']['overall']['min_delay_usec'],
413 analysis['ndr']['stats']['overall']['max_delay_usec']
415 if not self.config.disable_hdrh:
416 self.extract_hdrh_percentiles(
417 analysis['ndr']['stats']['overall']['lat_percentile'], row_data)
418 summary_table.add_row(row_data)
422 'rate_bps': analysis['ndr']['rate_bps'],
423 'rate_pps': analysis['ndr']['rate_pps'],
424 'offered_tx_rate_bps': analysis['ndr']['stats']['offered_tx_rate_bps'],
425 'theoretical_tx_rate_pps': analysis['ndr']['stats']['theoretical_tx_rate_pps'],
426 'theoretical_tx_rate_bps': analysis['ndr']['stats']['theoretical_tx_rate_bps'],
427 'drop_percentage': analysis['ndr']['stats']['overall']['drop_percentage'],
428 'avg_delay_usec': analysis['ndr']['stats']['overall']['avg_delay_usec'],
429 'min_delay_usec': analysis['ndr']['stats']['overall']['min_delay_usec'],
430 'max_delay_usec': analysis['ndr']['stats']['overall']['max_delay_usec']
432 if not self.config.disable_hdrh:
433 self.extract_hdrh_percentiles(
434 analysis['ndr']['stats']['overall']['lat_percentile'], ndr_data, True)
435 self.__record_data_put(frame_size, {'ndr': ndr_data})
436 if self.config['pdr_run']:
437 for frame_size, analysis in list(traffic_result.items()):
438 if frame_size == 'warning':
444 analysis['pdr']['rate_bps'],
445 analysis['pdr']['rate_pps'],
446 analysis['pdr']['stats']['overall']['drop_percentage'],
447 analysis['pdr']['stats']['overall']['avg_delay_usec'],
448 analysis['pdr']['stats']['overall']['min_delay_usec'],
449 analysis['pdr']['stats']['overall']['max_delay_usec']
451 if not self.config.disable_hdrh:
452 self.extract_hdrh_percentiles(
453 analysis['pdr']['stats']['overall']['lat_percentile'], row_data)
454 summary_table.add_row(row_data)
458 'rate_bps': analysis['pdr']['rate_bps'],
459 'rate_pps': analysis['pdr']['rate_pps'],
460 'offered_tx_rate_bps': analysis['pdr']['stats']['offered_tx_rate_bps'],
461 'theoretical_tx_rate_pps': analysis['pdr']['stats']['theoretical_tx_rate_pps'],
462 'theoretical_tx_rate_bps': analysis['pdr']['stats']['theoretical_tx_rate_bps'],
463 'drop_percentage': analysis['pdr']['stats']['overall']['drop_percentage'],
464 'avg_delay_usec': analysis['pdr']['stats']['overall']['avg_delay_usec'],
465 'min_delay_usec': analysis['pdr']['stats']['overall']['min_delay_usec'],
466 'max_delay_usec': analysis['pdr']['stats']['overall']['max_delay_usec']
468 if not self.config.disable_hdrh:
469 self.extract_hdrh_percentiles(
470 analysis['pdr']['stats']['overall']['lat_percentile'], pdr_data, True)
471 self.__record_data_put(frame_size, {'pdr': pdr_data})
472 if self.config['single_run']:
473 for frame_size, analysis in list(traffic_result.items()):
476 analysis['stats']['overall']['drop_rate_percent'],
477 analysis['stats']['overall']['rx']['avg_delay_usec'],
478 analysis['stats']['overall']['rx']['min_delay_usec'],
479 analysis['stats']['overall']['rx']['max_delay_usec']
481 if not self.config.disable_hdrh:
482 self.extract_hdrh_percentiles(
483 analysis['stats']['overall']['rx']['lat_percentile'], row_data)
484 summary_table.add_row(row_data)
487 'type': 'single_run',
488 'offered_tx_rate_bps': analysis['stats']['offered_tx_rate_bps'],
489 'theoretical_tx_rate_pps': analysis['stats']['theoretical_tx_rate_pps'],
490 'theoretical_tx_rate_bps': analysis['stats']['theoretical_tx_rate_bps'],
491 'drop_rate_percent': analysis['stats']['overall']['drop_rate_percent'],
492 'avg_delay_usec': analysis['stats']['overall']['rx']['avg_delay_usec'],
493 'min_delay_usec': analysis['stats']['overall']['rx']['min_delay_usec'],
494 'max_delay_usec': analysis['stats']['overall']['rx']['max_delay_usec']
496 if not self.config.disable_hdrh:
497 self.extract_hdrh_percentiles(
498 analysis['stats']['overall']['rx']['lat_percentile'], single_run_data, True)
499 self.__record_data_put(frame_size, {'single_run': single_run_data})
502 def extract_hdrh_percentiles(self, lat_percentile, data, add_key=False):
504 data['lat_percentile'] = {}
505 for percentile in self.config.lat_percentiles:
508 data['lat_percentile_' + str(percentile)] = lat_percentile[percentile]
510 data['lat_percentile_' + str(percentile)] = "n/a"
513 data.append(lat_percentile[percentile])
517 def __get_config_table(self, run_config, frame_size):
518 config_table = Table(self.config_header)
519 for key, name in zip(self.direction_keys, self.direction_names):
520 if key not in run_config:
522 config_table.add_row([
524 run_config[key]['orig']['rate_bps'],
525 run_config[key]['tx']['rate_bps'],
526 run_config[key]['rx']['rate_bps'],
527 int(run_config[key]['orig']['rate_pps']),
528 int(run_config[key]['tx']['rate_pps']),
529 int(run_config[key]['rx']['rate_pps']),
531 self.__record_data_put(frame_size, {
532 name.lower() + "_orig_rate_bps": int(run_config[key]['orig']['rate_bps']),
533 name.lower() + "_tx_rate_bps": int(run_config[key]['tx']['rate_bps']),
534 name.lower() + "_rx_rate_bps": int(run_config[key]['rx']['rate_bps']),
535 name.lower() + "_orig_rate_pps": int(run_config[key]['orig']['rate_pps']),
536 name.lower() + "_tx_rate_pps": int(run_config[key]['tx']['rate_pps']),
537 name.lower() + "_rx_rate_pps": int(run_config[key]['rx']['rate_pps']),
542 def _get_chain_table(self, chain_stats):
543 """Retrieve the table for a direction.
546 'interfaces': ['Port0', 'drop %'', 'vhost0', 'Port1'],
548 '0': {'packets': [2000054, '-0.023%', 1999996, 1999996],
557 chains = chain_stats['chains']
558 _annotate_chain_stats(chains)
559 header = [('Chain', Formatter.standard)] + \
560 [(ifname, Formatter.standard) for ifname in chain_stats['interfaces']]
561 # add latency columns if available Avg, Min, Max and percentiles
563 lat_map = {'lat_avg_usec': 'Avg lat.',
564 'lat_min_usec': 'Min lat.',
565 'lat_max_usec': 'Max lat.'}
566 if 'lat_avg_usec' in chains['0']:
567 lat_keys = ['lat_avg_usec', 'lat_min_usec', 'lat_max_usec']
569 if not self.config.disable_hdrh:
570 lat_keys.append('lat_percentile')
571 for percentile in self.config.lat_percentiles:
572 lat_map['lat_' + str(percentile) + '_percentile'] = \
573 str(percentile) + ' %ile lat.'
576 # 'append' expects a single parameter => double parentheses
577 header.append((lat_map[key], Formatter.standard))
579 table = Table(header)
580 for chain in sorted(list(chains.keys()), key=str):
581 row = [chain] + chains[chain]['packets']
582 for lat_key in lat_keys:
584 if lat_key != 'lat_percentile':
585 if chains[chain].get(lat_key, None):
586 row.append(Formatter.standard(chains[chain][lat_key]))
590 if not self.config.disable_hdrh:
591 if chains[chain].get(lat_key, None):
592 for percentile in chains[chain][lat_key]:
593 row.append(Formatter.standard(
594 chains[chain][lat_key][percentile]))
596 for _ in self.config.lat_percentiles:
601 def __record_header_put(self, key, value):
603 self.record_header[key] = value
605 def __record_data_put(self, key, data):
607 if key not in self.record_data:
608 self.record_data[key] = {}
609 self.record_data[key].update(data)
611 def __record_send(self):
613 self.record_header["@timestamp"] = datetime.utcnow().replace(
614 tzinfo=pytz.utc).strftime("%Y-%m-%dT%H:%M:%S.%f%z")
615 for frame_size in self.record_data:
616 data = self.record_header
617 data['frame_size'] = frame_size
618 data.update(self.record_data[frame_size])
619 run_specific_data = {}
620 if 'single_run' in data:
621 run_specific_data['single_run'] = data['single_run']
622 del data['single_run']
624 run_specific_data['ndr'] = data['ndr']
625 run_specific_data['ndr']['drop_limit'] = self.config['measurement']['NDR']
628 run_specific_data['pdr'] = data['pdr']
629 run_specific_data['pdr']['drop_limit'] = self.config['measurement']['PDR']
631 for key in run_specific_data:
632 data_to_send = data.copy()
633 data_to_send.update(run_specific_data[key])
634 self.sender.record_send(data_to_send)
637 def __record_init(self):
638 # init is called after checking for sender
639 self.record_header = {
640 "runlogdate": self.sender.runlogdate,
641 "user_label": self.config['user_label']
643 self.record_data = {}