2 # Copyright 2016 Cisco Systems, Inc. All rights reserved.
4 # Licensed under the Apache License, Version 2.0 (the "License"); you may
5 # not use this file except in compliance with the License. You may obtain
6 # a copy of the License at
8 # http://www.apache.org/licenses/LICENSE-2.0
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 # License for the specific language governing permissions and limitations
from contextlib import contextmanager
from datetime import datetime
import math

from tabulate import tabulate
25 def _annotate_chain_stats(chain_stats, nodrop_marker='=>'):
26 """Transform a plain chain stats into an annotated one.
30 0: {'packets': [2000054, 1999996, 1999996, 1999996],
39 0: {'packets': [2000054, -58 (-0.034%), '=>', 1999996],
47 In the case of shared net, some columns in packets array can have ''.
48 Some columns cab also be None which means the data is not available.
50 for stats in list(chain_stats.values()):
51 packets = stats['packets']
54 # keep the first counter
55 annotated_packets = [packets[0]]
56 # modify all remaining counters
57 prev_count = packets[0]
58 for index in range(1, count):
59 cur_count = packets[index]
61 # an empty string indicates an unknown counter for a shared interface
62 # do not annotate those
64 elif cur_count is None:
66 annotated_value = 'n/a'
68 drop = cur_count - prev_count
70 dr = (drop * 100.0) / prev_count if prev_count else 0
71 annotated_value = '{:+,} ({:+.4f}%)'.format(drop, dr)
74 # if last column we display the value
75 annotated_value = cur_count if index == count - 1 else nodrop_marker
76 prev_count = cur_count
77 annotated_packets.append(annotated_value)
79 stats['packets'] = annotated_packets
class Formatter(object):
    """Collection of string formatter methods."""

    @staticmethod
    def fixed(data):
        """Return the value unchanged (pass-through formatter)."""
        return data

    @staticmethod
    def int(data):
        """Format an integer with thousands separators."""
        return '{:,}'.format(data)

    @staticmethod
    def float(decimal):
        """Return a formatter that renders floats with `decimal` digits."""
        return lambda data: '%.{}f'.format(decimal) % (data)

    @staticmethod
    def standard(data):
        """Format ints with separators, floats with 4 decimals, else as-is."""
        if isinstance(data, int):
            return Formatter.int(data)
        if isinstance(data, float):
            return Formatter.float(4)(data)
        return Formatter.fixed(data)

    @staticmethod
    def suffix(suffix_str):
        """Return a formatter that appends `suffix_str` to the standard format."""
        return lambda data: Formatter.standard(data) + suffix_str

    @staticmethod
    def bits(data):
        """Format a bit/sec value using the best SI prefix (e.g. '1.0000 Gbps')."""
        # By default, `best_prefix` returns a value in byte format, this hack (multiply by 8.0)
        # will convert it into bit format.
        bit = 8.0 * bitmath.Bit(float(data))
        bit = bit.best_prefix(bitmath.SI)
        # map byte-prefixed classes back to their bit-prefixed counterparts
        byte_to_bit_classes = {
            'kB': bitmath.kb,
            'MB': bitmath.Mb,
            'GB': bitmath.Gb,
            'TB': bitmath.Tb,
            'PB': bitmath.Pb,
            'EB': bitmath.Eb,
            'ZB': bitmath.Zb,
            'YB': bitmath.Yb,
        }
        bps = byte_to_bit_classes.get(bit.unit, bitmath.Bit).from_other(bit) / 8.0
        if bps.unit != 'Bit':
            return bps.format("{value:.4f} {unit}ps")
        return bps.format("{value:.4f} bps")

    @staticmethod
    def percentage(data):
        """Format a percentage value; '' when None, '-' when NaN."""
        if data is None:
            return ''
        if math.isnan(data):
            return '-'
        return Formatter.suffix('%')(Formatter.float(4)(data))
class Table(object):
    """ASCII readable table class."""

    def __init__(self, header):
        """Create a table.

        header: list of (column name, formatter) pairs; the formatter is a
                callable applied to every value later added in that column.
        """
        header_row, self.formatters = list(zip(*header))
        self.data = [header_row]
        self.columns = len(header_row)

    def add_row(self, row):
        """Format each entry with its column formatter and append the row."""
        assert self.columns == len(row)
        formatted_row = []
        for entry, formatter in zip(row, self.formatters):
            formatted_row.append(formatter(entry))
        self.data.append(formatted_row)

    def get_string(self, indent=0):
        """Render the table as an ASCII grid, indenting every line by `indent`."""
        spaces = ' ' * indent
        table = tabulate(self.data,
                         headers='firstrow',
                         tablefmt='grid',
                         stralign='center',
                         floatfmt='.2f')
        return table.replace('\n', '\n' + spaces)
class Summarizer(object):
    """Generic summarizer class.

    Builds a human-readable, indented text report in self.str via the
    _put*/_create_block primitives.
    """

    # number of spaces added per nesting level
    indent_per_level = 2

    def __init__(self):
        """Create a summarizer with no indentation and an empty report."""
        self.indent_size = 0
        # stack of booleans: True means the current level prints a '> ' marker
        self.marker_stack = [False]
        self.str = ''

    def __indent(self, marker):
        self.indent_size += self.indent_per_level
        self.marker_stack.append(marker)

    def __unindent(self):
        assert self.indent_size >= self.indent_per_level
        self.indent_size -= self.indent_per_level
        self.marker_stack.pop()

    def __get_indent_string(self):
        current_str = ' ' * self.indent_size
        if self.marker_stack[-1]:
            # replace the last 2 spaces with the level marker
            current_str = current_str[:-2] + '> '
        return current_str

    def _put(self, *args):
        """Append one indented line; a trailing dict arg is expanded below it."""
        self.str += self.__get_indent_string()
        if args and isinstance(args[-1], dict):
            self.str += ' '.join(map(str, args[:-1])) + '\n'
            self._put_dict(args[-1])
        else:
            self.str += ' '.join(map(str, args)) + '\n'

    def _put_dict(self, data):
        """Append a dict as an indented key/value block (recurses on dicts)."""
        with self._create_block(False):
            for key, value in list(data.items()):
                if isinstance(value, dict):
                    self._put(key + ':')
                    self._put_dict(value)
                else:
                    self._put(key + ':', value)

    def _put_table(self, table):
        """Append a Table rendered at the current indentation."""
        self.str += self.__get_indent_string()
        self.str += table.get_string(self.indent_size) + '\n'

    def __str__(self):
        return self.str

    @contextmanager
    def _create_block(self, marker=True):
        """Context manager opening one indentation level (with optional marker)."""
        self.__indent(marker)
        yield
        self.__unindent()
class NFVBenchSummarizer(Summarizer):
    """Summarize nfvbench json result."""

    direction_keys = ['direction-forward', 'direction-reverse', 'direction-total']
    direction_names = ['Forward', 'Reverse', 'Total']

    def __init__(self, result, sender):
        """Create a summarizer instance.

        result: nfvbench result dict ('config', 'date', 'nfvbench_version',
                'benchmarks' keys expected)
        sender: optional record sender; when set, per-frame-size records are
                also pushed through sender.record_send()
        """
        Summarizer.__init__(self)
        self.result = result
        self.config = self.result['config']
        self.record_header = None
        self.record_data = None
        self.sender = sender

        # table headers: list of (column name, formatter) pairs
        self.ndr_pdr_header = [
            ('-', Formatter.fixed),
            ('L2 Frame Size', Formatter.standard),
            ('Rate (fwd+rev)', Formatter.bits),
            ('Rate (fwd+rev)', Formatter.suffix(' pps')),
            ('Avg Drop Rate', Formatter.suffix('%')),
            ('Avg Latency (usec)', Formatter.standard),
            ('Min Latency (usec)', Formatter.standard),
            ('Max Latency (usec)', Formatter.standard)
        ]
        self.single_run_header = [
            ('L2 Frame Size', Formatter.standard),
            ('Drop Rate', Formatter.suffix('%')),
            ('Avg Latency (usec)', Formatter.standard),
            ('Min Latency (usec)', Formatter.standard),
            ('Max Latency (usec)', Formatter.standard)
        ]
        self.config_header = [
            ('Direction', Formatter.standard),
            ('Requested TX Rate (bps)', Formatter.bits),
            ('Actual TX Rate (bps)', Formatter.bits),
            ('RX Rate (bps)', Formatter.bits),
            ('Requested TX Rate (pps)', Formatter.suffix(' pps')),
            ('Actual TX Rate (pps)', Formatter.suffix(' pps')),
            ('RX Rate (pps)', Formatter.suffix(' pps'))
        ]

        # add percentiles headers if hdrh enabled
        if not self.config.disable_hdrh:
            for percentile in self.config.lat_percentiles:
                # 'append' expects a single parameter => double parentheses
                self.ndr_pdr_header.append((str(percentile) + ' %ile lat.', Formatter.standard))
                self.single_run_header.append((str(percentile) + ' %ile lat.',
                                               Formatter.standard))

        if self.config.periodic_gratuitous_arp:
            # Shadow the class-level lists with per-instance copies before
            # inserting: mutating the class attributes directly would leak the
            # extra Gratuitous ARP column into every other summarizer instance.
            self.direction_keys = list(self.direction_keys)
            self.direction_names = list(self.direction_names)
            self.direction_keys.insert(2, 'garp-direction-total')
            self.direction_names.insert(2, 'Gratuitous ARP')

        # if sender is available initialize record
        if self.sender:
            self.__record_init()
        self.__summarize()

    def __get_openstack_spec(self, property):
        """Return one field of the openstack spec, or None when not collected."""
        if 'openstack_spec' in self.result:
            return self.result['openstack_spec'][property]
        return None

    def __summarize(self):
        """Build the full text summary from the result dict."""
        self._put()
        self._put('========== NFVBench Summary ==========')
        self._put('Date:', self.result['date'])
        self._put('NFVBench version', self.result['nfvbench_version'])
        self._put('Openstack Neutron:', {
            'vSwitch': self.__get_openstack_spec('vswitch'),
            'Encapsulation': self.__get_openstack_spec('encaps')
        })
        self.__record_header_put('version', self.result['nfvbench_version'])
        self.__record_header_put('vSwitch', self.__get_openstack_spec('vswitch'))
        self.__record_header_put('Encapsulation', self.__get_openstack_spec('encaps'))
        self._put('Benchmarks:')
        with self._create_block():
            self._put('Networks:')
            with self._create_block():
                network_benchmark = self.result['benchmarks']['network']

                self._put('Components:')
                with self._create_block():
                    self._put('Traffic Generator:')
                    with self._create_block(False):
                        self._put('Profile:', self.config['tg-name'])
                        self._put('Tool:', self.config['tg-tool'])
                    if network_benchmark['versions']:
                        self._put('Versions:')
                        with self._create_block():
                            for component, version in list(network_benchmark['versions'].items()):
                                self._put(component + ':', version)

                if self.config['ndr_run'] or self.config['pdr_run']:
                    self._put('Measurement Parameters:')
                    with self._create_block(False):
                        if self.config['ndr_run']:
                            self._put('NDR:', self.config['measurement']['NDR'])
                        if self.config['pdr_run']:
                            self._put('PDR:', self.config['measurement']['PDR'])
                self._put('Service chain:')
                for result in list(network_benchmark['service_chain'].items()):
                    with self._create_block():
                        self.__chain_summarize(*result)

    def __chain_summarize(self, chain_name, chain_benchmark):
        """Summarize one service chain benchmark."""
        self._put(chain_name + ':')
        self.__record_header_put('service_chain', chain_name)
        with self._create_block():
            self._put('Traffic:')
            with self._create_block(False):
                self.__traffic_summarize(chain_benchmark['result'])

    def __traffic_summarize(self, traffic_benchmark):
        """Summarize the traffic results of one chain and send records."""
        self._put('Profile:', traffic_benchmark['profile'])
        self._put('Bidirectional:', traffic_benchmark['bidirectional'])
        self._put('Flow count:', traffic_benchmark['flow_count'])
        self._put('Service chains count:', traffic_benchmark['service_chain_count'])
        self._put('Compute nodes:', list(traffic_benchmark['compute_nodes'].keys()))

        self.__record_header_put('profile', traffic_benchmark['profile'])
        self.__record_header_put('bidirectional', traffic_benchmark['bidirectional'])
        self.__record_header_put('flow_count', traffic_benchmark['flow_count'])
        self.__record_header_put('sc_count', traffic_benchmark['service_chain_count'])
        self.__record_header_put('compute_nodes', list(traffic_benchmark['compute_nodes'].keys()))
        with self._create_block(False):
            self._put()
            if not self.config['no_traffic']:
                self._put('Run Summary:')
                self._put()
                with self._create_block(False):
                    self._put_table(self.__get_summary_table(traffic_benchmark['result']))
                    try:
                        self._put()
                        self._put(traffic_benchmark['result']['warning'])
                    except KeyError:
                        # no warning to display
                        pass

            for entry in list(traffic_benchmark['result'].items()):
                # (frame_size, analysis) pairs; skip the 'warning' entry
                if 'warning' in entry:
                    continue
                self.__chain_analysis_summarize(*entry)
            self.__record_send()

    def __chain_analysis_summarize(self, frame_size, analysis):
        """Summarize the results for one L2 frame size."""
        self._put()
        self._put('L2 frame size:', frame_size)
        if self.config['ndr_run']:
            self._put('NDR search duration:', Formatter.float(0)(analysis['ndr']['time_taken_sec']),
                      'seconds')
            self.__record_data_put(frame_size, {'ndr_search_duration': Formatter.float(0)(
                analysis['ndr']['time_taken_sec'])})
        if self.config['pdr_run']:
            self._put('PDR search duration:', Formatter.float(0)(analysis['pdr']['time_taken_sec']),
                      'seconds')
            self.__record_data_put(frame_size, {'pdr_search_duration': Formatter.float(0)(
                analysis['pdr']['time_taken_sec'])})
        self._put()

        if not self.config['no_traffic'] and self.config['single_run']:
            self._put('Run Config:')
            self._put()
            with self._create_block(False):
                self._put_table(self.__get_config_table(analysis['run_config'], frame_size))
                if 'warning' in analysis['run_config'] and analysis['run_config']['warning']:
                    self._put()
                    self._put(analysis['run_config']['warning'])
                self._put()

        if 'packet_path_stats' in analysis:
            for direction in ['Forward', 'Reverse']:
                self._put(direction + ' Chain Packet Counters and Latency:')
                self._put()
                with self._create_block(False):
                    self._put_table(self._get_chain_table(analysis['packet_path_stats'][direction]))
                    self._put()

    def __get_summary_table(self, traffic_result):
        """Build the Run Summary table (NDR/PDR rows or a single-run row)."""
        if self.config['single_run']:
            summary_table = Table(self.single_run_header)
        else:
            summary_table = Table(self.ndr_pdr_header)

        if self.config['ndr_run']:
            for frame_size, analysis in list(traffic_result.items()):
                if frame_size == 'warning':
                    continue
                row_data = [
                    'NDR',
                    frame_size,
                    analysis['ndr']['rate_bps'],
                    analysis['ndr']['rate_pps'],
                    analysis['ndr']['stats']['overall']['drop_percentage'],
                    analysis['ndr']['stats']['overall']['avg_delay_usec'],
                    analysis['ndr']['stats']['overall']['min_delay_usec'],
                    analysis['ndr']['stats']['overall']['max_delay_usec']
                ]
                if not self.config.disable_hdrh:
                    self.extract_hdrh_percentiles(
                        analysis['ndr']['stats']['overall']['lat_percentile'], row_data)
                summary_table.add_row(row_data)

                ndr_data = {
                    'type': 'NDR',
                    'rate_bps': analysis['ndr']['rate_bps'],
                    'rate_pps': analysis['ndr']['rate_pps'],
                    'offered_tx_rate_bps': analysis['ndr']['stats']['offered_tx_rate_bps'],
                    'theoretical_tx_rate_pps': analysis['ndr']['stats']['theoretical_tx_rate_pps'],
                    'theoretical_tx_rate_bps': analysis['ndr']['stats']['theoretical_tx_rate_bps'],
                    'drop_percentage': analysis['ndr']['stats']['overall']['drop_percentage'],
                    'avg_delay_usec': analysis['ndr']['stats']['overall']['avg_delay_usec'],
                    'min_delay_usec': analysis['ndr']['stats']['overall']['min_delay_usec'],
                    'max_delay_usec': analysis['ndr']['stats']['overall']['max_delay_usec']
                }
                if not self.config.disable_hdrh:
                    self.extract_hdrh_percentiles(
                        analysis['ndr']['stats']['overall']['lat_percentile'], ndr_data, True)
                self.__record_data_put(frame_size, {'ndr': ndr_data})
        if self.config['pdr_run']:
            for frame_size, analysis in list(traffic_result.items()):
                if frame_size == 'warning':
                    continue
                row_data = [
                    'PDR',
                    frame_size,
                    analysis['pdr']['rate_bps'],
                    analysis['pdr']['rate_pps'],
                    analysis['pdr']['stats']['overall']['drop_percentage'],
                    analysis['pdr']['stats']['overall']['avg_delay_usec'],
                    analysis['pdr']['stats']['overall']['min_delay_usec'],
                    analysis['pdr']['stats']['overall']['max_delay_usec']
                ]
                if not self.config.disable_hdrh:
                    self.extract_hdrh_percentiles(
                        analysis['pdr']['stats']['overall']['lat_percentile'], row_data)
                summary_table.add_row(row_data)

                pdr_data = {
                    'type': 'PDR',
                    'rate_bps': analysis['pdr']['rate_bps'],
                    'rate_pps': analysis['pdr']['rate_pps'],
                    'offered_tx_rate_bps': analysis['pdr']['stats']['offered_tx_rate_bps'],
                    'theoretical_tx_rate_pps': analysis['pdr']['stats']['theoretical_tx_rate_pps'],
                    'theoretical_tx_rate_bps': analysis['pdr']['stats']['theoretical_tx_rate_bps'],
                    'drop_percentage': analysis['pdr']['stats']['overall']['drop_percentage'],
                    'avg_delay_usec': analysis['pdr']['stats']['overall']['avg_delay_usec'],
                    'min_delay_usec': analysis['pdr']['stats']['overall']['min_delay_usec'],
                    'max_delay_usec': analysis['pdr']['stats']['overall']['max_delay_usec']
                }
                if not self.config.disable_hdrh:
                    self.extract_hdrh_percentiles(
                        analysis['pdr']['stats']['overall']['lat_percentile'], pdr_data, True)
                self.__record_data_put(frame_size, {'pdr': pdr_data})
        if self.config['single_run']:
            for frame_size, analysis in list(traffic_result.items()):
                row_data = [
                    frame_size,
                    analysis['stats']['overall']['drop_rate_percent'],
                    analysis['stats']['overall']['rx']['avg_delay_usec'],
                    analysis['stats']['overall']['rx']['min_delay_usec'],
                    analysis['stats']['overall']['rx']['max_delay_usec']
                ]
                if not self.config.disable_hdrh:
                    self.extract_hdrh_percentiles(
                        analysis['stats']['overall']['rx']['lat_percentile'], row_data)
                summary_table.add_row(row_data)

                single_run_data = {
                    'type': 'single_run',
                    'offered_tx_rate_bps': analysis['stats']['offered_tx_rate_bps'],
                    'theoretical_tx_rate_pps': analysis['stats']['theoretical_tx_rate_pps'],
                    'theoretical_tx_rate_bps': analysis['stats']['theoretical_tx_rate_bps'],
                    'drop_rate_percent': analysis['stats']['overall']['drop_rate_percent'],
                    'avg_delay_usec': analysis['stats']['overall']['rx']['avg_delay_usec'],
                    'min_delay_usec': analysis['stats']['overall']['rx']['min_delay_usec'],
                    'max_delay_usec': analysis['stats']['overall']['rx']['max_delay_usec']
                }
                if not self.config.disable_hdrh:
                    self.extract_hdrh_percentiles(
                        analysis['stats']['overall']['rx']['lat_percentile'], single_run_data, True)
                self.__record_data_put(frame_size, {'single_run': single_run_data})
        return summary_table

    def extract_hdrh_percentiles(self, lat_percentile, data, add_key=False):
        """Append/insert the configured latency percentiles into a row or record.

        lat_percentile: percentile -> value mapping (may not be a mapping when
                        the traffic generator did not report percentiles)
        data: table row (list) when add_key is False, record (dict) when True
        add_key: when True also creates the 'lat_percentile' sub-dict key
        """
        if add_key:
            data['lat_percentile'] = {}
        for percentile in self.config.lat_percentiles:
            if add_key:
                try:
                    data['lat_percentile_' + str(percentile)] = lat_percentile[percentile]
                except TypeError:
                    # percentile stats not available
                    data['lat_percentile_' + str(percentile)] = "n/a"
            else:
                try:
                    data.append(lat_percentile[percentile])
                except TypeError:
                    data.append('n/a')

    def __get_config_table(self, run_config, frame_size):
        """Build the Run Config table (one row per traffic direction)."""
        config_table = Table(self.config_header)
        for key, name in zip(self.direction_keys, self.direction_names):
            if key not in run_config:
                continue
            config_table.add_row([
                name,
                run_config[key]['orig']['rate_bps'],
                run_config[key]['tx']['rate_bps'],
                run_config[key]['rx']['rate_bps'],
                int(run_config[key]['orig']['rate_pps']),
                int(run_config[key]['tx']['rate_pps']),
                int(run_config[key]['rx']['rate_pps']),
            ])
            self.__record_data_put(frame_size, {
                name.lower() + "_orig_rate_bps": int(run_config[key]['orig']['rate_bps']),
                name.lower() + "_tx_rate_bps": int(run_config[key]['tx']['rate_bps']),
                name.lower() + "_rx_rate_bps": int(run_config[key]['rx']['rate_bps']),
                name.lower() + "_orig_rate_pps": int(run_config[key]['orig']['rate_pps']),
                name.lower() + "_tx_rate_pps": int(run_config[key]['tx']['rate_pps']),
                name.lower() + "_rx_rate_pps": int(run_config[key]['rx']['rate_pps']),
            })
        return config_table

    def _get_chain_table(self, chain_stats):
        """Retrieve the table for a direction.

        chain_stats: {
            'interfaces': ['Port0', 'drop %', 'vhost0', 'Port1'],
            'chains': {
                '0': {'packets': [2000054, '-0.023%', 1999996, 1999996],
                      'lat_min_usec': 10,
                      'lat_max_usec': 187,
                      'lat_avg_usec': 45},
                '1': {...},
                'total': {...}
            }
        }
        """
        chains = chain_stats['chains']
        _annotate_chain_stats(chains)
        header = [('Chain', Formatter.standard)] + \
            [(ifname, Formatter.standard) for ifname in chain_stats['interfaces']]
        # add latency columns if available Avg, Min, Max and percentiles
        lat_keys = []
        lat_map = {'lat_avg_usec': 'Avg lat.',
                   'lat_min_usec': 'Min lat.',
                   'lat_max_usec': 'Max lat.'}
        if 'lat_avg_usec' in chains['0']:
            lat_keys = ['lat_avg_usec', 'lat_min_usec', 'lat_max_usec']

            if not self.config.disable_hdrh:
                lat_keys.append('lat_percentile')
                for percentile in self.config.lat_percentiles:
                    lat_map['lat_' + str(percentile) + '_percentile'] = \
                        str(percentile) + ' %ile lat.'

            for key in lat_map:
                # 'append' expects a single parameter => double parentheses
                header.append((lat_map[key], Formatter.standard))

        table = Table(header)
        for chain in sorted(list(chains.keys()), key=str):
            row = [chain] + chains[chain]['packets']
            for lat_key in lat_keys:
                if lat_key != 'lat_percentile':
                    if chains[chain].get(lat_key, None):
                        row.append(Formatter.standard(chains[chain][lat_key]))
                    else:
                        row.append('n/a')
                else:
                    if not self.config.disable_hdrh:
                        if chains[chain].get(lat_key, None):
                            for percentile in chains[chain][lat_key]:
                                row.append(Formatter.standard(
                                    chains[chain][lat_key][percentile]))
                        else:
                            # one 'n/a' placeholder per configured percentile
                            for _ in self.config.lat_percentiles:
                                row.append('n/a')
            table.add_row(row)
        return table

    def __record_header_put(self, key, value):
        """Store a header field for the record sender (no-op without sender)."""
        if self.sender:
            self.record_header[key] = value

    def __record_data_put(self, key, data):
        """Merge per-frame-size data for the record sender (no-op without sender)."""
        if self.sender:
            if key not in self.record_data:
                self.record_data[key] = {}
            self.record_data[key].update(data)

    def __record_send(self):
        """Send one record per frame size and per run type, then reset."""
        if self.sender:
            self.record_header["@timestamp"] = datetime.utcnow().replace(
                tzinfo=pytz.utc).strftime("%Y-%m-%dT%H:%M:%S.%f%z")
            for frame_size in self.record_data:
                # NOTE(review): 'data' aliases self.record_header, so each
                # iteration accumulates into the shared header dict — this
                # matches the historical behavior; confirm before changing.
                data = self.record_header
                data['frame_size'] = frame_size
                data.update(self.record_data[frame_size])
                run_specific_data = {}
                if 'single_run' in data:
                    run_specific_data['single_run'] = data['single_run']
                    del data['single_run']
                if 'ndr' in data:
                    run_specific_data['ndr'] = data['ndr']
                    run_specific_data['ndr']['drop_limit'] = self.config['measurement']['NDR']
                    del data['ndr']
                if 'pdr' in data:
                    run_specific_data['pdr'] = data['pdr']
                    run_specific_data['pdr']['drop_limit'] = self.config['measurement']['PDR']
                    del data['pdr']
                for key in run_specific_data:
                    data_to_send = data.copy()
                    data_to_send.update(run_specific_data[key])
                    self.sender.record_send(data_to_send)
            self.__record_init()

    def __record_init(self):
        """Reset record header/data; only called when a sender is present."""
        # init is called after checking for sender
        self.record_header = {
            "runlogdate": self.sender.runlogdate,
            "user_label": self.config['user_label']
        }
        self.record_data = {}