2 # Copyright 2016 Cisco Systems, Inc. All rights reserved.
4 # Licensed under the Apache License, Version 2.0 (the "License"); you may
5 # not use this file except in compliance with the License. You may obtain
6 # a copy of the License at
8 # http://www.apache.org/licenses/LICENSE-2.0
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 # License for the specific language governing permissions and limitations
17 from contextlib import contextmanager
18 from datetime import datetime
23 from tabulate import tabulate
25 def _annotate_chain_stats(chain_stats, nodrop_marker='=>'):
26 """Transform a plain chain stats into an annotated one.
# NOTE(review): this listing is elided (original line numbers are
# non-contiguous); e.g. the definition of `count` (presumably
# `count = len(packets)`) and the `if cur_count == '':` branch are
# not visible here — confirm against the full source.
30 0: {'packets': [2000054, 1999996, 1999996, 1999996],
39 0: {'packets': [2000054, -58 (-0.034%), '=>', 1999996],
47 In the case of shared net, some columns in packets array can have ''.
48 Some columns can also be None which means the data is not available.
50 for stats in list(chain_stats.values()):
51 packets = stats['packets']
54 # keep the first counter
55 annotated_packets = [packets[0]]
56 # modify all remaining counters
57 prev_count = packets[0]
58 for index in range(1, count):
59 cur_count = packets[index]
61 # an empty string indicates an unknown counter for a shared interface
62 # do not annotate those
64 elif cur_count is None:
65 # counter unavailable: display a placeholder instead of a diff
66 annotated_value = 'n/a'
# drop is negative when packets were lost between the two interfaces
68 drop = cur_count - prev_count
# guard against division by zero when the previous counter is 0
70 dr = (drop * 100.0) / prev_count if prev_count else 0
71 annotated_value = '{:+,} ({:+.4f}%)'.format(drop, dr)
74 # if last column we display the value
75 annotated_value = cur_count if index == count - 1 else nodrop_marker
76 prev_count = cur_count
77 annotated_packets.append(annotated_value)
# replace the raw counters with the annotated list, in place
79 stats['packets'] = annotated_packets
81 class Formatter(object):
82 """Collection of string formatter methods."""
# NOTE(review): the `def` headers (presumably `int`, `float`, `standard`,
# `bits`, `percentage` as @staticmethods) are elided from this listing —
# confirm signatures against the full source.
# Formats an int with thousands separators, e.g. 1234567 -> '1,234,567'.
90 return '{:,}'.format(data)
# Returns a formatter closure rendering floats with `decimal` places.
94 return lambda data: '%.{}f'.format(decimal) % (data)
98 if isinstance(data, int):
99 return Formatter.int(data)
100 if isinstance(data, float):
101 return Formatter.float(4)(data)
# fall through: non-numeric data is handled by Formatter.fixed
102 return Formatter.fixed(data)
105 def suffix(suffix_str):
106 return lambda data: Formatter.standard(data) + suffix_str
110 # By default, `best_prefix` returns a value in byte format, this hack (multiply by 8.0)
111 # will convert it into bit format.
# NOTE(review): `bitmath` is used here but its import is not visible in
# this excerpt — presumably imported at the top of the full file.
112 bit = 8.0 * bitmath.Bit(float(data))
113 bit = bit.best_prefix(bitmath.SI)
114 byte_to_bit_classes = {
# (mapping entries elided from this listing)
124 bps = byte_to_bit_classes.get(bit.unit, bitmath.Bit).from_other(bit) / 8.0
125 if bps.unit != 'Bit':
126 return bps.format("{value:.4f} {unit}ps")
127 return bps.format("{value:.4f} bps")
# NOTE(review): the `class Table...` statement itself is elided above this
# docstring in the listing — confirm the class header in the full source.
139 """ASCII readable table class."""
141 def __init__(self, header):
142 header_row, self.formatters = list(zip(*header))
# first data row is the header itself; columns fixed by header length
143 self.data = [header_row]
144 self.columns = len(header_row)
146 def add_row(self, row):
# every row must match the header column count
147 assert self.columns == len(row)
# NOTE(review): `formatted_row` initialization (presumably
# `formatted_row = []`) is elided from this listing.
149 for entry, formatter in zip(row, self.formatters):
150 formatted_row.append(formatter(entry))
151 self.data.append(formatted_row)
153 def get_string(self, indent=0):
154 spaces = ' ' * indent
# NOTE(review): tabulate() keyword arguments (headers/format options)
# are elided from this listing.
155 table = tabulate(self.data,
# indent every line of the rendered table by `indent` spaces
160 return table.replace('\n', '\n' + spaces)
163 class Summarizer(object):
164 """Generic summarizer class."""
# NOTE(review): the __init__ header and the initialization of `self.str`,
# `self.indent_size` and `self.indent_per_level` are elided from this
# listing; only the marker stack initialization is visible.
170 self.marker_stack = [False]
173 def __indent(self, marker):
174 self.indent_size += self.indent_per_level
175 self.marker_stack.append(marker)
177 def __unindent(self):
178 assert self.indent_size >= self.indent_per_level
179 self.indent_size -= self.indent_per_level
180 self.marker_stack.pop()
182 def __get_indent_string(self):
183 current_str = ' ' * self.indent_size
# replace the last two spaces with '> ' when the current block is marked
184 if self.marker_stack[-1]:
185 current_str = current_str[:-2] + '> '
# NOTE(review): the `return current_str` line is elided from this listing.
188 def _put(self, *args):
189 self.str += self.__get_indent_string()
# if the last arg is a dict, print the other args then expand the dict
190 if args and isinstance(args[-1], dict):
191 self.str += ' '.join(map(str, args[:-1])) + '\n'
192 self._put_dict(args[-1])
# NOTE(review): the `else:` line is elided from this listing.
194 self.str += ' '.join(map(str, args)) + '\n'
196 def _put_dict(self, data):
197 with self._create_block(False):
198 for key, value in list(data.items()):
199 if isinstance(value, dict):
# recurse into nested dicts one indent level deeper
201 self._put_dict(value)
203 self._put(key + ':', value)
205 def _put_table(self, table):
206 self.str += self.__get_indent_string()
207 self.str += table.get_string(self.indent_size) + '\n'
# NOTE(review): presumably decorated with @contextmanager (contextlib is
# imported at the top of the file); the decorator and the yield/unindent
# lines are elided from this listing — confirm in the full source.
213 def _create_block(self, marker=True):
214 self.__indent(marker)
219 class NFVBenchSummarizer(Summarizer):
220 """Summarize nfvbench json result."""
# NOTE(review): the opener of this list (presumably `ndr_pdr_header = [`)
# is elided from this listing. Column order must match the rows built in
# __get_summary_table for the NDR/PDR case.
223 ('-', Formatter.fixed),
224 ('L2 Frame Size', Formatter.standard),
225 ('Rate (fwd+rev)', Formatter.bits),
226 ('Rate (fwd+rev)', Formatter.suffix(' pps')),
227 ('Avg Drop Rate', Formatter.suffix('%')),
228 ('Avg Latency (usec)', Formatter.standard),
229 ('Min Latency (usec)', Formatter.standard),
230 ('Max Latency (usec)', Formatter.standard)
# Columns for the fixed-rate (single run) summary table.
233 single_run_header = [
234 ('L2 Frame Size', Formatter.standard),
235 ('Drop Rate', Formatter.suffix('%')),
236 ('Avg Latency (usec)', Formatter.standard),
237 ('Min Latency (usec)', Formatter.standard),
238 ('Max Latency (usec)', Formatter.standard)
# NOTE(review): the opener of this list (presumably `config_header = [`)
# is elided from this listing. Used by __get_config_table.
242 ('Direction', Formatter.standard),
243 ('Requested TX Rate (bps)', Formatter.bits),
244 ('Actual TX Rate (bps)', Formatter.bits),
245 ('RX Rate (bps)', Formatter.bits),
246 ('Requested TX Rate (pps)', Formatter.suffix(' pps')),
247 ('Actual TX Rate (pps)', Formatter.suffix(' pps')),
248 ('RX Rate (pps)', Formatter.suffix(' pps'))
# Parallel lists: run_config keys and their display names, consumed
# pairwise (zip) in __get_config_table.
251 direction_keys = ['direction-forward', 'direction-reverse', 'direction-total']
252 direction_names = ['Forward', 'Reverse', 'Total']
254 def __init__(self, result, sender):
255 """Create a summarizer instance."""
256 Summarizer.__init__(self)
# NOTE(review): the assignments of `self.result = result` and
# `self.sender = sender` are elided from this listing (self.result is
# read on the next line, so it must be set in the elided line 257).
258 self.config = self.result['config']
# record_header/record_data stay None unless a sender is configured
259 self.record_header = None
260 self.record_data = None
262 # if sender is available initialize record
# NOTE(review): the parameter name `property` shadows the Python builtin;
# harmless here but worth renaming in a future cleanup.
267 def __get_openstack_spec(self, property):
# Look up one property (e.g. 'vswitch', 'encaps') from the result's
# openstack_spec section.
269 return self.result['openstack_spec'][property]
273 def __summarize(self):
# Top-level renderer: builds the whole text summary and, in parallel,
# fills the record header sent to the optional sender.
275 self._put('========== NFVBench Summary ==========')
276 self._put('Date:', self.result['date'])
277 self._put('NFVBench version', self.result['nfvbench_version'])
# dict arg is expanded as an indented sub-block by Summarizer._put
278 self._put('Openstack Neutron:', {
279 'vSwitch': self.__get_openstack_spec('vswitch'),
280 'Encapsulation': self.__get_openstack_spec('encaps')
282 self.__record_header_put('version', self.result['nfvbench_version'])
283 self.__record_header_put('vSwitch', self.__get_openstack_spec('vswitch'))
284 self.__record_header_put('Encapsulation', self.__get_openstack_spec('encaps'))
285 self._put('Benchmarks:')
286 with self._create_block():
287 self._put('Networks:')
288 with self._create_block():
289 network_benchmark = self.result['benchmarks']['network']
291 self._put('Components:')
292 with self._create_block():
293 self._put('Traffic Generator:')
294 with self._create_block(False):
295 self._put('Profile:', self.config['tg-name'])
296 self._put('Tool:', self.config['tg-tool'])
297 if network_benchmark['versions']:
298 self._put('Versions:')
299 with self._create_block():
300 for component, version in list(network_benchmark['versions'].items()):
301 self._put(component + ':', version)
# show the NDR/PDR thresholds only for searches that were run
303 if self.config['ndr_run'] or self.config['pdr_run']:
304 self._put('Measurement Parameters:')
305 with self._create_block(False):
306 if self.config['ndr_run']:
307 self._put('NDR:', self.config['measurement']['NDR'])
308 if self.config['pdr_run']:
309 self._put('PDR:', self.config['measurement']['PDR'])
310 self._put('Service chain:')
# each item is a (chain_name, chain_benchmark) pair
311 for result in list(network_benchmark['service_chain'].items()):
312 with self._create_block():
313 self.__chain_summarize(*result)
315 def __chain_summarize(self, chain_name, chain_benchmark):
# Render one service chain section and record its name for the sender.
316 self._put(chain_name + ':')
317 self.__record_header_put('service_chain', chain_name)
318 with self._create_block():
319 self._put('Traffic:')
320 with self._create_block(False):
321 self.__traffic_summarize(chain_benchmark['result'])
323 def __traffic_summarize(self, traffic_benchmark):
# Render the traffic profile section, the run summary table and any
# per-frame-size chain analysis; mirror key fields into the record header.
324 self._put('Profile:', traffic_benchmark['profile'])
325 self._put('Bidirectional:', traffic_benchmark['bidirectional'])
326 self._put('Flow count:', traffic_benchmark['flow_count'])
327 self._put('Service chains count:', traffic_benchmark['service_chain_count'])
328 self._put('Compute nodes:', list(traffic_benchmark['compute_nodes'].keys()))
330 self.__record_header_put('profile', traffic_benchmark['profile'])
331 self.__record_header_put('bidirectional', traffic_benchmark['bidirectional'])
332 self.__record_header_put('flow_count', traffic_benchmark['flow_count'])
333 self.__record_header_put('sc_count', traffic_benchmark['service_chain_count'])
334 self.__record_header_put('compute_nodes', list(traffic_benchmark['compute_nodes'].keys()))
335 with self._create_block(False):
# no summary table when traffic generation was skipped
337 if not self.config['no_traffic']:
338 self._put('Run Summary:')
340 with self._create_block(False):
341 self._put_table(self.__get_summary_table(traffic_benchmark['result']))
# NOTE(review): the guard around this warning output (presumably
# `if 'warning' in ...`) is elided from this listing.
344 self._put(traffic_benchmark['result']['warning'])
# iterate (frame_size, analysis) entries, skipping the 'warning' key
348 for entry in list(traffic_benchmark['result'].items()):
349 if 'warning' in entry:
# NOTE(review): the `continue` after this check is elided.
351 self.__chain_analysis_summarize(*entry)
354 def __chain_analysis_summarize(self, frame_size, analysis):
# Render per-frame-size detail: search durations, run config table and
# per-direction packet path stats; record durations for the sender.
356 self._put('L2 frame size:', frame_size)
357 if self.config['ndr_run']:
# trailing arguments of this _put call ('sec' suffix, presumably)
# are elided from this listing
358 self._put('NDR search duration:', Formatter.float(0)(analysis['ndr']['time_taken_sec']),
360 self.__record_data_put(frame_size, {'ndr_search_duration': Formatter.float(0)(
361 analysis['ndr']['time_taken_sec'])})
362 if self.config['pdr_run']:
363 self._put('PDR search duration:', Formatter.float(0)(analysis['pdr']['time_taken_sec']),
365 self.__record_data_put(frame_size, {'pdr_search_duration': Formatter.float(0)(
366 analysis['pdr']['time_taken_sec'])})
# run config table only applies to fixed-rate runs with traffic
369 if not self.config['no_traffic'] and self.config['single_run']:
370 self._put('Run Config:')
372 with self._create_block(False):
373 self._put_table(self.__get_config_table(analysis['run_config'], frame_size))
374 if 'warning' in analysis['run_config'] and analysis['run_config']['warning']:
376 self._put(analysis['run_config']['warning'])
379 if 'packet_path_stats' in analysis:
# NOTE(review): loop variable `dir` shadows the builtin of the same name.
380 for dir in ['Forward', 'Reverse']:
381 self._put(dir + ' Chain Packet Counters and Latency:')
383 with self._create_block(False):
384 self._put_table(self._get_chain_table(analysis['packet_path_stats'][dir]))
387 def __get_summary_table(self, traffic_result):
# Build the Run Summary table; rows depend on which run modes were
# enabled (ndr_run / pdr_run / single_run). Also records the per-frame
# overall stats for the sender.
388 if self.config['single_run']:
389 summary_table = Table(self.single_run_header)
# NOTE(review): the `else:` line is elided from this listing.
391 summary_table = Table(self.ndr_pdr_header)
393 if self.config['ndr_run']:
394 for frame_size, analysis in list(traffic_result.items()):
# skip the 'warning' pseudo-entry mixed into the result dict
395 if frame_size == 'warning':
# NOTE(review): the `continue` and the first row cells ('NDR'
# marker and frame size, presumably) are elided from this listing.
397 summary_table.add_row([
400 analysis['ndr']['rate_bps'],
401 analysis['ndr']['rate_pps'],
402 analysis['ndr']['stats']['overall']['drop_percentage'],
403 analysis['ndr']['stats']['overall']['avg_delay_usec'],
404 analysis['ndr']['stats']['overall']['min_delay_usec'],
405 analysis['ndr']['stats']['overall']['max_delay_usec']
407 self.__record_data_put(frame_size, {'ndr': {
409 'rate_bps': analysis['ndr']['rate_bps'],
410 'rate_pps': analysis['ndr']['rate_pps'],
411 'drop_percentage': analysis['ndr']['stats']['overall']['drop_percentage'],
412 'avg_delay_usec': analysis['ndr']['stats']['overall']['avg_delay_usec'],
413 'min_delay_usec': analysis['ndr']['stats']['overall']['min_delay_usec'],
414 'max_delay_usec': analysis['ndr']['stats']['overall']['max_delay_usec']
# same structure as the NDR branch, for the PDR search results
416 if self.config['pdr_run']:
417 for frame_size, analysis in list(traffic_result.items()):
418 if frame_size == 'warning':
420 summary_table.add_row([
423 analysis['pdr']['rate_bps'],
424 analysis['pdr']['rate_pps'],
425 analysis['pdr']['stats']['overall']['drop_percentage'],
426 analysis['pdr']['stats']['overall']['avg_delay_usec'],
427 analysis['pdr']['stats']['overall']['min_delay_usec'],
428 analysis['pdr']['stats']['overall']['max_delay_usec']
430 self.__record_data_put(frame_size, {'pdr': {
432 'rate_bps': analysis['pdr']['rate_bps'],
433 'rate_pps': analysis['pdr']['rate_pps'],
434 'drop_percentage': analysis['pdr']['stats']['overall']['drop_percentage'],
435 'avg_delay_usec': analysis['pdr']['stats']['overall']['avg_delay_usec'],
436 'min_delay_usec': analysis['pdr']['stats']['overall']['min_delay_usec'],
437 'max_delay_usec': analysis['pdr']['stats']['overall']['max_delay_usec']
# fixed-rate runs use the overall rx stats instead of search results
439 if self.config['single_run']:
440 for frame_size, analysis in list(traffic_result.items()):
441 summary_table.add_row([
443 analysis['stats']['overall']['drop_rate_percent'],
444 analysis['stats']['overall']['rx']['avg_delay_usec'],
445 analysis['stats']['overall']['rx']['min_delay_usec'],
446 analysis['stats']['overall']['rx']['max_delay_usec']
448 self.__record_data_put(frame_size, {'single_run': {
449 'type': 'single_run',
450 'drop_rate_percent': analysis['stats']['overall']['drop_rate_percent'],
451 'avg_delay_usec': analysis['stats']['overall']['rx']['avg_delay_usec'],
452 'min_delay_usec': analysis['stats']['overall']['rx']['min_delay_usec'],
453 'max_delay_usec': analysis['stats']['overall']['rx']['max_delay_usec']
# NOTE(review): the closing braces and the `return summary_table` line
# are elided from this listing.
457 def __get_config_table(self, run_config, frame_size):
# Build the Run Config table: one row per traffic direction present in
# run_config (forward / reverse / total), and record the rates as ints.
458 config_table = Table(self.config_header)
459 for key, name in zip(self.direction_keys, self.direction_names):
# directions absent from run_config are skipped
460 if key not in run_config:
# NOTE(review): the `continue` and the first row cell (direction
# name, presumably) are elided from this listing.
462 config_table.add_row([
464 run_config[key]['orig']['rate_bps'],
465 run_config[key]['tx']['rate_bps'],
466 run_config[key]['rx']['rate_bps'],
# pps values are truncated to int for display
467 int(run_config[key]['orig']['rate_pps']),
468 int(run_config[key]['tx']['rate_pps']),
469 int(run_config[key]['rx']['rate_pps']),
# record keys are prefixed with the lowercase direction name,
# e.g. 'forward_tx_rate_bps'
471 self.__record_data_put(frame_size, {
472 name.lower() + "_orig_rate_bps": int(run_config[key]['orig']['rate_bps']),
473 name.lower() + "_tx_rate_bps": int(run_config[key]['tx']['rate_bps']),
474 name.lower() + "_rx_rate_bps": int(run_config[key]['rx']['rate_bps']),
475 name.lower() + "_orig_rate_pps": int(run_config[key]['orig']['rate_pps']),
476 name.lower() + "_tx_rate_pps": int(run_config[key]['tx']['rate_pps']),
477 name.lower() + "_rx_rate_pps": int(run_config[key]['rx']['rate_pps']),
# NOTE(review): the closing brace and the `return config_table` line
# are elided from this listing.
482 def _get_chain_table(self, chain_stats):
483 """Retrieve the table for a direction.
486 'interfaces': ['Port0', 'drop %'', 'vhost0', 'Port1'],
488 '0': {'packets': [2000054, '-0.023%', 1999996, 1999996],
# mutates chains in place: raw packet counters become annotated strings
497 chains = chain_stats['chains']
498 _annotate_chain_stats(chains)
# one column per interface, preceded by the chain index column
499 header = [('Chain', Formatter.standard)] + \
500 [(ifname, Formatter.standard) for ifname in chain_stats['interfaces']]
501 # add latency columns if available Avg, Min, Max
503 lat_map = {'lat_avg_usec': 'Avg lat.',
504 'lat_min_usec': 'Min lat.',
505 'lat_max_usec': 'Max lat.'}
# presence of lat_avg_usec in chain '0' implies all latency keys exist
506 if 'lat_avg_usec' in chains['0']:
507 lat_keys = ['lat_avg_usec', 'lat_min_usec', 'lat_max_usec']
# NOTE(review): the `for key in lat_keys:` line and the branch that
# presumably initializes lat_keys to [] when latency is absent are
# elided from this listing — confirm in the full source.
509 header.append((lat_map[key], Formatter.standard))
511 table = Table(header)
# sort chain ids as strings for a stable row order
512 for chain in sorted(list(chains.keys()), key=str):
513 row = [chain] + chains[chain]['packets']
514 for lat_key in lat_keys:
515 row.append('{:,} usec'.format(chains[chain][lat_key]))
# NOTE(review): the add_row call and `return table` are elided.
519 def __record_header_put(self, key, value):
# NOTE(review): a guard line is elided here (presumably
# `if self.sender:`, since record_header is None without a sender) —
# confirm in the full source.
521 self.record_header[key] = value
523 def __record_data_put(self, key, data):
# NOTE(review): a guard line is elided here (presumably
# `if self.sender:`) — confirm in the full source.
# merge `data` into the per-frame-size record dict, creating it on demand
525 if key not in self.record_data:
526 self.record_data[key] = {}
527 self.record_data[key].update(data)
529 def __record_send(self):
# Send one record per frame size per run type (single_run/ndr/pdr)
# through the configured sender, stamped with a UTC timestamp.
# NOTE(review): `pytz` is used here but its import is not visible in this
# excerpt — presumably imported at the top of the full file.
531 self.record_header["@timestamp"] = datetime.utcnow().replace(
532 tzinfo=pytz.utc).strftime("%Y-%m-%dT%H:%M:%S.%f%z")
533 for frame_size in self.record_data:
# NOTE(review): `data` aliases record_header (no copy), so the
# frame_size/stats keys added below accumulate in record_header
# across iterations — looks intentional-but-fragile; verify.
534 data = self.record_header
535 data['frame_size'] = frame_size
536 data.update(self.record_data[frame_size])
537 run_specific_data = {}
# pull run-type sub-dicts out of the flat record so each run type
# is sent as its own record below
538 if 'single_run' in data:
539 run_specific_data['single_run'] = data['single_run']
540 del data['single_run']
# NOTE(review): the `if 'ndr' in data:` guard and its `del` (and the
# matching pdr lines) are elided from this listing.
542 run_specific_data['ndr'] = data['ndr']
543 run_specific_data['ndr']['drop_limit'] = self.config['measurement']['NDR']
546 run_specific_data['pdr'] = data['pdr']
547 run_specific_data['pdr']['drop_limit'] = self.config['measurement']['PDR']
549 for key in run_specific_data:
# copy so each run type's record gets only its own specifics
550 data_to_send = data.copy()
551 data_to_send.update(run_specific_data[key])
552 self.sender.record_send(data_to_send)
# NOTE(review): a trailing call (presumably self.__record_init()) may be
# elided after this loop — confirm in the full source.
555 def __record_init(self):
556 # init is called after checking for sender
# reset the header to the run-constant fields and clear per-frame data
557 self.record_header = {
558 "runlogdate": self.sender.runlogdate,
559 "user_label": self.config['user_label']
561 self.record_data = {}