2 # Copyright 2016 Cisco Systems, Inc. All rights reserved.
4 # Licensed under the Apache License, Version 2.0 (the "License"); you may
5 # not use this file except in compliance with the License. You may obtain
6 # a copy of the License at
8 # http://www.apache.org/licenses/LICENSE-2.0
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 # License for the specific language governing permissions and limitations
17 from contextlib import contextmanager
18 from datetime import datetime
23 from tabulate import tabulate
25 def _annotate_chain_stats(chain_stats, nodrop_marker='=>'):
26 """Transform a plain chain stats into an annotated one.
30 0: {'packets': [2000054, 1999996, 1999996, 1999996],
39 0: {'packets': [2000054, -58 (-0.034%), '=>', 1999996],
47 In the case of shared net, some columns in packets array can have ''
49 for stats in chain_stats.values():
50 packets = stats['packets']
53 # keep the first counter
54 annotated_packets = [packets[0]]
55 # modify all remaining counters
56 prev_count = packets[0]
57 for index in range(1, count):
58 cur_count = packets[index]
60 # an empty string indicates an unknown counter for a shared interface
61 # do not annotate those
64 drop = cur_count - prev_count
66 dr = (drop * 100.0) / prev_count if prev_count else 0
67 annotated_value = '{:+,} ({:+.4f}%)'.format(drop, dr)
70 # if last column we display the value
71 annotated_value = cur_count if index == count - 1 else nodrop_marker
72 prev_count = cur_count
73 annotated_packets.append(annotated_value)
75 stats['packets'] = annotated_packets
class Formatter(object):
    """Collection of string formatter methods."""

    @staticmethod
    def fixed(data):
        # pass-through formatter: return the value unchanged
        return data

    @staticmethod
    def int(data):
        # thousands-separated integer, e.g. 1234567 -> '1,234,567'
        return '{:,}'.format(data)

    @staticmethod
    def float(decimal):
        # return a formatter rendering a float with `decimal` decimals
        return lambda data: '%.{}f'.format(decimal) % (data)

    @staticmethod
    def standard(data):
        # dispatch on type: ints get thousands separators, floats get
        # 4 decimals, anything else is passed through unchanged
        if isinstance(data, int):
            return Formatter.int(data)
        elif isinstance(data, float):
            return Formatter.float(4)(data)
        return Formatter.fixed(data)

    @staticmethod
    def suffix(suffix_str):
        # return a formatter that appends suffix_str (e.g. ' pps', '%')
        return lambda data: Formatter.standard(data) + suffix_str

    @staticmethod
    def bits(data):
        """Format a bit/s value using the best SI prefix (bps, kbps, Mbps...)."""
        # By default, `best_prefix` returns a value in byte format, this hack (multiply by 8.0)
        # will convert it into bit format.
        bit = 8.0 * bitmath.Bit(float(data))
        bit = bit.best_prefix(bitmath.SI)
        # map the byte-based unit chosen by best_prefix back to its bit counterpart
        byte_to_bit_classes = {
            'kB': bitmath.kb,
            'MB': bitmath.Mb,
            'GB': bitmath.Gb,
            'TB': bitmath.Tb,
            'PB': bitmath.Pb,
            'EB': bitmath.Eb,
            'ZB': bitmath.Zb,
            'YB': bitmath.Yb,
        }
        bps = byte_to_bit_classes.get(bit.unit, bitmath.Bit).from_other(bit) / 8.0
        if bps.unit != 'Bit':
            return bps.format("{value:.4f} {unit}ps")
        return bps.format("{value:.4f} bps")

    @staticmethod
    def percentage(data):
        # None -> empty cell, NaN -> '-', otherwise 4-decimal percentage
        if data is None:
            return ''
        elif math.isnan(data):
            return '-'
        return Formatter.suffix('%')(Formatter.float(4)(data))
135 """ASCII readable table class."""
137 def __init__(self, header):
138 header_row, self.formatters = zip(*header)
139 self.data = [header_row]
140 self.columns = len(header_row)
142 def add_row(self, row):
143 assert self.columns == len(row)
145 for entry, formatter in zip(row, self.formatters):
146 formatted_row.append(formatter(entry))
147 self.data.append(formatted_row)
149 def get_string(self, indent=0):
150 spaces = ' ' * indent
151 table = tabulate(self.data,
156 return table.replace('\n', '\n' + spaces)
class Summarizer(object):
    """Generic summarizer class.

    Accumulates indented text lines into self.str; nesting is driven by
    the _create_block() context manager.
    """

    # number of spaces added per nesting level
    indent_per_level = 2

    def __init__(self):
        self.indent_size = 0
        self.marker_stack = [False]
        self.str = ''

    def __indent(self, marker):
        self.indent_size += self.indent_per_level
        self.marker_stack.append(marker)

    def __unindent(self):
        assert self.indent_size >= self.indent_per_level
        self.indent_size -= self.indent_per_level
        self.marker_stack.pop()

    def __get_indent_string(self):
        current_str = ' ' * self.indent_size
        if self.marker_stack[-1]:
            # replace the last 2 spaces with a '> ' marker
            current_str = current_str[:-2] + '> '
        return current_str

    def _put(self, *args):
        """Append one line; when the last arg is a dict, expand it as an
        indented key/value block via _put_dict().
        """
        self.str += self.__get_indent_string()
        if args and isinstance(args[-1], dict):
            self.str += ' '.join(map(str, args[:-1])) + '\n'
            self._put_dict(args[-1])
        else:
            self.str += ' '.join(map(str, args)) + '\n'

    def _put_dict(self, data):
        with self._create_block(False):
            # .items() instead of py2-only .iteritems(): identical iteration
            # on py2, keeps the class usable on py3
            for key, value in data.items():
                if isinstance(value, dict):
                    self._put(key + ':')
                    self._put_dict(value)
                else:
                    self._put(key + ':', value)

    def _put_table(self, table):
        self.str += self.__get_indent_string()
        self.str += table.get_string(self.indent_size) + '\n'

    def __str__(self):
        return self.str

    @contextmanager
    def _create_block(self, marker=True):
        # indent for the duration of the with-block; marker=True draws '> '
        self.__indent(marker)
        yield
        self.__unindent()
class NFVBenchSummarizer(Summarizer):
    """Summarize nfvbench json result."""

    # columns of the NDR/PDR summary table: (title, formatter) pairs
    ndr_pdr_header = [
        ('-', Formatter.fixed),
        ('L2 Frame Size', Formatter.standard),
        ('Rate (fwd+rev)', Formatter.bits),
        ('Rate (fwd+rev)', Formatter.suffix(' pps')),
        ('Avg Drop Rate', Formatter.suffix('%')),
        ('Avg Latency (usec)', Formatter.standard),
        ('Min Latency (usec)', Formatter.standard),
        ('Max Latency (usec)', Formatter.standard)
    ]

    # columns of the fixed-rate (single run) summary table
    single_run_header = [
        ('L2 Frame Size', Formatter.standard),
        ('Drop Rate', Formatter.suffix('%')),
        ('Avg Latency (usec)', Formatter.standard),
        ('Min Latency (usec)', Formatter.standard),
        ('Max Latency (usec)', Formatter.standard)
    ]

    # columns of the per-direction run-config table
    config_header = [
        ('Direction', Formatter.standard),
        ('Requested TX Rate (bps)', Formatter.bits),
        ('Actual TX Rate (bps)', Formatter.bits),
        ('RX Rate (bps)', Formatter.bits),
        ('Requested TX Rate (pps)', Formatter.suffix(' pps')),
        ('Actual TX Rate (pps)', Formatter.suffix(' pps')),
        ('RX Rate (pps)', Formatter.suffix(' pps'))
    ]

    # result dict keys and their display names, index-aligned
    direction_keys = ['direction-forward', 'direction-reverse', 'direction-total']
    direction_names = ['Forward', 'Reverse', 'Total']
250 def __init__(self, result, sender):
251 """Create a summarizer instance."""
252 Summarizer.__init__(self)
254 self.config = self.result['config']
255 self.record_header = None
256 self.record_data = None
258 # if sender is available initialize record
263 def __get_openstack_spec(self, property):
265 return self.result['openstack_spec'][property]
269 def __summarize(self):
271 self._put('========== NFVBench Summary ==========')
272 self._put('Date:', self.result['date'])
273 self._put('NFVBench version', self.result['nfvbench_version'])
274 self._put('Openstack Neutron:', {
275 'vSwitch': self.__get_openstack_spec('vswitch'),
276 'Encapsulation': self.__get_openstack_spec('encaps')
278 self.__record_header_put('version', self.result['nfvbench_version'])
279 self.__record_header_put('vSwitch', self.__get_openstack_spec('vswitch'))
280 self.__record_header_put('Encapsulation', self.__get_openstack_spec('encaps'))
281 self._put('Benchmarks:')
282 with self._create_block():
283 self._put('Networks:')
284 with self._create_block():
285 network_benchmark = self.result['benchmarks']['network']
287 self._put('Components:')
288 with self._create_block():
289 self._put('Traffic Generator:')
290 with self._create_block(False):
291 self._put('Profile:', self.config['tg-name'])
292 self._put('Tool:', self.config['tg-tool'])
293 if network_benchmark['versions']:
294 self._put('Versions:')
295 with self._create_block():
296 for component, version in network_benchmark['versions'].iteritems():
297 self._put(component + ':', version)
299 if self.config['ndr_run'] or self.config['pdr_run']:
300 self._put('Measurement Parameters:')
301 with self._create_block(False):
302 if self.config['ndr_run']:
303 self._put('NDR:', self.config['measurement']['NDR'])
304 if self.config['pdr_run']:
305 self._put('PDR:', self.config['measurement']['PDR'])
306 self._put('Service chain:')
307 for result in network_benchmark['service_chain'].iteritems():
308 with self._create_block():
309 self.__chain_summarize(*result)
    def __chain_summarize(self, chain_name, chain_benchmark):
        """Summarize one service chain: print its name then its traffic section.

        chain_name: display name of the service chain (e.g. 'PVP')
        chain_benchmark: benchmark dict; must carry a 'result' entry
                         (consumed by __traffic_summarize)
        """
        self._put(chain_name + ':')
        # also record the chain name for the optional sender record
        self.__record_header_put('service_chain', chain_name)
        with self._create_block():
            self._put('Traffic:')
            with self._create_block(False):
                self.__traffic_summarize(chain_benchmark['result'])
319 def __traffic_summarize(self, traffic_benchmark):
320 self._put('Profile:', traffic_benchmark['profile'])
321 self._put('Bidirectional:', traffic_benchmark['bidirectional'])
322 self._put('Flow count:', traffic_benchmark['flow_count'])
323 self._put('Service chains count:', traffic_benchmark['service_chain_count'])
324 self._put('Compute nodes:', traffic_benchmark['compute_nodes'].keys())
326 self.__record_header_put('profile', traffic_benchmark['profile'])
327 self.__record_header_put('bidirectional', traffic_benchmark['bidirectional'])
328 self.__record_header_put('flow_count', traffic_benchmark['flow_count'])
329 self.__record_header_put('sc_count', traffic_benchmark['service_chain_count'])
330 self.__record_header_put('compute_nodes', traffic_benchmark['compute_nodes'].keys())
331 with self._create_block(False):
333 if not self.config['no_traffic']:
334 self._put('Run Summary:')
336 with self._create_block(False):
337 self._put_table(self.__get_summary_table(traffic_benchmark['result']))
340 self._put(traffic_benchmark['result']['warning'])
344 for entry in traffic_benchmark['result'].iteritems():
345 if 'warning' in entry:
347 self.__chain_analysis_summarize(*entry)
350 def __chain_analysis_summarize(self, frame_size, analysis):
352 self._put('L2 frame size:', frame_size)
353 if 'actual_l2frame_size' in analysis:
354 self._put('Actual l2 frame size:', analysis['actual_l2frame_size'])
355 elif self.config['ndr_run'] and 'actual_l2frame_size' in analysis['ndr']:
356 self._put('Actual l2 frame size:', analysis['ndr']['actual_l2frame_size'])
357 elif self.config['pdr_run'] and 'actual_l2frame_size' in analysis['pdr']:
358 self._put('Actual l2 frame size:', analysis['pdr']['actual_l2frame_size'])
359 if self.config['ndr_run']:
360 self._put('NDR search duration:', Formatter.float(0)(analysis['ndr']['time_taken_sec']),
362 self.__record_data_put(frame_size, {'ndr_search_duration': Formatter.float(0)(
363 analysis['ndr']['time_taken_sec'])})
364 if self.config['pdr_run']:
365 self._put('PDR search duration:', Formatter.float(0)(analysis['pdr']['time_taken_sec']),
367 self.__record_data_put(frame_size, {'pdr_search_duration': Formatter.float(0)(
368 analysis['pdr']['time_taken_sec'])})
371 if not self.config['no_traffic'] and self.config['single_run']:
372 self._put('Run Config:')
374 with self._create_block(False):
375 self._put_table(self.__get_config_table(analysis['run_config'], frame_size))
376 if 'warning' in analysis['run_config'] and analysis['run_config']['warning']:
378 self._put(analysis['run_config']['warning'])
381 if 'packet_path_stats' in analysis:
382 for dir in ['Forward', 'Reverse']:
383 self._put(dir + ' Chain Packet Counters and Latency:')
385 with self._create_block(False):
386 self._put_table(self._get_chain_table(analysis['packet_path_stats'][dir]))
389 def __get_summary_table(self, traffic_result):
390 if self.config['single_run']:
391 summary_table = Table(self.single_run_header)
393 summary_table = Table(self.ndr_pdr_header)
395 if self.config['ndr_run']:
396 for frame_size, analysis in traffic_result.iteritems():
397 if frame_size == 'warning':
399 summary_table.add_row([
402 analysis['ndr']['rate_bps'],
403 analysis['ndr']['rate_pps'],
404 analysis['ndr']['stats']['overall']['drop_percentage'],
405 analysis['ndr']['stats']['overall']['avg_delay_usec'],
406 analysis['ndr']['stats']['overall']['min_delay_usec'],
407 analysis['ndr']['stats']['overall']['max_delay_usec']
409 self.__record_data_put(frame_size, {'ndr': {
411 'rate_bps': analysis['ndr']['rate_bps'],
412 'rate_pps': analysis['ndr']['rate_pps'],
413 'drop_percentage': analysis['ndr']['stats']['overall']['drop_percentage'],
414 'avg_delay_usec': analysis['ndr']['stats']['overall']['avg_delay_usec'],
415 'min_delay_usec': analysis['ndr']['stats']['overall']['min_delay_usec'],
416 'max_delay_usec': analysis['ndr']['stats']['overall']['max_delay_usec']
418 if self.config['pdr_run']:
419 for frame_size, analysis in traffic_result.iteritems():
420 if frame_size == 'warning':
422 summary_table.add_row([
425 analysis['pdr']['rate_bps'],
426 analysis['pdr']['rate_pps'],
427 analysis['pdr']['stats']['overall']['drop_percentage'],
428 analysis['pdr']['stats']['overall']['avg_delay_usec'],
429 analysis['pdr']['stats']['overall']['min_delay_usec'],
430 analysis['pdr']['stats']['overall']['max_delay_usec']
432 self.__record_data_put(frame_size, {'pdr': {
434 'rate_bps': analysis['pdr']['rate_bps'],
435 'rate_pps': analysis['pdr']['rate_pps'],
436 'drop_percentage': analysis['pdr']['stats']['overall']['drop_percentage'],
437 'avg_delay_usec': analysis['pdr']['stats']['overall']['avg_delay_usec'],
438 'min_delay_usec': analysis['pdr']['stats']['overall']['min_delay_usec'],
439 'max_delay_usec': analysis['pdr']['stats']['overall']['max_delay_usec']
441 if self.config['single_run']:
442 for frame_size, analysis in traffic_result.iteritems():
443 summary_table.add_row([
445 analysis['stats']['overall']['drop_rate_percent'],
446 analysis['stats']['overall']['rx']['avg_delay_usec'],
447 analysis['stats']['overall']['rx']['min_delay_usec'],
448 analysis['stats']['overall']['rx']['max_delay_usec']
450 self.__record_data_put(frame_size, {'single_run': {
451 'type': 'single_run',
452 'drop_rate_percent': analysis['stats']['overall']['drop_rate_percent'],
453 'avg_delay_usec': analysis['stats']['overall']['rx']['avg_delay_usec'],
454 'min_delay_usec': analysis['stats']['overall']['rx']['min_delay_usec'],
455 'max_delay_usec': analysis['stats']['overall']['rx']['max_delay_usec']
459 def __get_config_table(self, run_config, frame_size):
460 config_table = Table(self.config_header)
461 for key, name in zip(self.direction_keys, self.direction_names):
462 if key not in run_config:
464 config_table.add_row([
466 run_config[key]['orig']['rate_bps'],
467 run_config[key]['tx']['rate_bps'],
468 run_config[key]['rx']['rate_bps'],
469 int(run_config[key]['orig']['rate_pps']),
470 int(run_config[key]['tx']['rate_pps']),
471 int(run_config[key]['rx']['rate_pps']),
473 self.__record_data_put(frame_size, {
474 name.lower() + "_orig_rate_bps": int(run_config[key]['orig']['rate_bps']),
475 name.lower() + "_tx_rate_bps": int(run_config[key]['tx']['rate_bps']),
476 name.lower() + "_rx_rate_bps": int(run_config[key]['rx']['rate_bps']),
477 name.lower() + "_orig_rate_pps": int(run_config[key]['orig']['rate_pps']),
478 name.lower() + "_tx_rate_pps": int(run_config[key]['tx']['rate_pps']),
479 name.lower() + "_rx_rate_pps": int(run_config[key]['rx']['rate_pps']),
484 def _get_chain_table(self, chain_stats):
485 """Retrieve the table for a direction.
488 'interfaces': ['Port0', 'drop %'', 'vhost0', 'Port1'],
490 0: {'packets': [2000054, '-0.023%', 1999996, 1999996],
499 chains = chain_stats['chains']
500 _annotate_chain_stats(chains)
501 header = [('Chain', Formatter.standard)] + \
502 [(ifname, Formatter.standard) for ifname in chain_stats['interfaces']]
503 # add latency columns if available Avg, Min, Max
505 lat_map = {'lat_avg_usec': 'Avg lat.',
506 'lat_min_usec': 'Min lat.',
507 'lat_max_usec': 'Max lat.'}
508 if 'lat_avg_usec' in chains[0]:
509 lat_keys = ['lat_avg_usec', 'lat_min_usec', 'lat_max_usec']
511 header.append((lat_map[key], Formatter.standard))
513 table = Table(header)
514 for chain in sorted(chains.keys()):
515 row = [chain] + chains[chain]['packets']
516 for lat_key in lat_keys:
517 row.append('{:,} usec'.format(chains[chain][lat_key]))
521 def __record_header_put(self, key, value):
523 self.record_header[key] = value
525 def __record_data_put(self, key, data):
527 if key not in self.record_data:
528 self.record_data[key] = {}
529 self.record_data[key].update(data)
531 def __record_send(self):
533 self.record_header["@timestamp"] = datetime.utcnow().replace(
534 tzinfo=pytz.utc).strftime("%Y-%m-%dT%H:%M:%S.%f%z")
535 for frame_size in self.record_data:
536 data = self.record_header
537 data['frame_size'] = frame_size
538 data.update(self.record_data[frame_size])
539 run_specific_data = {}
540 if 'single_run' in data:
541 run_specific_data['single_run'] = data['single_run']
542 del data['single_run']
544 run_specific_data['ndr'] = data['ndr']
545 run_specific_data['ndr']['drop_limit'] = self.config['measurement']['NDR']
548 run_specific_data['pdr'] = data['pdr']
549 run_specific_data['pdr']['drop_limit'] = self.config['measurement']['PDR']
551 for key in run_specific_data:
552 data_to_send = data.copy()
553 data_to_send.update(run_specific_data[key])
554 self.sender.record_send(data_to_send)
557 def __record_init(self):
558 # init is called after checking for sender
559 self.record_header = {
560 "runlogdate": self.sender.runlogdate,
561 "user_label": self.config['user_label']
563 self.record_data = {}