# Copyright 2016 Cisco Systems, Inc.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from contextlib import contextmanager
from datetime import datetime
import math

import bitmath
import pytz
from tabulate import tabulate
25 def _annotate_chain_stats(chain_stats, nodrop_marker='=>'):
26 """Transform a plain chain stats into an annotated one.
30 0: {'packets': [2000054, 1999996, 1999996, 1999996],
39 0: {'packets': [2000054, -58 (-0.034%), '=>', 1999996],
47 In the case of shared net, some columns in packets array can have ''
49 for stats in chain_stats.values():
50 packets = stats['packets']
53 # keep the first counter
54 annotated_packets = [packets[0]]
55 # modify all remaining counters
56 prev_count = packets[0]
57 for index in range(1, count):
58 cur_count = packets[index]
60 # an empty string indicates an unknown counter for a shared interface
61 # do not annotate those
64 drop = cur_count - prev_count
66 dr = (drop * 100.0) / prev_count if prev_count else 0
67 annotated_value = '{:+,} ({:+.4f}%)'.format(drop, dr)
70 # if last column we display the value
71 annotated_value = cur_count if index == count - 1 else nodrop_marker
72 prev_count = cur_count
73 annotated_packets.append(annotated_value)
75 stats['packets'] = annotated_packets
class Formatter(object):
    """Collection of string formatter methods."""

    @staticmethod
    def fixed(data):
        # identity formatter: pass the value through unchanged
        return data

    @staticmethod
    def int(data):
        # thousands-separated integer, e.g. 1234567 -> '1,234,567'
        return '{:,}'.format(data)

    @staticmethod
    def float(decimal):
        # returns a formatter rendering floats with `decimal` digits
        return lambda data: '%.{}f'.format(decimal) % (data)

    @staticmethod
    def standard(data):
        # dispatch on type: ints get separators, floats get 4 decimals,
        # anything else is passed through unchanged
        if isinstance(data, int):
            return Formatter.int(data)
        elif isinstance(data, float):
            return Formatter.float(4)(data)
        return Formatter.fixed(data)

    @staticmethod
    def suffix(suffix_str):
        # returns a formatter appending suffix_str to the standard format
        return lambda data: Formatter.standard(data) + suffix_str

    @staticmethod
    def bits(data):
        """Format a bit/sec value with the best SI prefix, e.g. '1.2000 Gbps'."""
        # By default, `best_prefix` returns a value in byte format, this hack (multiply by 8.0)
        # will convert it into bit format.
        bit = 8.0 * bitmath.Bit(float(data))
        bit = bit.best_prefix(bitmath.SI)
        byte_to_bit_classes = {
            'kB': bitmath.kb,
            'MB': bitmath.Mb,
            'GB': bitmath.Gb,
            'TB': bitmath.Tb,
            'PB': bitmath.Pb,
            'EB': bitmath.Eb,
            'ZB': bitmath.Zb,
            'YB': bitmath.Yb,
        }
        bps = byte_to_bit_classes.get(bit.unit, bitmath.Bit).from_other(bit) / 8.0
        if bps.unit != 'Bit':
            return bps.format("{value:.4f} {unit}ps")
        return bps.format("{value:.4f} bps")

    @staticmethod
    def percentage(data):
        # None -> '' (unknown), NaN -> '-' (not applicable), else '12.3456%'
        if data is None:
            return ''
        elif math.isnan(data):
            return '-'
        return Formatter.suffix('%')(Formatter.float(4)(data))
class Table(object):
    """ASCII readable table class.

    Rows are formatted on insertion using the per-column formatter callables
    supplied with the header, and rendered with tabulate on demand.
    """

    def __init__(self, header):
        # header: list of (column title, formatter callable) tuples
        header_row, self.formatters = zip(*header)
        self.data = [header_row]
        self.columns = len(header_row)

    def add_row(self, row):
        # each row must have exactly one entry per column
        assert self.columns == len(row)
        formatted_row = []
        for entry, formatter in zip(row, self.formatters):
            formatted_row.append(formatter(entry))
        self.data.append(formatted_row)

    def get_string(self, indent=0):
        """Render the table as a grid string, indenting every line by `indent`."""
        spaces = ' ' * indent
        table = tabulate(self.data,
                         headers='firstrow',
                         tablefmt='grid',
                         stralign='center',
                         floatfmt='.2f')
        return table.replace('\n', '\n' + spaces)
class Summarizer(object):
    """Generic summarizer class.

    Accumulates an indented, human-readable text report in self.str.
    Indentation is managed as a stack via the _create_block context manager;
    a block created with marker=True prefixes its lines with '> '.
    """

    indent_per_level = 2

    def __init__(self):
        self.indent_size = 0
        self.marker_stack = [False]
        self.str = ''

    def __indent(self, marker):
        self.indent_size += self.indent_per_level
        self.marker_stack.append(marker)

    def __unindent(self):
        assert self.indent_size >= self.indent_per_level
        self.indent_size -= self.indent_per_level
        self.marker_stack.pop()

    def __get_indent_string(self):
        current_str = ' ' * self.indent_size
        if self.marker_stack[-1]:
            # replace the last 2 spaces with the block marker
            current_str = current_str[:-2] + '> '
        return current_str

    def _put(self, *args):
        """Append one line made of the space-joined args; a trailing dict
        argument is expanded as an indented key/value block."""
        self.str += self.__get_indent_string()
        if args and isinstance(args[-1], dict):
            self.str += ' '.join(map(str, args[:-1])) + '\n'
            self._put_dict(args[-1])
        else:
            self.str += ' '.join(map(str, args)) + '\n'

    def _put_dict(self, data):
        # .items() instead of py2-only .iteritems() for py2/py3 compatibility
        with self._create_block(False):
            for key, value in data.items():
                if isinstance(value, dict):
                    self._put(key + ':')
                    self._put_dict(value)
                else:
                    self._put(key + ':', value)

    def _put_table(self, table):
        self.str += self.__get_indent_string()
        self.str += table.get_string(self.indent_size) + '\n'

    def __str__(self):
        return self.str

    @contextmanager
    def _create_block(self, marker=True):
        self.__indent(marker)
        yield
        self.__unindent()
class NFVBenchSummarizer(Summarizer):
    """Summarize nfvbench json result."""

    # Column layout (title, formatter) for the NDR/PDR summary table;
    # the leading '-' column holds the run type (NDR or PDR).
    ndr_pdr_header = [
        ('-', Formatter.fixed),
        ('L2 Frame Size', Formatter.standard),
        ('Rate (fwd+rev)', Formatter.bits),
        ('Rate (fwd+rev)', Formatter.suffix(' pps')),
        ('Avg Drop Rate', Formatter.suffix('%')),
        ('Avg Latency (usec)', Formatter.standard),
        ('Min Latency (usec)', Formatter.standard),
        ('Max Latency (usec)', Formatter.standard)
    ]

    # Column layout for the fixed-rate (single run) summary table.
    single_run_header = [
        ('L2 Frame Size', Formatter.standard),
        ('Drop Rate', Formatter.suffix('%')),
        ('Avg Latency (usec)', Formatter.standard),
        ('Min Latency (usec)', Formatter.standard),
        ('Max Latency (usec)', Formatter.standard)
    ]

    # Column layout for the per-direction run configuration table.
    config_header = [
        ('Direction', Formatter.standard),
        ('Requested TX Rate (bps)', Formatter.bits),
        ('Actual TX Rate (bps)', Formatter.bits),
        ('RX Rate (bps)', Formatter.bits),
        ('Requested TX Rate (pps)', Formatter.suffix(' pps')),
        ('Actual TX Rate (pps)', Formatter.suffix(' pps')),
        ('RX Rate (pps)', Formatter.suffix(' pps'))
    ]

    # result dict keys and matching display names, in table row order
    direction_keys = ['direction-forward', 'direction-reverse', 'direction-total']
    direction_names = ['Forward', 'Reverse', 'Total']
250 def __init__(self, result, sender):
251 """Create a summarizer instance."""
252 Summarizer.__init__(self)
254 self.config = self.result['config']
255 self.record_header = None
256 self.record_data = None
258 # if sender is available initialize record
263 def __get_openstack_spec(self, property):
265 return self.result['openstack_spec'][property]
269 def __summarize(self):
271 self._put('========== NFVBench Summary ==========')
272 self._put('Date:', self.result['date'])
273 self._put('NFVBench version', self.result['nfvbench_version'])
274 self._put('Openstack Neutron:', {
275 'vSwitch': self.__get_openstack_spec('vswitch'),
276 'Encapsulation': self.__get_openstack_spec('encaps')
278 self.__record_header_put('version', self.result['nfvbench_version'])
279 self.__record_header_put('vSwitch', self.__get_openstack_spec('vswitch'))
280 self.__record_header_put('Encapsulation', self.__get_openstack_spec('encaps'))
281 self._put('Benchmarks:')
282 with self._create_block():
283 self._put('Networks:')
284 with self._create_block():
285 network_benchmark = self.result['benchmarks']['network']
287 self._put('Components:')
288 with self._create_block():
289 self._put('Traffic Generator:')
290 with self._create_block(False):
291 self._put('Profile:', self.config['tg-name'])
292 self._put('Tool:', self.config['tg-tool'])
293 if network_benchmark['versions']:
294 self._put('Versions:')
295 with self._create_block():
296 for component, version in network_benchmark['versions'].iteritems():
297 self._put(component + ':', version)
299 if self.config['ndr_run'] or self.config['pdr_run']:
300 self._put('Measurement Parameters:')
301 with self._create_block(False):
302 if self.config['ndr_run']:
303 self._put('NDR:', self.config['measurement']['NDR'])
304 if self.config['pdr_run']:
305 self._put('PDR:', self.config['measurement']['PDR'])
306 self._put('Service chain:')
307 for result in network_benchmark['service_chain'].iteritems():
308 with self._create_block():
309 self.__chain_summarize(*result)
311 def __chain_summarize(self, chain_name, chain_benchmark):
312 self._put(chain_name + ':')
313 self.__record_header_put('service_chain', chain_name)
314 with self._create_block():
315 self._put('Traffic:')
316 with self._create_block(False):
317 self.__traffic_summarize(chain_benchmark['result'])
319 def __traffic_summarize(self, traffic_benchmark):
320 self._put('Profile:', traffic_benchmark['profile'])
321 self._put('Bidirectional:', traffic_benchmark['bidirectional'])
322 self._put('Flow count:', traffic_benchmark['flow_count'])
323 self._put('Service chains count:', traffic_benchmark['service_chain_count'])
324 self._put('Compute nodes:', traffic_benchmark['compute_nodes'].keys())
326 self.__record_header_put('profile', traffic_benchmark['profile'])
327 self.__record_header_put('bidirectional', traffic_benchmark['bidirectional'])
328 self.__record_header_put('flow_count', traffic_benchmark['flow_count'])
329 self.__record_header_put('sc_count', traffic_benchmark['service_chain_count'])
330 self.__record_header_put('compute_nodes', traffic_benchmark['compute_nodes'].keys())
331 with self._create_block(False):
333 if not self.config['no_traffic']:
334 self._put('Run Summary:')
336 with self._create_block(False):
337 self._put_table(self.__get_summary_table(traffic_benchmark['result']))
340 self._put(traffic_benchmark['result']['warning'])
344 for entry in traffic_benchmark['result'].iteritems():
345 if 'warning' in entry:
347 self.__chain_analysis_summarize(*entry)
350 def __chain_analysis_summarize(self, frame_size, analysis):
352 self._put('L2 frame size:', frame_size)
353 if self.config['ndr_run']:
354 self._put('NDR search duration:', Formatter.float(0)(analysis['ndr']['time_taken_sec']),
356 self.__record_data_put(frame_size, {'ndr_search_duration': Formatter.float(0)(
357 analysis['ndr']['time_taken_sec'])})
358 if self.config['pdr_run']:
359 self._put('PDR search duration:', Formatter.float(0)(analysis['pdr']['time_taken_sec']),
361 self.__record_data_put(frame_size, {'pdr_search_duration': Formatter.float(0)(
362 analysis['pdr']['time_taken_sec'])})
365 if not self.config['no_traffic'] and self.config['single_run']:
366 self._put('Run Config:')
368 with self._create_block(False):
369 self._put_table(self.__get_config_table(analysis['run_config'], frame_size))
370 if 'warning' in analysis['run_config'] and analysis['run_config']['warning']:
372 self._put(analysis['run_config']['warning'])
375 if 'packet_path_stats' in analysis:
376 for dir in ['Forward', 'Reverse']:
377 self._put(dir + ' Chain Packet Counters and Latency:')
379 with self._create_block(False):
380 self._put_table(self._get_chain_table(analysis['packet_path_stats'][dir]))
383 def __get_summary_table(self, traffic_result):
384 if self.config['single_run']:
385 summary_table = Table(self.single_run_header)
387 summary_table = Table(self.ndr_pdr_header)
389 if self.config['ndr_run']:
390 for frame_size, analysis in traffic_result.iteritems():
391 if frame_size == 'warning':
393 summary_table.add_row([
396 analysis['ndr']['rate_bps'],
397 analysis['ndr']['rate_pps'],
398 analysis['ndr']['stats']['overall']['drop_percentage'],
399 analysis['ndr']['stats']['overall']['avg_delay_usec'],
400 analysis['ndr']['stats']['overall']['min_delay_usec'],
401 analysis['ndr']['stats']['overall']['max_delay_usec']
403 self.__record_data_put(frame_size, {'ndr': {
405 'rate_bps': analysis['ndr']['rate_bps'],
406 'rate_pps': analysis['ndr']['rate_pps'],
407 'drop_percentage': analysis['ndr']['stats']['overall']['drop_percentage'],
408 'avg_delay_usec': analysis['ndr']['stats']['overall']['avg_delay_usec'],
409 'min_delay_usec': analysis['ndr']['stats']['overall']['min_delay_usec'],
410 'max_delay_usec': analysis['ndr']['stats']['overall']['max_delay_usec']
412 if self.config['pdr_run']:
413 for frame_size, analysis in traffic_result.iteritems():
414 if frame_size == 'warning':
416 summary_table.add_row([
419 analysis['pdr']['rate_bps'],
420 analysis['pdr']['rate_pps'],
421 analysis['pdr']['stats']['overall']['drop_percentage'],
422 analysis['pdr']['stats']['overall']['avg_delay_usec'],
423 analysis['pdr']['stats']['overall']['min_delay_usec'],
424 analysis['pdr']['stats']['overall']['max_delay_usec']
426 self.__record_data_put(frame_size, {'pdr': {
428 'rate_bps': analysis['pdr']['rate_bps'],
429 'rate_pps': analysis['pdr']['rate_pps'],
430 'drop_percentage': analysis['pdr']['stats']['overall']['drop_percentage'],
431 'avg_delay_usec': analysis['pdr']['stats']['overall']['avg_delay_usec'],
432 'min_delay_usec': analysis['pdr']['stats']['overall']['min_delay_usec'],
433 'max_delay_usec': analysis['pdr']['stats']['overall']['max_delay_usec']
435 if self.config['single_run']:
436 for frame_size, analysis in traffic_result.iteritems():
437 summary_table.add_row([
439 analysis['stats']['overall']['drop_rate_percent'],
440 analysis['stats']['overall']['rx']['avg_delay_usec'],
441 analysis['stats']['overall']['rx']['min_delay_usec'],
442 analysis['stats']['overall']['rx']['max_delay_usec']
444 self.__record_data_put(frame_size, {'single_run': {
445 'type': 'single_run',
446 'drop_rate_percent': analysis['stats']['overall']['drop_rate_percent'],
447 'avg_delay_usec': analysis['stats']['overall']['rx']['avg_delay_usec'],
448 'min_delay_usec': analysis['stats']['overall']['rx']['min_delay_usec'],
449 'max_delay_usec': analysis['stats']['overall']['rx']['max_delay_usec']
453 def __get_config_table(self, run_config, frame_size):
454 config_table = Table(self.config_header)
455 for key, name in zip(self.direction_keys, self.direction_names):
456 if key not in run_config:
458 config_table.add_row([
460 run_config[key]['orig']['rate_bps'],
461 run_config[key]['tx']['rate_bps'],
462 run_config[key]['rx']['rate_bps'],
463 int(run_config[key]['orig']['rate_pps']),
464 int(run_config[key]['tx']['rate_pps']),
465 int(run_config[key]['rx']['rate_pps']),
467 self.__record_data_put(frame_size, {
468 name.lower() + "_orig_rate_bps": int(run_config[key]['orig']['rate_bps']),
469 name.lower() + "_tx_rate_bps": int(run_config[key]['tx']['rate_bps']),
470 name.lower() + "_rx_rate_bps": int(run_config[key]['rx']['rate_bps']),
471 name.lower() + "_orig_rate_pps": int(run_config[key]['orig']['rate_pps']),
472 name.lower() + "_tx_rate_pps": int(run_config[key]['tx']['rate_pps']),
473 name.lower() + "_rx_rate_pps": int(run_config[key]['rx']['rate_pps']),
478 def _get_chain_table(self, chain_stats):
479 """Retrieve the table for a direction.
482 'interfaces': ['Port0', 'drop %'', 'vhost0', 'Port1'],
484 0: {'packets': [2000054, '-0.023%', 1999996, 1999996],
493 chains = chain_stats['chains']
494 _annotate_chain_stats(chains)
495 header = [('Chain', Formatter.standard)] + \
496 [(ifname, Formatter.standard) for ifname in chain_stats['interfaces']]
497 # add latency columns if available Avg, Min, Max
499 lat_map = {'lat_avg_usec': 'Avg lat.',
500 'lat_min_usec': 'Min lat.',
501 'lat_max_usec': 'Max lat.'}
502 if 'lat_avg_usec' in chains[0]:
503 lat_keys = ['lat_avg_usec', 'lat_min_usec', 'lat_max_usec']
505 header.append((lat_map[key], Formatter.standard))
507 table = Table(header)
508 for chain in sorted(chains.keys()):
509 row = [chain] + chains[chain]['packets']
510 for lat_key in lat_keys:
511 row.append('{:,} usec'.format(chains[chain][lat_key]))
515 def __record_header_put(self, key, value):
517 self.record_header[key] = value
519 def __record_data_put(self, key, data):
521 if key not in self.record_data:
522 self.record_data[key] = {}
523 self.record_data[key].update(data)
525 def __record_send(self):
527 self.record_header["@timestamp"] = datetime.utcnow().replace(
528 tzinfo=pytz.utc).strftime("%Y-%m-%dT%H:%M:%S.%f%z")
529 for frame_size in self.record_data:
530 data = self.record_header
531 data['frame_size'] = frame_size
532 data.update(self.record_data[frame_size])
533 run_specific_data = {}
534 if 'single_run' in data:
535 run_specific_data['single_run'] = data['single_run']
536 del data['single_run']
538 run_specific_data['ndr'] = data['ndr']
539 run_specific_data['ndr']['drop_limit'] = self.config['measurement']['NDR']
542 run_specific_data['pdr'] = data['pdr']
543 run_specific_data['pdr']['drop_limit'] = self.config['measurement']['PDR']
545 for key in run_specific_data:
546 data_to_send = data.copy()
547 data_to_send.update(run_specific_data[key])
548 self.sender.record_send(data_to_send)
551 def __record_init(self):
552 # init is called after checking for sender
553 self.record_header = {
554 "runlogdate": self.sender.runlogdate,
555 "user_label": self.config['user_label']
557 self.record_data = {}