# under the License.
#
-import bitmath
from contextlib import contextmanager
+from datetime import datetime
import math
-from specs import ChainType
+
+import bitmath
+import pytz
from tabulate import tabulate
+def _annotate_chain_stats(chain_stats, nodrop_marker='=>'):
+ """Transform a plain chain stats into an annotated one.
+
+ Example:
+ {
+ 0: {'packets': [2000054, 1999996, 1999996, 1999996],
+ 'lat_min_usec': 10,
+ 'lat_max_usec': 187,
+ 'lat_avg_usec': 45},
+ 1: {...},
+ 'total': {...}
+ }
+ should become:
+ {
+ 0: {'packets': [2000054, '-58 (-0.0029%)', '=>', 1999996],
+ 'lat_min_usec': 10,
+ 'lat_max_usec': 187,
+ 'lat_avg_usec': 45},
+ 1: {...},
+ 'total': {...}
+ }
+
+ In the case of a shared network, some columns in the packets array can be ''.
+ Some columns can also be None, which means the data is not available.
+ """
+ for stats in list(chain_stats.values()):
+ packets = stats['packets']
+ count = len(packets)
+ if count > 1:
+ # keep the first counter
+ annotated_packets = [packets[0]]
+ # modify all remaining counters
+ prev_count = packets[0]
+ for index in range(1, count):
+ cur_count = packets[index]
+ if cur_count == '':
+ # an empty string indicates an unknown counter for a shared interface
+ # do not annotate those
+ annotated_value = ''
+ elif cur_count is None:
+ # Not available
+ annotated_value = 'n/a'
+ else:
+ drop = cur_count - prev_count
+ if drop:
+ dr = (drop * 100.0) / prev_count if prev_count else 0
+ annotated_value = '{:+,} ({:+.4f}%)'.format(drop, dr)
+ else:
+ # no drop: display the actual count only in the last column,
+ # otherwise show the pass-through marker
+ annotated_value = cur_count if index == count - 1 else nodrop_marker
+ prev_count = cur_count
+ annotated_packets.append(annotated_value)
+
+ stats['packets'] = annotated_packets
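+
+# A minimal, hypothetical sketch of the transformation (counters invented for
+# illustration; the function mutates the stats dict in place):
+#
+#   stats = {0: {'packets': [2000054, 1999996, 1999996]}}
+#   _annotate_chain_stats(stats)
+#   assert stats[0]['packets'] == [2000054, '-58 (-0.0029%)', 1999996]
+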
class Formatter(object):
- """Collection of string formatter methods"""
+ """Collection of string formatter methods."""
@staticmethod
def fixed(data):
@staticmethod
def standard(data):
- if type(data) == int:
+ if isinstance(data, int):
return Formatter.int(data)
- elif type(data) == float:
+ if isinstance(data, float):
return Formatter.float(4)(data)
- else:
- return Formatter.fixed(data)
+ return Formatter.fixed(data)
@staticmethod
def suffix(suffix_str):
bps = byte_to_bit_classes.get(bit.unit, bitmath.Bit).from_other(bit) / 8.0
if bps.unit != 'Bit':
return bps.format("{value:.4f} {unit}ps")
- else:
- return bps.format("{value:.4f} bps")
+ return bps.format("{value:.4f} bps")
@staticmethod
def percentage(data):
if data is None:
return ''
- elif math.isnan(data):
+ if math.isnan(data):
return '-'
- else:
- return Formatter.suffix('%')(Formatter.float(4)(data))
+ return Formatter.suffix('%')(Formatter.float(4)(data))
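+
+# Illustrative composition of the formatters above (values hypothetical;
+# assumes Formatter.float(4) renders four decimal places):
+#
+#   Formatter.percentage(12.345678)     # -> '12.3457%'
+#   Formatter.percentage(None)          # -> ''
+#   Formatter.percentage(float('nan'))  # -> '-'
+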
class Table(object):
- """ASCII readable table class"""
+ """ASCII readable table class."""
def __init__(self, header):
- header_row, self.formatters = zip(*header)
+ header_row, self.formatters = list(zip(*header))
self.data = [header_row]
self.columns = len(header_row)
def add_row(self, row):
- assert(self.columns == len(row))
+ assert self.columns == len(row)
formatted_row = []
for entry, formatter in zip(row, self.formatters):
formatted_row.append(formatter(entry))
class Summarizer(object):
- """Generic summarizer class"""
+ """Generic summarizer class."""
indent_per_level = 2
self.marker_stack.append(marker)
def __unindent(self):
- assert(self.indent_size >= self.indent_per_level)
+ assert self.indent_size >= self.indent_per_level
self.indent_size -= self.indent_per_level
self.marker_stack.pop()
def _put(self, *args):
self.str += self.__get_indent_string()
- if len(args) and type(args[-1]) == dict:
+ if args and isinstance(args[-1], dict):
self.str += ' '.join(map(str, args[:-1])) + '\n'
self._put_dict(args[-1])
else:
def _put_dict(self, data):
with self._create_block(False):
- for key, value in data.iteritems():
- if type(value) == dict:
+ for key, value in list(data.items()):
+ if isinstance(value, dict):
self._put(key + ':')
self._put_dict(value)
else:
class NFVBenchSummarizer(Summarizer):
- """Summarize nfvbench json result"""
+ """Summarize nfvbench json result."""
ndr_pdr_header = [
('-', Formatter.fixed),
('RX Rate (pps)', Formatter.suffix(' pps'))
]
- chain_analysis_header = [
- ('Interface', Formatter.standard),
- ('Device', Formatter.standard),
- ('Packets (fwd)', Formatter.standard),
- ('Drops (fwd)', Formatter.standard),
- ('Drop% (fwd)', Formatter.percentage),
- ('Packets (rev)', Formatter.standard),
- ('Drops (rev)', Formatter.standard),
- ('Drop% (rev)', Formatter.percentage)
- ]
-
direction_keys = ['direction-forward', 'direction-reverse', 'direction-total']
direction_names = ['Forward', 'Reverse', 'Total']
- def __init__(self, result):
+ def __init__(self, result, sender):
+ """Create a summarizer instance."""
Summarizer.__init__(self)
self.result = result
self.config = self.result['config']
+ self.record_header = None
+ self.record_data = None
+ self.sender = sender
+
+ # add percentile headers if hdrh is enabled
+ if not self.config.disable_hdrh:
+ # copy the class-level headers so repeated instances do not append twice
+ self.ndr_pdr_header = list(self.ndr_pdr_header)
+ self.single_run_header = list(self.single_run_header)
+ for percentile in self.config.lat_percentiles:
+ # 'append' expects a single parameter => double parentheses
+ self.ndr_pdr_header.append((str(percentile) + ' %ile lat.', Formatter.standard))
+ self.single_run_header.append((str(percentile) + ' %ile lat.', Formatter.standard))
+ # if sender is available initialize record
+ if self.sender:
+ self.__record_init()
self.__summarize()
+ def __get_openstack_spec(self, spec_property):
+ try:
+ return self.result['openstack_spec'][spec_property]
+ except KeyError:
+ return ''
+
def __summarize(self):
self._put()
self._put('========== NFVBench Summary ==========')
self._put('Date:', self.result['date'])
self._put('NFVBench version', self.result['nfvbench_version'])
self._put('Openstack Neutron:', {
- 'vSwitch': self.result['openstack_spec']['vswitch'],
- 'Encapsulation': self.result['openstack_spec']['encaps']
+ 'vSwitch': self.__get_openstack_spec('vswitch'),
+ 'Encapsulation': self.__get_openstack_spec('encaps')
})
+ self.__record_header_put('version', self.result['nfvbench_version'])
+ self.__record_header_put('vSwitch', self.__get_openstack_spec('vswitch'))
+ self.__record_header_put('Encapsulation', self.__get_openstack_spec('encaps'))
self._put('Benchmarks:')
with self._create_block():
self._put('Networks:')
self._put('Components:')
with self._create_block():
- self._put('TOR:')
- with self._create_block(False):
- self._put('Type:', self.config['tor']['type'])
self._put('Traffic Generator:')
with self._create_block(False):
- self._put('Profile:', self.config['generator_config']['name'])
- self._put('Tool:', self.config['generator_config']['tool'])
+ self._put('Profile:', self.config['tg-name'])
+ self._put('Tool:', self.config['tg-tool'])
if network_benchmark['versions']:
self._put('Versions:')
with self._create_block():
- for component, version in network_benchmark['versions'].iteritems():
+ for component, version in list(network_benchmark['versions'].items()):
self._put(component + ':', version)
if self.config['ndr_run'] or self.config['pdr_run']:
self._put('NDR:', self.config['measurement']['NDR'])
if self.config['pdr_run']:
self._put('PDR:', self.config['measurement']['PDR'])
-
self._put('Service chain:')
- for result in network_benchmark['service_chain'].iteritems():
+ for result in list(network_benchmark['service_chain'].items()):
with self._create_block():
self.__chain_summarize(*result)
def __chain_summarize(self, chain_name, chain_benchmark):
self._put(chain_name + ':')
- if chain_name == ChainType.PVVP:
- self._put('Mode:', chain_benchmark.get('mode'))
+ self.__record_header_put('service_chain', chain_name)
with self._create_block():
self._put('Traffic:')
with self._create_block(False):
self._put('Bidirectional:', traffic_benchmark['bidirectional'])
self._put('Flow count:', traffic_benchmark['flow_count'])
self._put('Service chains count:', traffic_benchmark['service_chain_count'])
- self._put('Compute nodes:', traffic_benchmark['compute_nodes'].keys())
+ self._put('Compute nodes:', list(traffic_benchmark['compute_nodes'].keys()))
+
+ self.__record_header_put('profile', traffic_benchmark['profile'])
+ self.__record_header_put('bidirectional', traffic_benchmark['bidirectional'])
+ self.__record_header_put('flow_count', traffic_benchmark['flow_count'])
+ self.__record_header_put('sc_count', traffic_benchmark['service_chain_count'])
+ self.__record_header_put('compute_nodes', list(traffic_benchmark['compute_nodes'].keys()))
with self._create_block(False):
self._put()
if not self.config['no_traffic']:
except KeyError:
pass
- for entry in traffic_benchmark['result'].iteritems():
+ for entry in list(traffic_benchmark['result'].items()):
if 'warning' in entry:
continue
self.__chain_analysis_summarize(*entry)
+ self.__record_send()
def __chain_analysis_summarize(self, frame_size, analysis):
self._put()
self._put('L2 frame size:', frame_size)
- if 'analysis_duration_sec' in analysis:
- self._put('Chain analysis duration:',
- Formatter.float(3)(analysis['analysis_duration_sec']), 'seconds')
if self.config['ndr_run']:
self._put('NDR search duration:', Formatter.float(0)(analysis['ndr']['time_taken_sec']),
'seconds')
+ self.__record_data_put(frame_size, {'ndr_search_duration': Formatter.float(0)(
+ analysis['ndr']['time_taken_sec'])})
if self.config['pdr_run']:
self._put('PDR search duration:', Formatter.float(0)(analysis['pdr']['time_taken_sec']),
'seconds')
+ self.__record_data_put(frame_size, {'pdr_search_duration': Formatter.float(0)(
+ analysis['pdr']['time_taken_sec'])})
self._put()
if not self.config['no_traffic'] and self.config['single_run']:
self._put('Run Config:')
self._put()
with self._create_block(False):
- self._put_table(self.__get_config_table(analysis['run_config']))
+ self._put_table(self.__get_config_table(analysis['run_config'], frame_size))
if 'warning' in analysis['run_config'] and analysis['run_config']['warning']:
self._put()
self._put(analysis['run_config']['warning'])
self._put()
- if 'packet_analysis' in analysis:
- self._put('Chain Analysis:')
- self._put()
- with self._create_block(False):
- self._put_table(self.__get_chain_analysis_table(analysis['packet_analysis']))
+ if 'packet_path_stats' in analysis:
+ for direction in ['Forward', 'Reverse']:
+ self._put(direction + ' Chain Packet Counters and Latency:')
self._put()
+ with self._create_block(False):
+ self._put_table(self._get_chain_table(analysis['packet_path_stats'][direction]))
+ self._put()
def __get_summary_table(self, traffic_result):
if self.config['single_run']:
summary_table = Table(self.ndr_pdr_header)
if self.config['ndr_run']:
- for frame_size, analysis in traffic_result.iteritems():
+ for frame_size, analysis in list(traffic_result.items()):
if frame_size == 'warning':
continue
- summary_table.add_row([
+
+ row_data = [
'NDR',
frame_size,
analysis['ndr']['rate_bps'],
- int(analysis['ndr']['rate_pps']),
+ analysis['ndr']['rate_pps'],
analysis['ndr']['stats']['overall']['drop_percentage'],
analysis['ndr']['stats']['overall']['avg_delay_usec'],
analysis['ndr']['stats']['overall']['min_delay_usec'],
analysis['ndr']['stats']['overall']['max_delay_usec']
- ])
+ ]
+ if not self.config.disable_hdrh:
+ self.extract_hdrh_percentiles(
+ analysis['ndr']['stats']['overall']['lat_percentile'], row_data)
+ summary_table.add_row(row_data)
+
+ ndr_data = {
+ 'type': 'NDR',
+ 'rate_bps': analysis['ndr']['rate_bps'],
+ 'rate_pps': analysis['ndr']['rate_pps'],
+ 'offered_tx_rate_bps': analysis['ndr']['stats']['offered_tx_rate_bps'],
+ 'theoretical_tx_rate_pps': analysis['ndr']['stats']['theoretical_tx_rate_pps'],
+ 'theoretical_tx_rate_bps': analysis['ndr']['stats']['theoretical_tx_rate_bps'],
+ 'drop_percentage': analysis['ndr']['stats']['overall']['drop_percentage'],
+ 'avg_delay_usec': analysis['ndr']['stats']['overall']['avg_delay_usec'],
+ 'min_delay_usec': analysis['ndr']['stats']['overall']['min_delay_usec'],
+ 'max_delay_usec': analysis['ndr']['stats']['overall']['max_delay_usec']
+ }
+ if not self.config.disable_hdrh:
+ self.extract_hdrh_percentiles(
+ analysis['ndr']['stats']['overall']['lat_percentile'], ndr_data, True)
+ self.__record_data_put(frame_size, {'ndr': ndr_data})
if self.config['pdr_run']:
- for frame_size, analysis in traffic_result.iteritems():
+ for frame_size, analysis in list(traffic_result.items()):
if frame_size == 'warning':
continue
- summary_table.add_row([
+
+ row_data = [
'PDR',
frame_size,
analysis['pdr']['rate_bps'],
- int(analysis['pdr']['rate_pps']),
+ analysis['pdr']['rate_pps'],
analysis['pdr']['stats']['overall']['drop_percentage'],
analysis['pdr']['stats']['overall']['avg_delay_usec'],
analysis['pdr']['stats']['overall']['min_delay_usec'],
analysis['pdr']['stats']['overall']['max_delay_usec']
- ])
+ ]
+ if not self.config.disable_hdrh:
+ self.extract_hdrh_percentiles(
+ analysis['pdr']['stats']['overall']['lat_percentile'], row_data)
+ summary_table.add_row(row_data)
+
+ pdr_data = {
+ 'type': 'PDR',
+ 'rate_bps': analysis['pdr']['rate_bps'],
+ 'rate_pps': analysis['pdr']['rate_pps'],
+ 'offered_tx_rate_bps': analysis['pdr']['stats']['offered_tx_rate_bps'],
+ 'theoretical_tx_rate_pps': analysis['pdr']['stats']['theoretical_tx_rate_pps'],
+ 'theoretical_tx_rate_bps': analysis['pdr']['stats']['theoretical_tx_rate_bps'],
+ 'drop_percentage': analysis['pdr']['stats']['overall']['drop_percentage'],
+ 'avg_delay_usec': analysis['pdr']['stats']['overall']['avg_delay_usec'],
+ 'min_delay_usec': analysis['pdr']['stats']['overall']['min_delay_usec'],
+ 'max_delay_usec': analysis['pdr']['stats']['overall']['max_delay_usec']
+ }
+ if not self.config.disable_hdrh:
+ self.extract_hdrh_percentiles(
+ analysis['pdr']['stats']['overall']['lat_percentile'], pdr_data, True)
+ self.__record_data_put(frame_size, {'pdr': pdr_data})
if self.config['single_run']:
- for frame_size, analysis in traffic_result.iteritems():
- summary_table.add_row([
+ for frame_size, analysis in list(traffic_result.items()):
+ row_data = [
frame_size,
analysis['stats']['overall']['drop_rate_percent'],
analysis['stats']['overall']['rx']['avg_delay_usec'],
analysis['stats']['overall']['rx']['min_delay_usec'],
analysis['stats']['overall']['rx']['max_delay_usec']
- ])
+ ]
+ if not self.config.disable_hdrh:
+ self.extract_hdrh_percentiles(
+ analysis['stats']['overall']['rx']['lat_percentile'], row_data)
+ summary_table.add_row(row_data)
+
+ single_run_data = {
+ 'type': 'single_run',
+ 'offered_tx_rate_bps': analysis['stats']['offered_tx_rate_bps'],
+ 'theoretical_tx_rate_pps': analysis['stats']['theoretical_tx_rate_pps'],
+ 'theoretical_tx_rate_bps': analysis['stats']['theoretical_tx_rate_bps'],
+ 'drop_rate_percent': analysis['stats']['overall']['drop_rate_percent'],
+ 'avg_delay_usec': analysis['stats']['overall']['rx']['avg_delay_usec'],
+ 'min_delay_usec': analysis['stats']['overall']['rx']['min_delay_usec'],
+ 'max_delay_usec': analysis['stats']['overall']['rx']['max_delay_usec']
+ }
+ if not self.config.disable_hdrh:
+ self.extract_hdrh_percentiles(
+ analysis['stats']['overall']['rx']['lat_percentile'], single_run_data, True)
+ self.__record_data_put(frame_size, {'single_run': single_run_data})
return summary_table
- def __get_config_table(self, run_config):
+ def extract_hdrh_percentiles(self, lat_percentile, data, add_key=False):
+ """Extract the configured latency percentiles from an HDRH result.
+
+ Appends one value per percentile to data when it is a row list, or
+ stores them under flat 'lat_percentile_<n>' keys when data is a dict
+ (add_key=True).
+ """
+ for percentile in self.config.lat_percentiles:
+ if add_key:
+ data['lat_percentile_' + str(percentile)] = lat_percentile[percentile]
+ else:
+ data.append(lat_percentile[percentile])
+
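+ # A hypothetical sketch of both extract_hdrh_percentiles modes, assuming
+ # config.lat_percentiles == [50, 99] (latency values invented):
+ #
+ #   row = ['NDR', 64]
+ #   self.extract_hdrh_percentiles({50: 35, 99: 180}, row)
+ #   # row is now ['NDR', 64, 35, 180]
+ #
+ #   rec = {}
+ #   self.extract_hdrh_percentiles({50: 35, 99: 180}, rec, add_key=True)
+ #   # rec is now {'lat_percentile_50': 35, 'lat_percentile_99': 180}
+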
+ def __get_config_table(self, run_config, frame_size):
config_table = Table(self.config_header)
for key, name in zip(self.direction_keys, self.direction_names):
if key not in run_config:
int(run_config[key]['tx']['rate_pps']),
int(run_config[key]['rx']['rate_pps']),
])
+ self.__record_data_put(frame_size, {
+ name.lower() + "_orig_rate_bps": int(run_config[key]['orig']['rate_bps']),
+ name.lower() + "_tx_rate_bps": int(run_config[key]['tx']['rate_bps']),
+ name.lower() + "_rx_rate_bps": int(run_config[key]['rx']['rate_bps']),
+ name.lower() + "_orig_rate_pps": int(run_config[key]['orig']['rate_pps']),
+ name.lower() + "_tx_rate_pps": int(run_config[key]['tx']['rate_pps']),
+ name.lower() + "_rx_rate_pps": int(run_config[key]['rx']['rate_pps']),
+ })
return config_table
- def __get_chain_analysis_table(self, packet_analysis):
- chain_analysis_table = Table(self.chain_analysis_header)
- forward_analysis = packet_analysis['direction-forward']
- reverse_analysis = packet_analysis['direction-reverse']
- reverse_analysis.reverse()
-
- for fwd, rev in zip(forward_analysis, reverse_analysis):
- chain_analysis_table.add_row([
- fwd['interface'],
- fwd['device'],
- fwd['packet_count'],
- fwd.get('packet_drop_count', None),
- fwd.get('packet_drop_percentage', None),
- rev['packet_count'],
- rev.get('packet_drop_count', None),
- rev.get('packet_drop_percentage', None),
- ])
- return chain_analysis_table
+ def _get_chain_table(self, chain_stats):
+ """Retrieve the table for a direction.
+
+ chain_stats: {
+ 'interfaces': ['Port0', 'drop %', 'vhost0', 'Port1'],
+ 'chains': {
+ '0': {'packets': [2000054, '-0.023%', 1999996, 1999996],
+ 'lat_min_usec': 10,
+ 'lat_max_usec': 187,
+ 'lat_avg_usec': 45},
+ '1': {...},
+ 'total': {...}
+ }
+ }
+ """
+ chains = chain_stats['chains']
+ _annotate_chain_stats(chains)
+ header = [('Chain', Formatter.standard)] + \
+ [(ifname, Formatter.standard) for ifname in chain_stats['interfaces']]
+ # add latency columns if available: Avg, Min, Max and configured percentiles
+ lat_keys = []
+ lat_map = {'lat_avg_usec': 'Avg lat.',
+ 'lat_min_usec': 'Min lat.',
+ 'lat_max_usec': 'Max lat.'}
+ if 'lat_avg_usec' in chains['0']:
+ lat_keys = ['lat_avg_usec', 'lat_min_usec', 'lat_max_usec']
+
+ if not self.config.disable_hdrh:
+ lat_keys.append('lat_percentile')
+ for percentile in self.config.lat_percentiles:
+ lat_map['lat_' + str(percentile) + '_percentile'] = \
+ str(percentile) + ' %ile lat.'
+
+ for key in lat_map:
+ # 'append' expects a single parameter => double parentheses
+ header.append((lat_map[key], Formatter.standard))
+
+ table = Table(header)
+ for chain in sorted(list(chains.keys()), key=str):
+ row = [chain] + chains[chain]['packets']
+ for lat_key in lat_keys:
+ if lat_key != 'lat_percentile':
+ if chains[chain].get(lat_key) is not None:
+ row.append(Formatter.standard(chains[chain][lat_key]))
+ else:
+ row.append('n/a')
+ else:
+ if not self.config.disable_hdrh:
+ if chains[chain].get(lat_key, None):
+ for percentile in chains[chain][lat_key]:
+ row.append(Formatter.standard(
+ chains[chain][lat_key][percentile]))
+ else:
+ for _ in self.config.lat_percentiles:
+ row.append('n/a')
+ table.add_row(row)
+ return table
+
+ def __record_header_put(self, key, value):
+ if self.sender:
+ self.record_header[key] = value
+
+ def __record_data_put(self, key, data):
+ if self.sender:
+ if key not in self.record_data:
+ self.record_data[key] = {}
+ self.record_data[key].update(data)
+
+ def __record_send(self):
+ if self.sender:
+ self.record_header["@timestamp"] = datetime.utcnow().replace(
+ tzinfo=pytz.utc).strftime("%Y-%m-%dT%H:%M:%S.%f%z")
+ for frame_size in self.record_data:
+ # copy the header so per-frame-size keys do not accumulate across iterations
+ data = self.record_header.copy()
+ data['frame_size'] = frame_size
+ data.update(self.record_data[frame_size])
+ run_specific_data = {}
+ if 'single_run' in data:
+ run_specific_data['single_run'] = data['single_run']
+ del data['single_run']
+ if 'ndr' in data:
+ run_specific_data['ndr'] = data['ndr']
+ run_specific_data['ndr']['drop_limit'] = self.config['measurement']['NDR']
+ del data['ndr']
+ if 'pdr' in data:
+ run_specific_data['pdr'] = data['pdr']
+ run_specific_data['pdr']['drop_limit'] = self.config['measurement']['PDR']
+ del data['pdr']
+ for key in run_specific_data:
+ data_to_send = data.copy()
+ data_to_send.update(run_specific_data[key])
+ self.sender.record_send(data_to_send)
+ self.__record_init()
+
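+ # Sketch of the fan-out above (hypothetical data): a frame size that ran both
+ # NDR and PDR yields two calls to sender.record_send(), each combining the
+ # shared header fields with one flattened run-specific dict:
+ #
+ #   record_data = {64: {'ndr': {...}, 'pdr': {...}}}
+ #   # -> one record with the 'ndr' fields (+ NDR drop_limit),
+ #   #    one record with the 'pdr' fields (+ PDR drop_limit)
+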
+ def __record_init(self):
+ # init is called after checking for sender
+ self.record_header = {
+ "runlogdate": self.sender.runlogdate,
+ "user_label": self.config['user_label']
+ }
+ self.record_data = {}