4 ## Copyright (c) 2020 Intel Corporation
6 ## Licensed under the Apache License, Version 2.0 (the "License");
7 ## you may not use this file except in compliance with the License.
8 ## You may obtain a copy of the License at
11 ## http://www.apache.org/licenses/LICENSE-2.0
13 ## Unless required by applicable law or agreed to in writing, software
14 ## distributed under the License is distributed on an "AS IS" BASIS,
15 ## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 ## See the License for the specific language governing permissions and
17 ## limitations under the License.
25 from past.utils import old_div
26 from rapid_log import RapidLog
27 from rapid_log import bcolors
29 from datetime import datetime as dt
# Absolute directory containing this module; used by __init__ to locate
# the adjacent 'format.yaml' data-format description.
31 _CURR_DIR = os.path.dirname(os.path.realpath(__file__))
# Test-run manager: holds the test parameters dict and the result data
# format loaded from format.yaml.
# NOTE(review): this chunk is a line-numbered listing with elided lines
# (the docstring quotes around original line 35 and the 'maxr' default
# assignment after line 42 are missing); all tokens below are kept
# byte-identical to the damaged source, only comments are added.
33 class RapidTest(object):
35 Class to manage the testing
37 def __init__(self, test_param, runtime, testname, environment_file ):
38 self.test = test_param
39 self.test['runtime'] = runtime
40 self.test['testname'] = testname
41 self.test['environment_file'] = environment_file
# Body of this 'if' (presumably a default value for 'maxr') is elided here.
42 if 'maxr' not in self.test.keys():
# Default 'maxz' to inf: no upper bound on dropped packets --
# TODO confirm where 'inf' is imported (likely math.inf via elided import).
44 if 'maxz' not in self.test.keys():
45 self.test['maxz'] = inf
# Load the result/reporting layout that post_data() deep-copies per call.
46 with open(os.path.join(_CURR_DIR,'format.yaml')) as f:
47 self.data_format = yaml.load(f, Loader=yaml.FullLoader)
def get_percentageof10Gbps(pps_speed,size):
    """Convert a packet rate in pps into a percentage of 10 Gb/s line rate.

    Every packet occupies an extra 20 bytes on the wire: 12 bytes
    inter-packet gap, 7 bytes preamble and 1 byte start-of-frame
    delimiter (SFD).
    """
    mpps = pps_speed / 1000000.0
    wire_bytes = size + 20
    # 0.08 folds together 8 bits/byte, the 10 Gb/s reference rate,
    # the Mpps scaling and the conversion to a percentage.
    return mpps * 0.08 * wire_bytes
def get_pps(speed,size):
    """Convert a speed expressed as % of 10 Gb/s into Mpps for this size.

    Per-packet wire overhead is 20 bytes: 12 B inter-packet gap,
    7 B preamble and 1 B start-of-frame delimiter.
    """
    wire_bits = 8 * (size + 20)
    # speed% of 10 Gb/s equals speed * 100 Mbit/s; dividing by the
    # bits per packet yields Mpps.
    return speed * 100.0 / wire_bits
def get_speed(packet_speed,size):
    """Convert a packet rate in Mpps into Gb/s on the wire.

    Per-packet wire overhead is 20 bytes: 12 B inter-packet gap,
    7 B preamble and 1 B start-of-frame delimiter.
    """
    wire_bits = 8 * (size + 20)
    return (packet_speed / 1000.0) * wire_bits
def set_background_flows(background_machines, number_of_flows):
    """Configure each background generator with the given flow count.

    The value returned by each machine's set_flows() call is discarded.
    """
    for generator in background_machines:
        generator.set_flows(number_of_flows)
def set_background_speed(background_machines, speed):
    """Apply one generator speed (in % of line rate) to every background machine."""
    for generator in background_machines:
        generator.set_generator_speed(speed)
def set_background_size(background_machines, imix):
    """Set the UDP packet size(s) on every background machine.

    imix is a list of packet sizes (an IMIX definition).
    """
    for generator in background_machines:
        generator.set_udp_packet_size(imix)
# Start traffic on every background machine.
# NOTE(review): the loop body is elided in this listing (presumably a
# machine.start() call) -- tokens kept byte-identical to the damaged source.
93 def start_background_traffic(background_machines):
94 for machine in background_machines:
# Stop traffic on every background machine.
# NOTE(review): the loop body is elided in this listing (presumably a
# machine.stop() call) -- tokens kept byte-identical to the damaged source.
98 def stop_background_traffic(background_machines):
99 for machine in background_machines:
# Recursively walk the data_format tree, replacing leaf values that name a
# key in 'variables' with that variable's value (in place).
# NOTE(review): the guard lines between the recursion and the substitution
# (presumably 'if type(v) == dict:' / 'else:') are elided in this listing;
# tokens are kept byte-identical to the damaged source.
103 def parse_data_format_dict(data_format, variables):
104 for k, v in data_format.items():
106 RapidTest.parse_data_format_dict(v, variables)
108 if v in variables.keys():
109 data_format[k] = variables[v]
# Push one set of results to each configured URL, in either Prometheus
# PushGateway text format or Xtesting JSON format, as selected by the
# 'Format' entry of the (variable-substituted) data_format template.
# Returns the substituted result dict for this test type.
# NOTE(review): the line binding URL from 'value' (presumably 'URL = value')
# is elided in this listing; tokens kept byte-identical to the damaged source.
111 def post_data(self, variables):
112 test_type = type(self).__name__
# Work on a deep copy so the template in self.data_format stays pristine.
113 var = copy.deepcopy(self.data_format)
114 self.parse_data_format_dict(var, variables)
# Only post when the template provides all three required top-level keys.
115 if var.keys() >= {'URL', test_type, 'Format'}:
117 for value in var['URL'].values():
119 HEADERS = {'X-Requested-With': 'Python requests', 'Content-type': 'application/rapid'}
120 if var['Format'] == 'PushGateway':
# PushGateway expects newline-separated "metric value" pairs.
121 data = "\n".join("{} {}".format(k, v) for k, v in var[test_type].items()) + "\n"
122 response = requests.post(url=URL, data=data,headers=HEADERS)
123 elif var['Format'] == 'Xtesting':
124 data = var[test_type]
125 response = requests.post(url=URL, json=data)
# Posting failures are logged but deliberately not raised (best effort).
126 if (response.status_code >= 300):
127 RapidLog.info('Cannot send metrics to {}'.format(URL))
129 return (var[test_type])
# Format one result row (flows, speed, pps counters, latency, drops,
# elapsed time) as a single '|'-separated table line for logging.
# 'prefix' carries per-column colour/prefix strings; bcolors supplies
# ANSI colour codes.
# NOTE(review): this listing has elided lines -- in particular the 'else:'
# branches between each NA-check and its formatted alternative; tokens are
# kept byte-identical to the damaged source, comments only are added.
132 def report_result(flow_number, size, data, prefix):
# Negative flow_number is presumably a sentinel rendered in parentheses --
# the 'if flow_number < 0:' guard appears elided here; TODO confirm.
134 flow_number_str = '| ({:>4}) |'.format(abs(flow_number))
136 flow_number_str = '|{:>7} |'.format(flow_number)
# Each optional metric renders ' NA' when absent (None), else a value.
137 if data['pps_req_tx'] is None:
138 pps_req_tx_str = '{0: >14}'.format(' NA |')
140 pps_req_tx_str = '{:>7.3f} Mpps |'.format(data['pps_req_tx'])
141 if data['pps_tx'] is None:
142 pps_tx_str = '{0: >14}'.format(' NA |')
144 pps_tx_str = '{:>7.3f} Mpps |'.format(data['pps_tx'])
145 if data['pps_sut_tx'] is None:
146 pps_sut_tx_str = '{0: >14}'.format(' NA |')
148 pps_sut_tx_str = '{:>7.3f} Mpps |'.format(data['pps_sut_tx'])
149 if data['pps_rx'] is None:
150 pps_rx_str = '{0: >25}'.format('NA |')
# RX is shown both as Gb/s (via get_speed) and Mpps, colourised blue.
152 pps_rx_str = bcolors.OKBLUE + '{:>4.1f} Gb/s |{:7.3f} Mpps {}|'.format(
153 RapidTest.get_speed(data['pps_rx'],size),data['pps_rx'],bcolors.ENDC)
154 if data['abs_dropped'] is None:
155 tot_drop_str = ' | NA | '
157 tot_drop_str = ' | {:>9.0f} | '.format(data['abs_dropped'])
# Percentile latency: '>' marks that the last histogram bucket was hit,
# i.e. the true percentile exceeds the measurable range.
158 if data['lat_perc'] is None:
159 lat_perc_str = '|{:^10.10}|'.format('NA')
160 elif data['lat_perc_max'] == True:
161 lat_perc_str = '|>{}{:>5.0f} us{} |'.format(prefix['lat_perc'],
162 float(data['lat_perc']), bcolors.ENDC)
164 lat_perc_str = '| {}{:>5.0f} us{} |'.format(prefix['lat_perc'],
165 float(data['lat_perc']), bcolors.ENDC)
166 if data['actual_duration'] is None:
167 elapsed_time_str = ' NA |'
169 elapsed_time_str = '{:>3.0f} |'.format(data['actual_duration'])
170 if data['mis_ordered'] is None:
171 mis_ordered_str = ' NA '
173 mis_ordered_str = '{:>9.0f} '.format(data['mis_ordered'])
# Assemble the full row; drop % is (tx-rx)/tx via old_div for py2/3 compat.
174 return(flow_number_str + '{:>5.1f}'.format(data['speed']) + '% ' + prefix['speed']
175 + '{:>6.3f}'.format(RapidTest.get_pps(data['speed'],size)) + ' Mpps|' +
176 pps_req_tx_str + pps_tx_str + bcolors.ENDC + pps_sut_tx_str +
177 pps_rx_str + prefix['lat_avg'] + ' {:>6.0f}'.format(data['lat_avg']) +
178 ' us' + lat_perc_str +prefix['lat_max']+'{:>6.0f}'.format(data['lat_max'])
179 + ' us | ' + '{:>9.0f}'.format(data['abs_tx']) + ' | {:>9.0f}'.format(data['abs_rx']) +
180 ' | '+ prefix['abs_drop_rate']+ '{:>9.0f}'.format(data['abs_tx']-data['abs_rx']) +
181 tot_drop_str + prefix['drop_rate'] +
182 '{:>5.2f}'.format(100*old_div(float(data['abs_tx']-data['abs_rx']),data['abs_tx'])) + ' |' +
183 prefix['mis_ordered'] + mis_ordered_str + bcolors.ENDC +
184 ' |' + elapsed_time_str)
# Run one measurement iteration: start latency/generator cores (optionally
# ramping the speed), sample generator/SUT/background core statistics over
# the requested duration, accumulate latency histograms and packet counters,
# then compute aggregate pps/latency/drop-rate results and return them as a
# dict. Repeats up to self.test['maxr'] attempts until the drop-rate
# criterion is met.
# NOTE(review): this listing has many elided lines (initialisation of
# iteration_data/time_loop_data, several 'else:' branches, 'break's after
# percentile scans, loop-counter setup, etc.); tokens below are kept
# byte-identical to the damaged source, comments only are added.
186 def run_iteration(self, requested_duration, flow_number, size, speed):
187 BUCKET_SIZE_EXP = self.gen_machine.bucket_size_exp
188 sleep_time = self.test['sleep_time']
189 LAT_PERCENTILE = self.test['lat_percentile']
# 'r' counts retry attempts; iteration_data's creation is elided here.
192 iteration_data['r'] = 0;
194 while (iteration_data['r'] < self.test['maxr']):
195 self.gen_machine.start_latency_cores()
196 time.sleep(sleep_time)
197 # Sleep_time is needed to be able to do accurate measurements to check for packet loss. We need to make this time large enough so that we do not take the first measurement while some packets from the previous tests migth still be in flight
# t1_*: baseline counters before traffic generation starts.
198 t1_rx, t1_non_dp_rx, t1_tx, t1_non_dp_tx, t1_drop, t1_tx_fail, t1_tsc, abs_tsc_hz = self.gen_machine.core_stats()
199 t1_dp_rx = t1_rx - t1_non_dp_rx
200 t1_dp_tx = t1_tx - t1_non_dp_tx
201 self.gen_machine.set_generator_speed(0)
202 self.gen_machine.start_gen_cores()
203 self.set_background_speed(self.background_machines, 0)
204 self.start_background_traffic(self.background_machines)
# Optional gradual ramp-up to the target speed in 'ramp_step' increments.
205 if 'ramp_step' in self.test.keys():
206 ramp_speed = self.test['ramp_step']
209 while ramp_speed < speed:
210 self.gen_machine.set_generator_speed(ramp_speed)
211 self.set_background_speed(self.background_machines, ramp_speed)
213 ramp_speed = ramp_speed + self.test['ramp_step']
214 self.gen_machine.set_generator_speed(speed)
215 self.set_background_speed(self.background_machines, speed)
216 iteration_data['speed'] = speed
217 time_loop_data['speed'] = speed
218 time.sleep(2) ## Needs to be 2 seconds since this 1 sec is the time that PROX uses to refresh the stats. Note that this can be changed in PROX!! Don't do it.
# Snapshot background-generator counters at the start of the window.
219 start_bg_gen_stats = []
220 for bg_gen_machine in self.background_machines:
221 bg_rx, bg_non_dp_rx, bg_tx, bg_non_dp_tx, _, _, bg_tsc, _ = bg_gen_machine.core_stats()
# The dict-literal opening line for bg_gen_stat is elided here.
223 "bg_dp_rx" : bg_rx - bg_non_dp_rx,
224 "bg_dp_tx" : bg_tx - bg_non_dp_tx,
227 start_bg_gen_stats.append(dict(bg_gen_stat))
# t2_*: counters at the start of the timed measurement loop.
228 if self.sut_machine!= None:
229 t2_sut_rx, t2_sut_non_dp_rx, t2_sut_tx, t2_sut_non_dp_tx, t2_sut_drop, t2_sut_tx_fail, t2_sut_tsc, sut_tsc_hz = self.sut_machine.core_stats()
230 t2_rx, t2_non_dp_rx, t2_tx, t2_non_dp_tx, t2_drop, t2_tx_fail, t2_tsc, tsc_hz = self.gen_machine.core_stats()
# 'tx' here is presumably t2_tx - t1_tx from an elided line -- TODO confirm.
232 iteration_data['abs_tx'] = tx - (t2_non_dp_tx - t1_non_dp_tx )
233 iteration_data['abs_rx'] = t2_rx - t1_rx - (t2_non_dp_rx - t1_non_dp_rx)
234 iteration_data['abs_dropped'] = iteration_data['abs_tx'] - iteration_data['abs_rx']
236 RapidLog.critical("TX = 0. Test interrupted since no packet has been sent.")
237 if iteration_data['abs_tx'] == 0:
238 RapidLog.critical("Only non-dataplane packets (e.g. ARP) sent. Test interrupted since no packet has been sent.")
239 # Ask PROX to calibrate the bucket size once we have a PROX function to do this.
240 # Measure latency statistics per second
241 iteration_data.update(self.gen_machine.lat_stats())
242 t2_lat_tsc = iteration_data['lat_tsc']
# Scan the latency histogram for the bucket holding the percentile;
# sample_count initialisation and the loop's 'break' are elided.
244 for sample_percentile, bucket in enumerate(iteration_data['buckets'],start=1):
245 sample_count += bucket
246 if sample_count > sum(iteration_data['buckets']) * LAT_PERCENTILE:
248 iteration_data['lat_perc_max'] = (sample_percentile == len(iteration_data['buckets']))
# Bucket width in microseconds, derived from TSC frequency.
249 iteration_data['bucket_size'] = float(2 ** BUCKET_SIZE_EXP) / (old_div(float(iteration_data['lat_hz']),float(10**6)))
250 time_loop_data['bucket_size'] = iteration_data['bucket_size']
251 iteration_data['lat_perc'] = sample_percentile * iteration_data['bucket_size']
# fixed_rate tests report a provisional row without rate fields.
252 if self.test['test'] == 'fixed_rate':
253 iteration_data['pps_req_tx'] = None
254 iteration_data['pps_tx'] = None
255 iteration_data['pps_sut_tx'] = None
256 iteration_data['pps_rx'] = None
257 iteration_data['lat_perc'] = None
258 iteration_data['actual_duration'] = None
259 iteration_prefix = {'speed' : '',
263 'abs_drop_rate' : '',
265 RapidLog.info(self.report_result(flow_number, size,
266 iteration_data, iteration_prefix ))
# Reset running totals before the per-second measurement loop.
267 tot_rx = tot_non_dp_rx = tot_tx = tot_non_dp_tx = tot_drop = 0
268 iteration_data['lat_avg'] = iteration_data['lat_used'] = 0
269 tot_lat_measurement_duration = float(0)
270 iteration_data['actual_duration'] = float(0)
271 tot_sut_core_measurement_duration = float(0)
272 tot_sut_rx = tot_sut_non_dp_rx = tot_sut_tx = tot_sut_non_dp_tx = tot_sut_drop = tot_sut_tx_fail = tot_sut_tsc = 0
273 lat_avail = core_avail = sut_avail = False
# Loop until both the core-stat window and the latency window cover the
# requested duration.
274 while (iteration_data['actual_duration'] - float(requested_duration) <= 0.1) or (tot_lat_measurement_duration - float(requested_duration) <= 0.1):
276 time_loop_data.update(self.gen_machine.lat_stats())
277 # Get statistics after some execution time
278 if time_loop_data['lat_tsc'] != t2_lat_tsc:
279 single_lat_measurement_duration = (time_loop_data['lat_tsc'] - t2_lat_tsc) * 1.0 / time_loop_data['lat_hz'] # time difference between the 2 measurements, expressed in seconds.
280 # A second has passed in between to lat_stats requests. Hence we need to process the results
281 tot_lat_measurement_duration = tot_lat_measurement_duration + single_lat_measurement_duration
282 if iteration_data['lat_min'] > time_loop_data['lat_min']:
283 iteration_data['lat_min'] = time_loop_data['lat_min']
284 if iteration_data['lat_max'] < time_loop_data['lat_max']:
285 iteration_data['lat_max'] = time_loop_data['lat_max']
# Duration-weighted sums; divided by total duration after the loop.
286 iteration_data['lat_avg'] = iteration_data['lat_avg'] + time_loop_data['lat_avg'] * single_lat_measurement_duration # Sometimes, There is more than 1 second between 2 lat_stats. Hence we will take the latest measurement
287 iteration_data['lat_used'] = iteration_data['lat_used'] + time_loop_data['lat_used'] * single_lat_measurement_duration # and give it more weigth.
289 for sample_percentile, bucket in enumerate(time_loop_data['buckets'],start=1):
290 sample_count += bucket
291 if sample_count > sum(time_loop_data['buckets']) * LAT_PERCENTILE:
293 time_loop_data['lat_perc_max'] = (sample_percentile == len(time_loop_data['buckets']))
294 time_loop_data['lat_perc'] = sample_percentile * iteration_data['bucket_size']
# Merge this second's histogram into the iteration histogram.
295 iteration_data['buckets'] = [iteration_data['buckets'][i] + time_loop_data['buckets'][i] for i in range(len(iteration_data['buckets']))]
296 t2_lat_tsc = time_loop_data['lat_tsc']
298 t3_rx, t3_non_dp_rx, t3_tx, t3_non_dp_tx, t3_drop, t3_tx_fail, t3_tsc, tsc_hz = self.gen_machine.core_stats()
300 time_loop_data['actual_duration'] = (t3_tsc - t2_tsc) * 1.0 / tsc_hz # time difference between the 2 measurements, expressed in seconds.
301 iteration_data['actual_duration'] = iteration_data['actual_duration'] + time_loop_data['actual_duration']
302 delta_rx = t3_rx - t2_rx
304 delta_non_dp_rx = t3_non_dp_rx - t2_non_dp_rx
305 tot_non_dp_rx += delta_non_dp_rx
306 delta_tx = t3_tx - t2_tx
308 delta_non_dp_tx = t3_non_dp_tx - t2_non_dp_tx
309 tot_non_dp_tx += delta_non_dp_tx
310 delta_dp_tx = delta_tx -delta_non_dp_tx
311 delta_dp_rx = delta_rx -delta_non_dp_rx
312 time_loop_data['abs_dropped'] = delta_dp_tx - delta_dp_rx
313 iteration_data['abs_dropped'] += time_loop_data['abs_dropped']
314 delta_drop = t3_drop - t2_drop
315 tot_drop += delta_drop
# Roll t3 snapshots into t2 for the next pass.
316 t2_rx, t2_non_dp_rx, t2_tx, t2_non_dp_tx, t2_drop, t2_tx_fail, t2_tsc = t3_rx, t3_non_dp_rx, t3_tx, t3_non_dp_tx, t3_drop, t3_tx_fail, t3_tsc
# Same per-second accounting for the SUT machine, when present.
318 if self.sut_machine!=None:
319 t3_sut_rx, t3_sut_non_dp_rx, t3_sut_tx, t3_sut_non_dp_tx, t3_sut_drop, t3_sut_tx_fail, t3_sut_tsc, sut_tsc_hz = self.sut_machine.core_stats()
320 if t3_sut_tsc != t2_sut_tsc:
321 single_sut_core_measurement_duration = (t3_sut_tsc - t2_sut_tsc) * 1.0 / sut_tsc_hz # time difference between the 2 measurements, expressed in seconds.
322 tot_sut_core_measurement_duration = tot_sut_core_measurement_duration + single_sut_core_measurement_duration
323 tot_sut_rx += t3_sut_rx - t2_sut_rx
324 tot_sut_non_dp_rx += t3_sut_non_dp_rx - t2_sut_non_dp_rx
325 delta_sut_tx = t3_sut_tx - t2_sut_tx
326 tot_sut_tx += delta_sut_tx
327 delta_sut_non_dp_tx = t3_sut_non_dp_tx - t2_sut_non_dp_tx
328 tot_sut_non_dp_tx += delta_sut_non_dp_tx
329 t2_sut_rx, t2_sut_non_dp_rx, t2_sut_tx, t2_sut_non_dp_tx, t2_sut_drop, t2_sut_tx_fail, t2_sut_tsc = t3_sut_rx, t3_sut_non_dp_rx, t3_sut_tx, t3_sut_non_dp_tx, t3_sut_drop, t3_sut_tx_fail, t3_sut_tsc
# fixed_rate tests log and post an intermediate row every second.
331 if self.test['test'] == 'fixed_rate':
332 if lat_avail == core_avail == True:
333 lat_avail = core_avail = False
334 time_loop_data['pps_req_tx'] = (delta_tx + delta_drop - delta_rx)/time_loop_data['actual_duration']/1000000
335 time_loop_data['pps_tx'] = delta_tx/time_loop_data['actual_duration']/1000000
336 if self.sut_machine != None and sut_avail:
337 time_loop_data['pps_sut_tx'] = delta_sut_tx/single_sut_core_measurement_duration/1000000
340 time_loop_data['pps_sut_tx'] = None
341 time_loop_data['pps_rx'] = delta_rx/time_loop_data['actual_duration']/1000000
342 time_loop_data['abs_tx'] = delta_dp_tx
343 time_loop_data['abs_rx'] = delta_dp_rx
344 time_loop_prefix = {'speed' : '',
348 'abs_drop_rate' : '',
350 RapidLog.info(self.report_result(flow_number, size, time_loop_data,
352 time_loop_data['test'] = self.test['testname']
353 time_loop_data['environment_file'] = self.test['environment_file']
354 time_loop_data['Flows'] = flow_number
355 time_loop_data['Size'] = size
356 time_loop_data['RequestedSpeed'] = RapidTest.get_pps(speed, size)
357 _ = self.post_data(time_loop_data)
# Snapshot background counters again to compute their average rates.
358 end_bg_gen_stats = []
359 for bg_gen_machine in self.background_machines:
360 bg_rx, bg_non_dp_rx, bg_tx, bg_non_dp_tx, _, _, bg_tsc, bg_hz = bg_gen_machine.core_stats()
361 bg_gen_stat = {"bg_dp_rx" : bg_rx - bg_non_dp_rx,
362 "bg_dp_tx" : bg_tx - bg_non_dp_tx,
366 end_bg_gen_stats.append(dict(bg_gen_stat))
367 self.stop_background_traffic(self.background_machines)
# Average background RX rate in Mpps per machine (0.000001 = pps->Mpps);
# initialisation of 'i' and 'bg_rates' is elided in this listing.
370 while i < len(end_bg_gen_stats):
371 bg_rates.append(0.000001*(end_bg_gen_stats[i]['bg_dp_rx'] -
372 start_bg_gen_stats[i]['bg_dp_rx']) / ((end_bg_gen_stats[i]['bg_tsc'] -
373 start_bg_gen_stats[i]['bg_tsc']) * 1.0 / end_bg_gen_stats[i]['bg_hz']))
376 iteration_data['avg_bg_rate'] = sum(bg_rates) / len(bg_rates)
377 RapidLog.debug('Average Background traffic rate: {:>7.3f} Mpps'.format(iteration_data['avg_bg_rate']))
379 iteration_data['avg_bg_rate'] = None
381 self.gen_machine.stop_gen_cores()
383 self.gen_machine.stop_latency_cores()
384 iteration_data['r'] += 1
# Convert duration-weighted sums into averages.
385 iteration_data['lat_avg'] = old_div(iteration_data['lat_avg'], float(tot_lat_measurement_duration))
386 iteration_data['lat_used'] = old_div(iteration_data['lat_used'], float(tot_lat_measurement_duration))
# Busy-wait for one fresh core_stats sample after the generators stopped;
# t4_tsc's initialisation to t2_tsc is elided.
388 while t4_tsc == t2_tsc:
389 t4_rx, t4_non_dp_rx, t4_tx, t4_non_dp_tx, t4_drop, t4_tx_fail, t4_tsc, abs_tsc_hz = self.gen_machine.core_stats()
# fixed_rate: take one final latency sample and compute final counters.
390 if self.test['test'] == 'fixed_rate':
391 iteration_data['lat_tsc'] = t2_lat_tsc
392 while iteration_data['lat_tsc'] == t2_lat_tsc:
393 iteration_data.update(self.gen_machine.lat_stats())
395 for percentile, bucket in enumerate(iteration_data['buckets'],start=1):
396 sample_count += bucket
397 if sample_count > sum(iteration_data['buckets']) * LAT_PERCENTILE:
399 iteration_data['lat_perc_max'] = (percentile == len(iteration_data['buckets']))
400 iteration_data['lat_perc'] = percentile * iteration_data['bucket_size']
401 delta_rx = t4_rx - t2_rx
402 delta_non_dp_rx = t4_non_dp_rx - t2_non_dp_rx
403 delta_tx = t4_tx - t2_tx
404 delta_non_dp_tx = t4_non_dp_tx - t2_non_dp_tx
405 delta_dp_tx = delta_tx -delta_non_dp_tx
406 delta_dp_rx = delta_rx -delta_non_dp_rx
407 iteration_data['abs_tx'] = delta_dp_tx
408 iteration_data['abs_rx'] = delta_dp_rx
409 iteration_data['abs_dropped'] += delta_dp_tx - delta_dp_rx
410 iteration_data['pps_req_tx'] = None
411 iteration_data['pps_tx'] = None
412 iteration_data['pps_sut_tx'] = None
413 iteration_data['drop_rate'] = 100.0*(iteration_data['abs_tx']-iteration_data['abs_rx'])/iteration_data['abs_tx']
414 iteration_data['actual_duration'] = None
415 break ## Not really needed since the while loop will stop when evaluating the value of r
# Non-fixed-rate path: final percentile scan and aggregate rates.
418 for percentile, bucket in enumerate(iteration_data['buckets'],start=1):
419 sample_count += bucket
420 if sample_count > sum(iteration_data['buckets']) * LAT_PERCENTILE:
422 iteration_data['lat_perc_max'] = (percentile == len(iteration_data['buckets']))
423 iteration_data['lat_perc'] = percentile * iteration_data['bucket_size']
424 iteration_data['pps_req_tx'] = (tot_tx + tot_drop - tot_rx)/iteration_data['actual_duration']/1000000.0 # tot_drop is all packets dropped by all tasks. This includes packets dropped at the generator task + packets dropped by the nop task. In steady state, this equals to the number of packets received by this VM
425 iteration_data['pps_tx'] = tot_tx/iteration_data['actual_duration']/1000000.0 # tot_tx is all generated packets actually accepted by the interface
426 iteration_data['pps_rx'] = tot_rx/iteration_data['actual_duration']/1000000.0 # tot_rx is all packets received by the nop task = all packets received in the gen VM
427 if self.sut_machine != None and sut_avail:
428 iteration_data['pps_sut_tx'] = tot_sut_tx / tot_sut_core_measurement_duration / 1000000.0
430 iteration_data['pps_sut_tx'] = None
431 iteration_data['abs_tx'] = (t4_tx - t1_tx) - (t4_non_dp_tx - t1_non_dp_tx)
432 iteration_data['abs_rx'] = (t4_rx - t1_rx) - (t4_non_dp_rx - t1_non_dp_rx)
433 iteration_data['abs_dropped'] = iteration_data['abs_tx'] - iteration_data['abs_rx']
434 iteration_data['drop_rate'] = 100.0*iteration_data['abs_dropped']/iteration_data['abs_tx']
# Success criterion: drop rate under threshold (or exact-zero allowance,
# or exceeding maxz) ends the retry loop -- the loop's 'break' is elided.
435 if ((iteration_data['drop_rate'] < self.test['drop_rate_threshold']) or (iteration_data['abs_dropped'] == self.test['drop_rate_threshold'] ==0) or (iteration_data['abs_dropped'] > self.test['maxz'])):
437 self.gen_machine.stop_latency_cores()
438 iteration_data['abs_tx_fail'] = t4_tx_fail - t1_tx_fail
439 return (iteration_data)