4 ## Copyright (c) 2020 Intel Corporation
6 ## Licensed under the Apache License, Version 2.0 (the "License");
7 ## you may not use this file except in compliance with the License.
8 ## You may obtain a copy of the License at
11 ## http://www.apache.org/licenses/LICENSE-2.0
13 ## Unless required by applicable law or agreed to in writing, software
14 ## distributed under the License is distributed on an "AS IS" BASIS,
15 ## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 ## See the License for the specific language governing permissions and
17 ## limitations under the License.
25 from past.utils import old_div
26 from rapid_log import RapidLog
27 from rapid_log import bcolors
29 from datetime import datetime as dt
31 _CURR_DIR = os.path.dirname(os.path.realpath(__file__))
# NOTE(review): this chunk is a damaged paste -- each line is prefixed with
# its original source line number and indentation was stripped. Gaps in the
# numbering are lines missing entirely (e.g. 34/36 were probably the '''
# docstring delimiters, 43 presumably set a default for self.test['maxr'] --
# TODO confirm against upstream). Code left byte-identical; comments only.
33 class RapidTest(object):
35 Class to manage the testing
# __init__: record the run parameters inside the self.test dict and load
# the result-posting format description from format.yaml next to this module.
37 def __init__(self, test_param, runtime, testname, environment_file ):
38 self.test = test_param
39 self.test['runtime'] = runtime
40 self.test['testname'] = testname
41 self.test['environment_file'] = environment_file
# Default the max-retry count when the test config omits 'maxr'
# (the assignment itself, original line 43, is missing from this paste).
42 if 'maxr' not in self.test.keys():
# Default the max-allowed-dropped-packets bound to infinity when absent;
# 'inf' presumably comes from a 'from math import inf' style import in the
# missing import block -- verify.
44 if 'maxz' not in self.test.keys():
45 self.test['maxz'] = inf
# format.yaml describes how results are mapped/posted (see post_data).
46 with open(os.path.join(_CURR_DIR,'format.yaml')) as f:
47 self.data_format = yaml.load(f, Loader=yaml.FullLoader)
def get_percentageof10Gbps(pps_speed, size):
    """Convert a packet rate in pps to a percentage of 10 Gb/s line rate.

    Args:
        pps_speed: packet rate, in packets per second.
        size: frame size in bytes, excluding per-packet wire overhead.

    Returns:
        float: the percentage of a 10 Gb/s link this rate occupies.
    """
    # Per-packet wire overhead is 20 bytes:
    #   12 bytes inter-packet gap + 7 bytes pre-amble + 1 byte SFD.
    # pps/1e6 * 0.08 * (size+20) == pps*(size+20)*8 bits / 10**10 * 100%.
    return (pps_speed / 1000000.0 * 0.08 * (size + 20))
def get_pps(speed, size):
    """Convert a speed in % of 10 Gb/s line rate to a packet rate in Mpps.

    Args:
        speed: traffic rate as a percentage of 10 Gb/s.
        size: frame size in bytes, excluding per-packet wire overhead.

    Returns:
        float: packet rate in Mpps.
    """
    # Per-packet wire overhead is 20 bytes:
    #   12 bytes inter-packet gap + 7 bytes pre-amble + 1 byte SFD.
    # speed% of 10**10 bit/s divided by (size+20)*8 bits/packet, scaled
    # to Mpps, reduces to speed*100/(8*(size+20)).
    return (speed * 100.0 / (8 * (size + 20)))
def get_speed(packet_speed, size):
    """Convert a packet rate in Mpps to a line rate in Gb/s.

    Args:
        packet_speed: packet rate in Mpps.
        size: frame size in bytes, excluding per-packet wire overhead.

    Returns:
        float: line rate in Gb/s.
    """
    # Per-packet wire overhead is 20 bytes:
    #   12 bytes inter-packet gap + 7 bytes pre-amble + 1 byte SFD.
    # Mpps * (size+20)*8 bits/packet / 1000 == Gb/s.
    return (packet_speed / 1000.0 * (8 * (size + 20)))
def set_background_flows(background_machines, number_of_flows):
    """Program every background generator machine with the given flow count.

    Args:
        background_machines: iterable of generator machine objects exposing
            set_flows().
        number_of_flows: requested number of concurrent flows.
    """
    for machine in background_machines:
        # set_flows() returns the flow count actually applied; the callers
        # of this helper do not need it, so it is deliberately discarded.
        _ = machine.set_flows(number_of_flows)
def set_background_speed(background_machines, speed):
    """Set the generator speed on all background traffic machines.

    Args:
        background_machines: iterable of generator machine objects exposing
            set_generator_speed().
        speed: generator speed to apply (same unit the machines expect).
    """
    for machine in background_machines:
        machine.set_generator_speed(speed)
def set_background_size(background_machines, imix):
    """Set the UDP packet size(s) on all background traffic machines.

    Args:
        background_machines: iterable of generator machine objects exposing
            set_udp_packet_size().
        imix: a list of packet sizes (an imix specification).
    """
    for machine in background_machines:
        machine.set_udp_packet_size(imix)
def start_background_traffic(background_machines):
    """Start traffic generation on every background machine.

    NOTE(review): the loop body was missing from the damaged source
    (original lines 95-97); restored as machine.start() to mirror
    stop_background_traffic() -- confirm against upstream.
    """
    for machine in background_machines:
        machine.start()
def stop_background_traffic(background_machines):
    """Stop traffic generation on every background machine.

    NOTE(review): the loop body was missing from the damaged source
    (original lines 100-102); restored as machine.stop() to mirror
    start_background_traffic() -- confirm against upstream.
    """
    for machine in background_machines:
        machine.stop()
def parse_data_format_dict(data_format, variables):
    """Recursively substitute placeholder values in data_format, in place.

    Walks the (possibly nested) data_format dict: nested dicts are recursed
    into; any leaf value that is also a key of variables is replaced by the
    corresponding variables entry. The dict is mutated in place.

    NOTE(review): the two branch lines were missing from the damaged source
    (original lines 105 and 107); the if/else structure below is
    reconstructed from the visible recursion and substitution lines --
    confirm against upstream.

    Args:
        data_format: dict (possibly nested) whose leaf values are
            placeholder names to be resolved.
        variables: dict mapping placeholder names to concrete values.
    """
    for k, v in data_format.items():
        if type(v) == dict:
            # Descend into nested sections (e.g. the 'URL' sub-dict).
            RapidTest.parse_data_format_dict(v, variables)
        else:
            if v in variables.keys():
                data_format[k] = variables[v]
# post_data: resolve the loaded format.yaml template against the measured
# variables and POST the metrics for this test type to the configured URL.
# Returns the resolved metrics dict for this test type.
# NOTE(review): damaged paste -- original lines 116 and 118 are missing;
# they presumably initialized URL and concatenated the var['URL'] values
# into it (URL is used below but never assigned here) -- confirm upstream.
111 def post_data(self, variables):
# The concrete subclass name selects which section of the format dict applies.
112 test_type = type(self).__name__
# Deep-copy so placeholder substitution never mutates the shared template.
113 var = copy.deepcopy(self.data_format)
114 self.parse_data_format_dict(var, variables)
# Only post when the template provides a URL, a Format and a section for
# this test type.
115 if var.keys() >= {'URL', test_type, 'Format'}:
117 for value in var['URL'].values():
119 HEADERS = {'X-Requested-With': 'Python requests', 'Content-type': 'application/rapid'}
# PushGateway wants "<name> <value>" lines, newline-terminated.
120 if var['Format'] == 'PushGateway':
121 data = "\n".join("{} {}".format(k, v) for k, v in var[test_type].items()) + "\n"
122 response = requests.post(url=URL, data=data,headers=HEADERS)
# Xtesting accepts the metrics dict as a JSON body.
123 elif var['Format'] == 'Xtesting':
124 data = var[test_type]
125 response = requests.post(url=URL, json=data)
# Any 3xx+ status is treated as a failed post; log and carry on
# (posting metrics is best-effort, the test result is still returned).
126 if (response.status_code >= 300):
127 RapidLog.info('Cannot send metrics to {}'.format(URL))
129 return (var[test_type])
# report_result: format one result row of the test report table as a single
# string. Each statistic that may be absent (None) gets an 'NA' cell;
# otherwise it is formatted with a fixed width so rows line up. prefix is a
# dict of per-column ANSI color/marker prefixes supplied by the caller.
# NOTE(review): damaged paste -- the guard lines are missing (original 133
# presumably 'if flow_number < 0:' given the abs() on 134, and the 'else:'
# lines 135/139/143/147/151/156/163/168/172 pairing each NA-branch with its
# formatted branch) -- confirm against upstream. Code left byte-identical.
132 def report_result(flow_number, size, data, prefix):
# Negative flow_number is rendered in parentheses (its absolute value).
134 flow_number_str = '| ({:>4}) |'.format(abs(flow_number))
136 flow_number_str = '|{:>7} |'.format(flow_number)
137 if data['pps_req_tx'] is None:
138 pps_req_tx_str = '{0: >14}'.format(' NA |')
140 pps_req_tx_str = '{:>7.3f} Mpps |'.format(data['pps_req_tx'])
141 if data['pps_tx'] is None:
142 pps_tx_str = '{0: >14}'.format(' NA |')
144 pps_tx_str = '{:>7.3f} Mpps |'.format(data['pps_tx'])
145 if data['pps_sut_tx'] is None:
146 pps_sut_tx_str = '{0: >14}'.format(' NA |')
148 pps_sut_tx_str = '{:>7.3f} Mpps |'.format(data['pps_sut_tx'])
149 if data['pps_rx'] is None:
150 pps_rx_str = '{0: >25}'.format('NA |')
# RX is shown both as Gb/s (derived via get_speed) and Mpps, colorized.
152 pps_rx_str = bcolors.OKBLUE + '{:>4.1f} Gb/s |{:7.3f} Mpps {}|'.format(
153 RapidTest.get_speed(data['pps_rx'],size),data['pps_rx'],bcolors.ENDC)
154 if data['abs_dropped'] is None:
155 tot_drop_str = ' | NA | '
157 tot_drop_str = ' | {:>9.0f} | '.format(data['abs_dropped'])
# Latency percentile: '>' marks that the percentile fell in the last
# (overflow) histogram bucket, i.e. the true value is at least this.
158 if data['lat_perc'] is None:
159 lat_perc_str = '|{:^10.10}|'.format('NA')
160 elif data['lat_perc_max'] == True:
161 lat_perc_str = '|>{}{:>5.0f} us{} |'.format(prefix['lat_perc'],
162 float(data['lat_perc']), bcolors.ENDC)
164 lat_perc_str = '| {}{:>5.0f} us{} |'.format(prefix['lat_perc'],
165 float(data['lat_perc']), bcolors.ENDC)
166 if data['actual_duration'] is None:
167 elapsed_time_str = ' NA |'
169 elapsed_time_str = '{:>3.0f} |'.format(data['actual_duration'])
170 if data['mis_ordered'] is None:
171 mis_ordered_str = ' NA '
173 mis_ordered_str = '{:>9.0f} '.format(data['mis_ordered'])
# Assemble the full row: speed (% and Mpps), req/actual/SUT TX, RX,
# avg/percentile/max latency, absolute TX/RX/drop counts, drop rate %,
# mis-ordered count and elapsed time.
174 return(flow_number_str + '{:>5.1f}'.format(data['speed']) + '% ' + prefix['speed']
175 + '{:>6.3f}'.format(RapidTest.get_pps(data['speed'],size)) + ' Mpps|' +
176 pps_req_tx_str + pps_tx_str + bcolors.ENDC + pps_sut_tx_str +
177 pps_rx_str + prefix['lat_avg'] + ' {:>6.0f}'.format(data['lat_avg']) +
178 ' us' + lat_perc_str +prefix['lat_max']+'{:>6.0f}'.format(data['lat_max'])
179 + ' us | ' + '{:>9.0f}'.format(data['abs_tx']) + ' | {:>9.0f}'.format(data['abs_rx']) +
180 ' | '+ prefix['abs_drop_rate']+ '{:>9.0f}'.format(data['abs_tx']-data['abs_rx']) +
181 tot_drop_str + prefix['drop_rate'] +
182 '{:>5.2f}'.format(100*old_div(float(data['abs_tx']-data['abs_rx']),data['abs_tx'])) + ' |' +
183 prefix['mis_ordered'] + mis_ordered_str + bcolors.ENDC +
184 ' |' + elapsed_time_str)
# run_iteration: run one complete measurement iteration at a given speed,
# frame size and flow count, repeating up to self.test['maxr'] times until
# the drop-rate criteria are met, and return a dict of aggregated results.
# NOTE(review): damaged paste -- embedded original line numbers, stripped
# indentation, and many lines missing entirely (gaps in the numbering, e.g.
# 189-192 initializing iteration_data/time_loop_data/sleep_time, 206-207,
# 221/224-225 the bg_gen_stat dict head/tail, 234, 242/246, 259-263 the rest
# of the prefix dicts, 274, 291, 296, 298, 302, 306, 316, 329, 337-338 the
# else branches, 344-350, 362-364, 367-368 i/bg_rates init, 373-379, 381,
# 386, 393, 397, 415-420, 428, 435). Code left byte-identical; comments only.
186 def run_iteration(self, requested_duration, flow_number, size, speed):
187 BUCKET_SIZE_EXP = self.gen_machine.bucket_size_exp
188 LAT_PERCENTILE = self.test['lat_percentile']
# Retry loop: r counts attempts, bounded by the 'maxr' test parameter.
191 iteration_data['r'] = 0;
193 while (iteration_data['r'] < self.test['maxr']):
194 self.gen_machine.start_latency_cores()
195 time.sleep(sleep_time)
196 # Sleep_time is needed to be able to do accurate measurements to check for packet loss. We need to make this time large enough so that we do not take the first measurement while some packets from the previous tests migth still be in flight
# t1_*: baseline counters taken before traffic generation starts;
# dataplane counts are total minus non-dataplane (e.g. ARP) packets.
197 t1_rx, t1_non_dp_rx, t1_tx, t1_non_dp_tx, t1_drop, t1_tx_fail, t1_tsc, abs_tsc_hz = self.gen_machine.core_stats()
198 t1_dp_rx = t1_rx - t1_non_dp_rx
199 t1_dp_tx = t1_tx - t1_non_dp_tx
# Start the generators at speed 0, then optionally ramp up in
# 'ramp_step' increments before settling on the requested speed.
200 self.gen_machine.set_generator_speed(0)
201 self.gen_machine.start_gen_cores()
202 self.set_background_speed(self.background_machines, 0)
203 self.start_background_traffic(self.background_machines)
204 if 'ramp_step' in self.test.keys():
205 ramp_speed = self.test['ramp_step']
208 while ramp_speed < speed:
209 self.gen_machine.set_generator_speed(ramp_speed)
210 self.set_background_speed(self.background_machines, ramp_speed)
212 ramp_speed = ramp_speed + self.test['ramp_step']
213 self.gen_machine.set_generator_speed(speed)
214 self.set_background_speed(self.background_machines, speed)
215 iteration_data['speed'] = speed
216 time_loop_data['speed'] = speed
217 time.sleep(2) ## Needs to be 2 seconds since this 1 sec is the time that PROX uses to refresh the stats. Note that this can be changed in PROX!! Don't do it.
# Snapshot background-generator dataplane counters so the average
# background rate can be computed at the end of the iteration.
218 start_bg_gen_stats = []
219 for bg_gen_machine in self.background_machines:
220 bg_rx, bg_non_dp_rx, bg_tx, bg_non_dp_tx, _, _, bg_tsc, _ = bg_gen_machine.core_stats()
222 "bg_dp_rx" : bg_rx - bg_non_dp_rx,
223 "bg_dp_tx" : bg_tx - bg_non_dp_tx,
226 start_bg_gen_stats.append(dict(bg_gen_stat))
# t2_*: counters after traffic is flowing (and SUT counters if a
# separate system-under-test machine is configured).
227 if self.sut_machine!= None:
228 t2_sut_rx, t2_sut_non_dp_rx, t2_sut_tx, t2_sut_non_dp_tx, t2_sut_drop, t2_sut_tx_fail, t2_sut_tsc, sut_tsc_hz = self.sut_machine.core_stats()
229 t2_rx, t2_non_dp_rx, t2_tx, t2_non_dp_tx, t2_drop, t2_tx_fail, t2_tsc, tsc_hz = self.gen_machine.core_stats()
# NOTE(review): 'tx' on the next line is not defined in the visible
# code -- a missing line (230) presumably computed it -- confirm.
231 iteration_data['abs_tx'] = tx - (t2_non_dp_tx - t1_non_dp_tx )
232 iteration_data['abs_rx'] = t2_rx - t1_rx - (t2_non_dp_rx - t1_non_dp_rx)
233 iteration_data['abs_dropped'] = iteration_data['abs_tx'] - iteration_data['abs_rx']
# Abort the whole test when nothing (dataplane) was transmitted.
235 RapidLog.critical("TX = 0. Test interrupted since no packet has been sent.")
236 if iteration_data['abs_tx'] == 0:
237 RapidLog.critical("Only non-dataplane packets (e.g. ARP) sent. Test interrupted since no packet has been sent.")
238 # Ask PROX to calibrate the bucket size once we have a PROX function to do this.
239 # Measure latency statistics per second
240 iteration_data.update(self.gen_machine.lat_stats())
241 t2_lat_tsc = iteration_data['lat_tsc']
# Derive the requested latency percentile by walking the histogram
# buckets until the cumulative sample count crosses the threshold.
243 for sample_percentile, bucket in enumerate(iteration_data['buckets'],start=1):
244 sample_count += bucket
245 if sample_count > sum(iteration_data['buckets']) * LAT_PERCENTILE:
# lat_perc_max flags that the percentile landed in the last bucket,
# i.e. the reported value is a lower bound.
247 iteration_data['lat_perc_max'] = (sample_percentile == len(iteration_data['buckets']))
248 iteration_data['bucket_size'] = float(2 ** BUCKET_SIZE_EXP) / (old_div(float(iteration_data['lat_hz']),float(10**6)))
249 time_loop_data['bucket_size'] = iteration_data['bucket_size']
250 iteration_data['lat_perc'] = sample_percentile * iteration_data['bucket_size']
# fixed_rate tests report intermediate (per-second) rows; rate fields
# are blanked for this first row.
251 if self.test['test'] == 'fixed_rate':
252 iteration_data['pps_req_tx'] = None
253 iteration_data['pps_tx'] = None
254 iteration_data['pps_sut_tx'] = None
255 iteration_data['pps_rx'] = None
256 iteration_data['lat_perc'] = None
257 iteration_data['actual_duration'] = None
258 iteration_prefix = {'speed' : '',
262 'abs_drop_rate' : '',
264 RapidLog.info(self.report_result(flow_number, size,
265 iteration_data, iteration_prefix ))
# Reset accumulators for the timed measurement loop below.
266 tot_rx = tot_non_dp_rx = tot_tx = tot_non_dp_tx = tot_drop = 0
267 iteration_data['lat_avg'] = iteration_data['lat_used'] = 0
268 tot_lat_measurement_duration = float(0)
269 iteration_data['actual_duration'] = float(0)
270 tot_sut_core_measurement_duration = float(0)
271 tot_sut_rx = tot_sut_non_dp_rx = tot_sut_tx = tot_sut_non_dp_tx = tot_sut_drop = tot_sut_tx_fail = tot_sut_tsc = 0
272 lat_avail = core_avail = sut_avail = False
# Main measurement loop: keep sampling until BOTH the core-stats
# duration and the latency-stats duration reach requested_duration.
273 while (iteration_data['actual_duration'] - float(requested_duration) <= 0.1) or (tot_lat_measurement_duration - float(requested_duration) <= 0.1):
275 time_loop_data.update(self.gen_machine.lat_stats())
276 # Get statistics after some execution time
# Only process latency stats when PROX produced a new sample
# (lat_tsc advanced since the previous read).
277 if time_loop_data['lat_tsc'] != t2_lat_tsc:
278 single_lat_measurement_duration = (time_loop_data['lat_tsc'] - t2_lat_tsc) * 1.0 / time_loop_data['lat_hz'] # time difference between the 2 measurements, expressed in seconds.
279 # A second has passed in between to lat_stats requests. Hence we need to process the results
280 tot_lat_measurement_duration = tot_lat_measurement_duration + single_lat_measurement_duration
281 if iteration_data['lat_min'] > time_loop_data['lat_min']:
282 iteration_data['lat_min'] = time_loop_data['lat_min']
283 if iteration_data['lat_max'] < time_loop_data['lat_max']:
284 iteration_data['lat_max'] = time_loop_data['lat_max']
# Averages are accumulated weighted by the sample duration and
# normalized after the loop (original lines 384-385).
285 iteration_data['lat_avg'] = iteration_data['lat_avg'] + time_loop_data['lat_avg'] * single_lat_measurement_duration # Sometimes, There is more than 1 second between 2 lat_stats. Hence we will take the latest measurement
286 iteration_data['lat_used'] = iteration_data['lat_used'] + time_loop_data['lat_used'] * single_lat_measurement_duration # and give it more weigth.
288 for sample_percentile, bucket in enumerate(time_loop_data['buckets'],start=1):
289 sample_count += bucket
290 if sample_count > sum(time_loop_data['buckets']) * LAT_PERCENTILE:
292 time_loop_data['lat_perc_max'] = (sample_percentile == len(time_loop_data['buckets']))
293 time_loop_data['lat_perc'] = sample_percentile * iteration_data['bucket_size']
# Merge this sample's histogram into the iteration histogram.
294 iteration_data['buckets'] = [iteration_data['buckets'][i] + time_loop_data['buckets'][i] for i in range(len(iteration_data['buckets']))]
295 t2_lat_tsc = time_loop_data['lat_tsc']
# t3_*: next core-stats sample; deltas against t2_* accumulate into
# the tot_* counters, then t2_* slides forward to t3_*.
297 t3_rx, t3_non_dp_rx, t3_tx, t3_non_dp_tx, t3_drop, t3_tx_fail, t3_tsc, tsc_hz = self.gen_machine.core_stats()
299 time_loop_data['actual_duration'] = (t3_tsc - t2_tsc) * 1.0 / tsc_hz # time difference between the 2 measurements, expressed in seconds.
300 iteration_data['actual_duration'] = iteration_data['actual_duration'] + time_loop_data['actual_duration']
301 delta_rx = t3_rx - t2_rx
303 delta_non_dp_rx = t3_non_dp_rx - t2_non_dp_rx
304 tot_non_dp_rx += delta_non_dp_rx
305 delta_tx = t3_tx - t2_tx
307 delta_non_dp_tx = t3_non_dp_tx - t2_non_dp_tx
308 tot_non_dp_tx += delta_non_dp_tx
309 delta_dp_tx = delta_tx -delta_non_dp_tx
310 delta_dp_rx = delta_rx -delta_non_dp_rx
311 time_loop_data['abs_dropped'] = delta_dp_tx - delta_dp_rx
312 iteration_data['abs_dropped'] += time_loop_data['abs_dropped']
313 delta_drop = t3_drop - t2_drop
314 tot_drop += delta_drop
315 t2_rx, t2_non_dp_rx, t2_tx, t2_non_dp_tx, t2_drop, t2_tx_fail, t2_tsc = t3_rx, t3_non_dp_rx, t3_tx, t3_non_dp_tx, t3_drop, t3_tx_fail, t3_tsc
# Same sliding-window accumulation for the SUT machine, if present.
317 if self.sut_machine!=None:
318 t3_sut_rx, t3_sut_non_dp_rx, t3_sut_tx, t3_sut_non_dp_tx, t3_sut_drop, t3_sut_tx_fail, t3_sut_tsc, sut_tsc_hz = self.sut_machine.core_stats()
319 if t3_sut_tsc != t2_sut_tsc:
320 single_sut_core_measurement_duration = (t3_sut_tsc - t2_sut_tsc) * 1.0 / sut_tsc_hz # time difference between the 2 measurements, expressed in seconds.
321 tot_sut_core_measurement_duration = tot_sut_core_measurement_duration + single_sut_core_measurement_duration
322 tot_sut_rx += t3_sut_rx - t2_sut_rx
323 tot_sut_non_dp_rx += t3_sut_non_dp_rx - t2_sut_non_dp_rx
324 delta_sut_tx = t3_sut_tx - t2_sut_tx
325 tot_sut_tx += delta_sut_tx
326 delta_sut_non_dp_tx = t3_sut_non_dp_tx - t2_sut_non_dp_tx
327 tot_sut_non_dp_tx += delta_sut_non_dp_tx
328 t2_sut_rx, t2_sut_non_dp_rx, t2_sut_tx, t2_sut_non_dp_tx, t2_sut_drop, t2_sut_tx_fail, t2_sut_tsc = t3_sut_rx, t3_sut_non_dp_rx, t3_sut_tx, t3_sut_non_dp_tx, t3_sut_drop, t3_sut_tx_fail, t3_sut_tsc
# fixed_rate: log and post an intermediate result row once both a
# fresh latency sample and a fresh core sample are available.
330 if self.test['test'] == 'fixed_rate':
331 if lat_avail == core_avail == True:
332 lat_avail = core_avail = False
333 time_loop_data['pps_req_tx'] = (delta_tx + delta_drop - delta_rx)/time_loop_data['actual_duration']/1000000
334 time_loop_data['pps_tx'] = delta_tx/time_loop_data['actual_duration']/1000000
335 if self.sut_machine != None and sut_avail:
336 time_loop_data['pps_sut_tx'] = delta_sut_tx/single_sut_core_measurement_duration/1000000
339 time_loop_data['pps_sut_tx'] = None
340 time_loop_data['pps_rx'] = delta_rx/time_loop_data['actual_duration']/1000000
341 time_loop_data['abs_tx'] = delta_dp_tx
342 time_loop_data['abs_rx'] = delta_dp_rx
343 time_loop_prefix = {'speed' : '',
347 'abs_drop_rate' : '',
349 RapidLog.info(self.report_result(flow_number, size, time_loop_data,
351 time_loop_data['test'] = self.test['testname']
352 time_loop_data['environment_file'] = self.test['environment_file']
353 time_loop_data['Flows'] = flow_number
354 time_loop_data['Size'] = size
355 time_loop_data['RequestedSpeed'] = RapidTest.get_pps(speed, size)
356 _ = self.post_data(time_loop_data)
# End-of-iteration background stats: compute each background machine's
# average dataplane RX rate in Mpps over the iteration.
357 end_bg_gen_stats = []
358 for bg_gen_machine in self.background_machines:
359 bg_rx, bg_non_dp_rx, bg_tx, bg_non_dp_tx, _, _, bg_tsc, bg_hz = bg_gen_machine.core_stats()
360 bg_gen_stat = {"bg_dp_rx" : bg_rx - bg_non_dp_rx,
361 "bg_dp_tx" : bg_tx - bg_non_dp_tx,
365 end_bg_gen_stats.append(dict(bg_gen_stat))
366 self.stop_background_traffic(self.background_machines)
369 while i < len(end_bg_gen_stats):
370 bg_rates.append(0.000001*(end_bg_gen_stats[i]['bg_dp_rx'] -
371 start_bg_gen_stats[i]['bg_dp_rx']) / ((end_bg_gen_stats[i]['bg_tsc'] -
372 start_bg_gen_stats[i]['bg_tsc']) * 1.0 / end_bg_gen_stats[i]['bg_hz']))
375 iteration_data['avg_bg_rate'] = sum(bg_rates) / len(bg_rates)
376 RapidLog.debug('Average Background traffic rate: {:>7.3f} Mpps'.format(iteration_data['avg_bg_rate']))
378 iteration_data['avg_bg_rate'] = None
# Stop generation, count the attempt, normalize the duration-weighted
# latency accumulators into true averages.
380 self.gen_machine.stop_gen_cores()
382 self.gen_machine.stop_latency_cores()
383 iteration_data['r'] += 1
384 iteration_data['lat_avg'] = old_div(iteration_data['lat_avg'], float(tot_lat_measurement_duration))
385 iteration_data['lat_used'] = old_div(iteration_data['lat_used'], float(tot_lat_measurement_duration))
# t4_*: final counters; poll until the tsc actually advances past t2.
387 while t4_tsc == t2_tsc:
388 t4_rx, t4_non_dp_rx, t4_tx, t4_non_dp_tx, t4_drop, t4_tx_fail, t4_tsc, abs_tsc_hz = self.gen_machine.core_stats()
# fixed_rate final result: wait for one last latency sample, recompute
# the percentile, take counters since t2 and leave rate fields blank.
389 if self.test['test'] == 'fixed_rate':
390 iteration_data['lat_tsc'] = t2_lat_tsc
391 while iteration_data['lat_tsc'] == t2_lat_tsc:
392 iteration_data.update(self.gen_machine.lat_stats())
394 for percentile, bucket in enumerate(iteration_data['buckets'],start=1):
395 sample_count += bucket
396 if sample_count > sum(iteration_data['buckets']) * LAT_PERCENTILE:
398 iteration_data['lat_perc_max'] = (percentile == len(iteration_data['buckets']))
399 iteration_data['lat_perc'] = percentile * iteration_data['bucket_size']
400 delta_rx = t4_rx - t2_rx
401 delta_non_dp_rx = t4_non_dp_rx - t2_non_dp_rx
402 delta_tx = t4_tx - t2_tx
403 delta_non_dp_tx = t4_non_dp_tx - t2_non_dp_tx
404 delta_dp_tx = delta_tx -delta_non_dp_tx
405 delta_dp_rx = delta_rx -delta_non_dp_rx
406 iteration_data['abs_tx'] = delta_dp_tx
407 iteration_data['abs_rx'] = delta_dp_rx
408 iteration_data['abs_dropped'] += delta_dp_tx - delta_dp_rx
409 iteration_data['pps_req_tx'] = None
410 iteration_data['pps_tx'] = None
411 iteration_data['pps_sut_tx'] = None
412 iteration_data['drop_rate'] = 100.0*(iteration_data['abs_tx']-iteration_data['abs_rx'])/iteration_data['abs_tx']
413 iteration_data['actual_duration'] = None
414 break ## Not really needed since the while loop will stop when evaluating the value of r
# Non-fixed_rate final result: aggregate rates over the whole
# measurement window and the absolute counters over t1..t4.
417 for percentile, bucket in enumerate(iteration_data['buckets'],start=1):
418 sample_count += bucket
419 if sample_count > sum(iteration_data['buckets']) * LAT_PERCENTILE:
421 iteration_data['lat_perc_max'] = (percentile == len(iteration_data['buckets']))
422 iteration_data['lat_perc'] = percentile * iteration_data['bucket_size']
423 iteration_data['pps_req_tx'] = (tot_tx + tot_drop - tot_rx)/iteration_data['actual_duration']/1000000.0 # tot_drop is all packets dropped by all tasks. This includes packets dropped at the generator task + packets dropped by the nop task. In steady state, this equals to the number of packets received by this VM
424 iteration_data['pps_tx'] = tot_tx/iteration_data['actual_duration']/1000000.0 # tot_tx is all generated packets actually accepted by the interface
425 iteration_data['pps_rx'] = tot_rx/iteration_data['actual_duration']/1000000.0 # tot_rx is all packets received by the nop task = all packets received in the gen VM
426 if self.sut_machine != None and sut_avail:
427 iteration_data['pps_sut_tx'] = tot_sut_tx / tot_sut_core_measurement_duration / 1000000.0
429 iteration_data['pps_sut_tx'] = None
430 iteration_data['abs_tx'] = (t4_tx - t1_tx) - (t4_non_dp_tx - t1_non_dp_tx)
431 iteration_data['abs_rx'] = (t4_rx - t1_rx) - (t4_non_dp_rx - t1_non_dp_rx)
432 iteration_data['abs_dropped'] = iteration_data['abs_tx'] - iteration_data['abs_rx']
433 iteration_data['drop_rate'] = 100.0*iteration_data['abs_dropped']/iteration_data['abs_tx']
# Exit the retry loop when the drop rate is under the threshold, when
# zero drops were required and achieved, or when drops exceed maxz.
434 if ((iteration_data['drop_rate'] < self.test['drop_rate_threshold']) or (iteration_data['abs_dropped'] == self.test['drop_rate_threshold'] ==0) or (iteration_data['abs_dropped'] > self.test['maxz'])):
436 self.gen_machine.stop_latency_cores()
437 iteration_data['abs_tx_fail'] = t4_tx_fail - t1_tx_fail
438 return (iteration_data)