4 ## Copyright (c) 2020 Intel Corporation
6 ## Licensed under the Apache License, Version 2.0 (the "License");
7 ## you may not use this file except in compliance with the License.
8 ## You may obtain a copy of the License at
11 ## http://www.apache.org/licenses/LICENSE-2.0
13 ## Unless required by applicable law or agreed to in writing, software
14 ## distributed under the License is distributed on an "AS IS" BASIS,
15 ## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 ## See the License for the specific language governing permissions and
17 ## limitations under the License.
25 from past.utils import old_div
26 from rapid_log import RapidLog
27 from rapid_log import bcolors
29 from datetime import datetime as dt
# Directory containing this script; used to locate the bundled format.yaml.
_CURR_DIR = os.path.dirname(os.path.realpath(__file__))
class RapidTest(object):
    """
    Class to manage the testing
    """
37 def __init__(self, test_param, runtime, testname, environment_file ):
38 self.test = test_param
39 self.test['runtime'] = runtime
40 self.test['testname'] = testname
41 self.test['environment_file'] = environment_file
42 if 'maxr' not in self.test.keys():
44 if 'maxz' not in self.test.keys():
45 self.test['maxz'] = inf
46 with open(os.path.join(_CURR_DIR,'format.yaml')) as f:
47 self.data_format = yaml.load(f, Loader=yaml.FullLoader)
50 def get_percentageof10Gbps(pps_speed,size):
51 # speed is given in pps, returning % of 10Gb/s
52 # 12 bytes is the inter packet gap
53 # pre-amble is 7 bytes
54 # SFD (start of frame delimiter) is 1 byte
55 # Total of 20 bytes overhead per packet
56 return (pps_speed / 1000000.0 * 0.08 * (size+20))
59 def get_pps(speed,size):
60 # speed is given in % of 10Gb/s, returning Mpps
61 # 12 bytes is the inter packet gap
62 # pre-amble is 7 bytes
63 # SFD (start of frame delimiter) is 1 byte
64 # Total of 20 bytes overhead per packet
65 return (speed * 100.0 / (8*(size+20)))
68 def get_speed(packet_speed,size):
69 # return speed in Gb/s
70 # 12 bytes is the inter packet gap
71 # pre-amble is 7 bytes
72 # SFD (start of frame delimiter) is 1 byte
73 # Total of 20 bytes overhead per packet
74 return (packet_speed / 1000.0 * (8*(size+20)))
77 def set_background_flows(background_machines, number_of_flows):
78 for machine in background_machines:
79 _ = machine.set_flows(number_of_flows)
82 def set_background_speed(background_machines, speed):
83 for machine in background_machines:
84 machine.set_generator_speed(speed)
87 def set_background_size(background_machines, imix):
88 # imixs is a list of packet sizes
89 for machine in background_machines:
90 machine.set_udp_packet_size(imix)
93 def start_background_traffic(background_machines):
94 for machine in background_machines:
98 def stop_background_traffic(background_machines):
99 for machine in background_machines:
103 def parse_data_format_dict(data_format, variables):
104 for k, v in data_format.items():
106 RapidTest.parse_data_format_dict(v, variables)
108 if v in variables.keys():
109 data_format[k] = variables[v]
111 def post_data(self, variables):
112 test_type = type(self).__name__
113 var = copy.deepcopy(self.data_format)
114 self.parse_data_format_dict(var, variables)
115 if var.keys() >= {'URL', test_type, 'Format'}:
117 for value in var['URL'].values():
119 HEADERS = {'X-Requested-With': 'Python requests', 'Content-type': 'application/rapid'}
120 if var['Format'] == 'PushGateway':
121 data = "\n".join("{} {}".format(k, v) for k, v in var[test_type].items()) + "\n"
122 response = requests.post(url=URL, data=data,headers=HEADERS)
123 elif var['Format'] == 'Xtesting':
124 data = var[test_type]
125 response = requests.post(url=URL, json=data)
126 if (response.status_code >= 300):
127 RapidLog.info('Cannot send metrics to {}'.format(URL))
129 return (var[test_type])
132 def report_result(flow_number, size, data, prefix):
134 flow_number_str = '| ({:>4}) |'.format(abs(flow_number))
136 flow_number_str = '|{:>7} |'.format(flow_number)
137 if data['pps_req_tx'] is None:
138 pps_req_tx_str = '{0: >14}'.format(' NA |')
140 pps_req_tx_str = '{:>7.3f} Mpps |'.format(data['pps_req_tx'])
141 if data['pps_tx'] is None:
142 pps_tx_str = '{0: >14}'.format(' NA |')
144 pps_tx_str = '{:>7.3f} Mpps |'.format(data['pps_tx'])
145 if data['pps_sut_tx'] is None:
146 pps_sut_tx_str = '{0: >14}'.format(' NA |')
148 pps_sut_tx_str = '{:>7.3f} Mpps |'.format(data['pps_sut_tx'])
149 if data['pps_rx'] is None:
150 pps_rx_str = '{0: >25}'.format('NA |')
152 pps_rx_str = bcolors.OKBLUE + '{:>4.1f} Gb/s |{:7.3f} Mpps {}|'.format(
153 RapidTest.get_speed(data['pps_rx'],size),data['pps_rx'],bcolors.ENDC)
154 if data['abs_dropped'] is None:
155 tot_drop_str = ' | NA | '
157 tot_drop_str = ' | {:>9.0f} | '.format(data['abs_dropped'])
158 if data['lat_perc'] is None:
159 lat_perc_str = '|{:^10.10}|'.format('NA')
160 elif data['lat_perc_max'] == True:
161 lat_perc_str = '|>{}{:>5.0f} us{} |'.format(prefix['lat_perc'],
162 float(data['lat_perc']), bcolors.ENDC)
164 lat_perc_str = '| {}{:>5.0f} us{} |'.format(prefix['lat_perc'],
165 float(data['lat_perc']), bcolors.ENDC)
166 if data['actual_duration'] is None:
167 elapsed_time_str = ' NA |'
169 elapsed_time_str = '{:>3.0f} |'.format(data['actual_duration'])
170 if data['mis_ordered'] is None:
171 mis_ordered_str = ' NA '
173 mis_ordered_str = '{:>9.0f} '.format(data['mis_ordered'])
174 return(flow_number_str + '{:>5.1f}'.format(data['speed']) + '% ' + prefix['speed']
175 + '{:>6.3f}'.format(RapidTest.get_pps(data['speed'],size)) + ' Mpps|' +
176 pps_req_tx_str + pps_tx_str + bcolors.ENDC + pps_sut_tx_str +
177 pps_rx_str + prefix['lat_avg'] + ' {:>6.0f}'.format(data['lat_avg']) +
178 ' us' + lat_perc_str +prefix['lat_max']+'{:>6.0f}'.format(data['lat_max'])
179 + ' us | ' + '{:>9.0f}'.format(data['abs_tx']) + ' | {:>9.0f}'.format(data['abs_rx']) +
180 ' | '+ prefix['abs_drop_rate']+ '{:>9.0f}'.format(data['abs_tx']-data['abs_rx']) +
181 tot_drop_str + prefix['drop_rate'] +
182 '{:>5.2f}'.format(100*old_div(float(data['abs_tx']-data['abs_rx']),data['abs_tx'])) + ' |' +
183 prefix['mis_ordered'] + mis_ordered_str + bcolors.ENDC +
184 ' |' + elapsed_time_str)
    def run_iteration(self, requested_duration, flow_number, size, speed):
        """Run one traffic iteration at `speed` for at least
        `requested_duration` seconds and collect generator/SUT statistics.

        Returns a dict (iteration_data) holding absolute dataplane counters
        (abs_tx, abs_rx, abs_dropped, abs_tx_fail), rates (pps_*), latency
        statistics (lat_avg, lat_perc, lat_perc_max, buckets, ...) and the
        measured duration.

        NOTE(review): this block was recovered from a source with missing
        lines. Initialisations (iteration_data/time_loop_data dicts,
        sample_count, t4_tsc, i, bg_rates), several loop bodies/`break`s and
        `else:` branches appear to be absent; each gap is flagged below.
        Confirm against the original file before running — as-is this method
        is not syntactically complete.
        """
        BUCKET_SIZE_EXP = self.gen_machine.bucket_size_exp
        sleep_time = self.test['sleep_time']
        LAT_PERCENTILE = self.test['lat_percentile']
        # NOTE(review): iteration_data / time_loop_data dict initialisations
        # appear to be missing here.
        iteration_data['r'] = 0;
        # Repeat the measurement up to 'maxr' times.
        while (iteration_data['r'] < self.test['maxr']):
            self.gen_machine.start_latency_cores()
            time.sleep(sleep_time)
            # Sleep_time is needed to be able to do accurate measurements to check for packet loss. We need to make this time large enough so that we do not take the first measurement while some packets from the previous tests migth still be in flight
            # t1_*: baseline generator counters before traffic is started.
            t1_rx, t1_non_dp_rx, t1_tx, t1_non_dp_tx, t1_drop, t1_tx_fail, t1_tsc, abs_tsc_hz = self.gen_machine.core_stats()
            t1_dp_rx = t1_rx - t1_non_dp_rx
            t1_dp_tx = t1_tx - t1_non_dp_tx
            self.gen_machine.set_generator_speed(0)
            self.gen_machine.start_gen_cores()
            self.set_background_speed(self.background_machines, 0)
            self.start_background_traffic(self.background_machines)
            # Optionally ramp up to the requested speed in 'ramp_step' steps.
            if 'ramp_step' in self.test.keys():
                ramp_speed = self.test['ramp_step']
            # NOTE(review): an else branch (ramp_speed = speed?) seems missing.
            while ramp_speed < speed:
                self.gen_machine.set_generator_speed(ramp_speed)
                self.set_background_speed(self.background_machines, ramp_speed)
                # NOTE(review): a sleep between ramp steps seems to be missing.
                ramp_speed = ramp_speed + self.test['ramp_step']
            self.gen_machine.set_generator_speed(speed)
            self.set_background_speed(self.background_machines, speed)
            iteration_data['speed'] = speed
            time_loop_data['speed'] = speed
            time.sleep(2) ## Needs to be 2 seconds since this 1 sec is the time that PROX uses to refresh the stats. Note that this can be changed in PROX!! Don't do it.
            # Snapshot background generator counters at the start of traffic.
            start_bg_gen_stats = []
            for bg_gen_machine in self.background_machines:
                bg_rx, bg_non_dp_rx, bg_tx, bg_non_dp_tx, _, _, bg_tsc, _ = bg_gen_machine.core_stats()
                # NOTE(review): the 'bg_gen_stat = {' opener and the trailing
                # entries/closer of this dict literal appear to be missing.
                "bg_dp_rx" : bg_rx - bg_non_dp_rx,
                "bg_dp_tx" : bg_tx - bg_non_dp_tx,
                start_bg_gen_stats.append(dict(bg_gen_stat))
            if self.sut_machine!= None:
                t2_sut_rx, t2_sut_non_dp_rx, t2_sut_tx, t2_sut_non_dp_tx, t2_sut_drop, t2_sut_tx_fail, t2_sut_tsc, sut_tsc_hz = self.sut_machine.core_stats()
            # t2_*: counters right after traffic start; abs_* counts only
            # dataplane packets (non-dataplane, e.g. ARP, is subtracted).
            t2_rx, t2_non_dp_rx, t2_tx, t2_non_dp_tx, t2_drop, t2_tx_fail, t2_tsc, tsc_hz = self.gen_machine.core_stats()
            # NOTE(review): the assignment of 'tx' (tx = t2_tx - t1_tx?) is
            # missing — 'tx' is undefined at this point.
            iteration_data['abs_tx'] = tx - (t2_non_dp_tx - t1_non_dp_tx )
            iteration_data['abs_rx'] = t2_rx - t1_rx - (t2_non_dp_rx - t1_non_dp_rx)
            iteration_data['abs_dropped'] = iteration_data['abs_tx'] - iteration_data['abs_rx']
            # NOTE(review): the guard (if tx == 0:) for this abort seems missing.
            RapidLog.critical("TX = 0. Test interrupted since no packet has been sent.")
            if iteration_data['abs_tx'] == 0:
                RapidLog.critical("Only non-dataplane packets (e.g. ARP) sent. Test interrupted since no packet has been sent.")
            # Ask PROX to calibrate the bucket size once we have a PROX function to do this.
            # Measure latency statistics per second
            iteration_data.update(self.gen_machine.lat_stats())
            t2_lat_tsc = iteration_data['lat_tsc']
            # Walk the latency histogram until LAT_PERCENTILE of all samples
            # is covered; sample_percentile then indexes the percentile bucket.
            # NOTE(review): sample_count initialisation is missing here.
            for sample_percentile, bucket in enumerate(iteration_data['buckets'],start=1):
                sample_count += bucket
                if sample_count > sum(iteration_data['buckets']) * LAT_PERCENTILE:
                    # NOTE(review): loop-exit (break?) missing under this if.
            iteration_data['lat_perc_max'] = (sample_percentile == len(iteration_data['buckets']))
            # Histogram bucket width in us, derived from the latency TSC rate.
            iteration_data['bucket_size'] = float(2 ** BUCKET_SIZE_EXP) / (old_div(float(iteration_data['lat_hz']),float(10**6)))
            time_loop_data['bucket_size'] = iteration_data['bucket_size']
            iteration_data['lat_perc'] = sample_percentile * iteration_data['bucket_size']
            if self.test['test'] == 'fixed_rate':
                # Rates are not meaningful yet for fixed_rate at this point.
                iteration_data['pps_req_tx'] = None
                iteration_data['pps_tx'] = None
                iteration_data['pps_sut_tx'] = None
                iteration_data['pps_rx'] = None
                iteration_data['lat_perc'] = None
                iteration_data['actual_duration'] = None
                # NOTE(review): most entries/closer of this prefix dict and
                # part of the RapidLog.info call appear to be missing.
                iteration_prefix = {'speed' : '',
                    'abs_drop_rate' : '',
                RapidLog.info(self.report_result(flow_number, size,
                    iteration_data, iteration_prefix ))
            # Accumulators for the sampling loop below.
            tot_rx = tot_non_dp_rx = tot_tx = tot_non_dp_tx = tot_drop = 0
            iteration_data['lat_avg'] = iteration_data['lat_used'] = 0
            tot_lat_measurement_duration = float(0)
            iteration_data['actual_duration'] = float(0)
            tot_sut_core_measurement_duration = float(0)
            tot_sut_rx = tot_sut_non_dp_rx = tot_sut_tx = tot_sut_non_dp_tx = tot_sut_drop = tot_sut_tx_fail = tot_sut_tsc = 0
            lat_avail = core_avail = sut_avail = False
            # Sample stats repeatedly until both the core counters and the
            # latency statistics cover the requested duration.
            while (iteration_data['actual_duration'] - float(requested_duration) <= 0.1) or (tot_lat_measurement_duration - float(requested_duration) <= 0.1):
                time_loop_data.update(self.gen_machine.lat_stats())
                # Get statistics after some execution time
                if time_loop_data['lat_tsc'] != t2_lat_tsc:
                    single_lat_measurement_duration = (time_loop_data['lat_tsc'] - t2_lat_tsc) * 1.0 / time_loop_data['lat_hz'] # time difference between the 2 measurements, expressed in seconds.
                    # A second has passed in between to lat_stats requests. Hence we need to process the results
                    tot_lat_measurement_duration = tot_lat_measurement_duration + single_lat_measurement_duration
                    if iteration_data['lat_min'] > time_loop_data['lat_min']:
                        iteration_data['lat_min'] = time_loop_data['lat_min']
                    if iteration_data['lat_max'] < time_loop_data['lat_max']:
                        iteration_data['lat_max'] = time_loop_data['lat_max']
                    iteration_data['lat_avg'] = iteration_data['lat_avg'] + time_loop_data['lat_avg'] * single_lat_measurement_duration # Sometimes, There is more than 1 second between 2 lat_stats. Hence we will take the latest measurement
                    iteration_data['lat_used'] = iteration_data['lat_used'] + time_loop_data['lat_used'] * single_lat_measurement_duration # and give it more weigth.
                    # NOTE(review): sample_count re-initialisation is missing.
                    for sample_percentile, bucket in enumerate(time_loop_data['buckets'],start=1):
                        sample_count += bucket
                        if sample_count > sum(time_loop_data['buckets']) * LAT_PERCENTILE:
                            # NOTE(review): loop-exit (break?) missing here.
                    time_loop_data['lat_perc_max'] = (sample_percentile == len(time_loop_data['buckets']))
                    time_loop_data['lat_perc'] = sample_percentile * iteration_data['bucket_size']
                    # Merge this sample's histogram into the iteration total.
                    iteration_data['buckets'] = [iteration_data['buckets'][i] + time_loop_data['buckets'][i] for i in range(len(iteration_data['buckets']))]
                    t2_lat_tsc = time_loop_data['lat_tsc']
                t3_rx, t3_non_dp_rx, t3_tx, t3_non_dp_tx, t3_drop, t3_tx_fail, t3_tsc, tsc_hz = self.gen_machine.core_stats()
                # NOTE(review): a guard (if t3_tsc != t2_tsc:?) seems missing.
                time_loop_data['actual_duration'] = (t3_tsc - t2_tsc) * 1.0 / tsc_hz # time difference between the 2 measurements, expressed in seconds.
                iteration_data['actual_duration'] = iteration_data['actual_duration'] + time_loop_data['actual_duration']
                delta_rx = t3_rx - t2_rx
                # NOTE(review): tot_rx/tot_tx accumulation lines appear missing.
                delta_non_dp_rx = t3_non_dp_rx - t2_non_dp_rx
                tot_non_dp_rx += delta_non_dp_rx
                delta_tx = t3_tx - t2_tx
                delta_non_dp_tx = t3_non_dp_tx - t2_non_dp_tx
                tot_non_dp_tx += delta_non_dp_tx
                delta_dp_tx = delta_tx -delta_non_dp_tx
                delta_dp_rx = delta_rx -delta_non_dp_rx
                time_loop_data['abs_dropped'] = delta_dp_tx - delta_dp_rx
                iteration_data['abs_dropped'] += time_loop_data['abs_dropped']
                delta_drop = t3_drop - t2_drop
                tot_drop += delta_drop
                # Roll t3 into t2 for the next sampling round.
                t2_rx, t2_non_dp_rx, t2_tx, t2_non_dp_tx, t2_drop, t2_tx_fail, t2_tsc = t3_rx, t3_non_dp_rx, t3_tx, t3_non_dp_tx, t3_drop, t3_tx_fail, t3_tsc
                if self.sut_machine!=None:
                    t3_sut_rx, t3_sut_non_dp_rx, t3_sut_tx, t3_sut_non_dp_tx, t3_sut_drop, t3_sut_tx_fail, t3_sut_tsc, sut_tsc_hz = self.sut_machine.core_stats()
                    if t3_sut_tsc != t2_sut_tsc:
                        single_sut_core_measurement_duration = (t3_sut_tsc - t2_sut_tsc) * 1.0 / sut_tsc_hz # time difference between the 2 measurements, expressed in seconds.
                        tot_sut_core_measurement_duration = tot_sut_core_measurement_duration + single_sut_core_measurement_duration
                        tot_sut_rx += t3_sut_rx - t2_sut_rx
                        tot_sut_non_dp_rx += t3_sut_non_dp_rx - t2_sut_non_dp_rx
                        delta_sut_tx = t3_sut_tx - t2_sut_tx
                        tot_sut_tx += delta_sut_tx
                        delta_sut_non_dp_tx = t3_sut_non_dp_tx - t2_sut_non_dp_tx
                        tot_sut_non_dp_tx += delta_sut_non_dp_tx
                        t2_sut_rx, t2_sut_non_dp_rx, t2_sut_tx, t2_sut_non_dp_tx, t2_sut_drop, t2_sut_tx_fail, t2_sut_tsc = t3_sut_rx, t3_sut_non_dp_rx, t3_sut_tx, t3_sut_non_dp_tx, t3_sut_drop, t3_sut_tx_fail, t3_sut_tsc
                if self.test['test'] == 'fixed_rate':
                    # fixed_rate posts intermediate per-sample results.
                    if lat_avail == core_avail == True:
                        lat_avail = core_avail = False
                        time_loop_data['pps_req_tx'] = (delta_tx + delta_drop - delta_rx)/time_loop_data['actual_duration']/1000000
                        time_loop_data['pps_tx'] = delta_tx/time_loop_data['actual_duration']/1000000
                        if self.sut_machine != None and sut_avail:
                            time_loop_data['pps_sut_tx'] = delta_sut_tx/single_sut_core_measurement_duration/1000000
                        # NOTE(review): the else: introducing this fallback
                        # assignment appears to be missing.
                        time_loop_data['pps_sut_tx'] = None
                        time_loop_data['pps_rx'] = delta_rx/time_loop_data['actual_duration']/1000000
                        time_loop_data['abs_tx'] = delta_dp_tx
                        time_loop_data['abs_rx'] = delta_dp_rx
                        # NOTE(review): most entries/closer of this prefix dict
                        # and part of the RapidLog.info call are missing.
                        time_loop_prefix = {'speed' : '',
                            'abs_drop_rate' : '',
                        RapidLog.info(self.report_result(flow_number, size, time_loop_data,
                        time_loop_data['test'] = self.test['testname']
                        time_loop_data['environment_file'] = self.test['environment_file']
                        time_loop_data['Flows'] = flow_number
                        time_loop_data['Size'] = size
                        time_loop_data['RequestedSpeed'] = RapidTest.get_pps(speed, size)
                        _ = self.post_data(time_loop_data)
            # Duration reached: snapshot background generators and stop them.
            end_bg_gen_stats = []
            for bg_gen_machine in self.background_machines:
                bg_rx, bg_non_dp_rx, bg_tx, bg_non_dp_tx, _, _, bg_tsc, bg_hz = bg_gen_machine.core_stats()
                # NOTE(review): trailing entries/closer of this dict
                # ('bg_tsc', 'bg_hz'?) appear to be missing.
                bg_gen_stat = {"bg_dp_rx" : bg_rx - bg_non_dp_rx,
                    "bg_dp_tx" : bg_tx - bg_non_dp_tx,
                end_bg_gen_stats.append(dict(bg_gen_stat))
            self.stop_background_traffic(self.background_machines)
            # Per-machine average background rate in Mpps over the interval.
            # NOTE(review): initialisation of i and bg_rates is missing, as is
            # the i increment inside the loop.
            while i < len(end_bg_gen_stats):
                bg_rates.append(0.000001*(end_bg_gen_stats[i]['bg_dp_rx'] -
                    start_bg_gen_stats[i]['bg_dp_rx']) / ((end_bg_gen_stats[i]['bg_tsc'] -
                    start_bg_gen_stats[i]['bg_tsc']) * 1.0 / end_bg_gen_stats[i]['bg_hz']))
            # NOTE(review): the if/else (on len(bg_rates)?) selecting between
            # these two alternative assignments is missing.
            iteration_data['avg_bg_rate'] = sum(bg_rates) / len(bg_rates)
            RapidLog.debug('Average Background traffic rate: {:>7.3f} Mpps'.format(iteration_data['avg_bg_rate']))
            iteration_data['avg_bg_rate'] = None
            self.gen_machine.stop_gen_cores()
            self.gen_machine.stop_latency_cores()
            iteration_data['r'] += 1
            # Normalise the duration-weighted latency sums into averages.
            iteration_data['lat_avg'] = old_div(iteration_data['lat_avg'], float(tot_lat_measurement_duration))
            iteration_data['lat_used'] = old_div(iteration_data['lat_used'], float(tot_lat_measurement_duration))
            # Wait for a fresh (t4) counter snapshot after stopping the cores.
            # NOTE(review): t4_tsc initialisation is missing before this loop.
            while t4_tsc == t2_tsc:
                t4_rx, t4_non_dp_rx, t4_tx, t4_non_dp_tx, t4_drop, t4_tx_fail, t4_tsc, abs_tsc_hz = self.gen_machine.core_stats()
            if self.test['test'] == 'fixed_rate':
                # Wait for one more latency refresh, then finalise the stats.
                iteration_data['lat_tsc'] = t2_lat_tsc
                while iteration_data['lat_tsc'] == t2_lat_tsc:
                    iteration_data.update(self.gen_machine.lat_stats())
                # NOTE(review): sample_count initialisation is missing.
                for percentile, bucket in enumerate(iteration_data['buckets'],start=1):
                    sample_count += bucket
                    if sample_count > sum(iteration_data['buckets']) * LAT_PERCENTILE:
                        # NOTE(review): loop-exit (break?) missing here.
                iteration_data['lat_perc_max'] = (percentile == len(iteration_data['buckets']))
                iteration_data['lat_perc'] = percentile * iteration_data['bucket_size']
                delta_rx = t4_rx - t2_rx
                delta_non_dp_rx = t4_non_dp_rx - t2_non_dp_rx
                delta_tx = t4_tx - t2_tx
                delta_non_dp_tx = t4_non_dp_tx - t2_non_dp_tx
                delta_dp_tx = delta_tx -delta_non_dp_tx
                delta_dp_rx = delta_rx -delta_non_dp_rx
                iteration_data['abs_tx'] = delta_dp_tx
                iteration_data['abs_rx'] = delta_dp_rx
                iteration_data['abs_dropped'] += delta_dp_tx - delta_dp_rx
                iteration_data['pps_req_tx'] = None
                iteration_data['pps_tx'] = None
                iteration_data['pps_sut_tx'] = None
                iteration_data['drop_rate'] = 100.0*(iteration_data['abs_tx']-iteration_data['abs_rx'])/iteration_data['abs_tx']
                iteration_data['actual_duration'] = None
                break ## Not really needed since the while loop will stop when evaluating the value of r
            # NOTE(review): the else: introducing this non-fixed_rate branch
            # appears to be missing, as does sample_count initialisation.
            for percentile, bucket in enumerate(iteration_data['buckets'],start=1):
                sample_count += bucket
                if sample_count > sum(iteration_data['buckets']) * LAT_PERCENTILE:
                    # NOTE(review): loop-exit (break?) missing here.
            iteration_data['lat_perc_max'] = (percentile == len(iteration_data['buckets']))
            iteration_data['lat_perc'] = percentile * iteration_data['bucket_size']
            iteration_data['pps_req_tx'] = (tot_tx + tot_drop - tot_rx)/iteration_data['actual_duration']/1000000.0 # tot_drop is all packets dropped by all tasks. This includes packets dropped at the generator task + packets dropped by the nop task. In steady state, this equals to the number of packets received by this VM
            iteration_data['pps_tx'] = tot_tx/iteration_data['actual_duration']/1000000.0 # tot_tx is all generated packets actually accepted by the interface
            iteration_data['pps_rx'] = tot_rx/iteration_data['actual_duration']/1000000.0 # tot_rx is all packets received by the nop task = all packets received in the gen VM
            if self.sut_machine != None and sut_avail:
                iteration_data['pps_sut_tx'] = tot_sut_tx / tot_sut_core_measurement_duration / 1000000.0
            # NOTE(review): the else: for this fallback appears to be missing.
            iteration_data['pps_sut_tx'] = None
            iteration_data['abs_tx'] = (t4_tx - t1_tx) - (t4_non_dp_tx - t1_non_dp_tx)
            iteration_data['abs_rx'] = (t4_rx - t1_rx) - (t4_non_dp_rx - t1_non_dp_rx)
            iteration_data['abs_dropped'] = iteration_data['abs_tx'] - iteration_data['abs_rx']
            iteration_data['drop_rate'] = 100.0*iteration_data['abs_dropped']/iteration_data['abs_tx']
            # Stop iterating once the drop rate is acceptable (or the absolute
            # loss exceeds maxz).
            # NOTE(review): the body of this if (break?) is missing.
            if ((iteration_data['drop_rate'] < self.test['drop_rate_threshold']) or (iteration_data['abs_dropped'] == self.test['drop_rate_threshold'] ==0) or (iteration_data['abs_dropped'] > self.test['maxz'])):
        self.gen_machine.stop_latency_cores()
        iteration_data['abs_tx_fail'] = t4_tx_fail - t1_tx_fail
        return (iteration_data)