4 ## Copyright (c) 2020 Intel Corporation
6 ## Licensed under the Apache License, Version 2.0 (the "License");
7 ## you may not use this file except in compliance with the License.
8 ## You may obtain a copy of the License at
11 ## http://www.apache.org/licenses/LICENSE-2.0
13 ## Unless required by applicable law or agreed to in writing, software
14 ## distributed under the License is distributed on an "AS IS" BASIS,
15 ## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 ## See the License for the specific language governing permissions and
17 ## limitations under the License.
25 from past.utils import old_div
26 from rapid_log import RapidLog
27 from rapid_log import bcolors
29 from datetime import datetime as dt
# Directory containing this module; used to locate the bundled 'format.yaml'
# that describes how results are formatted/posted (see RapidTest.__init__).
_CURR_DIR = os.path.dirname(os.path.realpath(__file__))
class RapidTest(object):
    """
    Class to manage the testing
    """
def __init__(self, test_param, runtime, testname, environment_file ):
    """Store the test parameters and load the result-format description.

    Args:
        test_param: dict with the test configuration; enriched in place
            with runtime/reporting information.
        runtime: requested test duration, stored for reporting.
        testname: name of this test, stored for reporting.
        environment_file: path of the environment file, stored for
            reporting.
    """
    self.test = test_param
    self.test['runtime'] = runtime
    self.test['testname'] = testname
    self.test['environment_file'] = environment_file
    # Defaults when the test section does not specify them:
    # maxr = maximum number of iterations, maxz = maximum number of
    # dropped packets tolerated before stopping.
    # NOTE(review): the reviewed listing had an empty 'maxr' branch
    # (syntax error); restored the default of a single iteration.
    if 'maxr' not in self.test.keys():
        self.test['maxr'] = 1
    if 'maxz' not in self.test.keys():
        self.test['maxz'] = inf
    # format.yaml describes how results must be formatted and posted.
    with open(os.path.join(_CURR_DIR,'format.yaml')) as f:
        self.data_format = yaml.load(f, Loader=yaml.FullLoader)
def get_percentageof10Gbps(pps_speed,size):
    """Convert a packet rate (pps) to a percentage of 10 Gb/s line rate.

    Each frame occupies an extra 20 bytes on the wire:
    12 bytes inter-packet gap + 7 bytes preamble + 1 byte SFD.
    """
    wire_overhead = 20
    # 1 pps of an (size+20)-byte frame = 8*(size+20) bits/s; expressed
    # as a fraction of 10^10 bits/s this reduces to the factors below.
    return pps_speed / 1000000.0 * 0.08 * (size + wire_overhead)
def get_pps(speed,size):
    """Convert a speed in % of 10 Gb/s to a packet rate in Mpps.

    Each frame occupies an extra 20 bytes on the wire:
    12 bytes inter-packet gap + 7 bytes preamble + 1 byte SFD.
    """
    # 10 Gb/s * speed% / (bits per frame) -> Mpps after unit reduction.
    bits_per_frame = 8 * (size + 20)
    return speed * 100.0 / bits_per_frame
def get_speed(packet_speed,size):
    """Convert a packet rate in Mpps to a bit rate in Gb/s.

    Each frame occupies an extra 20 bytes on the wire:
    12 bytes inter-packet gap + 7 bytes preamble + 1 byte SFD.
    """
    bits_per_frame = 8 * (size + 20)
    # Mpps * bits/frame = Mbit/s; divide by 1000 for Gb/s.
    return packet_speed / 1000.0 * bits_per_frame
def set_background_flows(background_machines, number_of_flows):
    """Configure every background generator with the given flow count.

    The value returned by set_flows is deliberately discarded.
    """
    for generator in background_machines:
        _ = generator.set_flows(number_of_flows)
def set_background_speed(background_machines, speed):
    """Apply the same generator speed to all background machines.

    NOTE(review): speed is presumably in % of line rate, matching the
    foreground generator — confirm against the machine class.
    """
    for generator in background_machines:
        generator.set_generator_speed(speed)
def set_background_size(background_machines, imix):
    """Apply the imix (a list of packet sizes) to all background machines."""
    for generator in background_machines:
        generator.set_udp_packet_size(imix)
def start_background_traffic(background_machines):
    """Start traffic generation on every background machine.

    NOTE(review): the loop had no body in the reviewed listing (syntax
    error); restored the machine.start() call, mirroring
    stop_background_traffic below.
    """
    for machine in background_machines:
        machine.start()
def stop_background_traffic(background_machines):
    """Stop traffic generation on every background machine.

    NOTE(review): the loop had no body in the reviewed listing (syntax
    error); restored the machine.stop() call, mirroring
    start_background_traffic above.
    """
    for machine in background_machines:
        machine.stop()
def parse_data_format_dict(data_format, variables):
    """Recursively substitute variable names in data_format, in place.

    Every leaf value of data_format that matches a key of variables is
    replaced by the corresponding value; nested dicts are processed
    recursively. Other leaves are left untouched.

    NOTE(review): the dict/leaf branching was missing in the reviewed
    listing (recursion was unconditional and the substitution
    unreachable); restored the type check.
    """
    for k, v in data_format.items():
        if isinstance(v, dict):
            # Nested section: recurse into it.
            RapidTest.parse_data_format_dict(v, variables)
        else:
            if v in variables.keys():
                data_format[k] = variables[v]
def post_data(self, variables):
    """Substitute variables into the result template and POST the record.

    The record for this test type (keyed by the concrete class name) is
    sent to the concatenation of all parts of var['URL'], either in
    PushGateway line format or as Xtesting JSON, depending on
    var['Format'].

    Returns:
        The variable-substituted record for this test type.
    """
    test_type = type(self).__name__
    # Work on a deep copy: parse_data_format_dict mutates in place.
    var = copy.deepcopy(self.data_format)
    self.parse_data_format_dict(var, variables)
    if var.keys() >= {'URL', test_type, 'Format'}:
        # NOTE(review): URL accumulation was missing in the reviewed
        # listing (URL was used but never initialized); restored the
        # concatenation of all parts of var['URL'].
        URL = ''
        for value in var['URL'].values():
            URL = URL + value
        HEADERS = {'X-Requested-With': 'Python requests', 'Content-type': 'application/rapid'}
        response = None
        if var['Format'] == 'PushGateway':
            # PushGateway expects newline-terminated "<metric> <value>" lines.
            data = "\n".join("{} {}".format(k, v) for k, v in var[test_type].items()) + "\n"
            response = requests.post(url=URL, data=data,headers=HEADERS)
        elif var['Format'] == 'Xtesting':
            data = var[test_type]
            response = requests.post(url=URL, json=data)
        # Guard against an unknown Format leaving response unset.
        if response is not None and (response.status_code >= 300):
            RapidLog.info('Cannot send metrics to {}'.format(URL))
    return (var[test_type])
def report_result(flow_number, size, data, prefix):
    """Render one measurement as a fixed-width, colorized table row.

    Args:
        flow_number: number of flows; a negative value is rendered
            between parentheses (absolute value shown).
        size: packet size in bytes (wire overhead excluded).
        data: dict of measured values; entries that are None print 'NA'.
        prefix: dict of bcolors prefixes per column.
    Returns:
        The formatted row as a single string.

    NOTE(review): the if/else skeleton around the alternative column
    formattings was missing in the reviewed listing; restored it.
    """
    if flow_number < 0:
        flow_number_str = '| ({:>4}) |'.format(abs(flow_number))
    else:
        flow_number_str = '|{:>7} |'.format(flow_number)
    if data['pps_req_tx'] is None:
        pps_req_tx_str = '{0: >14}'.format(' NA |')
    else:
        pps_req_tx_str = '{:>7.3f} Mpps |'.format(data['pps_req_tx'])
    if data['pps_tx'] is None:
        pps_tx_str = '{0: >14}'.format(' NA |')
    else:
        pps_tx_str = '{:>7.3f} Mpps |'.format(data['pps_tx'])
    if data['pps_sut_tx'] is None:
        pps_sut_tx_str = '{0: >14}'.format(' NA |')
    else:
        pps_sut_tx_str = '{:>7.3f} Mpps |'.format(data['pps_sut_tx'])
    if data['pps_rx'] is None:
        pps_rx_str = '{0: >25}'.format('NA |')
    else:
        pps_rx_str = bcolors.OKBLUE + '{:>4.1f} Gb/s |{:7.3f} Mpps {}|'.format(
            RapidTest.get_speed(data['pps_rx'],size),data['pps_rx'],bcolors.ENDC)
    if data['abs_dropped'] is None:
        tot_drop_str = ' | NA | '
    else:
        tot_drop_str = ' | {:>9.0f} | '.format(data['abs_dropped'])
    if data['lat_perc'] is None:
        lat_perc_str = '|{:^10.10}|'.format('NA')
    elif data['lat_perc_max'] == True:
        # Percentile fell in the last bucket: the real value is >= shown.
        lat_perc_str = '|>{}{:>5.0f} us{} |'.format(prefix['lat_perc'],
            float(data['lat_perc']), bcolors.ENDC)
    else:
        lat_perc_str = '| {}{:>5.0f} us{} |'.format(prefix['lat_perc'],
            float(data['lat_perc']), bcolors.ENDC)
    if data['actual_duration'] is None:
        elapsed_time_str = ' NA |'
    else:
        elapsed_time_str = '{:>3.0f} |'.format(data['actual_duration'])
    return(flow_number_str + '{:>5.1f}'.format(data['speed']) + '% ' + prefix['speed']
        + '{:>6.3f}'.format(RapidTest.get_pps(data['speed'],size)) + ' Mpps|' +
        pps_req_tx_str + pps_tx_str + bcolors.ENDC + pps_sut_tx_str +
        pps_rx_str + prefix['lat_avg'] + ' {:>6.0f}'.format(data['lat_avg']) +
        ' us' + lat_perc_str +prefix['lat_max']+'{:>6.0f}'.format(data['lat_max'])
        + ' us | ' + '{:>9.0f}'.format(data['abs_tx']) + ' | {:>9.0f}'.format(data['abs_rx']) +
        ' | '+ prefix['abs_drop_rate']+ '{:>9.0f}'.format(data['abs_tx']-data['abs_rx']) +
        tot_drop_str + prefix['drop_rate'] +
        '{:>5.2f}'.format(100*old_div(float(data['abs_tx']-data['abs_rx']),data['abs_tx'])) + bcolors.ENDC +
        ' |' + elapsed_time_str)
def run_iteration(self, requested_duration, flow_number, size, speed):
    """Run one measurement iteration at the given speed/size/flow count.

    Drives the generator (and optional background generators and SUT),
    samples core and latency statistics until requested_duration is
    covered, and returns a dict with the aggregated results.

    NOTE(review): several lines of this method are missing from the
    reviewed listing (initialisations, some if/else lines, 'break'
    statements, dict openers). The code is kept token-identical to what
    is visible; every gap is flagged with a NOTE(review) comment and
    must be restored from the complete file before this can run.
    """
    BUCKET_SIZE_EXP = self.gen_machine.bucket_size_exp
    LAT_PERCENTILE = self.test['lat_percentile']
    # NOTE(review): initialisations of iteration_data, time_loop_data
    # and sleep_time are not visible in this listing.
    iteration_data['r'] = 0;
    # Repeat the measurement up to maxr times (until drop rate is OK).
    while (iteration_data['r'] < self.test['maxr']):
        self.gen_machine.start_latency_cores()
        time.sleep(sleep_time)
        # Sleep_time is needed to be able to do accurate measurements to check for packet loss. We need to make this time large enough so that we do not take the first measurement while some packets from the previous tests might still be in flight
        t1_rx, t1_non_dp_rx, t1_tx, t1_non_dp_tx, t1_drop, t1_tx_fail, t1_tsc, abs_tsc_hz = self.gen_machine.core_stats()
        t1_dp_rx = t1_rx - t1_non_dp_rx
        t1_dp_tx = t1_tx - t1_non_dp_tx
        # Start at speed 0, then (optionally) ramp up to the target.
        self.gen_machine.set_generator_speed(0)
        self.gen_machine.start_gen_cores()
        self.set_background_speed(self.background_machines, 0)
        self.start_background_traffic(self.background_machines)
        if 'ramp_step' in self.test.keys():
            ramp_speed = self.test['ramp_step']
        # NOTE(review): the else-branch (presumably ramp_speed = speed)
        # is not visible here.
        while ramp_speed < speed:
            self.gen_machine.set_generator_speed(ramp_speed)
            self.set_background_speed(self.background_machines, ramp_speed)
            # NOTE(review): a sleep between ramp steps is not visible.
            ramp_speed = ramp_speed + self.test['ramp_step']
        self.gen_machine.set_generator_speed(speed)
        self.set_background_speed(self.background_machines, speed)
        iteration_data['speed'] = speed
        time_loop_data['speed'] = speed
        time.sleep(2) ## Needs to be 2 seconds since this 1 sec is the time that PROX uses to refresh the stats. Note that this can be changed in PROX!! Don't do it.
        # Snapshot background-generator counters at the start of the run.
        start_bg_gen_stats = []
        for bg_gen_machine in self.background_machines:
            bg_rx, bg_non_dp_rx, bg_tx, bg_non_dp_tx, _, _, bg_tsc, _ = bg_gen_machine.core_stats()
            # NOTE(review): the 'bg_gen_stat = {' opener and the
            # "bg_tsc"/"bg_hz" entries are not visible here.
            "bg_dp_rx" : bg_rx - bg_non_dp_rx,
            "bg_dp_tx" : bg_tx - bg_non_dp_tx,
            start_bg_gen_stats.append(dict(bg_gen_stat))
        if self.sut_machine!= None:
            t2_sut_rx, t2_sut_non_dp_rx, t2_sut_tx, t2_sut_non_dp_tx, t2_sut_drop, t2_sut_tx_fail, t2_sut_tsc, sut_tsc_hz = self.sut_machine.core_stats()
        t2_rx, t2_non_dp_rx, t2_tx, t2_non_dp_tx, t2_drop, t2_tx_fail, t2_tsc, tsc_hz = self.gen_machine.core_stats()
        # NOTE(review): 'tx' below is presumably 't2_tx - t1_tx' in the
        # complete file — confirm before use.
        iteration_data['abs_tx'] = tx - (t2_non_dp_tx - t1_non_dp_tx )
        iteration_data['abs_rx'] = t2_rx - t1_rx - (t2_non_dp_rx - t1_non_dp_rx)
        iteration_data['abs_dropped'] = iteration_data['abs_tx'] - iteration_data['abs_rx']
        # NOTE(review): the 'if ... == 0:' guard above this critical()
        # is not visible here.
        RapidLog.critical("TX = 0. Test interrupted since no packet has been sent.")
        if iteration_data['abs_tx'] == 0:
            RapidLog.critical("Only non-dataplane packets (e.g. ARP) sent. Test interrupted since no packet has been sent.")
        # Ask PROX to calibrate the bucket size once we have a PROX function to do this.
        # Measure latency statistics per second
        iteration_data.update(self.gen_machine.lat_stats())
        t2_lat_tsc = iteration_data['lat_tsc']
        # Walk the histogram to find the bucket holding the requested
        # latency percentile.
        # NOTE(review): 'sample_count = 0' is not visible here.
        for sample_percentile, bucket in enumerate(iteration_data['buckets'],start=1):
            sample_count += bucket
            if sample_count > sum(iteration_data['buckets']) * LAT_PERCENTILE:
                # NOTE(review): the 'break' is not visible here.
        iteration_data['lat_perc_max'] = (sample_percentile == len(iteration_data['buckets']))
        # Bucket width in microseconds, derived from the latency TSC rate.
        iteration_data['bucket_size'] = float(2 ** BUCKET_SIZE_EXP) / (old_div(float(iteration_data['lat_hz']),float(10**6)))
        time_loop_data['bucket_size'] = iteration_data['bucket_size']
        iteration_data['lat_perc'] = sample_percentile * iteration_data['bucket_size']
        if self.test['test'] == 'fixed_rate':
            # Fixed-rate test: per-second numbers are produced in the
            # time loop below; mark them unknown for this first line.
            iteration_data['pps_req_tx'] = None
            iteration_data['pps_tx'] = None
            iteration_data['pps_sut_tx'] = None
            iteration_data['pps_rx'] = None
            iteration_data['lat_perc'] = None
            iteration_data['actual_duration'] = None
            iteration_prefix = {'speed' : '',
                'abs_drop_rate' : '',
            # NOTE(review): the remaining prefix entries and closing
            # brace are not visible here.
            RapidLog.info(self.report_result(flow_number, size,
                iteration_data, iteration_prefix ))
        # Reset accumulators for the measurement loop.
        tot_rx = tot_non_dp_rx = tot_tx = tot_non_dp_tx = tot_drop = 0
        iteration_data['lat_avg'] = iteration_data['lat_used'] = 0
        tot_lat_measurement_duration = float(0)
        iteration_data['actual_duration'] = float(0)
        tot_sut_core_measurement_duration = float(0)
        tot_sut_rx = tot_sut_non_dp_rx = tot_sut_tx = tot_sut_non_dp_tx = tot_sut_drop = tot_sut_tx_fail = tot_sut_tsc = 0
        lat_avail = core_avail = sut_avail = False
        # Sample until both core stats and latency stats cover the
        # requested duration.
        while (iteration_data['actual_duration'] - float(requested_duration) <= 0.1) or (tot_lat_measurement_duration - float(requested_duration) <= 0.1):
            time_loop_data.update(self.gen_machine.lat_stats())
            # Get statistics after some execution time
            if time_loop_data['lat_tsc'] != t2_lat_tsc:
                single_lat_measurement_duration = (time_loop_data['lat_tsc'] - t2_lat_tsc) * 1.0 / time_loop_data['lat_hz'] # time difference between the 2 measurements, expressed in seconds.
                # A second has passed in between two lat_stats requests. Hence we need to process the results
                tot_lat_measurement_duration = tot_lat_measurement_duration + single_lat_measurement_duration
                if iteration_data['lat_min'] > time_loop_data['lat_min']:
                    iteration_data['lat_min'] = time_loop_data['lat_min']
                if iteration_data['lat_max'] < time_loop_data['lat_max']:
                    iteration_data['lat_max'] = time_loop_data['lat_max']
                iteration_data['lat_avg'] = iteration_data['lat_avg'] + time_loop_data['lat_avg'] * single_lat_measurement_duration # Sometimes, There is more than 1 second between 2 lat_stats. Hence we will take the latest measurement
                iteration_data['lat_used'] = iteration_data['lat_used'] + time_loop_data['lat_used'] * single_lat_measurement_duration # and give it more weight.
                # Per-second percentile over this sample's histogram.
                # NOTE(review): 'sample_count = 0' is not visible here.
                for sample_percentile, bucket in enumerate(time_loop_data['buckets'],start=1):
                    sample_count += bucket
                    if sample_count > sum(time_loop_data['buckets']) * LAT_PERCENTILE:
                        # NOTE(review): the 'break' is not visible here.
                time_loop_data['lat_perc_max'] = (sample_percentile == len(time_loop_data['buckets']))
                time_loop_data['lat_perc'] = sample_percentile * iteration_data['bucket_size']
                # Merge the per-second buckets into the iteration totals.
                iteration_data['buckets'] = [iteration_data['buckets'][i] + time_loop_data['buckets'][i] for i in range(len(iteration_data['buckets']))]
                t2_lat_tsc = time_loop_data['lat_tsc']
                # NOTE(review): 'lat_avail = True' is not visible here.
            t3_rx, t3_non_dp_rx, t3_tx, t3_non_dp_tx, t3_drop, t3_tx_fail, t3_tsc, tsc_hz = self.gen_machine.core_stats()
            # NOTE(review): the 'if t3_tsc != t2_tsc:' guard is not
            # visible here.
            time_loop_data['actual_duration'] = (t3_tsc - t2_tsc) * 1.0 / tsc_hz # time difference between the 2 measurements, expressed in seconds.
            iteration_data['actual_duration'] = iteration_data['actual_duration'] + time_loop_data['actual_duration']
            delta_rx = t3_rx - t2_rx
            # NOTE(review): 'tot_rx += delta_rx' is not visible here.
            delta_non_dp_rx = t3_non_dp_rx - t2_non_dp_rx
            tot_non_dp_rx += delta_non_dp_rx
            delta_tx = t3_tx - t2_tx
            # NOTE(review): 'tot_tx += delta_tx' is not visible here.
            delta_non_dp_tx = t3_non_dp_tx - t2_non_dp_tx
            tot_non_dp_tx += delta_non_dp_tx
            delta_dp_tx = delta_tx -delta_non_dp_tx
            delta_dp_rx = delta_rx -delta_non_dp_rx
            time_loop_data['abs_dropped'] = delta_dp_tx - delta_dp_rx
            iteration_data['abs_dropped'] += time_loop_data['abs_dropped']
            delta_drop = t3_drop - t2_drop
            tot_drop += delta_drop
            # Slide the t2 snapshot forward for the next sample.
            t2_rx, t2_non_dp_rx, t2_tx, t2_non_dp_tx, t2_drop, t2_tx_fail, t2_tsc = t3_rx, t3_non_dp_rx, t3_tx, t3_non_dp_tx, t3_drop, t3_tx_fail, t3_tsc
            if self.sut_machine!=None:
                t3_sut_rx, t3_sut_non_dp_rx, t3_sut_tx, t3_sut_non_dp_tx, t3_sut_drop, t3_sut_tx_fail, t3_sut_tsc, sut_tsc_hz = self.sut_machine.core_stats()
                if t3_sut_tsc != t2_sut_tsc:
                    single_sut_core_measurement_duration = (t3_sut_tsc - t2_sut_tsc) * 1.0 / sut_tsc_hz # time difference between the 2 measurements, expressed in seconds.
                    tot_sut_core_measurement_duration = tot_sut_core_measurement_duration + single_sut_core_measurement_duration
                    tot_sut_rx += t3_sut_rx - t2_sut_rx
                    tot_sut_non_dp_rx += t3_sut_non_dp_rx - t2_sut_non_dp_rx
                    delta_sut_tx = t3_sut_tx - t2_sut_tx
                    tot_sut_tx += delta_sut_tx
                    delta_sut_non_dp_tx = t3_sut_non_dp_tx - t2_sut_non_dp_tx
                    tot_sut_non_dp_tx += delta_sut_non_dp_tx
                    t2_sut_rx, t2_sut_non_dp_rx, t2_sut_tx, t2_sut_non_dp_tx, t2_sut_drop, t2_sut_tx_fail, t2_sut_tsc = t3_sut_rx, t3_sut_non_dp_rx, t3_sut_tx, t3_sut_non_dp_tx, t3_sut_drop, t3_sut_tx_fail, t3_sut_tsc
                    # NOTE(review): 'sut_avail = True' is not visible.
            if self.test['test'] == 'fixed_rate':
                # Fixed-rate test: report and post a line per second.
                if lat_avail == core_avail == True:
                    lat_avail = core_avail = False
                    time_loop_data['pps_req_tx'] = (delta_tx + delta_drop - delta_rx)/time_loop_data['actual_duration']/1000000
                    time_loop_data['pps_tx'] = delta_tx/time_loop_data['actual_duration']/1000000
                    if self.sut_machine != None and sut_avail:
                        time_loop_data['pps_sut_tx'] = delta_sut_tx/single_sut_core_measurement_duration/1000000
                    # NOTE(review): 'sut_avail = False' and the 'else:'
                    # introducing the next line are not visible here.
                        time_loop_data['pps_sut_tx'] = None
                    time_loop_data['pps_rx'] = delta_rx/time_loop_data['actual_duration']/1000000
                    time_loop_data['abs_tx'] = delta_dp_tx
                    time_loop_data['abs_rx'] = delta_dp_rx
                    time_loop_prefix = {'speed' : '',
                        'abs_drop_rate' : '',
                    # NOTE(review): the remaining prefix entries and
                    # closing brace are not visible here.
                    RapidLog.info(self.report_result(flow_number, size, time_loop_data,
                    # NOTE(review): the closing of this call is not
                    # visible here.
                    time_loop_data['test'] = self.test['testname']
                    time_loop_data['environment_file'] = self.test['environment_file']
                    time_loop_data['Flows'] = flow_number
                    time_loop_data['Size'] = size
                    time_loop_data['RequestedSpeed'] = RapidTest.get_pps(speed, size)
                    _ = self.post_data(time_loop_data)
        # Snapshot background-generator counters at the end of the run.
        end_bg_gen_stats = []
        for bg_gen_machine in self.background_machines:
            bg_rx, bg_non_dp_rx, bg_tx, bg_non_dp_tx, _, _, bg_tsc, bg_hz = bg_gen_machine.core_stats()
            bg_gen_stat = {"bg_dp_rx" : bg_rx - bg_non_dp_rx,
                "bg_dp_tx" : bg_tx - bg_non_dp_tx,
            # NOTE(review): the "bg_tsc"/"bg_hz" entries and closing
            # brace are not visible here.
            end_bg_gen_stats.append(dict(bg_gen_stat))
        self.stop_background_traffic(self.background_machines)
        # Average background dataplane RX rate in Mpps over the run.
        # NOTE(review): 'i = 0' and 'bg_rates = []' are not visible.
        while i < len(end_bg_gen_stats):
            bg_rates.append(0.000001*(end_bg_gen_stats[i]['bg_dp_rx'] -
                start_bg_gen_stats[i]['bg_dp_rx']) / ((end_bg_gen_stats[i]['bg_tsc'] -
                start_bg_gen_stats[i]['bg_tsc']) * 1.0 / end_bg_gen_stats[i]['bg_hz']))
            # NOTE(review): 'i += 1' is not visible here.
        # NOTE(review): the 'if len(bg_rates):' guard and matching
        # 'else:' around the next three lines are not visible here.
        iteration_data['avg_bg_rate'] = sum(bg_rates) / len(bg_rates)
        RapidLog.debug('Average Background traffic rate: {:>7.3f} Mpps'.format(iteration_data['avg_bg_rate']))
        iteration_data['avg_bg_rate'] = None
        self.gen_machine.stop_gen_cores()
        self.gen_machine.stop_latency_cores()
        iteration_data['r'] += 1
        # Normalize the duration-weighted latency sums to averages.
        iteration_data['lat_avg'] = old_div(iteration_data['lat_avg'], float(tot_lat_measurement_duration))
        iteration_data['lat_used'] = old_div(iteration_data['lat_used'], float(tot_lat_measurement_duration))
        # Wait until the generator publishes fresh counters (t4 != t2).
        # NOTE(review): the initial t4 snapshot is not visible here.
        while t4_tsc == t2_tsc:
            t4_rx, t4_non_dp_rx, t4_tx, t4_non_dp_tx, t4_drop, t4_tx_fail, t4_tsc, abs_tsc_hz = self.gen_machine.core_stats()
        if self.test['test'] == 'fixed_rate':
            # Wait for one more latency-stats refresh, then compute the
            # final percentile and totals for the fixed-rate report.
            iteration_data['lat_tsc'] = t2_lat_tsc
            while iteration_data['lat_tsc'] == t2_lat_tsc:
                iteration_data.update(self.gen_machine.lat_stats())
            # NOTE(review): 'sample_count = 0' is not visible here.
            for percentile, bucket in enumerate(iteration_data['buckets'],start=1):
                sample_count += bucket
                if sample_count > sum(iteration_data['buckets']) * LAT_PERCENTILE:
                    # NOTE(review): the 'break' is not visible here.
            iteration_data['lat_perc_max'] = (percentile == len(iteration_data['buckets']))
            iteration_data['lat_perc'] = percentile * iteration_data['bucket_size']
            delta_rx = t4_rx - t2_rx
            delta_non_dp_rx = t4_non_dp_rx - t2_non_dp_rx
            delta_tx = t4_tx - t2_tx
            delta_non_dp_tx = t4_non_dp_tx - t2_non_dp_tx
            delta_dp_tx = delta_tx -delta_non_dp_tx
            delta_dp_rx = delta_rx -delta_non_dp_rx
            iteration_data['abs_tx'] = delta_dp_tx
            iteration_data['abs_rx'] = delta_dp_rx
            iteration_data['abs_dropped'] += delta_dp_tx - delta_dp_rx
            iteration_data['pps_req_tx'] = None
            iteration_data['pps_tx'] = None
            iteration_data['pps_sut_tx'] = None
            iteration_data['drop_rate'] = 100.0*(iteration_data['abs_tx']-iteration_data['abs_rx'])/iteration_data['abs_tx']
            iteration_data['actual_duration'] = None
            break ## Not really needed since the while loop will stop when evaluating the value of r
        # NOTE(review): the 'else:' introducing this branch and its
        # 'sample_count = 0' are not visible here.
        for percentile, bucket in enumerate(iteration_data['buckets'],start=1):
            sample_count += bucket
            if sample_count > sum(iteration_data['buckets']) * LAT_PERCENTILE:
                # NOTE(review): the 'break' is not visible here.
        iteration_data['lat_perc_max'] = (percentile == len(iteration_data['buckets']))
        iteration_data['lat_perc'] = percentile * iteration_data['bucket_size']
        iteration_data['pps_req_tx'] = (tot_tx + tot_drop - tot_rx)/iteration_data['actual_duration']/1000000.0 # tot_drop is all packets dropped by all tasks. This includes packets dropped at the generator task + packets dropped by the nop task. In steady state, this equals to the number of packets received by this VM
        iteration_data['pps_tx'] = tot_tx/iteration_data['actual_duration']/1000000.0 # tot_tx is all generated packets actually accepted by the interface
        iteration_data['pps_rx'] = tot_rx/iteration_data['actual_duration']/1000000.0 # tot_rx is all packets received by the nop task = all packets received in the gen VM
        if self.sut_machine != None and sut_avail:
            iteration_data['pps_sut_tx'] = tot_sut_tx / tot_sut_core_measurement_duration / 1000000.0
        # NOTE(review): the 'else:' introducing the next line is not
        # visible here.
        iteration_data['pps_sut_tx'] = None
        iteration_data['abs_tx'] = (t4_tx - t1_tx) - (t4_non_dp_tx - t1_non_dp_tx)
        iteration_data['abs_rx'] = (t4_rx - t1_rx) - (t4_non_dp_rx - t1_non_dp_rx)
        iteration_data['abs_dropped'] = iteration_data['abs_tx'] - iteration_data['abs_rx']
        iteration_data['drop_rate'] = 100.0*iteration_data['abs_dropped']/iteration_data['abs_tx']
        # Stop iterating once the drop rate is acceptable or the
        # absolute drop count exceeds maxz.
        if ((iteration_data['drop_rate'] < self.test['drop_rate_threshold']) or (iteration_data['abs_dropped'] == self.test['drop_rate_threshold'] ==0) or (iteration_data['abs_dropped'] > self.test['maxz'])):
            # NOTE(review): the 'break' is not visible here.
    self.gen_machine.stop_latency_cores()
    iteration_data['abs_tx_fail'] = t4_tx_fail - t1_tx_fail
    return (iteration_data)