4 ## Copyright (c) 2020 Intel Corporation
6 ## Licensed under the Apache License, Version 2.0 (the "License");
7 ## you may not use this file except in compliance with the License.
8 ## You may obtain a copy of the License at
11 ## http://www.apache.org/licenses/LICENSE-2.0
13 ## Unless required by applicable law or agreed to in writing, software
14 ## distributed under the License is distributed on an "AS IS" BASIS,
15 ## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 ## See the License for the specific language governing permissions and
17 ## limitations under the License.
import copy
import time
from datetime import datetime as dt
from math import inf

import requests
import yaml
from past.utils import old_div

from rapid_log import RapidLog
from rapid_log import bcolors
class RapidTest(object):
    """
    Class to manage the testing
    """
def __init__(self, test_param, runtime, testname, environment_file ):
    """Initialise a test run from its parameter dict.

    Stores the run context (runtime, test name, environment file) inside
    the test parameter dict, fills in defaults for the optional
    'maxr'/'maxz' bounds and loads the result-reporting layout from
    format.yaml.

    :param test_param: dict describing the test (parsed from the test file)
    :param runtime: requested duration for each measurement
    :param testname: name of the test, published with the results
    :param environment_file: environment file this test was started with
    """
    self.test = test_param
    self.test['runtime'] = runtime
    self.test['testname'] = testname
    self.test['environment_file'] = environment_file
    if 'maxr' not in self.test:
        # Fix: this branch had an empty body, leaving 'maxr' undefined for
        # tests that do not specify it; default to a single run.
        self.test['maxr'] = 1
    if 'maxz' not in self.test:
        # Default: no upper bound on the absolute number of dropped packets.
        self.test['maxz'] = inf
    # format.yaml describes how results are structured when posted to the
    # configured reporting back-ends (see post_data()).
    with open('format.yaml') as f:
        self.data_format = yaml.load(f, Loader=yaml.FullLoader)
def get_percentageof10Gbps(pps_speed,size):
    """Convert a packet rate in pps to a percentage of 10 Gb/s line rate.

    Each packet occupies (size + 20) bytes on the wire: 12 bytes
    inter-packet gap, 7 bytes pre-amble and 1 byte start-of-frame
    delimiter on top of the packet itself.
    """
    wire_overhead = 20
    # pps/1e6 gives Mpps; 0.08 * wire bytes converts Mpps to % of 10 Gb/s.
    return (pps_speed / 1000000.0 * 0.08 * (size + wire_overhead))
def get_pps(speed,size):
    """Convert a speed given as % of 10 Gb/s into a packet rate in Mpps.

    Each packet occupies (size + 20) bytes on the wire: 12 bytes
    inter-packet gap, 7 bytes pre-amble and 1 byte SFD.
    """
    wire_bytes = size + 20
    return speed * 100.0 / (8 * wire_bytes)
def get_speed(packet_speed,size):
    """Convert a packet rate in Mpps into a throughput in Gb/s.

    Each packet occupies (size + 20) bytes on the wire: 12 bytes
    inter-packet gap, 7 bytes pre-amble and 1 byte SFD.
    """
    bits_on_wire = 8 * (size + 20)
    return packet_speed / 1000.0 * bits_on_wire
def set_background_flows(background_machines, number_of_flows):
    """Program every background generator with the requested flow count.

    :param background_machines: iterable of generator machine objects
    :param number_of_flows: number of flows each machine must generate
    """
    for generator in background_machines:
        # The machine reports back the flow count it applied; not needed here.
        generator.set_flows(number_of_flows)
def set_background_speed(background_machines, speed):
    """Apply the given generator speed to every background machine.

    :param background_machines: iterable of generator machine objects
    :param speed: generator speed to apply (same unit the machines expect)
    """
    for generator in background_machines:
        generator.set_generator_speed(speed)
def set_background_size(background_machines, imix):
    """Set the UDP packet size mix on every background machine.

    :param background_machines: iterable of generator machine objects
    :param imix: list of packet sizes (an imix) to generate
    """
    for generator in background_machines:
        generator.set_udp_packet_size(imix)
def start_background_traffic(background_machines):
    """Start traffic generation on every background machine.

    Fix: the loop body was missing in this copy; each machine's
    generator is started here (matching the stop_background_traffic
    counterpart below).

    :param background_machines: iterable of generator machine objects
    """
    for machine in background_machines:
        machine.start()
def stop_background_traffic(background_machines):
    """Stop traffic generation on every background machine.

    Fix: the loop body was missing in this copy; each machine's
    generator is stopped here (matching start_background_traffic).

    :param background_machines: iterable of generator machine objects
    """
    for machine in background_machines:
        machine.stop()
def parse_data_format_dict(data_format, variables):
    """Recursively substitute variable names in data_format, in place.

    Walks the (possibly nested) dict loaded from format.yaml: every leaf
    value that matches a key of 'variables' is replaced by the
    corresponding measured value; other leaves are left untouched.

    Fix: the dict-type guard around the recursive call was missing, so
    the code recursed on every value (including scalar leaves) and the
    substitution branch was unreachable as intended.

    :param data_format: dict to rewrite in place
    :param variables: mapping from variable name to measured value
    """
    for k, v in data_format.items():
        if isinstance(v, dict):
            # Descend into nested mappings.
            RapidTest.parse_data_format_dict(v, variables)
        elif v in variables:
            data_format[k] = variables[v]
def post_data(self, test, variables):
    """Publish one result record to the back-end configured in format.yaml.

    Deep-copies the loaded data format, substitutes the measured
    variables into it and, when a URL/Format section is configured
    together with a section for this test, POSTs the record.

    :param test: key of the record to publish (e.g. 'rapid_flowsizetest')
    :param variables: dict of measured values substituted into the format
    :return: the substituted data format dict
    """
    var = copy.deepcopy(self.data_format)
    self.parse_data_format_dict(var, variables)
    # Only post when the format file configures a destination, a format
    # and a section for this particular test.
    if var.keys() >= {'URL', test, 'Format'}:
        # Fix: URL was referenced below but never built in this copy;
        # concatenate all URL fragments (scheme, host, port, path, ...)
        # in their configured order.
        URL = ''
        for value in var['URL'].values():
            URL = URL + value
        HEADERS = {'X-Requested-With': 'Python requests', 'Content-type': 'application/rapid'}
        # Fix: guard against an unknown 'Format' leaving response unbound.
        response = None
        if var['Format'] == 'PushGateway':
            # PushGateway expects newline-separated '<metric> <value>' lines.
            data = "\n".join("{} {}".format(k, v) for k, v in var[test].items()) + "\n"
            response = requests.post(url=URL, data=data,headers=HEADERS)
        elif var['Format'] == 'Xtesting':
            # NOTE(review): the payload construction for the Xtesting
            # back-end was missing in this copy; posting the substituted
            # record itself — confirm against the upstream source.
            data = var[test]
            response = requests.post(url=URL, json=data)
        if response is not None and response.status_code >= 300:
            RapidLog.info('Cannot send metrics to {}'.format(URL))
    return (var)
def report_result(flow_number, size, data, prefix):
    """Render one fixed-width, '|'-separated result line for the report.

    Builds the columns (flow count, requested/measured rates, latency
    figures, packet counters, drop rate, elapsed time). Any measurement
    that is unavailable (None in 'data') is rendered as 'NA'. 'prefix'
    holds per-column colour/marker prefix strings chosen by the caller.

    Fix: each pair of alternative column renderings was present without
    its if/else keywords in this copy, so the 'NA' strings were
    unconditionally overwritten; the if/else structure is restored here.

    :param flow_number: number of flows; a negative value is rendered
        between parentheses (assumed marker for a provisional entry —
        confirm against callers)
    :param size: packet size in bytes (used to derive Gb/s and Mpps)
    :param data: dict of measurements; None values are shown as NA
    :param prefix: dict of colour/marker prefixes per column
    :return: the assembled report line (str)
    """
    if flow_number < 0:
        flow_number_str = '| ({:>4}) |'.format(abs(flow_number))
    else:
        flow_number_str = '|{:>7} |'.format(flow_number)
    if data['pps_req_tx'] is None:
        pps_req_tx_str = '{0: >14}'.format(' NA |')
    else:
        pps_req_tx_str = '{:>7.3f} Mpps |'.format(data['pps_req_tx'])
    if data['pps_tx'] is None:
        pps_tx_str = '{0: >14}'.format(' NA |')
    else:
        pps_tx_str = '{:>7.3f} Mpps |'.format(data['pps_tx'])
    if data['pps_sut_tx'] is None:
        pps_sut_tx_str = '{0: >14}'.format(' NA |')
    else:
        pps_sut_tx_str = '{:>7.3f} Mpps |'.format(data['pps_sut_tx'])
    if data['pps_rx'] is None:
        pps_rx_str = '{0: >25}'.format('NA |')
    else:
        pps_rx_str = bcolors.OKBLUE + '{:>4.1f} Gb/s |{:7.3f} Mpps {}|'.format(
            RapidTest.get_speed(data['pps_rx'],size),data['pps_rx'],bcolors.ENDC)
    if data['abs_dropped'] is None:
        tot_drop_str = ' | NA | '
    else:
        tot_drop_str = ' | {:>9.0f} | '.format(data['abs_dropped'])
    if data['lat_perc'] is None:
        lat_perc_str = '|{:^10.10}|'.format('NA')
    elif data['lat_perc_max'] == True:
        # Percentile fell in the last histogram bucket: the reported
        # value is only a lower bound, hence the '>' marker.
        lat_perc_str = '|>{}{:>5.0f} us{} |'.format(prefix['lat_perc'],
            float(data['lat_perc']), bcolors.ENDC)
    else:
        lat_perc_str = '| {}{:>5.0f} us{} |'.format(prefix['lat_perc'],
            float(data['lat_perc']), bcolors.ENDC)
    if data['actual_duration'] is None:
        elapsed_time_str = ' NA |'
    else:
        elapsed_time_str = '{:>3.0f} |'.format(data['actual_duration'])
    return(flow_number_str + '{:>5.1f}'.format(data['speed']) + '% ' + prefix['speed']
        + '{:>6.3f}'.format(RapidTest.get_pps(data['speed'],size)) + ' Mpps|' +
        pps_req_tx_str + pps_tx_str + bcolors.ENDC + pps_sut_tx_str +
        pps_rx_str + prefix['lat_avg'] + ' {:>6.0f}'.format(data['lat_avg']) +
        ' us' + lat_perc_str +prefix['lat_max']+'{:>6.0f}'.format(data['lat_max'])
        + ' us | ' + '{:>9.0f}'.format(data['abs_tx']) + ' | {:>9.0f}'.format(data['abs_rx']) +
        ' | '+ prefix['abs_drop_rate']+ '{:>9.0f}'.format(data['abs_tx']-data['abs_rx']) +
        tot_drop_str + prefix['drop_rate'] +
        '{:>5.2f}'.format(100*old_div(float(data['abs_tx']-data['abs_rx']),data['abs_tx'])) + bcolors.ENDC +
        ' |' + elapsed_time_str)
def run_iteration(self, requested_duration, flow_number, size, speed):
    """Run one traffic iteration at the given speed and collect results.

    Starts latency cores and the generator (optionally ramping the speed
    and starting background traffic), then keeps sampling latency and
    core statistics roughly every second until requested_duration has
    elapsed, and finally derives rates, latency percentiles and drop
    counters into iteration_data.

    NOTE(review): this copy of the method appears to be missing a number
    of statements compared to a complete implementation (initialisation
    of iteration_data / time_loop_data / sleep_time / sample_count,
    several else-branches, loop counters, 'break' statements and parts
    of dict literals). Each suspected gap is flagged with a NOTE(review)
    comment below — confirm against the upstream source before relying
    on this code; do not treat the indentation chosen here as
    authoritative where a gap is flagged.

    :param requested_duration: minimum measurement duration, seconds
    :param flow_number: number of flows configured for this iteration
    :param size: UDP packet size in bytes (excl. 20B wire overhead)
    :param speed: requested generator speed (percentage of line rate)
    :return: iteration_data dict with rates, latency and drop counters
    """
    BUCKET_SIZE_EXP = self.gen_machine.bucket_size_exp
    LAT_PERCENTILE = self.test['lat_percentile']
    # NOTE(review): iteration_data (and time_loop_data, sleep_time) are
    # used below but their initialisation is not visible in this copy.
    iteration_data['r'] = 0;
    # Repeat the measurement up to 'maxr' times.
    while (iteration_data['r'] < self.test['maxr']):
        self.gen_machine.start_latency_cores()
        time.sleep(sleep_time)
        # Sleep_time is needed to be able to do accurate measurements to check for packet loss. We need to make this time large enough so that we do not take the first measurement while some packets from the previous tests migth still be in flight
        t1_rx, t1_non_dp_rx, t1_tx, t1_non_dp_tx, t1_drop, t1_tx_fail, t1_tsc, abs_tsc_hz = self.gen_machine.core_stats()
        # Dataplane-only counters: total minus non-dataplane (e.g. ARP).
        t1_dp_rx = t1_rx - t1_non_dp_rx
        t1_dp_tx = t1_tx - t1_non_dp_tx
        self.gen_machine.set_generator_speed(0)
        self.gen_machine.start_gen_cores()
        if self.background_machines:
            self.set_background_speed(self.background_machines, 0)
            self.start_background_traffic(self.background_machines)
        if 'ramp_step' in self.test.keys():
            ramp_speed = self.test['ramp_step']
        # NOTE(review): an else-branch presumably set ramp_speed = speed
        # here when no ramp_step is configured; ramp_speed may be unbound.
        while ramp_speed < speed:
            self.gen_machine.set_generator_speed(ramp_speed)
            if self.background_machines:
                self.set_background_speed(self.background_machines, ramp_speed)
            # NOTE(review): a sleep between ramp steps appears to be missing.
            ramp_speed = ramp_speed + self.test['ramp_step']
        self.gen_machine.set_generator_speed(speed)
        if self.background_machines:
            self.set_background_speed(self.background_machines, speed)
        iteration_data['speed'] = time_loop_data['speed'] = speed
        time.sleep(2) ## Needs to be 2 seconds since this 1 sec is the time that PROX uses to refresh the stats. Note that this can be changed in PROX!! Don't do it.
        # Snapshot background generator counters before the measurement.
        start_bg_gen_stats = []
        for bg_gen_machine in self.background_machines:
            bg_rx, bg_non_dp_rx, bg_tx, bg_non_dp_tx, _, _, bg_tsc, _ = bg_gen_machine.core_stats()
            # NOTE(review): the opening 'bg_gen_stat = {' of this dict
            # literal, its 'bg_tsc'/'bg_hz' entries and the closing brace
            # are missing from this copy — the lines below are a fragment.
            "bg_dp_rx" : bg_rx - bg_non_dp_rx,
            "bg_dp_tx" : bg_tx - bg_non_dp_tx,
            start_bg_gen_stats.append(dict(bg_gen_stat))
        if self.sut_machine!= None:
            t2_sut_rx, t2_sut_non_dp_rx, t2_sut_tx, t2_sut_non_dp_tx, t2_sut_drop, t2_sut_tx_fail, t2_sut_tsc, sut_tsc_hz = self.sut_machine.core_stats()
        t2_rx, t2_non_dp_rx, t2_tx, t2_non_dp_tx, t2_drop, t2_tx_fail, t2_tsc, tsc_hz = self.gen_machine.core_stats()
        # NOTE(review): an assignment such as 'tx = t2_tx - t1_tx' appears
        # to be missing here — 'tx' is otherwise unbound below.
        iteration_data['abs_tx'] = tx - (t2_non_dp_tx - t1_non_dp_tx )
        iteration_data['abs_rx'] = t2_rx - t1_rx - (t2_non_dp_rx - t1_non_dp_rx)
        iteration_data['abs_dropped'] = iteration_data['abs_tx'] - iteration_data['abs_rx']
        # NOTE(review): a guard such as 'if tx == 0:' appears to be missing
        # here — as written, this critical log fires unconditionally.
        RapidLog.critical("TX = 0. Test interrupted since no packet has been sent.")
        if iteration_data['abs_tx'] == 0:
            RapidLog.critical("Only non-dataplane packets (e.g. ARP) sent. Test interrupted since no packet has been sent.")
        # Ask PROX to calibrate the bucket size once we have a PROX function to do this.
        # Measure latency statistics per second
        iteration_data.update(self.gen_machine.lat_stats())
        t2_lat_tsc = iteration_data['lat_tsc']
        # NOTE(review): 'sample_count = 0' initialisation appears missing.
        # Walk the latency histogram until LAT_PERCENTILE of all samples
        # has been accumulated; that bucket index gives the percentile.
        for sample_percentile, bucket in enumerate(iteration_data['buckets'],start=1):
            sample_count += bucket
            if sample_count > sum(iteration_data['buckets']) * LAT_PERCENTILE:
                # NOTE(review): a 'break' appears to be missing here; the
                # next line is nested under the if only to keep the copy
                # syntactically coherent — upstream it sits after the loop.
                iteration_data['lat_perc_max'] = (sample_percentile == len(iteration_data['buckets']))
        # Bucket width in microseconds, derived from the TSC frequency.
        iteration_data['bucket_size'] = float(2 ** BUCKET_SIZE_EXP) / (old_div(float(iteration_data['lat_hz']),float(10**6)))
        time_loop_data['bucket_size'] = iteration_data['bucket_size']
        iteration_data['lat_perc'] = sample_percentile * iteration_data['bucket_size']
        if self.test['test'] == 'fixed_rate':
            # Rates are not meaningful for the first fixed-rate report line.
            iteration_data['pps_req_tx'] = None
            iteration_data['pps_tx'] = None
            iteration_data['pps_sut_tx'] = None
            iteration_data['pps_rx'] = None
            iteration_data['lat_perc'] = None
            iteration_data['actual_duration'] = None
            # NOTE(review): the remaining entries and closing brace of this
            # dict literal are missing from this copy.
            iteration_prefix = {'speed' : '',
                'abs_drop_rate' : '',
            RapidLog.info(self.report_result(flow_number, size,
                iteration_data, iteration_prefix ))
        # Reset accumulators for the per-second measurement loop.
        tot_rx = tot_non_dp_rx = tot_tx = tot_non_dp_tx = tot_drop = 0
        iteration_data['lat_avg'] = iteration_data['lat_used'] = 0
        tot_lat_measurement_duration = float(0)
        iteration_data['actual_duration'] = float(0)
        tot_sut_core_measurement_duration = float(0)
        tot_sut_rx = tot_sut_non_dp_rx = tot_sut_tx = tot_sut_non_dp_tx = tot_sut_drop = tot_sut_tx_fail = tot_sut_tsc = 0
        lat_avail = core_avail = sut_avail = False
        # Keep sampling until both core and latency stats cover the
        # requested duration.
        while (iteration_data['actual_duration'] - float(requested_duration) <= 0.1) or (tot_lat_measurement_duration - float(requested_duration) <= 0.1):
            time_loop_data.update(self.gen_machine.lat_stats())
            # Get statistics after some execution time
            if time_loop_data['lat_tsc'] != t2_lat_tsc:
                single_lat_measurement_duration = (time_loop_data['lat_tsc'] - t2_lat_tsc) * 1.0 / time_loop_data['lat_hz'] # time difference between the 2 measurements, expressed in seconds.
                # A second has passed in between to lat_stats requests. Hence we need to process the results
                tot_lat_measurement_duration = tot_lat_measurement_duration + single_lat_measurement_duration
                if iteration_data['lat_min'] > time_loop_data['lat_min']:
                    iteration_data['lat_min'] = time_loop_data['lat_min']
                if iteration_data['lat_max'] < time_loop_data['lat_max']:
                    iteration_data['lat_max'] = time_loop_data['lat_max']
                iteration_data['lat_avg'] = iteration_data['lat_avg'] + time_loop_data['lat_avg'] * single_lat_measurement_duration # Sometimes, There is more than 1 second between 2 lat_stats. Hence we will take the latest measurement
                iteration_data['lat_used'] = iteration_data['lat_used'] + time_loop_data['lat_used'] * single_lat_measurement_duration # and give it more weigth.
                # NOTE(review): 'sample_count = 0' initialisation missing.
                for sample_percentile, bucket in enumerate(time_loop_data['buckets'],start=1):
                    sample_count += bucket
                    if sample_count > sum(time_loop_data['buckets']) * LAT_PERCENTILE:
                        # NOTE(review): a 'break' appears to be missing here
                        # (this line upstream sits after the loop).
                        time_loop_data['lat_perc_max'] = (sample_percentile == len(time_loop_data['buckets']))
                time_loop_data['lat_perc'] = sample_percentile * iteration_data['bucket_size']
                # Accumulate this second's histogram into the iteration total.
                iteration_data['buckets'] = [iteration_data['buckets'][i] + time_loop_data['buckets'][i] for i in range(len(iteration_data['buckets']))]
                t2_lat_tsc = time_loop_data['lat_tsc']
                # NOTE(review): 'lat_avail = True' appears to be missing here.
            t3_rx, t3_non_dp_rx, t3_tx, t3_non_dp_tx, t3_drop, t3_tx_fail, t3_tsc, tsc_hz = self.gen_machine.core_stats()
            # NOTE(review): a guard 'if t3_tsc != t2_tsc:' (and presumably
            # 'core_avail = True') appears to be missing around this section.
            time_loop_data['actual_duration'] = (t3_tsc - t2_tsc) * 1.0 / tsc_hz # time difference between the 2 measurements, expressed in seconds.
            iteration_data['actual_duration'] = iteration_data['actual_duration'] + time_loop_data['actual_duration']
            delta_rx = t3_rx - t2_rx
            # NOTE(review): 'tot_rx += delta_rx' appears to be missing here.
            delta_non_dp_rx = t3_non_dp_rx - t2_non_dp_rx
            tot_non_dp_rx += delta_non_dp_rx
            delta_tx = t3_tx - t2_tx
            # NOTE(review): 'tot_tx += delta_tx' appears to be missing here.
            delta_non_dp_tx = t3_non_dp_tx - t2_non_dp_tx
            tot_non_dp_tx += delta_non_dp_tx
            delta_dp_tx = delta_tx -delta_non_dp_tx
            delta_dp_rx = delta_rx -delta_non_dp_rx
            time_loop_data['abs_dropped'] = delta_dp_tx - delta_dp_rx
            iteration_data['abs_dropped'] += time_loop_data['abs_dropped']
            delta_drop = t3_drop - t2_drop
            tot_drop += delta_drop
            # Roll the t3 snapshot over into t2 for the next pass.
            t2_rx, t2_non_dp_rx, t2_tx, t2_non_dp_tx, t2_drop, t2_tx_fail, t2_tsc = t3_rx, t3_non_dp_rx, t3_tx, t3_non_dp_tx, t3_drop, t3_tx_fail, t3_tsc
            if self.sut_machine!=None:
                t3_sut_rx, t3_sut_non_dp_rx, t3_sut_tx, t3_sut_non_dp_tx, t3_sut_drop, t3_sut_tx_fail, t3_sut_tsc, sut_tsc_hz = self.sut_machine.core_stats()
                if t3_sut_tsc != t2_sut_tsc:
                    single_sut_core_measurement_duration = (t3_sut_tsc - t2_sut_tsc) * 1.0 / tsc_hz # time difference between the 2 measurements, expressed in seconds.
                    tot_sut_core_measurement_duration = tot_sut_core_measurement_duration + single_sut_core_measurement_duration
                    tot_sut_rx += t3_sut_rx - t2_sut_rx
                    tot_sut_non_dp_rx += t3_sut_non_dp_rx - t2_sut_non_dp_rx
                    delta_sut_tx = t3_sut_tx - t2_sut_tx
                    tot_sut_tx += delta_sut_tx
                    delta_sut_non_dp_tx = t3_sut_non_dp_tx - t2_sut_non_dp_tx
                    tot_sut_non_dp_tx += delta_sut_non_dp_tx
                    t2_sut_rx, t2_sut_non_dp_rx, t2_sut_tx, t2_sut_non_dp_tx, t2_sut_drop, t2_sut_tx_fail, t2_sut_tsc = t3_sut_rx, t3_sut_non_dp_rx, t3_sut_tx, t3_sut_non_dp_tx, t3_sut_drop, t3_sut_tx_fail, t3_sut_tsc
                    # NOTE(review): 'sut_avail = True' appears to be missing.
            if self.test['test'] == 'fixed_rate':
                # For fixed-rate tests, report a line every second.
                if lat_avail == core_avail == True:
                    lat_avail = core_avail = False
                    time_loop_data['pps_req_tx'] = (delta_tx + delta_drop - delta_rx)/time_loop_data['actual_duration']/1000000
                    time_loop_data['pps_tx'] = delta_tx/time_loop_data['actual_duration']/1000000
                    if self.sut_machine != None and sut_avail:
                        time_loop_data['pps_sut_tx'] = delta_sut_tx/single_sut_core_measurement_duration/1000000
                    # NOTE(review): 'sut_avail = False' and the 'else:' for
                    # the next line are missing — as written this always
                    # overwrites pps_sut_tx with None.
                    time_loop_data['pps_sut_tx'] = None
                    time_loop_data['pps_rx'] = delta_rx/time_loop_data['actual_duration']/1000000
                    time_loop_data['abs_tx'] = delta_dp_tx
                    time_loop_data['abs_rx'] = delta_dp_rx
                    # NOTE(review): the remaining entries and closing brace
                    # of this dict literal are missing from this copy.
                    time_loop_prefix = {'speed' : '',
                        'abs_drop_rate' : '',
                    # NOTE(review): the continuation line passing
                    # time_loop_prefix and closing this call is missing.
                    RapidLog.info(self.report_result(flow_number, size, time_loop_data,
                    time_loop_data['test'] = self.test['testname']
                    time_loop_data['environment_file'] = self.test['environment_file']
                    time_loop_data['Flows'] = flow_number
                    time_loop_data['Size'] = size
                    time_loop_data['RequestedSpeed'] = RapidTest.get_pps(speed, size)
                    _ = self.post_data('rapid_flowsizetest', time_loop_data)
        # Snapshot background generator counters after the measurement.
        end_bg_gen_stats = []
        for bg_gen_machine in self.background_machines:
            bg_rx, bg_non_dp_rx, bg_tx, bg_non_dp_tx, _, _, bg_tsc, bg_hz = bg_gen_machine.core_stats()
            # NOTE(review): the 'bg_tsc'/'bg_hz' entries and the closing
            # brace of this dict literal are missing from this copy.
            bg_gen_stat = {"bg_dp_rx" : bg_rx - bg_non_dp_rx,
                "bg_dp_tx" : bg_tx - bg_non_dp_tx,
            end_bg_gen_stats.append(dict(bg_gen_stat))
        if self.background_machines:
            self.stop_background_traffic(self.background_machines)
        # NOTE(review): 'i = 0' and 'bg_rates = []' initialisations appear
        # to be missing, and the loop body never increments i as written.
        while i < len(end_bg_gen_stats):
            # Mpps received by each background generator over the iteration.
            bg_rates.append(0.000001*(end_bg_gen_stats[i]['bg_dp_rx'] -
                start_bg_gen_stats[i]['bg_dp_rx']) / ((end_bg_gen_stats[i]['bg_tsc'] -
                start_bg_gen_stats[i]['bg_tsc']) * 1.0 / end_bg_gen_stats[i]['bg_hz']))
        # NOTE(review): a guard such as 'if len(bg_rates):' appears to be
        # missing — as written this divides by zero without background
        # machines, and the None assignment below (upstream an else-branch)
        # unconditionally overwrites the average.
        iteration_data['avg_bg_rate'] = sum(bg_rates) / len(bg_rates)
        RapidLog.debug('Average Background traffic rate: {:>7.3f} Mpps'.format(iteration_data['avg_bg_rate']))
        iteration_data['avg_bg_rate'] = None
        self.gen_machine.stop_gen_cores()
        iteration_data['r'] += 1
        # Convert the duration-weighted latency sums into averages.
        iteration_data['lat_avg'] = old_div(iteration_data['lat_avg'], float(tot_lat_measurement_duration))
        iteration_data['lat_used'] = old_div(iteration_data['lat_used'], float(tot_lat_measurement_duration))
        # NOTE(review): a seed such as 't4_tsc = t2_tsc' appears to be
        # missing before this busy-wait for a fresh stats snapshot.
        while t4_tsc == t2_tsc:
            t4_rx, t4_non_dp_rx, t4_tx, t4_non_dp_tx, t4_drop, t4_tx_fail, t4_tsc, abs_tsc_hz = self.gen_machine.core_stats()
        if self.test['test'] == 'fixed_rate':
            # Wait for one more latency refresh, then compute final stats.
            iteration_data['lat_tsc'] = t2_lat_tsc
            while iteration_data['lat_tsc'] == t2_lat_tsc:
                iteration_data.update(self.gen_machine.lat_stats())
            # NOTE(review): 'sample_count = 0' initialisation missing.
            for percentile, bucket in enumerate(iteration_data['buckets'],start=1):
                sample_count += bucket
                if sample_count > sum(iteration_data['buckets']) * LAT_PERCENTILE:
                    # NOTE(review): a 'break' appears to be missing here
                    # (this line upstream sits after the loop).
                    iteration_data['lat_perc_max'] = (percentile == len(iteration_data['buckets']))
            iteration_data['lat_perc'] = percentile * iteration_data['bucket_size']
            delta_rx = t4_rx - t2_rx
            delta_non_dp_rx = t4_non_dp_rx - t2_non_dp_rx
            delta_tx = t4_tx - t2_tx
            delta_non_dp_tx = t4_non_dp_tx - t2_non_dp_tx
            delta_dp_tx = delta_tx -delta_non_dp_tx
            delta_dp_rx = delta_rx -delta_non_dp_rx
            iteration_data['abs_tx'] = delta_dp_tx
            iteration_data['abs_rx'] = delta_dp_rx
            iteration_data['abs_dropped'] += delta_dp_tx - delta_dp_rx
            iteration_data['pps_req_tx'] = None
            iteration_data['pps_tx'] = None
            iteration_data['pps_sut_tx'] = None
            iteration_data['drop_rate'] = 100.0*(iteration_data['abs_tx']-iteration_data['abs_rx'])/iteration_data['abs_tx']
            iteration_data['actual_duration'] = None
            break ## Not really needed since the while loop will stop when evaluating the value of r
        # NOTE(review): an 'else:' appears to be missing — the code below
        # should presumably only run for non-fixed_rate tests.
        # NOTE(review): 'sample_count = 0' initialisation missing.
        for percentile, bucket in enumerate(iteration_data['buckets'],start=1):
            sample_count += bucket
            if sample_count > sum(iteration_data['buckets']) * LAT_PERCENTILE:
                # NOTE(review): a 'break' appears to be missing here
                # (this line upstream sits after the loop).
                iteration_data['lat_perc_max'] = (percentile == len(iteration_data['buckets']))
        iteration_data['lat_perc'] = percentile * iteration_data['bucket_size']
        iteration_data['pps_req_tx'] = (tot_tx + tot_drop - tot_rx)/iteration_data['actual_duration']/1000000.0 # tot_drop is all packets dropped by all tasks. This includes packets dropped at the generator task + packets dropped by the nop task. In steady state, this equals to the number of packets received by this VM
        iteration_data['pps_tx'] = tot_tx/iteration_data['actual_duration']/1000000.0 # tot_tx is all generated packets actually accepted by the interface
        iteration_data['pps_rx'] = tot_rx/iteration_data['actual_duration']/1000000.0 # tot_rx is all packets received by the nop task = all packets received in the gen VM
        if self.sut_machine != None and sut_avail:
            iteration_data['pps_sut_tx'] = tot_sut_tx / tot_sut_core_measurement_duration / 1000000.0
        # NOTE(review): the 'else:' for the next line is missing — as
        # written this always overwrites pps_sut_tx with None.
        iteration_data['pps_sut_tx'] = None
        iteration_data['abs_tx'] = (t4_tx - t1_tx) - (t4_non_dp_tx - t1_non_dp_tx)
        iteration_data['abs_rx'] = (t4_rx - t1_rx) - (t4_non_dp_rx - t1_non_dp_rx)
        iteration_data['abs_dropped'] = iteration_data['abs_tx'] - iteration_data['abs_rx']
        iteration_data['drop_rate'] = 100.0*iteration_data['abs_dropped']/iteration_data['abs_tx']
        # Stop retrying when the drop rate is acceptable (or loss exceeds maxz).
        if ((iteration_data['drop_rate'] < self.test['drop_rate_threshold']) or (iteration_data['abs_dropped'] == self.test['drop_rate_threshold'] ==0) or (iteration_data['abs_dropped'] > self.test['maxz'])):
            # NOTE(review): a 'break' appears to be missing here; the next
            # line is nested under the if only to keep the copy coherent —
            # upstream the latency cores are stopped after the while loop.
            self.gen_machine.stop_latency_cores()
    iteration_data['abs_tx_fail'] = t4_tx_fail - t1_tx_fail
    return (iteration_data)