4 ## Copyright (c) 2020 Intel Corporation
6 ## Licensed under the Apache License, Version 2.0 (the "License");
7 ## you may not use this file except in compliance with the License.
8 ## You may obtain a copy of the License at
11 ## http://www.apache.org/licenses/LICENSE-2.0
13 ## Unless required by applicable law or agreed to in writing, software
14 ## distributed under the License is distributed on an "AS IS" BASIS,
15 ## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 ## See the License for the specific language governing permissions and
17 ## limitations under the License.
24 from past.utils import old_div
25 from rapid_log import RapidLog
26 from rapid_log import bcolors
28 from datetime import datetime as dt
# RapidTest drives one PROX-based "rapid" throughput/latency test run.
# NOTE(review): this chunk is a damaged extract — the original file's line
# numbers are fused into each line and several statements are missing.
# Code tokens below are left byte-identical to the damaged source.
30 class RapidTest(object):
32     Class to manage the testing
34     def __init__(self, test_param, runtime, testname, environment_file ):
        # Keep all test parameters plus run metadata in one dict (self.test).
35         self.test = test_param
36         self.test['runtime'] = runtime
37         self.test['testname'] = testname
38         self.test['environment_file'] = environment_file
39         if 'maxr' not in self.test.keys():
            # NOTE(review): the default assignment for 'maxr' (max retries,
            # read by run_iteration's outer loop) is missing from this extract.
41         if 'maxz' not in self.test.keys():
            # 'maxz': max tolerated absolute packet loss; defaults to unbounded.
            # NOTE(review): 'inf' and 'yaml' come from imports not visible here.
42             self.test['maxz'] = inf
        # Load the result-reporting layout; its placeholder values are later
        # substituted by parse_data_format_dict before post_data() publishes.
43         with open('format.yaml') as f:
44             self.data_format = yaml.load(f, Loader=yaml.FullLoader)
def get_percentageof10Gbps(pps_speed, size):
    """Return the share (in %) of a 10 Gb/s link consumed by *pps_speed*.

    *pps_speed* is a packet rate in packets per second, *size* the Ethernet
    frame size in bytes.  Every frame occupies 20 extra bytes on the wire:
    12 bytes inter-packet gap + 7 bytes preamble + 1 byte start-of-frame
    delimiter.
    """
    wire_bytes = size + 20
    return (pps_speed / 1000000.0 * 0.08 * wire_bytes)
def get_pps(speed, size):
    """Convert *speed* (a percentage of 10 Gb/s) into a packet rate in Mpps.

    *size* is the Ethernet frame size in bytes; each frame carries 20 bytes
    of wire overhead (12 inter-packet gap + 7 preamble + 1 SFD).
    """
    wire_bytes = size + 20
    return (speed * 100.0 / (8 * wire_bytes))
def get_speed(packet_speed, size):
    """Convert *packet_speed* (Mpps) into a line rate in Gb/s.

    *size* is the Ethernet frame size in bytes; each frame carries 20 bytes
    of wire overhead (12 inter-packet gap + 7 preamble + 1 SFD).
    """
    wire_bytes = size + 20
    return (packet_speed / 1000.0 * (8 * wire_bytes))
def set_background_flows(background_machines, number_of_flows):
    """Program every background generator with *number_of_flows* flows."""
    for bg_machine in background_machines:
        # The actual flow count reported back by the machine is ignored.
        _ = bg_machine.set_flows(number_of_flows)
def set_background_speed(background_machines, speed):
    """Apply generator *speed* to every background traffic machine."""
    for bg_machine in background_machines:
        bg_machine.set_generator_speed(speed)
def set_background_size(background_machines, imix):
    """Set the packet-size mix on every background traffic machine.

    *imix* is a list of packet sizes, forwarded unchanged to each machine.
    """
    for bg_machine in background_machines:
        bg_machine.set_udp_packet_size(imix)
90     def start_background_traffic(background_machines):
        # Start traffic generation on each background machine.
        # NOTE(review): the loop body (the per-machine start call) is missing
        # from this extract — restore it from the original file.
91         for machine in background_machines:
95     def stop_background_traffic(background_machines):
        # Stop traffic generation on each background machine.
        # NOTE(review): the loop body (the per-machine stop call) is missing
        # from this extract — restore it from the original file.
96         for machine in background_machines:
100     def parse_data_format_dict(data_format, variables):
        # Recursively walk the data_format tree and, in place, replace leaf
        # values that name a key of *variables* with that key's runtime value.
101         for k, v in data_format.items():
            # NOTE(review): the guard that chooses between recursing into
            # nested dicts and substituting leaves is missing from this
            # extract; as shown, both branches appear unconditionally.
103                 RapidTest.parse_data_format_dict(v, variables)
105                 if v in variables.keys():
106                     data_format[k] = variables[v]
def record_start_time(self):
    """Remember the wall-clock start of the test as 'YYYY-MM-DD HH:MM:SS'."""
    now = dt.now()
    self.start = now.strftime('%Y-%m-%d %H:%M:%S')
def record_stop_time(self):
    """Remember the wall-clock end of the test as 'YYYY-MM-DD HH:MM:SS'."""
    now = dt.now()
    self.stop = now.strftime('%Y-%m-%d %H:%M:%S')
114     def post_data(self, test, variables):
        # Publish one result record (var[test]) to the URL(s) configured in
        # format.yaml, after substituting runtime values from *variables*.
115         var = copy.deepcopy(self.data_format)
116         self.parse_data_format_dict(var, variables)
117         if 'URL' not in var.keys():
            # NOTE(review): early-return bodies for the missing-'URL' and
            # missing-test-section cases are absent from this extract.
119         if test not in var.keys():
122         for value in var['URL'].values():
            # NOTE(review): the line assigning URL from *value* is missing here.
124             HEADERS = {'X-Requested-With': 'Python requests', 'Content-type': 'application/rapid'}
125             if 'Format' in var.keys():
                # PushGateway wants newline-separated "key value" text lines;
126                 if var['Format'] == 'PushGateway':
127                     data = "\n".join("{} {}".format(k, v) for k, v in var[test].items()) + "\n"
128                     response = requests.post(url=URL, data=data,headers=HEADERS)
                # Xtesting wants a JSON body (built in lines missing from this extract).
129                 elif var['Format'] == 'Xtesting':
131                     response = requests.post(url=URL, json=data)
            # 200/202 are the only accepted success codes; anything else is
            # logged (not raised) so a dead metrics endpoint never kills a run.
136             if (response.status_code != 202) and (response.status_code != 200):
137                 RapidLog.info('Cannot send metrics to {}'.format(URL))
        # Build one formatted result-table row. None for any measurement
        # renders as 'NA'. The *_prefix arguments are color/escape prefixes
        # (see bcolors); lat_perc_max=True marks the percentile as saturating
        # the top latency bucket ('>' marker).
        # NOTE(review): several if/else counterpart lines are missing from
        # this extract (e.g. the condition selecting between the two
        # flow_number_str formats); tokens are kept byte-identical.
142     def report_result(flow_number, size, speed, pps_req_tx, pps_tx, pps_sut_tx,
143             pps_rx, lat_avg, lat_perc, lat_perc_max, lat_max, tx, rx, tot_drop,
144             elapsed_time,speed_prefix='', lat_avg_prefix='', lat_perc_prefix='',
145             lat_max_prefix='', abs_drop_rate_prefix='', drop_rate_prefix=''):
        # Negative flow numbers are shown parenthesized (magnitude only).
147             flow_number_str = '| ({:>4}) |'.format(abs(flow_number))
149             flow_number_str = '|{:>7} |'.format(flow_number)
150         if pps_req_tx is None:
151             pps_req_tx_str = '{0: >14}'.format(' NA |')
153             pps_req_tx_str = '{:>7.3f} Mpps |'.format(pps_req_tx)
155             pps_tx_str = '{0: >14}'.format(' NA |')
157             pps_tx_str = '{:>7.3f} Mpps |'.format(pps_tx)
158         if pps_sut_tx is None:
159             pps_sut_tx_str = '{0: >14}'.format(' NA |')
161             pps_sut_tx_str = '{:>7.3f} Mpps |'.format(pps_sut_tx)
163             pps_rx_str = '{0: >25}'.format('NA |')
        # RX is shown both as Gb/s (derived via get_speed) and Mpps, in blue.
165             pps_rx_str = bcolors.OKBLUE + '{:>4.1f} Gb/s |{:7.3f} Mpps {}|'.format(
166                 RapidTest.get_speed(pps_rx,size),pps_rx,bcolors.ENDC)
168             tot_drop_str = ' | NA | '
170             tot_drop_str = ' | {:>9.0f} | '.format(tot_drop)
172             lat_perc_str = ' |{:^10.10}|'.format('NA')
        # '>' prefix: the requested percentile fell in the last bucket, so the
        # reported value is a lower bound.
173         elif lat_perc_max == True:
174             lat_perc_str = '|>{}{:>5.0f} us{} |'.format(lat_perc_prefix,
175                 float(lat_perc), bcolors.ENDC)
177             lat_perc_str = '| {}{:>5.0f} us{} |'.format(lat_perc_prefix,
178                 float(lat_perc), bcolors.ENDC)
179         if elapsed_time is None:
180             elapsed_time_str = ' NA |'
182             elapsed_time_str = '{:>3.0f} |'.format(elapsed_time)
        # Assemble the full row; drop rate is 100*(tx-rx)/tx (old_div keeps
        # py2-style division semantics).
183         return(flow_number_str + '{:>5.1f}'.format(speed) + '% ' + speed_prefix
184             + '{:>6.3f}'.format(RapidTest.get_pps(speed,size)) + ' Mpps|' +
185             pps_req_tx_str + pps_tx_str + bcolors.ENDC + pps_sut_tx_str +
186             pps_rx_str + lat_avg_prefix + ' {:>6.0f}'.format(lat_avg) +
187             ' us' + lat_perc_str +lat_max_prefix+'{:>6.0f}'.format(lat_max)
188             + ' us | ' + '{:>9.0f}'.format(tx) + ' | {:>9.0f}'.format(rx) +
189             ' | '+ abs_drop_rate_prefix+ '{:>9.0f}'.format(tx-rx) +
190             tot_drop_str +drop_rate_prefix +
191             '{:>5.2f}'.format(100*old_div(float(tx-rx),tx)) + bcolors.ENDC +
192             ' |' + elapsed_time_str)
        # Run one measurement iteration: start generator (and background
        # traffic) at *speed* for *flow_number* flows of *size* bytes, sample
        # core and latency stats for at least *requested_duration* seconds,
        # then stop and return aggregate throughput/latency/drop figures.
        # Retries up to self.test['maxr'] times while the drop rate exceeds
        # the configured threshold.
        # NOTE(review): many lines are missing from this extract (loop
        # counters r/i, sleep_time, else branches, dict literals' braces,
        # fixed_rate early-exit bodies); code tokens are kept byte-identical.
194     def run_iteration(self, requested_duration, flow_number, size, speed):
195         BUCKET_SIZE_EXP = self.gen_machine.bucket_size_exp
196         LAT_PERCENTILE = self.test['lat_percentile']
199         while (r < self.test['maxr']):
200             time.sleep(sleep_time)
201             # Sleep_time is needed to be able to do accurate measurements to check for packet loss. We need to make this time large enough so that we do not take the first measurement while some packets from the previous tests migth still be in flight
            # --- Baseline (t1) counters before traffic starts ---
202             t1_rx, t1_non_dp_rx, t1_tx, t1_non_dp_tx, t1_drop, t1_tx_fail, t1_tsc, abs_tsc_hz = self.gen_machine.core_stats()
203             t1_dp_rx = t1_rx - t1_non_dp_rx
204             t1_dp_tx = t1_tx - t1_non_dp_tx
205             self.gen_machine.set_generator_speed(0)
206             self.gen_machine.start_gen_cores()
207             if self.background_machines:
208                 self.set_background_speed(self.background_machines, 0)
209                 self.start_background_traffic(self.background_machines)
            # Optional ramp-up: step the speed toward the target in
            # 'ramp_step' increments instead of jumping straight to it.
210             if 'ramp_step' in self.test.keys():
211                 ramp_speed = self.test['ramp_step']
214             while ramp_speed < speed:
215                 self.gen_machine.set_generator_speed(ramp_speed)
216                 if self.background_machines:
217                     self.set_background_speed(self.background_machines, ramp_speed)
219                 ramp_speed = ramp_speed + self.test['ramp_step']
220             self.gen_machine.set_generator_speed(speed)
221             if self.background_machines:
222                 self.set_background_speed(self.background_machines, speed)
223             time.sleep(2) ## Needs to be 2 seconds since this 1 sec is the time that PROX uses to refresh the stats. Note that this can be changed in PROX!! Don't do it.
            # Snapshot background-generator counters so the average background
            # rate can be computed at the end of the iteration.
224             start_bg_gen_stats = []
225             for bg_gen_machine in self.background_machines:
226                 bg_rx, bg_non_dp_rx, bg_tx, bg_non_dp_tx, _, _, bg_tsc, _ = bg_gen_machine.core_stats()
228                     "bg_dp_rx" : bg_rx - bg_non_dp_rx,
229                     "bg_dp_tx" : bg_tx - bg_non_dp_tx,
232                 start_bg_gen_stats.append(dict(bg_gen_stat))
233             if self.sut_machine!= None:
234                 t2_sut_rx, t2_sut_non_dp_rx, t2_sut_tx, t2_sut_non_dp_tx, t2_sut_drop, t2_sut_tx_fail, t2_sut_tsc, sut_tsc_hz = self.sut_machine.core_stats()
            # --- Second snapshot (t2): reference point for the sampling loop ---
235             t2_rx, t2_non_dp_rx, t2_tx, t2_non_dp_tx, t2_drop, t2_tx_fail, t2_tsc, tsc_hz = self.gen_machine.core_stats()
237             dp_tx = tx - (t2_non_dp_tx - t1_non_dp_tx )
238             dp_rx = t2_rx - t1_rx - (t2_non_dp_rx - t1_non_dp_rx)
239             tot_dp_drop = dp_tx - dp_rx
            # Abort loudly if nothing (or only control-plane traffic) was sent.
241                 RapidLog.critical("TX = 0. Test interrupted since no packet has been sent.")
243                 RapidLog.critical("Only non-dataplane packets (e.g. ARP) sent. Test interrupted since no packet has been sent.")
244             # Ask PROX to calibrate the bucket size once we have a PROX function to do this.
245             # Measure latency statistics per second
246             lat_min, lat_max, lat_avg, used_avg, t2_lat_tsc, lat_hz, buckets = self.gen_machine.lat_stats()
247             lat_samples = sum(buckets)
            # Find the first histogram bucket at which the cumulative sample
            # count crosses the requested percentile.
249             for sample_percentile, bucket in enumerate(buckets,start=1):
250                 sample_count += bucket
251                 if sample_count > (lat_samples * LAT_PERCENTILE):
253             percentile_max = (sample_percentile == len(buckets))
            # Convert bucket index to microseconds (bucket width = 2^exp TSC
            # ticks, scaled by the latency-core clock in MHz).
254             sample_percentile = sample_percentile * float(2 ** BUCKET_SIZE_EXP) / (old_div(float(lat_hz),float(10**6)))
255             if self.test['test'] == 'fixed_rate':
256                 RapidLog.info(self.report_result(flow_number,size,speed,None,None,None,None,lat_avg,sample_percentile,percentile_max,lat_max, dp_tx, dp_rx , None, None))
            # --- Reset accumulators for the main sampling loop ---
257             tot_rx = tot_non_dp_rx = tot_tx = tot_non_dp_tx = tot_drop = 0
258             lat_avg = used_avg = 0
259             buckets_total = [0] * 128
261             tot_lat_measurement_duration = float(0)
262             tot_core_measurement_duration = float(0)
263             tot_sut_core_measurement_duration = float(0)
264             tot_sut_rx = tot_sut_non_dp_rx = tot_sut_tx = tot_sut_non_dp_tx = tot_sut_drop = tot_sut_tx_fail = tot_sut_tsc = 0
265             lat_avail = core_avail = sut_avail = False
            # Sample until BOTH core and latency stats cover the requested
            # duration (with 0.1 s slack).
266             while (tot_core_measurement_duration - float(requested_duration) <= 0.1) or (tot_lat_measurement_duration - float(requested_duration) <= 0.1):
268                 lat_min_sample, lat_max_sample, lat_avg_sample, used_sample, t3_lat_tsc, lat_hz, buckets = self.gen_machine.lat_stats()
269                 # Get statistics after some execution time
270                 if t3_lat_tsc != t2_lat_tsc:
271                     single_lat_measurement_duration = (t3_lat_tsc - t2_lat_tsc) * 1.0 / lat_hz # time difference between the 2 measurements, expressed in seconds.
272                     # A second has passed in between to lat_stats requests. Hence we need to process the results
273                     tot_lat_measurement_duration = tot_lat_measurement_duration + single_lat_measurement_duration
274                     if lat_min > lat_min_sample:
275                         lat_min = lat_min_sample
276                     if lat_max < lat_max_sample:
277                         lat_max = lat_max_sample
278                     lat_avg = lat_avg + lat_avg_sample * single_lat_measurement_duration # Sometimes, There is more than 1 second between 2 lat_stats. Hence we will take the latest measurement
279                     used_avg = used_avg + used_sample * single_lat_measurement_duration # and give it more weigth.
280                     lat_samples = sum(buckets)
281                     tot_lat_samples += lat_samples
283                     for sample_percentile, bucket in enumerate(buckets,start=1):
284                         sample_count += bucket
285                         if sample_count > lat_samples * LAT_PERCENTILE:
287                     percentile_max = (sample_percentile == len(buckets))
288                     bucket_size = float(2 ** BUCKET_SIZE_EXP) / (old_div(float(lat_hz),float(10**6)))
289                     sample_percentile = sample_percentile * bucket_size
290                     buckets_total = [buckets_total[i] + buckets[i] for i in range(len(buckets_total))]
291                     t2_lat_tsc = t3_lat_tsc
                # --- Per-sample generator core deltas (t3 vs t2) ---
293                 t3_rx, t3_non_dp_rx, t3_tx, t3_non_dp_tx, t3_drop, t3_tx_fail, t3_tsc, tsc_hz = self.gen_machine.core_stats()
295                     single_core_measurement_duration = (t3_tsc - t2_tsc) * 1.0 / tsc_hz # time difference between the 2 measurements, expressed in seconds.
296                     tot_core_measurement_duration = tot_core_measurement_duration + single_core_measurement_duration
297                     delta_rx = t3_rx - t2_rx
299                     delta_non_dp_rx = t3_non_dp_rx - t2_non_dp_rx
300                     tot_non_dp_rx += delta_non_dp_rx
301                     delta_tx = t3_tx - t2_tx
303                     delta_non_dp_tx = t3_non_dp_tx - t2_non_dp_tx
304                     tot_non_dp_tx += delta_non_dp_tx
305                     delta_dp_tx = delta_tx -delta_non_dp_tx
306                     delta_dp_rx = delta_rx -delta_non_dp_rx
307                     delta_dp_drop = delta_dp_tx - delta_dp_rx
308                     tot_dp_drop += delta_dp_drop
309                     delta_drop = t3_drop - t2_drop
310                     tot_drop += delta_drop
311                     t2_rx, t2_non_dp_rx, t2_tx, t2_non_dp_tx, t2_drop, t2_tx_fail, t2_tsc = t3_rx, t3_non_dp_rx, t3_tx, t3_non_dp_tx, t3_drop, t3_tx_fail, t3_tsc
                # --- Optional SUT-side counters, same delta scheme ---
313                 if self.sut_machine!=None:
314                     t3_sut_rx, t3_sut_non_dp_rx, t3_sut_tx, t3_sut_non_dp_tx, t3_sut_drop, t3_sut_tx_fail, t3_sut_tsc, sut_tsc_hz = self.sut_machine.core_stats()
315                     if t3_sut_tsc != t2_sut_tsc:
316                         single_sut_core_measurement_duration = (t3_sut_tsc - t2_sut_tsc) * 1.0 / tsc_hz # time difference between the 2 measurements, expressed in seconds.
317                         tot_sut_core_measurement_duration = tot_sut_core_measurement_duration + single_sut_core_measurement_duration
318                         tot_sut_rx += t3_sut_rx - t2_sut_rx
319                         tot_sut_non_dp_rx += t3_sut_non_dp_rx - t2_sut_non_dp_rx
320                         delta_sut_tx = t3_sut_tx - t2_sut_tx
321                         tot_sut_tx += delta_sut_tx
322                         delta_sut_non_dp_tx = t3_sut_non_dp_tx - t2_sut_non_dp_tx
323                         tot_sut_non_dp_tx += delta_sut_non_dp_tx
324                         t2_sut_rx, t2_sut_non_dp_rx, t2_sut_tx, t2_sut_non_dp_tx, t2_sut_drop, t2_sut_tx_fail, t2_sut_tsc = t3_sut_rx, t3_sut_non_dp_rx, t3_sut_tx, t3_sut_non_dp_tx, t3_sut_drop, t3_sut_tx_fail, t3_sut_tsc
                # fixed_rate tests report (and post) every per-sample result.
326                 if self.test['test'] == 'fixed_rate':
327                     if lat_avail == core_avail == True:
328                         lat_avail = core_avail = False
329                         pps_req_tx = (delta_tx + delta_drop - delta_rx)/single_core_measurement_duration/1000000
330                         pps_tx = delta_tx/single_core_measurement_duration/1000000
331                         if self.sut_machine != None and sut_avail:
332                             pps_sut_tx = delta_sut_tx/single_sut_core_measurement_duration/1000000
336                         pps_rx = delta_rx/single_core_measurement_duration/1000000
337                         RapidLog.info(self.report_result(flow_number, size,
338                             speed, pps_req_tx, pps_tx, pps_sut_tx, pps_rx,
339                             lat_avg_sample, sample_percentile, percentile_max,
340                             lat_max_sample, delta_dp_tx, delta_dp_rx,
341                             tot_dp_drop, single_core_measurement_duration))
                        # Per-sample record published via post_data (dict
                        # literal's surrounding lines missing from extract).
343                             'Flows': flow_number,
345                             'RequestedSpeed': self.get_pps(speed,size),
346                             'CoreGenerated': pps_req_tx,
348                             'FwdBySUT': pps_sut_tx,
350                             'AvgLatency': lat_avg_sample,
351                             'PCTLatency': sample_percentile,
352                             'MaxLatency': lat_max_sample,
353                             'PacketsSent': delta_dp_tx,
354                             'PacketsReceived': delta_dp_rx,
355                             'PacketsLost': tot_dp_drop,
356                             'bucket_size': bucket_size,
359                         self.post_data('rapid_flowsizetest', variables)
            # --- End of sampling: compute average background rate in Mpps ---
360             end_bg_gen_stats = []
361             for bg_gen_machine in self.background_machines:
362                 bg_rx, bg_non_dp_rx, bg_tx, bg_non_dp_tx, _, _, bg_tsc, bg_hz = bg_gen_machine.core_stats()
363                 bg_gen_stat = {"bg_dp_rx" : bg_rx - bg_non_dp_rx,
364                     "bg_dp_tx" : bg_tx - bg_non_dp_tx,
368                 end_bg_gen_stats.append(dict(bg_gen_stat))
371             while i < len(end_bg_gen_stats):
372                 bg_rates.append(0.000001*(end_bg_gen_stats[i]['bg_dp_rx'] -
373                     start_bg_gen_stats[i]['bg_dp_rx']) / ((end_bg_gen_stats[i]['bg_tsc'] -
374                     start_bg_gen_stats[i]['bg_tsc']) * 1.0 / end_bg_gen_stats[i]['bg_hz']))
377                 avg_bg_rate = sum(bg_rates) / len(bg_rates)
378                 RapidLog.debug('Average Background traffic rate: {:>7.3f} Mpps'.format(avg_bg_rate))
382             self.gen_machine.stop_gen_cores()
            # Duration-weighted averages accumulated in the sampling loop.
384             lat_avg = old_div(lat_avg, float(tot_lat_measurement_duration))
385             used_avg = old_div(used_avg, float(tot_lat_measurement_duration))
            # Spin until a fresh (t4) core snapshot lands after stop.
387             while t4_tsc == t2_tsc:
388                 t4_rx, t4_non_dp_rx, t4_tx, t4_non_dp_tx, t4_drop, t4_tx_fail, t4_tsc, abs_tsc_hz = self.gen_machine.core_stats()
389             if self.test['test'] == 'fixed_rate':
390                 t4_lat_tsc = t2_lat_tsc
391                 while t4_lat_tsc == t2_lat_tsc:
392                     lat_min_sample, lat_max_sample, lat_avg_sample, used_sample, t4_lat_tsc, lat_hz, buckets = self.gen_machine.lat_stats()
394                 lat_samples = sum(buckets)
395                 for percentile, bucket in enumerate(buckets,start=1):
396                     sample_count += bucket
397                     if sample_count > lat_samples * LAT_PERCENTILE:
399                 percentile_max = (percentile == len(buckets))
400                 percentile = percentile * bucket_size
401                 lat_max = lat_max_sample
402                 lat_avg = lat_avg_sample
403                 delta_rx = t4_rx - t2_rx
404                 delta_non_dp_rx = t4_non_dp_rx - t2_non_dp_rx
405                 delta_tx = t4_tx - t2_tx
406                 delta_non_dp_tx = t4_non_dp_tx - t2_non_dp_tx
407                 delta_dp_tx = delta_tx -delta_non_dp_tx
408                 delta_dp_rx = delta_rx -delta_non_dp_rx
411                 tot_dp_drop += delta_dp_tx - delta_dp_rx
416                 drop_rate = 100.0*(dp_tx-dp_rx)/dp_tx
417                 tot_core_measurement_duration = None
418                 break ## Not really needed since the while loop will stop when evaluating the value of r
            # Non-fixed_rate path: derive the percentile from the summed
            # histogram over the whole iteration.
421                 buckets = buckets_total
422                 for percentile, bucket in enumerate(buckets_total,start=1):
423                     sample_count += bucket
424                     if sample_count > tot_lat_samples * LAT_PERCENTILE:
426                 percentile_max = (percentile == len(buckets_total))
427                 percentile = percentile * bucket_size
428                 pps_req_tx = (tot_tx + tot_drop - tot_rx)/tot_core_measurement_duration/1000000.0 # tot_drop is all packets dropped by all tasks. This includes packets dropped at the generator task + packets dropped by the nop task. In steady state, this equals to the number of packets received by this VM
429                 pps_tx = tot_tx/tot_core_measurement_duration/1000000.0 # tot_tx is all generated packets actually accepted by the interface
430                 pps_rx = tot_rx/tot_core_measurement_duration/1000000.0 # tot_rx is all packets received by the nop task = all packets received in the gen VM
431                 if self.sut_machine != None and sut_avail:
432                     pps_sut_tx = tot_sut_tx / tot_sut_core_measurement_duration / 1000000.0
            # Whole-iteration dataplane totals (t4 vs t1 baseline).
435                 dp_tx = (t4_tx - t1_tx) - (t4_non_dp_tx - t1_non_dp_tx)
436                 dp_rx = (t4_rx - t1_rx) - (t4_non_dp_rx - t1_non_dp_rx)
437                 tot_dp_drop = dp_tx - dp_rx
438                 drop_rate = 100.0*tot_dp_drop/dp_tx
            # Stop retrying when loss is within threshold (or exactly zero
            # with a zero threshold) or has exceeded the maxz abort limit.
439                 if ((drop_rate < self.test['drop_rate_threshold']) or (tot_dp_drop == self.test['drop_rate_threshold'] ==0) or (tot_dp_drop > self.test['maxz'])):
441         return(pps_req_tx,pps_tx,pps_sut_tx,pps_rx,lat_avg,percentile,percentile_max,lat_max,dp_tx,dp_rx,tot_dp_drop,(t4_tx_fail - t1_tx_fail),drop_rate,lat_min,used_avg,r,tot_core_measurement_duration,avg_bg_rate,bucket_size,buckets)