## Copyright (c) 2020 Intel Corporation
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
##     http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
import copy
import time
from datetime import datetime as dt
from math import inf

import requests
import yaml
from past.utils import old_div

from rapid_log import RapidLog
from rapid_log import bcolors
class RapidTest(object):
    """
    Class to manage the testing
    """
    def __init__(self, test_param, runtime, testname, environment_file):
        """Store the test parameters and load the result reporting format.

        test_param: dict describing the test (from the test file)
        runtime: requested duration for the test, recorded in self.test
        testname: name of the test, recorded in self.test
        environment_file: name of the environment file, recorded in self.test
        """
        self.test = test_param
        self.test['runtime'] = runtime
        self.test['testname'] = testname
        self.test['environment_file'] = environment_file
        # Default to a single iteration when no maximum run count is given.
        if 'maxr' not in self.test.keys():
            self.test['maxr'] = 1
        # Default to no upper bound on the absolute packet loss.
        if 'maxz' not in self.test.keys():
            self.test['maxz'] = inf
        # format.yaml describes how/where results are reported (see post_data).
        with open('format.yaml') as f:
            self.data_format = yaml.load(f, Loader=yaml.FullLoader)
47 def get_percentageof10Gbps(pps_speed,size):
48 # speed is given in pps, returning % of 10Gb/s
49 # 12 bytes is the inter packet gap
50 # pre-amble is 7 bytes
51 # SFD (start of frame delimiter) is 1 byte
52 # Total of 20 bytes overhead per packet
53 return (pps_speed / 1000000.0 * 0.08 * (size+20))
56 def get_pps(speed,size):
57 # speed is given in % of 10Gb/s, returning Mpps
58 # 12 bytes is the inter packet gap
59 # pre-amble is 7 bytes
60 # SFD (start of frame delimiter) is 1 byte
61 # Total of 20 bytes overhead per packet
62 return (speed * 100.0 / (8*(size+20)))
65 def get_speed(packet_speed,size):
66 # return speed in Gb/s
67 # 12 bytes is the inter packet gap
68 # pre-amble is 7 bytes
69 # SFD (start of frame delimiter) is 1 byte
70 # Total of 20 bytes overhead per packet
71 return (packet_speed / 1000.0 * (8*(size+20)))
74 def set_background_flows(background_machines, number_of_flows):
75 for machine in background_machines:
76 _ = machine.set_flows(number_of_flows)
79 def set_background_speed(background_machines, speed):
80 for machine in background_machines:
81 machine.set_generator_speed(speed)
84 def set_background_size(background_machines, imix):
85 # imixs is a list of packet sizes
86 for machine in background_machines:
87 machine.set_udp_packet_size(imix)
90 def start_background_traffic(background_machines):
91 for machine in background_machines:
95 def stop_background_traffic(background_machines):
96 for machine in background_machines:
100 def parse_data_format_dict(data_format, variables):
101 for k, v in data_format.items():
103 RapidTest.parse_data_format_dict(v, variables)
105 if v in variables.keys():
106 data_format[k] = variables[v]
108 def record_start_time(self):
109 self.start = dt.now().strftime('%Y-%m-%d %H:%M:%S')
111 def record_stop_time(self):
112 self.stop = dt.now().strftime('%Y-%m-%d %H:%M:%S')
114 def post_data(self, test, variables):
115 var = copy.deepcopy(self.data_format)
116 self.parse_data_format_dict(var, variables)
117 if var.keys() >= {'URL', test, 'Format'}:
119 for value in var['URL'].values():
121 HEADERS = {'X-Requested-With': 'Python requests', 'Content-type': 'application/rapid'}
122 if var['Format'] == 'PushGateway':
123 data = "\n".join("{} {}".format(k, v) for k, v in var[test].items()) + "\n"
124 response = requests.post(url=URL, data=data,headers=HEADERS)
125 elif var['Format'] == 'Xtesting':
127 response = requests.post(url=URL, json=data)
128 if (response.status_code >= 300):
129 RapidLog.info('Cannot send metrics to {}'.format(URL))
134 def report_result(flow_number, size, speed, pps_req_tx, pps_tx, pps_sut_tx,
135 pps_rx, lat_avg, lat_perc, lat_perc_max, lat_max, tx, rx, tot_drop,
136 elapsed_time,speed_prefix='', lat_avg_prefix='', lat_perc_prefix='',
137 lat_max_prefix='', abs_drop_rate_prefix='', drop_rate_prefix=''):
139 flow_number_str = '| ({:>4}) |'.format(abs(flow_number))
141 flow_number_str = '|{:>7} |'.format(flow_number)
142 if pps_req_tx is None:
143 pps_req_tx_str = '{0: >14}'.format(' NA |')
145 pps_req_tx_str = '{:>7.3f} Mpps |'.format(pps_req_tx)
147 pps_tx_str = '{0: >14}'.format(' NA |')
149 pps_tx_str = '{:>7.3f} Mpps |'.format(pps_tx)
150 if pps_sut_tx is None:
151 pps_sut_tx_str = '{0: >14}'.format(' NA |')
153 pps_sut_tx_str = '{:>7.3f} Mpps |'.format(pps_sut_tx)
155 pps_rx_str = '{0: >25}'.format('NA |')
157 pps_rx_str = bcolors.OKBLUE + '{:>4.1f} Gb/s |{:7.3f} Mpps {}|'.format(
158 RapidTest.get_speed(pps_rx,size),pps_rx,bcolors.ENDC)
160 tot_drop_str = ' | NA | '
162 tot_drop_str = ' | {:>9.0f} | '.format(tot_drop)
164 lat_perc_str = ' |{:^10.10}|'.format('NA')
165 elif lat_perc_max == True:
166 lat_perc_str = '|>{}{:>5.0f} us{} |'.format(lat_perc_prefix,
167 float(lat_perc), bcolors.ENDC)
169 lat_perc_str = '| {}{:>5.0f} us{} |'.format(lat_perc_prefix,
170 float(lat_perc), bcolors.ENDC)
171 if elapsed_time is None:
172 elapsed_time_str = ' NA |'
174 elapsed_time_str = '{:>3.0f} |'.format(elapsed_time)
175 return(flow_number_str + '{:>5.1f}'.format(speed) + '% ' + speed_prefix
176 + '{:>6.3f}'.format(RapidTest.get_pps(speed,size)) + ' Mpps|' +
177 pps_req_tx_str + pps_tx_str + bcolors.ENDC + pps_sut_tx_str +
178 pps_rx_str + lat_avg_prefix + ' {:>6.0f}'.format(lat_avg) +
179 ' us' + lat_perc_str +lat_max_prefix+'{:>6.0f}'.format(lat_max)
180 + ' us | ' + '{:>9.0f}'.format(tx) + ' | {:>9.0f}'.format(rx) +
181 ' | '+ abs_drop_rate_prefix+ '{:>9.0f}'.format(tx-rx) +
182 tot_drop_str +drop_rate_prefix +
183 '{:>5.2f}'.format(100*old_div(float(tx-rx),tx)) + bcolors.ENDC +
184 ' |' + elapsed_time_str)
186 def run_iteration(self, requested_duration, flow_number, size, speed):
187 BUCKET_SIZE_EXP = self.gen_machine.bucket_size_exp
188 LAT_PERCENTILE = self.test['lat_percentile']
191 while (r < self.test['maxr']):
192 time.sleep(sleep_time)
193 # Sleep_time is needed to be able to do accurate measurements to check for packet loss. We need to make this time large enough so that we do not take the first measurement while some packets from the previous tests migth still be in flight
194 t1_rx, t1_non_dp_rx, t1_tx, t1_non_dp_tx, t1_drop, t1_tx_fail, t1_tsc, abs_tsc_hz = self.gen_machine.core_stats()
195 t1_dp_rx = t1_rx - t1_non_dp_rx
196 t1_dp_tx = t1_tx - t1_non_dp_tx
197 self.gen_machine.set_generator_speed(0)
198 self.gen_machine.start_gen_cores()
199 if self.background_machines:
200 self.set_background_speed(self.background_machines, 0)
201 self.start_background_traffic(self.background_machines)
202 if 'ramp_step' in self.test.keys():
203 ramp_speed = self.test['ramp_step']
206 while ramp_speed < speed:
207 self.gen_machine.set_generator_speed(ramp_speed)
208 if self.background_machines:
209 self.set_background_speed(self.background_machines, ramp_speed)
211 ramp_speed = ramp_speed + self.test['ramp_step']
212 self.gen_machine.set_generator_speed(speed)
213 if self.background_machines:
214 self.set_background_speed(self.background_machines, speed)
215 time.sleep(2) ## Needs to be 2 seconds since this 1 sec is the time that PROX uses to refresh the stats. Note that this can be changed in PROX!! Don't do it.
216 start_bg_gen_stats = []
217 for bg_gen_machine in self.background_machines:
218 bg_rx, bg_non_dp_rx, bg_tx, bg_non_dp_tx, _, _, bg_tsc, _ = bg_gen_machine.core_stats()
220 "bg_dp_rx" : bg_rx - bg_non_dp_rx,
221 "bg_dp_tx" : bg_tx - bg_non_dp_tx,
224 start_bg_gen_stats.append(dict(bg_gen_stat))
225 if self.sut_machine!= None:
226 t2_sut_rx, t2_sut_non_dp_rx, t2_sut_tx, t2_sut_non_dp_tx, t2_sut_drop, t2_sut_tx_fail, t2_sut_tsc, sut_tsc_hz = self.sut_machine.core_stats()
227 t2_rx, t2_non_dp_rx, t2_tx, t2_non_dp_tx, t2_drop, t2_tx_fail, t2_tsc, tsc_hz = self.gen_machine.core_stats()
229 dp_tx = tx - (t2_non_dp_tx - t1_non_dp_tx )
230 dp_rx = t2_rx - t1_rx - (t2_non_dp_rx - t1_non_dp_rx)
231 tot_dp_drop = dp_tx - dp_rx
233 RapidLog.critical("TX = 0. Test interrupted since no packet has been sent.")
235 RapidLog.critical("Only non-dataplane packets (e.g. ARP) sent. Test interrupted since no packet has been sent.")
236 # Ask PROX to calibrate the bucket size once we have a PROX function to do this.
237 # Measure latency statistics per second
238 lat_min, lat_max, lat_avg, used_avg, t2_lat_tsc, lat_hz, buckets = self.gen_machine.lat_stats()
239 lat_samples = sum(buckets)
241 for sample_percentile, bucket in enumerate(buckets,start=1):
242 sample_count += bucket
243 if sample_count > (lat_samples * LAT_PERCENTILE):
245 percentile_max = (sample_percentile == len(buckets))
246 sample_percentile = sample_percentile * float(2 ** BUCKET_SIZE_EXP) / (old_div(float(lat_hz),float(10**6)))
247 if self.test['test'] == 'fixed_rate':
248 RapidLog.info(self.report_result(flow_number,size,speed,None,None,None,None,lat_avg,sample_percentile,percentile_max,lat_max, dp_tx, dp_rx , None, None))
249 tot_rx = tot_non_dp_rx = tot_tx = tot_non_dp_tx = tot_drop = 0
250 lat_avg = used_avg = 0
251 buckets_total = buckets
252 tot_lat_samples = sum(buckets)
253 tot_lat_measurement_duration = float(0)
254 tot_core_measurement_duration = float(0)
255 tot_sut_core_measurement_duration = float(0)
256 tot_sut_rx = tot_sut_non_dp_rx = tot_sut_tx = tot_sut_non_dp_tx = tot_sut_drop = tot_sut_tx_fail = tot_sut_tsc = 0
257 lat_avail = core_avail = sut_avail = False
258 while (tot_core_measurement_duration - float(requested_duration) <= 0.1) or (tot_lat_measurement_duration - float(requested_duration) <= 0.1):
260 lat_min_sample, lat_max_sample, lat_avg_sample, used_sample, t3_lat_tsc, lat_hz, buckets = self.gen_machine.lat_stats()
261 # Get statistics after some execution time
262 if t3_lat_tsc != t2_lat_tsc:
263 single_lat_measurement_duration = (t3_lat_tsc - t2_lat_tsc) * 1.0 / lat_hz # time difference between the 2 measurements, expressed in seconds.
264 # A second has passed in between to lat_stats requests. Hence we need to process the results
265 tot_lat_measurement_duration = tot_lat_measurement_duration + single_lat_measurement_duration
266 if lat_min > lat_min_sample:
267 lat_min = lat_min_sample
268 if lat_max < lat_max_sample:
269 lat_max = lat_max_sample
270 lat_avg = lat_avg + lat_avg_sample * single_lat_measurement_duration # Sometimes, There is more than 1 second between 2 lat_stats. Hence we will take the latest measurement
271 used_avg = used_avg + used_sample * single_lat_measurement_duration # and give it more weigth.
272 lat_samples = sum(buckets)
273 tot_lat_samples += lat_samples
275 for sample_percentile, bucket in enumerate(buckets,start=1):
276 sample_count += bucket
277 if sample_count > lat_samples * LAT_PERCENTILE:
279 percentile_max = (sample_percentile == len(buckets))
280 bucket_size = float(2 ** BUCKET_SIZE_EXP) / (old_div(float(lat_hz),float(10**6)))
281 sample_percentile = sample_percentile * bucket_size
282 buckets_total = [buckets_total[i] + buckets[i] for i in range(len(buckets_total))]
283 t2_lat_tsc = t3_lat_tsc
285 t3_rx, t3_non_dp_rx, t3_tx, t3_non_dp_tx, t3_drop, t3_tx_fail, t3_tsc, tsc_hz = self.gen_machine.core_stats()
287 single_core_measurement_duration = (t3_tsc - t2_tsc) * 1.0 / tsc_hz # time difference between the 2 measurements, expressed in seconds.
288 tot_core_measurement_duration = tot_core_measurement_duration + single_core_measurement_duration
289 delta_rx = t3_rx - t2_rx
291 delta_non_dp_rx = t3_non_dp_rx - t2_non_dp_rx
292 tot_non_dp_rx += delta_non_dp_rx
293 delta_tx = t3_tx - t2_tx
295 delta_non_dp_tx = t3_non_dp_tx - t2_non_dp_tx
296 tot_non_dp_tx += delta_non_dp_tx
297 delta_dp_tx = delta_tx -delta_non_dp_tx
298 delta_dp_rx = delta_rx -delta_non_dp_rx
299 delta_dp_drop = delta_dp_tx - delta_dp_rx
300 tot_dp_drop += delta_dp_drop
301 delta_drop = t3_drop - t2_drop
302 tot_drop += delta_drop
303 t2_rx, t2_non_dp_rx, t2_tx, t2_non_dp_tx, t2_drop, t2_tx_fail, t2_tsc = t3_rx, t3_non_dp_rx, t3_tx, t3_non_dp_tx, t3_drop, t3_tx_fail, t3_tsc
305 if self.sut_machine!=None:
306 t3_sut_rx, t3_sut_non_dp_rx, t3_sut_tx, t3_sut_non_dp_tx, t3_sut_drop, t3_sut_tx_fail, t3_sut_tsc, sut_tsc_hz = self.sut_machine.core_stats()
307 if t3_sut_tsc != t2_sut_tsc:
308 single_sut_core_measurement_duration = (t3_sut_tsc - t2_sut_tsc) * 1.0 / tsc_hz # time difference between the 2 measurements, expressed in seconds.
309 tot_sut_core_measurement_duration = tot_sut_core_measurement_duration + single_sut_core_measurement_duration
310 tot_sut_rx += t3_sut_rx - t2_sut_rx
311 tot_sut_non_dp_rx += t3_sut_non_dp_rx - t2_sut_non_dp_rx
312 delta_sut_tx = t3_sut_tx - t2_sut_tx
313 tot_sut_tx += delta_sut_tx
314 delta_sut_non_dp_tx = t3_sut_non_dp_tx - t2_sut_non_dp_tx
315 tot_sut_non_dp_tx += delta_sut_non_dp_tx
316 t2_sut_rx, t2_sut_non_dp_rx, t2_sut_tx, t2_sut_non_dp_tx, t2_sut_drop, t2_sut_tx_fail, t2_sut_tsc = t3_sut_rx, t3_sut_non_dp_rx, t3_sut_tx, t3_sut_non_dp_tx, t3_sut_drop, t3_sut_tx_fail, t3_sut_tsc
318 if self.test['test'] == 'fixed_rate':
319 if lat_avail == core_avail == True:
320 lat_avail = core_avail = False
321 pps_req_tx = (delta_tx + delta_drop - delta_rx)/single_core_measurement_duration/1000000
322 pps_tx = delta_tx/single_core_measurement_duration/1000000
323 if self.sut_machine != None and sut_avail:
324 pps_sut_tx = delta_sut_tx/single_sut_core_measurement_duration/1000000
328 pps_rx = delta_rx/single_core_measurement_duration/1000000
329 RapidLog.info(self.report_result(flow_number, size,
330 speed, pps_req_tx, pps_tx, pps_sut_tx, pps_rx,
331 lat_avg_sample, sample_percentile, percentile_max,
332 lat_max_sample, delta_dp_tx, delta_dp_rx,
333 tot_dp_drop, single_core_measurement_duration))
335 'Flows': flow_number,
337 'RequestedSpeed': self.get_pps(speed,size),
338 'CoreGenerated': pps_req_tx,
340 'FwdBySUT': pps_sut_tx,
342 'AvgLatency': lat_avg_sample,
343 'PCTLatency': sample_percentile,
344 'MaxLatency': lat_max_sample,
345 'PacketsSent': delta_dp_tx,
346 'PacketsReceived': delta_dp_rx,
347 'PacketsLost': tot_dp_drop,
348 'bucket_size': bucket_size,
351 self.post_data('rapid_flowsizetest', variables)
352 end_bg_gen_stats = []
353 for bg_gen_machine in self.background_machines:
354 bg_rx, bg_non_dp_rx, bg_tx, bg_non_dp_tx, _, _, bg_tsc, bg_hz = bg_gen_machine.core_stats()
355 bg_gen_stat = {"bg_dp_rx" : bg_rx - bg_non_dp_rx,
356 "bg_dp_tx" : bg_tx - bg_non_dp_tx,
360 end_bg_gen_stats.append(dict(bg_gen_stat))
363 while i < len(end_bg_gen_stats):
364 bg_rates.append(0.000001*(end_bg_gen_stats[i]['bg_dp_rx'] -
365 start_bg_gen_stats[i]['bg_dp_rx']) / ((end_bg_gen_stats[i]['bg_tsc'] -
366 start_bg_gen_stats[i]['bg_tsc']) * 1.0 / end_bg_gen_stats[i]['bg_hz']))
369 avg_bg_rate = sum(bg_rates) / len(bg_rates)
370 RapidLog.debug('Average Background traffic rate: {:>7.3f} Mpps'.format(avg_bg_rate))
374 self.gen_machine.stop_gen_cores()
376 lat_avg = old_div(lat_avg, float(tot_lat_measurement_duration))
377 used_avg = old_div(used_avg, float(tot_lat_measurement_duration))
379 while t4_tsc == t2_tsc:
380 t4_rx, t4_non_dp_rx, t4_tx, t4_non_dp_tx, t4_drop, t4_tx_fail, t4_tsc, abs_tsc_hz = self.gen_machine.core_stats()
381 if self.test['test'] == 'fixed_rate':
382 t4_lat_tsc = t2_lat_tsc
383 while t4_lat_tsc == t2_lat_tsc:
384 lat_min_sample, lat_max_sample, lat_avg_sample, used_sample, t4_lat_tsc, lat_hz, buckets = self.gen_machine.lat_stats()
386 lat_samples = sum(buckets)
387 for percentile, bucket in enumerate(buckets,start=1):
388 sample_count += bucket
389 if sample_count > lat_samples * LAT_PERCENTILE:
391 percentile_max = (percentile == len(buckets))
392 percentile = percentile * bucket_size
393 lat_max = lat_max_sample
394 lat_avg = lat_avg_sample
395 delta_rx = t4_rx - t2_rx
396 delta_non_dp_rx = t4_non_dp_rx - t2_non_dp_rx
397 delta_tx = t4_tx - t2_tx
398 delta_non_dp_tx = t4_non_dp_tx - t2_non_dp_tx
399 delta_dp_tx = delta_tx -delta_non_dp_tx
400 delta_dp_rx = delta_rx -delta_non_dp_rx
403 tot_dp_drop += delta_dp_tx - delta_dp_rx
408 drop_rate = 100.0*(dp_tx-dp_rx)/dp_tx
409 tot_core_measurement_duration = None
410 break ## Not really needed since the while loop will stop when evaluating the value of r
413 buckets = buckets_total
414 for percentile, bucket in enumerate(buckets_total,start=1):
415 sample_count += bucket
416 if sample_count > tot_lat_samples * LAT_PERCENTILE:
418 percentile_max = (percentile == len(buckets_total))
419 percentile = percentile * bucket_size
420 pps_req_tx = (tot_tx + tot_drop - tot_rx)/tot_core_measurement_duration/1000000.0 # tot_drop is all packets dropped by all tasks. This includes packets dropped at the generator task + packets dropped by the nop task. In steady state, this equals to the number of packets received by this VM
421 pps_tx = tot_tx/tot_core_measurement_duration/1000000.0 # tot_tx is all generated packets actually accepted by the interface
422 pps_rx = tot_rx/tot_core_measurement_duration/1000000.0 # tot_rx is all packets received by the nop task = all packets received in the gen VM
423 if self.sut_machine != None and sut_avail:
424 pps_sut_tx = tot_sut_tx / tot_sut_core_measurement_duration / 1000000.0
427 dp_tx = (t4_tx - t1_tx) - (t4_non_dp_tx - t1_non_dp_tx)
428 dp_rx = (t4_rx - t1_rx) - (t4_non_dp_rx - t1_non_dp_rx)
429 tot_dp_drop = dp_tx - dp_rx
430 drop_rate = 100.0*tot_dp_drop/dp_tx
431 if ((drop_rate < self.test['drop_rate_threshold']) or (tot_dp_drop == self.test['drop_rate_threshold'] ==0) or (tot_dp_drop > self.test['maxz'])):
433 return(pps_req_tx,pps_tx,pps_sut_tx,pps_rx,lat_avg,percentile,percentile_max,lat_max,dp_tx,dp_rx,tot_dp_drop,(t4_tx_fail - t1_tx_fail),drop_rate,lat_min,used_avg,r,tot_core_measurement_duration,avg_bg_rate,bucket_size,buckets)