4 ## Copyright (c) 2020 Intel Corporation
6 ## Licensed under the Apache License, Version 2.0 (the "License");
7 ## you may not use this file except in compliance with the License.
8 ## You may obtain a copy of the License at
11 ## http://www.apache.org/licenses/LICENSE-2.0
13 ## Unless required by applicable law or agreed to in writing, software
14 ## distributed under the License is distributed on an "AS IS" BASIS,
15 ## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 ## See the License for the specific language governing permissions and
17 ## limitations under the License.
24 from past.utils import old_div
25 from rapid_log import RapidLog
26 from rapid_log import bcolors
28 from datetime import datetime as dt
30 class RapidTest(object):
32 Class to manage the testing
def __init__(self, test_param, runtime, testname, environment_file):
    """Initialize a test run from its parameter dict.

    test_param        : dict with the test parameters; mutated in place to
                        record runtime, testname and environment_file.
    runtime           : requested duration of each measurement run.
    testname          : name of the test, stored for reporting.
    environment_file  : path of the environment description, stored for
                        reporting.

    Also loads 'format.yaml' (the result-publishing template) into
    self.data_format.

    NOTE(review): the default assignment for 'maxr' was truncated in this
    excerpt; restored as 1 (a single run when no maximum number of runs is
    configured).
    """
    self.test = test_param
    self.test['runtime'] = runtime
    self.test['testname'] = testname
    self.test['environment_file'] = environment_file
    if 'maxr' not in self.test.keys():
        # Default: run the measurement only once.
        self.test['maxr'] = 1
    if 'maxz' not in self.test.keys():
        # No upper bound on absolute packet loss by default.
        self.test['maxz'] = inf
    with open('format.yaml') as f:
        self.data_format = yaml.load(f, Loader=yaml.FullLoader)
def get_percentageof10Gbps(pps_speed,size):
    """Convert a packet rate (pps) into a percentage of 10 Gb/s line rate.

    Each frame occupies size + 20 bytes on the wire: 12 bytes
    inter-packet gap, 7 bytes preamble and 1 byte start-of-frame
    delimiter on top of the frame itself.
    """
    # 0.08 = 8 bits/byte divided by 100 (10 Gb/s expressed in Mb units).
    mpps = pps_speed / 1000000.0
    wire_bytes = size + 20
    return mpps * 0.08 * wire_bytes
def get_pps(speed,size):
    """Convert a speed given in % of 10 Gb/s into a packet rate in Mpps.

    The wire footprint of one frame is size + 20 bytes: 12 bytes
    inter-packet gap + 7 bytes preamble + 1 byte start-of-frame
    delimiter.
    """
    wire_bits_per_packet = 8 * (size + 20)
    return speed * 100.0 / wire_bits_per_packet
def get_speed(packet_speed,size):
    """Convert a packet rate in Mpps into a line rate in Gb/s.

    The wire footprint of one frame is size + 20 bytes: 12 bytes
    inter-packet gap + 7 bytes preamble + 1 byte start-of-frame
    delimiter.
    """
    wire_bits_per_packet = 8 * (size + 20)
    return packet_speed / 1000.0 * wire_bits_per_packet
def set_background_flows(background_machines, number_of_flows):
    """Program every background generator with the given number of flows.

    The return value of set_flows is intentionally ignored.
    """
    for gen_machine in background_machines:
        gen_machine.set_flows(number_of_flows)
def set_background_speed(background_machines, speed):
    """Set the transmit speed on every background generator machine."""
    for gen_machine in background_machines:
        gen_machine.set_generator_speed(speed)
def set_background_size(background_machines, imix):
    """Apply an imix (a list of UDP packet sizes) to every background
    generator machine."""
    for gen_machine in background_machines:
        gen_machine.set_udp_packet_size(imix)
def start_background_traffic(background_machines):
    """Start traffic generation on every background generator machine.

    NOTE(review): the loop body was truncated in this excerpt; restored
    as machine.start(), mirroring stop_background_traffic() — confirm
    against the full source.
    """
    for machine in background_machines:
        machine.start()
def stop_background_traffic(background_machines):
    """Stop traffic generation on every background generator machine.

    NOTE(review): the loop body was truncated in this excerpt; restored
    as machine.stop(), mirroring start_background_traffic() — confirm
    against the full source.
    """
    for machine in background_machines:
        machine.stop()
def parse_data_format_dict(data_format, variables):
    """Recursively substitute placeholders in a data-format template.

    Walks the (possibly nested) data_format dict in place: every leaf
    value that matches a key of 'variables' is replaced by the
    corresponding variable value; nested dicts are processed
    recursively. Mutates data_format; returns None.

    NOTE(review): the dict-type test and the 'else:' line were truncated
    in this excerpt and have been restored (with isinstance instead of a
    type() comparison) — confirm against the full source.
    """
    for k, v in data_format.items():
        if isinstance(v, dict):
            # Recurse into nested template sections.
            RapidTest.parse_data_format_dict(v, variables)
        elif v in variables:
            data_format[k] = variables[v]
def post_data(self, test, variables):
    """Publish one result record to the endpoint described in format.yaml.

    test      : name of the record section in the data-format template.
    variables : values substituted into the template placeholders.

    Posts only when the template defines 'URL', 'Format' and a section
    for this test. Supports 'PushGateway' (plain "key value" lines) and
    'Xtesting' (JSON body) formats.

    NOTE(review): several lines were truncated in this excerpt (URL
    initialisation/assembly, the Xtesting payload assignment, the
    follow-up failure log); they have been restored — confirm against
    the full source. An explicit guard was added so an unknown Format no
    longer dereferences an undefined 'response'.
    """
    var = copy.deepcopy(self.data_format)
    self.parse_data_format_dict(var, variables)
    if var.keys() >= {'URL', test, 'Format'}:
        # The URL may be split over several template entries; concatenate.
        URL = ''
        for value in var['URL'].values():
            URL = URL + value
        HEADERS = {'X-Requested-With': 'Python requests', 'Content-type': 'application/rapid'}
        if var['Format'] == 'PushGateway':
            # PushGateway expects newline-terminated "metric value" lines.
            data = "\n".join("{} {}".format(k, v) for k, v in var[test].items()) + "\n"
            response = requests.post(url=URL, data=data,headers=HEADERS)
        elif var['Format'] == 'Xtesting':
            data = var[test]
            response = requests.post(url=URL, json=data)
        else:
            # Unknown format: nothing was posted, nothing to check.
            return
        if (response.status_code >= 300):
            RapidLog.info('Cannot send metrics to {}'.format(URL))
            RapidLog.info(data)
def report_result(flow_number, size, speed, pps_req_tx, pps_tx, pps_sut_tx,
        pps_rx, lat_avg, lat_perc, lat_perc_max, lat_max, tx, rx, tot_drop,
        elapsed_time,speed_prefix='', lat_avg_prefix='', lat_perc_prefix='',
        lat_max_prefix='', abs_drop_rate_prefix='', drop_rate_prefix=''):
    """Format one measurement row of the result table as a string.

    Any of pps_req_tx / pps_tx / pps_sut_tx / pps_rx / tot_drop /
    lat_perc / elapsed_time may be None, in which case 'NA' is printed
    in that column. A negative flow_number is rendered in parentheses.
    The *_prefix arguments carry terminal colour escape codes.

    NOTE(review): all branch bodies and literals were visible in this
    excerpt, but the if/else keywords themselves were truncated; the
    control-flow skeleton has been restored from the paired
    None-check / formatted-value bodies — confirm against the full
    source.
    """
    # Negative flow counts are shown in parentheses (absolute value).
    if flow_number < 0:
        flow_number_str = '| ({:>4}) |'.format(abs(flow_number))
    else:
        flow_number_str = '|{:>7} |'.format(flow_number)
    if pps_req_tx is None:
        pps_req_tx_str = '{0: >14}'.format(' NA |')
    else:
        pps_req_tx_str = '{:>7.3f} Mpps |'.format(pps_req_tx)
    if pps_tx is None:
        pps_tx_str = '{0: >14}'.format(' NA |')
    else:
        pps_tx_str = '{:>7.3f} Mpps |'.format(pps_tx)
    if pps_sut_tx is None:
        pps_sut_tx_str = '{0: >14}'.format(' NA |')
    else:
        pps_sut_tx_str = '{:>7.3f} Mpps |'.format(pps_sut_tx)
    if pps_rx is None:
        pps_rx_str = '{0: >25}'.format('NA |')
    else:
        pps_rx_str = bcolors.OKBLUE + '{:>4.1f} Gb/s |{:7.3f} Mpps {}|'.format(
            RapidTest.get_speed(pps_rx,size),pps_rx,bcolors.ENDC)
    if tot_drop is None:
        tot_drop_str = ' | NA | '
    else:
        tot_drop_str = ' | {:>9.0f} | '.format(tot_drop)
    if lat_perc is None:
        lat_perc_str = ' |{:^10.10}|'.format('NA')
    elif lat_perc_max == True:
        # '>' marks that the percentile fell in the last (overflow) bucket.
        lat_perc_str = '|>{}{:>5.0f} us{} |'.format(lat_perc_prefix,
            float(lat_perc), bcolors.ENDC)
    else:
        lat_perc_str = '| {}{:>5.0f} us{} |'.format(lat_perc_prefix,
            float(lat_perc), bcolors.ENDC)
    if elapsed_time is None:
        elapsed_time_str = ' NA |'
    else:
        elapsed_time_str = '{:>3.0f} |'.format(elapsed_time)
    return(flow_number_str + '{:>5.1f}'.format(speed) + '% ' + speed_prefix
        + '{:>6.3f}'.format(RapidTest.get_pps(speed,size)) + ' Mpps|' +
        pps_req_tx_str + pps_tx_str + bcolors.ENDC + pps_sut_tx_str +
        pps_rx_str + lat_avg_prefix + ' {:>6.0f}'.format(lat_avg) +
        ' us' + lat_perc_str +lat_max_prefix+'{:>6.0f}'.format(lat_max)
        + ' us | ' + '{:>9.0f}'.format(tx) + ' | {:>9.0f}'.format(rx) +
        ' | '+ abs_drop_rate_prefix+ '{:>9.0f}'.format(tx-rx) +
        tot_drop_str +drop_rate_prefix +
        '{:>5.2f}'.format(100*old_div(float(tx-rx),tx)) + bcolors.ENDC +
        ' |' + elapsed_time_str)
def run_iteration(self, requested_duration, flow_number, size, speed):
    """Run one measurement iteration at the requested speed, packet size
    and flow count, and aggregate generator / SUT statistics.

    Returns a 20-element tuple of aggregated statistics (pps rates,
    latency average/percentile/max, dataplane tx/rx/drop counts, drop
    rate, run counter, measurement duration, background rate, latency
    bucket size and buckets).

    NOTE(review): this excerpt is missing a number of interior source
    lines (initialisations of 'r', 'sleep_time', 'sample_count',
    several 'if'/'else:' guards, dict-literal openers/closers and
    'break' statements). The visible code is kept token-identical;
    NOTE(review) comments below flag each apparent truncation — confirm
    every one against the full source.
    """
    # Latency histogram bucket-width exponent and requested percentile.
    BUCKET_SIZE_EXP = self.gen_machine.bucket_size_exp
    LAT_PERCENTILE = self.test['lat_percentile']
    # NOTE(review): 'r' (run counter) and 'sleep_time' are initialised on
    # lines not visible in this excerpt.
    while (r < self.test['maxr']):
        self.gen_machine.start_latency_cores()
        time.sleep(sleep_time)
        # Sleep_time is needed to be able to do accurate measurements to check for packet loss. We need to make this time large enough so that we do not take the first measurement while some packets from the previous tests migth still be in flight
        # t1: baseline generator counters before traffic is started.
        t1_rx, t1_non_dp_rx, t1_tx, t1_non_dp_tx, t1_drop, t1_tx_fail, t1_tsc, abs_tsc_hz = self.gen_machine.core_stats()
        t1_dp_rx = t1_rx - t1_non_dp_rx
        t1_dp_tx = t1_tx - t1_non_dp_tx
        self.gen_machine.set_generator_speed(0)
        self.gen_machine.start_gen_cores()
        if self.background_machines:
            self.set_background_speed(self.background_machines, 0)
            self.start_background_traffic(self.background_machines)
        if 'ramp_step' in self.test.keys():
            # Ramp the generators up in steps instead of jumping straight
            # to the target speed.
            ramp_speed = self.test['ramp_step']
            # NOTE(review): the 'else:' branch handling the no-ramp case is
            # not visible in this excerpt.
            while ramp_speed < speed:
                self.gen_machine.set_generator_speed(ramp_speed)
                if self.background_machines:
                    self.set_background_speed(self.background_machines, ramp_speed)
                # NOTE(review): a sleep between ramp steps appears to be
                # missing from this view.
                ramp_speed = ramp_speed + self.test['ramp_step']
        self.gen_machine.set_generator_speed(speed)
        if self.background_machines:
            self.set_background_speed(self.background_machines, speed)
        time.sleep(2) ## Needs to be 2 seconds since this 1 sec is the time that PROX uses to refresh the stats. Note that this can be changed in PROX!! Don't do it.
        # Snapshot background generator counters at the start of the run.
        start_bg_gen_stats = []
        for bg_gen_machine in self.background_machines:
            bg_rx, bg_non_dp_rx, bg_tx, bg_non_dp_tx, _, _, bg_tsc, _ = bg_gen_machine.core_stats()
            # NOTE(review): the 'bg_gen_stat = {' opener (and the trailing
            # entries/closing brace) are not visible in this excerpt; the
            # next two lines look like entries of that dict literal.
            "bg_dp_rx" : bg_rx - bg_non_dp_rx,
            "bg_dp_tx" : bg_tx - bg_non_dp_tx,
            start_bg_gen_stats.append(dict(bg_gen_stat))
        if self.sut_machine!= None:
            t2_sut_rx, t2_sut_non_dp_rx, t2_sut_tx, t2_sut_non_dp_tx, t2_sut_drop, t2_sut_tx_fail, t2_sut_tsc, sut_tsc_hz = self.sut_machine.core_stats()
        # t2: generator counters once traffic runs at the target speed.
        t2_rx, t2_non_dp_rx, t2_tx, t2_non_dp_tx, t2_drop, t2_tx_fail, t2_tsc, tsc_hz = self.gen_machine.core_stats()
        # NOTE(review): 'tx' is presumably computed (t2_tx - t1_tx) on a
        # line not visible here — verify against the full source.
        dp_tx = tx - (t2_non_dp_tx - t1_non_dp_tx )
        dp_rx = t2_rx - t1_rx - (t2_non_dp_rx - t1_non_dp_rx)
        tot_dp_drop = dp_tx - dp_rx
        # NOTE(review): the guard conditions for the two critical logs
        # below (presumably tx == 0 and dp_tx == 0 checks) are not
        # visible in this excerpt.
        RapidLog.critical("TX = 0. Test interrupted since no packet has been sent.")
        RapidLog.critical("Only non-dataplane packets (e.g. ARP) sent. Test interrupted since no packet has been sent.")
        # Ask PROX to calibrate the bucket size once we have a PROX function to do this.
        # Measure latency statistics per second
        lat_min, lat_max, lat_avg, used_avg, t2_lat_tsc, lat_hz, buckets = self.gen_machine.lat_stats()
        lat_samples = sum(buckets)
        # Walk the histogram until the requested percentile is reached.
        # NOTE(review): the 'sample_count' initialisation is not visible.
        for sample_percentile, bucket in enumerate(buckets,start=1):
            sample_count += bucket
            if sample_count > (lat_samples * LAT_PERCENTILE):
                # NOTE(review): a 'break' appears to be missing here.
        percentile_max = (sample_percentile == len(buckets))
        # Convert the bucket index into microseconds.
        sample_percentile = sample_percentile * float(2 ** BUCKET_SIZE_EXP) / (old_div(float(lat_hz),float(10**6)))
        if self.test['test'] == 'fixed_rate':
            RapidLog.info(self.report_result(flow_number,size,speed,None,None,None,None,lat_avg,sample_percentile,percentile_max,lat_max, dp_tx, dp_rx , None, None))
        # Reset the aggregates for the main measurement loop below.
        tot_rx = tot_non_dp_rx = tot_tx = tot_non_dp_tx = tot_drop = 0
        lat_avg = used_avg = 0
        buckets_total = buckets
        tot_lat_samples = sum(buckets)
        tot_lat_measurement_duration = float(0)
        tot_core_measurement_duration = float(0)
        tot_sut_core_measurement_duration = float(0)
        tot_sut_rx = tot_sut_non_dp_rx = tot_sut_tx = tot_sut_non_dp_tx = tot_sut_drop = tot_sut_tx_fail = tot_sut_tsc = 0
        lat_avail = core_avail = sut_avail = False
        # Keep sampling until both the core and the latency measurement
        # windows cover the requested duration.
        while (tot_core_measurement_duration - float(requested_duration) <= 0.1) or (tot_lat_measurement_duration - float(requested_duration) <= 0.1):
            lat_min_sample, lat_max_sample, lat_avg_sample, used_sample, t3_lat_tsc, lat_hz, buckets = self.gen_machine.lat_stats()
            # Get statistics after some execution time
            if t3_lat_tsc != t2_lat_tsc:
                single_lat_measurement_duration = (t3_lat_tsc - t2_lat_tsc) * 1.0 / lat_hz # time difference between the 2 measurements, expressed in seconds.
                # A second has passed in between to lat_stats requests. Hence we need to process the results
                tot_lat_measurement_duration = tot_lat_measurement_duration + single_lat_measurement_duration
                if lat_min > lat_min_sample:
                    lat_min = lat_min_sample
                if lat_max < lat_max_sample:
                    lat_max = lat_max_sample
                lat_avg = lat_avg + lat_avg_sample * single_lat_measurement_duration # Sometimes, There is more than 1 second between 2 lat_stats. Hence we will take the latest measurement
                used_avg = used_avg + used_sample * single_lat_measurement_duration # and give it more weigth.
                lat_samples = sum(buckets)
                tot_lat_samples += lat_samples
                # NOTE(review): the 'sample_count' reset is not visible.
                for sample_percentile, bucket in enumerate(buckets,start=1):
                    sample_count += bucket
                    if sample_count > lat_samples * LAT_PERCENTILE:
                        # NOTE(review): a 'break' appears to be missing.
                percentile_max = (sample_percentile == len(buckets))
                # Bucket width in microseconds for this sample.
                bucket_size = float(2 ** BUCKET_SIZE_EXP) / (old_div(float(lat_hz),float(10**6)))
                sample_percentile = sample_percentile * bucket_size
                buckets_total = [buckets_total[i] + buckets[i] for i in range(len(buckets_total))]
                t2_lat_tsc = t3_lat_tsc
            t3_rx, t3_non_dp_rx, t3_tx, t3_non_dp_tx, t3_drop, t3_tx_fail, t3_tsc, tsc_hz = self.gen_machine.core_stats()
            # NOTE(review): an 'if t3_tsc != t2_tsc:' guard appears to be
            # missing from this view.
            single_core_measurement_duration = (t3_tsc - t2_tsc) * 1.0 / tsc_hz # time difference between the 2 measurements, expressed in seconds.
            tot_core_measurement_duration = tot_core_measurement_duration + single_core_measurement_duration
            delta_rx = t3_rx - t2_rx
            # NOTE(review): the 'tot_rx += delta_rx' and 'tot_tx +=
            # delta_tx' accumulation lines are not visible here.
            delta_non_dp_rx = t3_non_dp_rx - t2_non_dp_rx
            tot_non_dp_rx += delta_non_dp_rx
            delta_tx = t3_tx - t2_tx
            delta_non_dp_tx = t3_non_dp_tx - t2_non_dp_tx
            tot_non_dp_tx += delta_non_dp_tx
            delta_dp_tx = delta_tx -delta_non_dp_tx
            delta_dp_rx = delta_rx -delta_non_dp_rx
            delta_dp_drop = delta_dp_tx - delta_dp_rx
            tot_dp_drop += delta_dp_drop
            delta_drop = t3_drop - t2_drop
            tot_drop += delta_drop
            # Roll t3 into t2 for the next sampling round.
            t2_rx, t2_non_dp_rx, t2_tx, t2_non_dp_tx, t2_drop, t2_tx_fail, t2_tsc = t3_rx, t3_non_dp_rx, t3_tx, t3_non_dp_tx, t3_drop, t3_tx_fail, t3_tsc
            if self.sut_machine!=None:
                t3_sut_rx, t3_sut_non_dp_rx, t3_sut_tx, t3_sut_non_dp_tx, t3_sut_drop, t3_sut_tx_fail, t3_sut_tsc, sut_tsc_hz = self.sut_machine.core_stats()
                if t3_sut_tsc != t2_sut_tsc:
                    single_sut_core_measurement_duration = (t3_sut_tsc - t2_sut_tsc) * 1.0 / tsc_hz # time difference between the 2 measurements, expressed in seconds.
                    tot_sut_core_measurement_duration = tot_sut_core_measurement_duration + single_sut_core_measurement_duration
                    tot_sut_rx += t3_sut_rx - t2_sut_rx
                    tot_sut_non_dp_rx += t3_sut_non_dp_rx - t2_sut_non_dp_rx
                    delta_sut_tx = t3_sut_tx - t2_sut_tx
                    tot_sut_tx += delta_sut_tx
                    delta_sut_non_dp_tx = t3_sut_non_dp_tx - t2_sut_non_dp_tx
                    tot_sut_non_dp_tx += delta_sut_non_dp_tx
                    t2_sut_rx, t2_sut_non_dp_rx, t2_sut_tx, t2_sut_non_dp_tx, t2_sut_drop, t2_sut_tx_fail, t2_sut_tsc = t3_sut_rx, t3_sut_non_dp_rx, t3_sut_tx, t3_sut_non_dp_tx, t3_sut_drop, t3_sut_tx_fail, t3_sut_tsc
            if self.test['test'] == 'fixed_rate':
                # Fixed-rate tests report and publish every sample.
                if lat_avail == core_avail == True:
                    lat_avail = core_avail = False
                    pps_req_tx = (delta_tx + delta_drop - delta_rx)/single_core_measurement_duration/1000000
                    pps_tx = delta_tx/single_core_measurement_duration/1000000
                    if self.sut_machine != None and sut_avail:
                        pps_sut_tx = delta_sut_tx/single_sut_core_measurement_duration/1000000
                    # NOTE(review): an 'else: pps_sut_tx = None' branch
                    # appears to be missing from this view.
                    pps_rx = delta_rx/single_core_measurement_duration/1000000
                    RapidLog.info(self.report_result(flow_number, size,
                        speed, pps_req_tx, pps_tx, pps_sut_tx, pps_rx,
                        lat_avg_sample, sample_percentile, percentile_max,
                        lat_max_sample, delta_dp_tx, delta_dp_rx,
                        tot_dp_drop, single_core_measurement_duration))
                    # NOTE(review): the 'variables = {' opener and several
                    # dict entries/the closing brace are not visible in
                    # this excerpt.
                    'Flows': flow_number,
                    'RequestedSpeed': self.get_pps(speed,size),
                    'CoreGenerated': pps_req_tx,
                    'FwdBySUT': pps_sut_tx,
                    'AvgLatency': lat_avg_sample,
                    'PCTLatency': sample_percentile,
                    'MaxLatency': lat_max_sample,
                    'PacketsSent': delta_dp_tx,
                    'PacketsReceived': delta_dp_rx,
                    'PacketsLost': tot_dp_drop,
                    'bucket_size': bucket_size,
                    self.post_data('rapid_flowsizetest', variables)
        # Snapshot background generator counters at the end of the run
        # and derive the average background rate in Mpps.
        end_bg_gen_stats = []
        for bg_gen_machine in self.background_machines:
            bg_rx, bg_non_dp_rx, bg_tx, bg_non_dp_tx, _, _, bg_tsc, bg_hz = bg_gen_machine.core_stats()
            bg_gen_stat = {"bg_dp_rx" : bg_rx - bg_non_dp_rx,
                "bg_dp_tx" : bg_tx - bg_non_dp_tx,
                # NOTE(review): the remaining dict entries (presumably
                # 'bg_tsc' and 'bg_hz') and the closing brace are not
                # visible in this excerpt.
            end_bg_gen_stats.append(dict(bg_gen_stat))
        # NOTE(review): initialisation of 'i' and 'bg_rates' (and any
        # guard for the empty-list case) is not visible in this excerpt.
        while i < len(end_bg_gen_stats):
            # Rate = dataplane rx delta over elapsed wall time (tsc/hz).
            bg_rates.append(0.000001*(end_bg_gen_stats[i]['bg_dp_rx'] -
                start_bg_gen_stats[i]['bg_dp_rx']) / ((end_bg_gen_stats[i]['bg_tsc'] -
                start_bg_gen_stats[i]['bg_tsc']) * 1.0 / end_bg_gen_stats[i]['bg_hz']))
            # NOTE(review): the 'i += 1' increment is not visible here.
        avg_bg_rate = sum(bg_rates) / len(bg_rates)
        RapidLog.debug('Average Background traffic rate: {:>7.3f} Mpps'.format(avg_bg_rate))
        self.gen_machine.stop_gen_cores()
        # Convert the duration-weighted sums into averages.
        lat_avg = old_div(lat_avg, float(tot_lat_measurement_duration))
        used_avg = old_div(used_avg, float(tot_lat_measurement_duration))
        # NOTE(review): the 't4_tsc' initialisation is not visible; this
        # loop waits for a fresh counter snapshot after the cores stop.
        while t4_tsc == t2_tsc:
            t4_rx, t4_non_dp_rx, t4_tx, t4_non_dp_tx, t4_drop, t4_tx_fail, t4_tsc, abs_tsc_hz = self.gen_machine.core_stats()
        if self.test['test'] == 'fixed_rate':
            t4_lat_tsc = t2_lat_tsc
            while t4_lat_tsc == t2_lat_tsc:
                lat_min_sample, lat_max_sample, lat_avg_sample, used_sample, t4_lat_tsc, lat_hz, buckets = self.gen_machine.lat_stats()
            # NOTE(review): the 'sample_count' reset is not visible here.
            lat_samples = sum(buckets)
            for percentile, bucket in enumerate(buckets,start=1):
                sample_count += bucket
                if sample_count > lat_samples * LAT_PERCENTILE:
                    # NOTE(review): a 'break' appears to be missing here.
            percentile_max = (percentile == len(buckets))
            percentile = percentile * bucket_size
            lat_max = lat_max_sample
            lat_avg = lat_avg_sample
            delta_rx = t4_rx - t2_rx
            delta_non_dp_rx = t4_non_dp_rx - t2_non_dp_rx
            delta_tx = t4_tx - t2_tx
            delta_non_dp_tx = t4_non_dp_tx - t2_non_dp_tx
            delta_dp_tx = delta_tx -delta_non_dp_tx
            delta_dp_rx = delta_rx -delta_non_dp_rx
            tot_dp_drop += delta_dp_tx - delta_dp_rx
            # NOTE(review): lines accumulating dp_tx/dp_rx and setting the
            # pps_* values for the fixed-rate return path are not visible
            # in this excerpt.
            drop_rate = 100.0*(dp_tx-dp_rx)/dp_tx
            tot_core_measurement_duration = None
            break ## Not really needed since the while loop will stop when evaluating the value of r
        # NOTE(review): the 'else:' introducing this non-fixed-rate path
        # and the 'sample_count' reset are not visible in this excerpt.
        buckets = buckets_total
        for percentile, bucket in enumerate(buckets_total,start=1):
            sample_count += bucket
            if sample_count > tot_lat_samples * LAT_PERCENTILE:
                # NOTE(review): a 'break' appears to be missing here.
        percentile_max = (percentile == len(buckets_total))
        percentile = percentile * bucket_size
        pps_req_tx = (tot_tx + tot_drop - tot_rx)/tot_core_measurement_duration/1000000.0 # tot_drop is all packets dropped by all tasks. This includes packets dropped at the generator task + packets dropped by the nop task. In steady state, this equals to the number of packets received by this VM
        pps_tx = tot_tx/tot_core_measurement_duration/1000000.0 # tot_tx is all generated packets actually accepted by the interface
        pps_rx = tot_rx/tot_core_measurement_duration/1000000.0 # tot_rx is all packets received by the nop task = all packets received in the gen VM
        if self.sut_machine != None and sut_avail:
            pps_sut_tx = tot_sut_tx / tot_sut_core_measurement_duration / 1000000.0
        # NOTE(review): an 'else: pps_sut_tx = None' branch appears to be
        # missing from this view.
        # End-to-end dataplane totals for the whole run (t1 -> t4).
        dp_tx = (t4_tx - t1_tx) - (t4_non_dp_tx - t1_non_dp_tx)
        dp_rx = (t4_rx - t1_rx) - (t4_non_dp_rx - t1_non_dp_rx)
        tot_dp_drop = dp_tx - dp_rx
        drop_rate = 100.0*tot_dp_drop/dp_tx
        # Stop iterating once the drop rate is acceptable, loss is exactly
        # zero with a zero threshold, or the absolute-loss cap is hit.
        if ((drop_rate < self.test['drop_rate_threshold']) or (tot_dp_drop == self.test['drop_rate_threshold'] ==0) or (tot_dp_drop > self.test['maxz'])):
            # NOTE(review): the loop-exit statement ('break') is not
            # visible in this excerpt.
    self.gen_machine.stop_latency_cores()
    return(pps_req_tx,pps_tx,pps_sut_tx,pps_rx,lat_avg,percentile,percentile_max,lat_max,dp_tx,dp_rx,tot_dp_drop,(t4_tx_fail - t1_tx_fail),drop_rate,lat_min,used_avg,r,tot_core_measurement_duration,avg_bg_rate,bucket_size,buckets)
429 return(pps_req_tx,pps_tx,pps_sut_tx,pps_rx,lat_avg,percentile,percentile_max,lat_max,dp_tx,dp_rx,tot_dp_drop,(t4_tx_fail - t1_tx_fail),drop_rate,lat_min,used_avg,r,tot_core_measurement_duration,avg_bg_rate,bucket_size,buckets)