bugfix: QEMU shared dir failure
[vswitchperf.git] / testcases / testcase.py
1 # Copyright 2015-2016 Intel Corporation.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 #   http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """TestCase base class
15 """
16
17 from collections import OrderedDict
18 import copy
19 import csv
20 import logging
21 import math
22 import os
23 import re
24 import time
25 import subprocess
26
27 from conf import settings as S
28 from conf import get_test_param
29 import core.component_factory as component_factory
30 from core.loader import Loader
31 from core.results.results_constants import ResultsConstants
32 from tools import tasks
33 from tools import hugepages
34 from tools import functions
35 from tools.pkt_gen.trafficgen.trafficgenhelper import TRAFFIC_DEFAULTS
36
37
class TestCase(object):
    """TestCase base class

    In this basic form runs RFC2544 throughput test
    """
    def __init__(self, cfg):
        """Pull out fields from test config

        :param cfg: A dictionary of string-value pairs describing the test
            configuration. Both the key and values strings use well-known
            values.
        :raises TypeError: If the bidirectional option is not a string.
        """
        self._testcase_start_time = time.time()
        self._hugepages_mounted = False
        self._traffic_ctl = None
        self._vnf_ctl = None
        self._vswitch_ctl = None
        self._collector = None
        self._loadgen = None
        self._output_file = None
        self._tc_results = None
        # original values of global settings overridden by this testcase;
        # restored by run_finalize() via S.load_from_dict()
        self._settings_original = {}
        self._settings_paths_modified = False
        # NOTE: fixed attribute typo ('_testcast_run_time'); _append_results()
        # reads self._testcase_run_time, which otherwise would not exist
        # until run() assigns it
        self._testcase_run_time = None

        # testcase specific settings take precedence over global configuration
        self._update_settings('VSWITCH', cfg.get('vSwitch', S.getValue('VSWITCH')))
        self._update_settings('VNF', cfg.get('VNF', S.getValue('VNF')))
        self._update_settings('TRAFFICGEN', cfg.get('Trafficgen', S.getValue('TRAFFICGEN')))
        self._update_settings('TEST_PARAMS', cfg.get('Parameters', S.getValue('TEST_PARAMS')))

        # update global settings
        functions.settings_update_paths()
        guest_loopback = get_test_param('guest_loopback', None)
        if guest_loopback:
            # we can put just one item, it'll be expanded automatically for all VMs
            self._update_settings('GUEST_LOOPBACK', [guest_loopback])

        # set test parameters; CLI options take precedence to testcase settings
        self._logger = logging.getLogger(__name__)
        self.name = cfg['Name']
        self.desc = cfg.get('Description', 'No description given.')
        self.test = cfg.get('TestSteps', None)

        bidirectional = cfg.get('biDirectional', TRAFFIC_DEFAULTS['bidir'])
        bidirectional = get_test_param('bidirectional', bidirectional)
        if not isinstance(bidirectional, str):
            raise TypeError(
                'Bi-dir value must be of type string in testcase configuration')
        bidirectional = bidirectional.title()  # Keep things consistent

        traffic_type = cfg.get('Traffic Type', TRAFFIC_DEFAULTS['traffic_type'])
        traffic_type = get_test_param('traffic_type', traffic_type)

        framerate = cfg.get('iLoad', TRAFFIC_DEFAULTS['frame_rate'])
        framerate = get_test_param('iload', framerate)

        self.deployment = cfg['Deployment']
        self._frame_mod = cfg.get('Frame Modification', None)

        self._tunnel_type = None
        self._tunnel_operation = None

        # 'op2p' deployments exercise tunnelling protocols and require
        # extra tunnel configuration
        if self.deployment == 'op2p':
            self._tunnel_operation = cfg['Tunnel Operation']

            if 'Tunnel Type' in cfg:
                self._tunnel_type = cfg['Tunnel Type']
                self._tunnel_type = get_test_param('tunnel_type',
                                                   self._tunnel_type)

        # read configuration of streams; CLI parameter takes precedence to
        # testcase definition
        multistream = cfg.get('MultiStream', TRAFFIC_DEFAULTS['multistream'])
        multistream = get_test_param('multistream', multistream)
        stream_type = cfg.get('Stream Type', TRAFFIC_DEFAULTS['stream_type'])
        stream_type = get_test_param('stream_type', stream_type)
        pre_installed_flows = cfg.get('Pre-installed Flows', TRAFFIC_DEFAULTS['pre_installed_flows'])
        pre_installed_flows = get_test_param('pre-installed_flows', pre_installed_flows)

        # check if test requires background load and which generator it uses
        self._load_cfg = cfg.get('Load', None)
        if self._load_cfg and 'tool' in self._load_cfg:
            self._loadgen = self._load_cfg['tool']
        else:
            # background load is not requested, so use dummy implementation
            self._loadgen = "Dummy"

        if self._frame_mod:
            self._frame_mod = self._frame_mod.lower()
        self._results_dir = S.getValue('RESULTS_PATH')

        # set traffic details, so they can be passed to vswitch and traffic ctls
        self._traffic = copy.deepcopy(TRAFFIC_DEFAULTS)
        self._traffic.update({'traffic_type': traffic_type,
                              'flow_type': cfg.get('Flow Type', TRAFFIC_DEFAULTS['flow_type']),
                              'bidir': bidirectional,
                              'tunnel_type': self._tunnel_type,
                              'multistream': int(multistream),
                              'stream_type': stream_type,
                              'pre_installed_flows' : pre_installed_flows,
                              'frame_rate': int(framerate)})

        # Packet Forwarding mode
        self._vswitch_none = 'none' == S.getValue('VSWITCH').strip().lower()

        # trafficgen configuration required for tests of tunneling protocols
        if self.deployment == "op2p":
            self._traffic['l2'].update({'srcmac':
                                        S.getValue('TRAFFICGEN_PORT1_MAC'),
                                        'dstmac':
                                        S.getValue('TRAFFICGEN_PORT2_MAC')})

            self._traffic['l3'].update({'srcip':
                                        S.getValue('TRAFFICGEN_PORT1_IP'),
                                        'dstip':
                                        S.getValue('TRAFFICGEN_PORT2_IP')})

            if self._tunnel_operation == "decapsulation":
                self._traffic['l2'] = S.getValue(self._tunnel_type.upper() + '_FRAME_L2')
                self._traffic['l3'] = S.getValue(self._tunnel_type.upper() + '_FRAME_L3')
                self._traffic['l4'] = S.getValue(self._tunnel_type.upper() + '_FRAME_L4')
        elif S.getValue('NICS')[0]['type'] == 'vf' or S.getValue('NICS')[1]['type'] == 'vf':
            # NIC type 'vf' (presumably SRIOV virtual function - TODO confirm):
            # steer traffic at the VFs' own MAC addresses
            mac1 = S.getValue('NICS')[0]['mac']
            mac2 = S.getValue('NICS')[1]['mac']
            if mac1 and mac2:
                self._traffic['l2'].update({'srcmac': mac2, 'dstmac': mac1})
            else:
                self._logger.debug("MAC addresses can not be read")
    def run_initialize(self):
        """Prepare the test execution environment.

        Mounts hugepages when required, instantiates traffic generator,
        VNF, vswitch (or packet forwarder), collector and load generator
        controllers via the component factory, and derives the CSV output
        file name.

        :raises RuntimeError: If not enough hugepages are free for the test.
        """
        self._logger.debug(self.name)

        # mount hugepages if needed
        self._mount_hugepages()

        self._logger.debug("Controllers:")
        loader = Loader()
        # traffic controller is selected by traffic type and the configured
        # traffic generator class
        self._traffic_ctl = component_factory.create_traffic(
            self._traffic['traffic_type'],
            loader.get_trafficgen_class())

        self._vnf_ctl = component_factory.create_vnf(
            self.deployment,
            loader.get_vnf_class())

        # verify enough hugepages are free to run the testcase
        if not self._check_for_enough_hugepages():
            raise RuntimeError('Not enough hugepages free to run test.')

        # perform guest related handling
        if self._vnf_ctl.get_vnfs_number():
            # copy sources of l2 forwarding tools into VM shared dir if needed
            self._copy_fwd_tools_for_all_guests()

            # in case of multi VM in parallel, set the number of streams to the number of VMs
            if self.deployment.startswith('pvpv'):
                # for each VM NIC pair we need an unique stream
                streams = 0
                for vm_nic in S.getValue('GUEST_NICS_NR')[:self._vnf_ctl.get_vnfs_number()]:
                    streams += int(vm_nic / 2) if vm_nic > 1 else 1
                self._logger.debug("VMs with parallel connection were detected. "
                                   "Thus Number of streams was set to %s", streams)
                self._traffic.update({'multistream': streams})

            # OVS Vanilla requires guest VM MAC address and IPs to work
            if 'linux_bridge' in S.getValue('GUEST_LOOPBACK'):
                self._traffic['l2'].update({'srcmac': S.getValue('VANILLA_TGEN_PORT1_MAC'),
                                            'dstmac': S.getValue('VANILLA_TGEN_PORT2_MAC')})
                self._traffic['l3'].update({'srcip': S.getValue('VANILLA_TGEN_PORT1_IP'),
                                            'dstip': S.getValue('VANILLA_TGEN_PORT2_IP')})

        # VSWITCH 'none' means direct packet forwarding without a vswitch
        if self._vswitch_none:
            self._vswitch_ctl = component_factory.create_pktfwd(
                self.deployment,
                loader.get_pktfwd_class())
        else:
            self._vswitch_ctl = component_factory.create_vswitch(
                self.deployment,
                loader.get_vswitch_class(),
                self._traffic,
                self._tunnel_operation)

        self._collector = component_factory.create_collector(
            loader.get_collector_class(),
            self._results_dir, self.name)
        # replaces the loadgen tool name (string) with its controller instance
        self._loadgen = component_factory.create_loadgen(
            self._loadgen,
            self._load_cfg)

        self._output_file = os.path.join(self._results_dir, "result_" + self.name +
                                         "_" + self.deployment + ".csv")

        self._logger.debug("Setup:")
    def run_finalize(self):
        """Tear down the test execution environment.

        Unmounts hugepages mounted by this testcase, restores global
        settings overridden in __init__, and removes any network
        namespaces and veth ports tracked under /tmp.
        """
        # umount hugepages if mounted
        self._umount_hugepages()

        # restore original settings
        S.load_from_dict(self._settings_original)

        # cleanup any namespaces created
        if os.path.isdir('/tmp/namespaces'):
            # local import: only needed when namespace cleanup actually runs
            import tools.namespace
            namespace_list = os.listdir('/tmp/namespaces')
            if len(namespace_list):
                self._logger.info('Cleaning up namespaces')
            for name in namespace_list:
                tools.namespace.delete_namespace(name)
            os.rmdir('/tmp/namespaces')
        # cleanup any veth ports created
        if os.path.isdir('/tmp/veth'):
            # local import: only needed when veth cleanup actually runs
            import tools.veth
            veth_list = os.listdir('/tmp/veth')
            if len(veth_list):
                self._logger.info('Cleaning up veth ports')
            for eth in veth_list:
                # veth pairs are tracked as entries named '<port1>-<port2>'
                port1, port2 = eth.split('-')
                tools.veth.del_veth_port(port1, port2)
            os.rmdir('/tmp/veth')
263
    def run_report(self):
        """Report test results.

        Prints collector statistics and, unless vswitchperf runs in
        'trafficgen-off' mode (no traffic was sent), prints traffic
        generator results and appends them to the CSV output file.
        """
        self._logger.debug("self._collector Results:")
        self._collector.print_results()

        # nothing to report from the traffic generator if it never ran
        if S.getValue('mode') != 'trafficgen-off':
            self._logger.debug("Traffic Results:")
            self._traffic_ctl.print_results()

            self._tc_results = self._append_results(self._traffic_ctl.get_results())
            TestCase.write_result_to_file(self._tc_results, self._output_file)
276
    def run(self):
        """Run the test

        All setup and teardown through controllers is included.
        """
        # prepare test execution environment
        self.run_initialize()

        # controllers are context managers: leaving the nested 'with' blocks
        # stops the vswitch, loadgen, VNFs and collector even on failure
        with self._vswitch_ctl, self._loadgen:
            with self._vnf_ctl, self._collector:
                if not self._vswitch_none:
                    self._add_flows()

                # run traffic generator if requested, otherwise wait for manual termination
                if S.getValue('mode') == 'trafficgen-off':
                    time.sleep(2)
                    self._logger.debug("All is set. Please run traffic generator manually.")
                    input(os.linesep + "Press Enter to terminate vswitchperf..." + os.linesep + os.linesep)
                else:
                    if S.getValue('mode') == 'trafficgen-pause':
                        time.sleep(2)
                        # NOTE(review): a plain Enter yields '' which is caught
                        # by 'not choice', so the None entry in true_vals can
                        # never match input() -- confirm intended behavior
                        true_vals = ('yes', 'y', 'ye', None)
                        while True:
                            choice = input(os.linesep + 'Transmission paused, should'
                                           ' transmission be resumed? ' + os.linesep).lower()
                            if not choice or choice not in true_vals:
                                print('Please respond with \'yes\' or \'y\' ', end='')
                            else:
                                break
                    with self._traffic_ctl:
                        self._traffic_ctl.send_traffic(self._traffic)

                    # dump vswitch flows before they are affected by VNF termination
                    if not self._vswitch_none:
                        self._vswitch_ctl.dump_vswitch_flows()

        # tear down test execution environment and log results
        self.run_finalize()

        self._testcase_run_time = time.strftime("%H:%M:%S",
                                  time.gmtime(time.time() - self._testcase_start_time))
        logging.info("Testcase execution time: " + self._testcase_run_time)
        # report test results
        self.run_report()
321
322     def _update_settings(self, param, value):
323         """ Check value of given configuration parameter
324         In case that new value is different, then testcase
325         specific settings is updated and original value stored
326
327         :param param: Name of parameter inside settings
328         :param value: Disired parameter value
329         """
330         orig_value = S.getValue(param)
331         if orig_value != value:
332             self._settings_original[param] = orig_value
333             S.setValue(param, value)
334
335     def _append_results(self, results):
336         """
337         Method appends mandatory Test Case results to list of dictionaries.
338
339         :param results: list of dictionaries which contains results from
340                 traffic generator.
341
342         :returns: modified list of dictionaries.
343         """
344         for item in results:
345             item[ResultsConstants.ID] = self.name
346             item[ResultsConstants.DEPLOYMENT] = self.deployment
347             item[ResultsConstants.TRAFFIC_TYPE] = self._traffic['l3']['proto']
348             item[ResultsConstants.TEST_RUN_TIME] = self._testcase_run_time
349             if self._traffic['multistream']:
350                 item[ResultsConstants.SCAL_STREAM_COUNT] = self._traffic['multistream']
351                 item[ResultsConstants.SCAL_STREAM_TYPE] = self._traffic['stream_type']
352                 item[ResultsConstants.SCAL_PRE_INSTALLED_FLOWS] = self._traffic['pre_installed_flows']
353             if self._vnf_ctl.get_vnfs_number():
354                 item[ResultsConstants.GUEST_LOOPBACK] = ' '.join(S.getValue('GUEST_LOOPBACK'))
355             if self._tunnel_type:
356                 item[ResultsConstants.TUNNEL_TYPE] = self._tunnel_type
357         return results
358
359     def _copy_fwd_tools_for_all_guests(self):
360         """Copy dpdk and l2fwd code to GUEST_SHARE_DIR[s] based on selected deployment.
361         """
362         # consider only VNFs involved in the test
363         for guest_dir in set(S.getValue('GUEST_SHARE_DIR')[:self._vnf_ctl.get_vnfs_number()]):
364             self._copy_fwd_tools_for_guest(guest_dir)
365
    def _copy_fwd_tools_for_guest(self, guest_dir):
        """Copy dpdk and l2fwd code to GUEST_SHARE_DIR of VM

        :param guest_dir: Path to the shared directory of the guest VM;
            it is recreated from scratch before copying.
        :raises subprocess.CalledProcessError: If any copy command fails.
        """
        # remove shared dir if it exists to avoid issues with file consistency
        if os.path.exists(guest_dir):
            tasks.run_task(['rm', '-f', '-r', guest_dir], self._logger,
                           'Removing content of shared directory...', True)

        # directory to share files between host and guest
        os.makedirs(guest_dir)

        # copy sources into shared dir only if necessary
        guest_loopback = set(S.getValue('GUEST_LOOPBACK'))
        if 'testpmd' in guest_loopback:
            try:
                # exclude whole .git/ subdirectory and all o-files;
                # It is assumed, that the same RTE_TARGET is used in both host
                # and VMs; This simplification significantly speeds up testpmd
                # build. If we will need a different RTE_TARGET in VM,
                # then we have to build whole DPDK from the scratch in VM.
                # In that case we can copy just DPDK sources (e.g. by excluding
                # all items obtained by git status -unormal --porcelain).
                # NOTE: Excluding RTE_TARGET directory won't help on systems,
                # where DPDK is built for multiple targets (e.g. for gcc & icc)
                exclude = []
                exclude.append(r'--exclude=.git/')
                exclude.append(r'--exclude=*.o')
                # joining with '' appends a trailing separator, so rsync copies
                # the directory contents rather than the directory itself
                tasks.run_task(['rsync', '-a', '-r', '-l'] + exclude +
                               [os.path.join(S.getValue('TOOLS')['dpdk_src'], ''),
                                os.path.join(guest_dir, 'DPDK')],
                               self._logger,
                               'Copying DPDK to shared directory...',
                               True)
            except subprocess.CalledProcessError:
                self._logger.error('Unable to copy DPDK to shared directory')
                raise
        if 'l2fwd' in guest_loopback:
            try:
                tasks.run_task(['rsync', '-a', '-r', '-l',
                                os.path.join(S.getValue('ROOT_DIR'), 'src/l2fwd/'),
                                os.path.join(guest_dir, 'l2fwd')],
                               self._logger,
                               'Copying l2fwd to shared directory...',
                               True)
            except subprocess.CalledProcessError:
                self._logger.error('Unable to copy l2fwd to shared directory')
                raise
415
    def _mount_hugepages(self):
        """Mount hugepages if usage of DPDK or Qemu is detected

        Idempotent: does nothing when hugepages were already mounted by
        this instance.
        """
        # hugepages are needed by DPDK and Qemu; a 'v' in the deployment name
        # presumably indicates a VM (e.g. pvp, pvvp) -- TODO confirm; TestSteps
        # whose first element starts with 'vnf' also imply VMs. Note 'and'
        # binds tighter than the surrounding 'or' chain in this condition.
        if not self._hugepages_mounted and \
            (self.deployment.count('v') or \
             S.getValue('VSWITCH').lower().count('dpdk') or \
             self._vswitch_none or \
             self.test and 'vnf' in [step[0][0:3] for step in self.test]):
            hugepages.mount_hugepages()
            self._hugepages_mounted = True
427
428     def _umount_hugepages(self):
429         """Umount hugepages if they were mounted before
430         """
431         if self._hugepages_mounted:
432             hugepages.umount_hugepages()
433             self._hugepages_mounted = False
434
435     def _check_for_enough_hugepages(self):
436         """Check to make sure enough hugepages are free to satisfy the
437         test environment.
438         """
439         hugepages_needed = 0
440         hugepage_size = hugepages.get_hugepage_size()
441         # get hugepage amounts per guest involved in the test
442         for guest in range(self._vnf_ctl.get_vnfs_number()):
443             hugepages_needed += math.ceil((int(S.getValue(
444                 'GUEST_MEMORY')[guest]) * 1000) / hugepage_size)
445
446         # get hugepage amounts for each socket on dpdk
447         sock0_mem, sock1_mem = 0, 0
448         if S.getValue('VSWITCH').lower().count('dpdk'):
449             # the import below needs to remain here and not put into the module
450             # imports because of an exception due to settings not yet loaded
451             from vswitches import ovs_dpdk_vhost
452             if ovs_dpdk_vhost.OvsDpdkVhost.old_dpdk_config():
453                 match = re.search(
454                     r'-socket-mem\s+(\d+),(\d+)',
455                     ''.join(S.getValue('VSWITCHD_DPDK_ARGS')))
456                 if match:
457                     sock0_mem, sock1_mem = (int(match.group(1)) * 1024 / hugepage_size,
458                                             int(match.group(2)) * 1024 / hugepage_size)
459                 else:
460                     logging.info(
461                         'Could not parse socket memory config in dpdk params.')
462             else:
463                 sock0_mem, sock1_mem = (
464                     S.getValue(
465                         'VSWITCHD_DPDK_CONFIG')['dpdk-socket-mem'].split(','))
466                 sock0_mem, sock1_mem = (int(sock0_mem) * 1024 / hugepage_size,
467                                         int(sock1_mem) * 1024 / hugepage_size)
468
469         # If hugepages needed, verify the amounts are free
470         if any([hugepages_needed, sock0_mem, sock1_mem]):
471             free_hugepages = hugepages.get_free_hugepages()
472             if hugepages_needed:
473                 logging.info('Need %s hugepages free for guests',
474                              hugepages_needed)
475                 result1 = free_hugepages >= hugepages_needed
476                 free_hugepages -= hugepages_needed
477             else:
478                 result1 = True
479
480             if sock0_mem:
481                 logging.info('Need %s hugepages free for dpdk socket 0',
482                              sock0_mem)
483                 result2 = hugepages.get_free_hugepages('0') >= sock0_mem
484                 free_hugepages -= sock0_mem
485             else:
486                 result2 = True
487
488             if sock1_mem:
489                 logging.info('Need %s hugepages free for dpdk socket 1',
490                              sock1_mem)
491                 result3 = hugepages.get_free_hugepages('1') >= sock1_mem
492                 free_hugepages -= sock1_mem
493             else:
494                 result3 = True
495
496             logging.info('Need a total of {} total hugepages'.format(
497                 hugepages_needed + sock1_mem + sock0_mem))
498
499             # The only drawback here is sometimes dpdk doesn't release
500             # its hugepages on a test failure. This could cause a test
501             # to fail when dpdk would be OK to start because it will just
502             # use the previously allocated hugepages.
503             result4 = True if free_hugepages >= 0 else False
504
505             return all([result1, result2, result3, result4])
506         else:
507             return True
508
509     @staticmethod
510     def write_result_to_file(results, output):
511         """Write list of dictionaries to a CSV file.
512
513         Each element on list will create separate row in output file.
514         If output file already exists, data will be appended at the end,
515         otherwise it will be created.
516
517         :param results: list of dictionaries.
518         :param output: path to output file.
519         """
520         with open(output, 'a') as csvfile:
521
522             logging.info("Write results to file: " + output)
523             fieldnames = TestCase._get_unique_keys(results)
524
525             writer = csv.DictWriter(csvfile, fieldnames)
526
527             if not csvfile.tell():  # file is now empty
528                 writer.writeheader()
529
530             for result in results:
531                 writer.writerow(result)
532
533     @staticmethod
534     def _get_unique_keys(list_of_dicts):
535         """Gets unique key values as ordered list of strings in given dicts
536
537         :param list_of_dicts: list of dictionaries.
538
539         :returns: list of unique keys(strings).
540         """
541         result = OrderedDict()
542         for item in list_of_dicts:
543             for key in item.keys():
544                 result[key] = ''
545
546         return list(result.keys())
547
    def _add_flows(self):
        """Add flows to the vswitch

        The flow set is selected by the testcase's 'Frame Modification'
        option (self._frame_mod). Each branch installs one flow per
        direction (metadata 1 and 2) into table 2, continuing to table 3.
        An unknown or missing frame modification installs nothing.
        """
        vswitch = self._vswitch_ctl.get_vswitch()
        # TODO BOM 15-08-07 the frame mod code assumes that the
        # physical ports are ports 1 & 2. The actual numbers
        # need to be retrieved from the vSwitch and the metadata value
        # updated accordingly.
        bridge = S.getValue('VSWITCH_BRIDGE_NAME')
        if self._frame_mod == "vlan":
            # 0x8100 => VLAN ethertype
            self._logger.debug(" ****   VLAN   ***** ")
            flow = {'table':'2', 'priority':'1000', 'metadata':'2',
                    'actions': ['push_vlan:0x8100', 'goto_table:3']}
            vswitch.add_flow(bridge, flow)
            flow = {'table':'2', 'priority':'1000', 'metadata':'1',
                    'actions': ['push_vlan:0x8100', 'goto_table:3']}
            vswitch.add_flow(bridge, flow)
        elif self._frame_mod == "mpls":
            # 0x8847 => MPLS unicast ethertype
            self._logger.debug(" ****   MPLS  ***** ")
            flow = {'table':'2', 'priority':'1000', 'metadata':'2',
                    'actions': ['push_mpls:0x8847', 'goto_table:3']}
            vswitch.add_flow(bridge, flow)
            flow = {'table':'2', 'priority':'1000', 'metadata':'1',
                    'actions': ['push_mpls:0x8847', 'goto_table:3']}
            vswitch.add_flow(bridge, flow)
        elif self._frame_mod == "mac":
            # rewrite the source MAC address, a different one per direction
            flow = {'table':'2', 'priority':'1000', 'metadata':'2',
                    'actions': ['mod_dl_src:22:22:22:22:22:22',
                                'goto_table:3']}
            vswitch.add_flow(bridge, flow)
            flow = {'table':'2', 'priority':'1000', 'metadata':'1',
                    'actions': ['mod_dl_src:11:11:11:11:11:11',
                                'goto_table:3']}
            vswitch.add_flow(bridge, flow)
        elif self._frame_mod == "dscp":
            # DSCP 184d == 0x4E<<2 => 'Expedited Forwarding'
            flow = {'table':'2', 'priority':'1000', 'metadata':'2',
                    'dl_type':'0x0800',
                    'actions': ['mod_nw_tos:184', 'goto_table:3']}
            vswitch.add_flow(bridge, flow)
            flow = {'table':'2', 'priority':'1000', 'metadata':'1',
                    'dl_type':'0x0800',
                    'actions': ['mod_nw_tos:184', 'goto_table:3']}
            vswitch.add_flow(bridge, flow)
        elif self._frame_mod == "ttl":
            # 251 and 241 are the highest prime numbers < 255
            flow = {'table':'2', 'priority':'1000', 'metadata':'2',
                    'dl_type':'0x0800',
                    'actions': ['mod_nw_ttl:251', 'goto_table:3']}
            vswitch.add_flow(bridge, flow)
            flow = {'table':'2', 'priority':'1000', 'metadata':'1',
                    'dl_type':'0x0800',
                    'actions': ['mod_nw_ttl:241', 'goto_table:3']}
            vswitch.add_flow(bridge, flow)
        elif self._frame_mod == "ip_addr":
            # rewrite IPv4 source/destination; mirrored between directions
            flow = {'table':'2', 'priority':'1000', 'metadata':'2',
                    'dl_type':'0x0800',
                    'actions': ['mod_nw_src:10.10.10.10',
                                'mod_nw_dst:20.20.20.20',
                                'goto_table:3']}
            vswitch.add_flow(bridge, flow)
            flow = {'table':'2', 'priority':'1000', 'metadata':'1',
                    'dl_type':'0x0800',
                    'actions': ['mod_nw_src:20.20.20.20',
                                'mod_nw_dst:10.10.10.10',
                                'goto_table:3']}
            vswitch.add_flow(bridge, flow)
        elif self._frame_mod == "ip_port":
            # TODO BOM 15-08-27 The traffic generated is assumed
            # to be UDP (nw_proto 17d) which is the default case but
            # we will need to pick up the actual traffic params in use.
            flow = {'table':'2', 'priority':'1000', 'metadata':'2',
                    'dl_type':'0x0800', 'nw_proto':'17',
                    'actions': ['mod_tp_src:44444',
                                'mod_tp_dst:44444', 'goto_table:3']}
            vswitch.add_flow(bridge, flow)
            flow = {'table':'2', 'priority':'1000', 'metadata':'1',
                    'dl_type':'0x0800', 'nw_proto':'17',
                    'actions': ['mod_tp_src:44444',
                                'mod_tp_dst:44444', 'goto_table:3']}
            vswitch.add_flow(bridge, flow)
        else:
            # no (or unknown) frame modification requested; install nothing
            pass