Adding Mitaka networking-odl module with the ODL topology based port 45/18745/2
authorWojciech Dec <wdec@cisco.com>
Tue, 16 Aug 2016 17:27:01 +0000 (19:27 +0200)
committerWojciech Dec <wdec@cisco.com>
Tue, 16 Aug 2016 17:29:27 +0000 (19:29 +0200)
binding resolution mechanism from https://review.openstack.org/333186

Change-Id: I10d400aac9bb639c146527f0f93e6925cb74d9de
Signed-off-by: Wojciech Dec <wdec@cisco.com>
171 files changed:
.gitreview [new file with mode: 0644]
networking-odl/.coveragerc [new file with mode: 0644]
networking-odl/.gitignore [new file with mode: 0644]
networking-odl/.gitreview [new file with mode: 0644]
networking-odl/.mailmap [new file with mode: 0644]
networking-odl/.pylintrc [new file with mode: 0644]
networking-odl/.testr.conf [new file with mode: 0644]
networking-odl/CONTRIBUTING.rst [new file with mode: 0644]
networking-odl/HACKING.rst [new file with mode: 0644]
networking-odl/LICENSE [new file with mode: 0644]
networking-odl/MANIFEST.in [new file with mode: 0644]
networking-odl/README.rst [new file with mode: 0644]
networking-odl/TESTING.rst [new file with mode: 0644]
networking-odl/babel.cfg [new file with mode: 0644]
networking-odl/devstack/README.rst [new file with mode: 0644]
networking-odl/devstack/devstackgaterc [new file with mode: 0644]
networking-odl/devstack/entry_points [new file with mode: 0644]
networking-odl/devstack/functions [new file with mode: 0644]
networking-odl/devstack/local.conf.example [new file with mode: 0644]
networking-odl/devstack/odl-releases/beryllium-0.4.0 [new file with mode: 0644]
networking-odl/devstack/odl-releases/beryllium-0.4.1-SR1 [new file with mode: 0644]
networking-odl/devstack/odl-releases/beryllium-0.4.2-SR2 [new file with mode: 0644]
networking-odl/devstack/odl-releases/beryllium-0.4.3-SR3 [new file with mode: 0644]
networking-odl/devstack/odl-releases/beryllium-snapshot-0.4.3 [new file with mode: 0644]
networking-odl/devstack/odl-releases/beryllium-snapshot-0.4.4 [new file with mode: 0644]
networking-odl/devstack/odl-releases/boron-snapshot-0.5.0 [new file with mode: 0644]
networking-odl/devstack/odl-releases/carbon-snapshot-0.6.0 [new file with mode: 0644]
networking-odl/devstack/odl-releases/common [new file with mode: 0644]
networking-odl/devstack/odl-releases/helium-0.2.3-SR3 [new file with mode: 0644]
networking-odl/devstack/odl-releases/lithium-0.3.1-SR1 [new file with mode: 0644]
networking-odl/devstack/odl-releases/lithium-0.3.2-SR2 [new file with mode: 0644]
networking-odl/devstack/odl-releases/lithium-0.3.3-SR3 [new file with mode: 0644]
networking-odl/devstack/odl-releases/lithium-0.3.4-SR4 [new file with mode: 0644]
networking-odl/devstack/override-defaults [new file with mode: 0644]
networking-odl/devstack/plugin.sh [new file with mode: 0644]
networking-odl/devstack/post_test_hook.sh [new file with mode: 0644]
networking-odl/devstack/pre_test_hook.sh [new file with mode: 0644]
networking-odl/devstack/settings [new file with mode: 0644]
networking-odl/devstack/settings.odl [new file with mode: 0644]
networking-odl/devstack/setup_java.sh [new file with mode: 0644]
networking-odl/doc/source/conf.py [new file with mode: 0644]
networking-odl/doc/source/contributing.rst [new file with mode: 0644]
networking-odl/doc/source/devref/hostconfig.rst [new file with mode: 0644]
networking-odl/doc/source/devref/index.rst [new file with mode: 0644]
networking-odl/doc/source/index.rst [new file with mode: 0644]
networking-odl/doc/source/installation.rst [new file with mode: 0644]
networking-odl/doc/source/readme.rst [new file with mode: 0644]
networking-odl/doc/source/specs.rst [new file with mode: 0644]
networking-odl/doc/source/specs/journal-recovery.rst [new file with mode: 0644]
networking-odl/doc/source/specs/qos-driver.rst [new file with mode: 0644]
networking-odl/doc/source/specs/sfc-driver.rst [new file with mode: 0644]
networking-odl/doc/source/usage.rst [new file with mode: 0644]
networking-odl/etc/neutron/plugins/ml2/ml2_conf_odl.ini [new file with mode: 0644]
networking-odl/etc/policy.json [new file with mode: 0644]
networking-odl/networking_odl/__init__.py [new file with mode: 0644]
networking-odl/networking_odl/_i18n.py [new file with mode: 0644]
networking-odl/networking_odl/cmd/__init__.py [new file with mode: 0644]
networking-odl/networking_odl/cmd/set_ovs_hostconfigs.py [new file with mode: 0644]
networking-odl/networking_odl/cmd/test_setup_hostconfig.sh [new file with mode: 0755]
networking-odl/networking_odl/common/__init__.py [new file with mode: 0644]
networking-odl/networking_odl/common/cache.py [new file with mode: 0644]
networking-odl/networking_odl/common/callback.py [new file with mode: 0644]
networking-odl/networking_odl/common/client.py [new file with mode: 0644]
networking-odl/networking_odl/common/config.py [new file with mode: 0644]
networking-odl/networking_odl/common/constants.py [new file with mode: 0644]
networking-odl/networking_odl/common/exceptions.py [new file with mode: 0644]
networking-odl/networking_odl/common/filters.py [new file with mode: 0644]
networking-odl/networking_odl/common/lightweight_testing.py [new file with mode: 0644]
networking-odl/networking_odl/common/utils.py [new file with mode: 0644]
networking-odl/networking_odl/db/__init__.py [new file with mode: 0644]
networking-odl/networking_odl/db/db.py [new file with mode: 0644]
networking-odl/networking_odl/db/migration/__init__.py [new file with mode: 0644]
networking-odl/networking_odl/db/migration/alembic_migrations/README [new file with mode: 0644]
networking-odl/networking_odl/db/migration/alembic_migrations/__init__.py [new file with mode: 0644]
networking-odl/networking_odl/db/migration/alembic_migrations/env.py [new file with mode: 0644]
networking-odl/networking_odl/db/migration/alembic_migrations/script.py.mako [new file with mode: 0644]
networking-odl/networking_odl/db/migration/alembic_migrations/versions/CONTRACT_HEAD [new file with mode: 0644]
networking-odl/networking_odl/db/migration/alembic_migrations/versions/EXPAND_HEAD [new file with mode: 0644]
networking-odl/networking_odl/db/migration/alembic_migrations/versions/b89a299e19f9_initial_branchpoint.py [new file with mode: 0644]
networking-odl/networking_odl/db/migration/alembic_migrations/versions/mitaka/contract/383acb0d38a0_initial_contract.py [new file with mode: 0644]
networking-odl/networking_odl/db/migration/alembic_migrations/versions/mitaka/expand/247501328046_initial_expand.py [new file with mode: 0644]
networking-odl/networking_odl/db/migration/alembic_migrations/versions/mitaka/expand/37e242787ae5_opendaylight_neutron_mechanism_driver_.py [new file with mode: 0644]
networking-odl/networking_odl/db/migration/alembic_migrations/versions/newton/expand/703dbf02afde_add_journal_maintenance_table.py [new file with mode: 0644]
networking-odl/networking_odl/db/models.py [new file with mode: 0644]
networking-odl/networking_odl/fwaas/__init__.py [new file with mode: 0644]
networking-odl/networking_odl/fwaas/driver.py [new file with mode: 0644]
networking-odl/networking_odl/journal/__init__.py [new file with mode: 0644]
networking-odl/networking_odl/journal/cleanup.py [new file with mode: 0644]
networking-odl/networking_odl/journal/dependency_validations.py [new file with mode: 0644]
networking-odl/networking_odl/journal/full_sync.py [new file with mode: 0644]
networking-odl/networking_odl/journal/journal.py [new file with mode: 0644]
networking-odl/networking_odl/journal/maintenance.py [new file with mode: 0644]
networking-odl/networking_odl/l2gateway/__init__.py [new file with mode: 0644]
networking-odl/networking_odl/l2gateway/driver.py [new file with mode: 0644]
networking-odl/networking_odl/l3/__init__.py [new file with mode: 0644]
networking-odl/networking_odl/l3/l3_odl.py [new file with mode: 0644]
networking-odl/networking_odl/l3/l3_odl_v2.py [new file with mode: 0644]
networking-odl/networking_odl/lbaas/__init__.py [new file with mode: 0644]
networking-odl/networking_odl/lbaas/driver_v1.py [new file with mode: 0644]
networking-odl/networking_odl/lbaas/driver_v2.py [new file with mode: 0644]
networking-odl/networking_odl/ml2/README.odl [new file with mode: 0644]
networking-odl/networking_odl/ml2/__init__.py [new file with mode: 0644]
networking-odl/networking_odl/ml2/legacy_port_binding.py [new file with mode: 0644]
networking-odl/networking_odl/ml2/mech_driver.py [new file with mode: 0644]
networking-odl/networking_odl/ml2/mech_driver_v2.py [new file with mode: 0644]
networking-odl/networking_odl/ml2/network_topology.py [new file with mode: 0644]
networking-odl/networking_odl/ml2/ovsdb_topology.py [new file with mode: 0644]
networking-odl/networking_odl/ml2/port_binding.py [new file with mode: 0644]
networking-odl/networking_odl/ml2/pseudo_agentdb_binding.py [new file with mode: 0644]
networking-odl/networking_odl/tests/__init__.py [new file with mode: 0644]
networking-odl/networking_odl/tests/base.py [new file with mode: 0644]
networking-odl/networking_odl/tests/unit/__init__.py [new file with mode: 0644]
networking-odl/networking_odl/tests/unit/common/__init__.py [new file with mode: 0644]
networking-odl/networking_odl/tests/unit/common/test_cache.py [new file with mode: 0644]
networking-odl/networking_odl/tests/unit/common/test_callback.py [new file with mode: 0644]
networking-odl/networking_odl/tests/unit/common/test_lightweight_testing.py [new file with mode: 0644]
networking-odl/networking_odl/tests/unit/common/test_utils.py [new file with mode: 0644]
networking-odl/networking_odl/tests/unit/db/__init__.py [new file with mode: 0644]
networking-odl/networking_odl/tests/unit/db/test_db.py [new file with mode: 0644]
networking-odl/networking_odl/tests/unit/fwaas/__init__.py [new file with mode: 0644]
networking-odl/networking_odl/tests/unit/fwaas/test_fwaas_odl.py [new file with mode: 0644]
networking-odl/networking_odl/tests/unit/journal/__init__.py [new file with mode: 0644]
networking-odl/networking_odl/tests/unit/journal/test_dependency_validations.py [new file with mode: 0644]
networking-odl/networking_odl/tests/unit/journal/test_full_sync.py [new file with mode: 0644]
networking-odl/networking_odl/tests/unit/journal/test_maintenance.py [new file with mode: 0644]
networking-odl/networking_odl/tests/unit/l2gateway/__init__.py [new file with mode: 0644]
networking-odl/networking_odl/tests/unit/l2gateway/test_driver.py [new file with mode: 0644]
networking-odl/networking_odl/tests/unit/l3/__init__.py [new file with mode: 0644]
networking-odl/networking_odl/tests/unit/l3/test_l3_odl.py [new file with mode: 0644]
networking-odl/networking_odl/tests/unit/l3/test_l3_odl_v2.py [new file with mode: 0644]
networking-odl/networking_odl/tests/unit/lbaas/__init__.py [new file with mode: 0644]
networking-odl/networking_odl/tests/unit/lbaas/test_lbaas_odl_v1.py [new file with mode: 0644]
networking-odl/networking_odl/tests/unit/lbaas/test_lbaas_odl_v2.py [new file with mode: 0644]
networking-odl/networking_odl/tests/unit/ml2/__init__.py [new file with mode: 0644]
networking-odl/networking_odl/tests/unit/ml2/config-ovs-external_ids.sh [new file with mode: 0755]
networking-odl/networking_odl/tests/unit/ml2/odl_teststub.js [new file with mode: 0644]
networking-odl/networking_odl/tests/unit/ml2/ovs_topology.json [new file with mode: 0644]
networking-odl/networking_odl/tests/unit/ml2/test_driver.py [new file with mode: 0644]
networking-odl/networking_odl/tests/unit/ml2/test_legacy_port_binding.py [new file with mode: 0644]
networking-odl/networking_odl/tests/unit/ml2/test_mechanism_odl.py [new file with mode: 0644]
networking-odl/networking_odl/tests/unit/ml2/test_mechanism_odl_v2.py [new file with mode: 0644]
networking-odl/networking_odl/tests/unit/ml2/test_networking_topology.py [new file with mode: 0644]
networking-odl/networking_odl/tests/unit/ml2/test_ovsdb_topology.py [new file with mode: 0644]
networking-odl/networking_odl/tests/unit/ml2/test_port_binding.py [new file with mode: 0644]
networking-odl/networking_odl/tests/unit/ml2/test_pseudo_agentdb_binding.py [new file with mode: 0644]
networking-odl/networking_odl/tests/unit/ml2/vhostuser_topology.json [new file with mode: 0644]
networking-odl/rally-jobs/README.rst [new file with mode: 0644]
networking-odl/rally-jobs/extra/README.rst [new file with mode: 0644]
networking-odl/rally-jobs/odl.yaml [new file with mode: 0644]
networking-odl/rally-jobs/plugins/README.rst [new file with mode: 0644]
networking-odl/rally-jobs/plugins/__init__.py [new file with mode: 0644]
networking-odl/releasenotes/notes/.placeholder [new file with mode: 0644]
networking-odl/releasenotes/source/_static/.placeholder [new file with mode: 0644]
networking-odl/releasenotes/source/_templates/.placeholder [new file with mode: 0644]
networking-odl/releasenotes/source/conf.py [new file with mode: 0644]
networking-odl/releasenotes/source/index.rst [new file with mode: 0644]
networking-odl/releasenotes/source/unreleased.rst [new file with mode: 0644]
networking-odl/requirements.txt [new file with mode: 0644]
networking-odl/setup.cfg [new file with mode: 0644]
networking-odl/setup.py [new file with mode: 0644]
networking-odl/test-requirements.txt [new file with mode: 0644]
networking-odl/tools/check_bash.sh [new file with mode: 0644]
networking-odl/tools/check_i18n.py [new file with mode: 0644]
networking-odl/tools/check_i18n_test_case.txt [new file with mode: 0644]
networking-odl/tools/clean.sh [new file with mode: 0755]
networking-odl/tools/i18n_cfg.py [new file with mode: 0644]
networking-odl/tools/install_venv.py [new file with mode: 0644]
networking-odl/tools/pretty_tox.sh [new file with mode: 0755]
networking-odl/tools/subunit-trace.py [new file with mode: 0755]
networking-odl/tools/with_venv.sh [new file with mode: 0755]
networking-odl/tox.ini [new file with mode: 0644]

diff --git a/.gitreview b/.gitreview
new file mode 100644 (file)
index 0000000..dbfd245
--- /dev/null
@@ -0,0 +1,4 @@
+[gerrit]
+host=gerrit.opnfv.org
+port=29418
+project=fds.git
diff --git a/networking-odl/.coveragerc b/networking-odl/.coveragerc
new file mode 100644 (file)
index 0000000..d723574
--- /dev/null
@@ -0,0 +1,7 @@
+[run]
+branch = True
+source = networking_odl
+omit = networking_odl/tests/*
+
+[report]
+ignore_errors = True
diff --git a/networking-odl/.gitignore b/networking-odl/.gitignore
new file mode 100644 (file)
index 0000000..7287aff
--- /dev/null
@@ -0,0 +1,31 @@
+AUTHORS
+build/*
+build-stamp
+ChangeLog
+cover/
+covhtml/
+dist/
+doc/build
+*.DS_Store
+*.pyc
+etc/neutron/plugins/ml2/ml2_conf_odl.ini.sample
+networking_odl.egg-info/
+networking_odl/vcsversion.py
+networking_odl/versioninfo
+pbr*.egg/
+run_tests.err.log
+run_tests.log
+# Files created by releasenotes build
+releasenotes/build
+setuptools*.egg/
+subunit.log
+*.mo
+*.sw?
+*~
+/.*
+!/.coveragerc
+!/.gitignore
+!/.gitreview
+!/.mailmap
+!/.pylintrc
+!/.testr.conf
diff --git a/networking-odl/.gitreview b/networking-odl/.gitreview
new file mode 100644 (file)
index 0000000..ad57c24
--- /dev/null
@@ -0,0 +1,4 @@
+[gerrit]
+host=review.openstack.org
+port=29418
+project=openstack/networking-odl.git
diff --git a/networking-odl/.mailmap b/networking-odl/.mailmap
new file mode 100644 (file)
index 0000000..f3e7e5e
--- /dev/null
@@ -0,0 +1,11 @@
+# Format is:
+# <preferred e-mail> <other e-mail 1>
+# <preferred e-mail> <other e-mail 2>
+lawrancejing <lawrancejing@gmail.com> <liuqing@windawn.com>
+Jiajun Liu <jiajun@unitedstack.com> <iamljj@gmail.com>
+Zhongyue Luo <zhongyue.nah@intel.com> <lzyeval@gmail.com>
+Kun Huang <gareth@unitedstack.com> <academicgareth@gmail.com>
+Zhenguo Niu <zhenguo@unitedstack.com> <Niu.ZGlinux@gmail.com>
+Isaku Yamahata <isaku.yamahata@intel.com> <isaku.yamahata@gmail.com>
+Isaku Yamahata <isaku.yamahata@intel.com> <yamahata@private.email.ne.jp>
+Morgan Fainberg <morgan.fainberg@gmail.com> <m@metacloud.com>
diff --git a/networking-odl/.pylintrc b/networking-odl/.pylintrc
new file mode 100644 (file)
index 0000000..d343d72
--- /dev/null
@@ -0,0 +1,130 @@
+# The format of this file isn't really documented; just use --generate-rcfile
+[MASTER]
+# Add <file or directory> to the black list. It should be a base name, not a
+# path. You may set this option multiple times.
+#
+# Note the 'openstack' below is intended to match only
+# neutron.openstack.common.  If we ever have another 'openstack'
+# dirname, then we'll need to expand the ignore features in pylint :/
+ignore=.git,tests,openstack
+
+[MESSAGES CONTROL]
+# NOTE(gus): This is a long list.  A number of these are important and
+# should be re-enabled once the offending code is fixed (or marked
+# with a local disable)
+disable=
+# "F" Fatal errors that prevent further processing
+ import-error,
+# "I" Informational noise
+ locally-disabled,
+# "E" Error for important programming issues (likely bugs)
+ access-member-before-definition,
+ bad-super-call,
+ maybe-no-member,
+ no-member,
+ no-method-argument,
+ no-self-argument,
+ not-callable,
+ no-value-for-parameter,
+ super-on-old-class,
+ too-few-format-args,
+# "W" Warnings for stylistic problems or minor programming issues
+ abstract-method,
+ anomalous-backslash-in-string,
+ anomalous-unicode-escape-in-string,
+ arguments-differ,
+ attribute-defined-outside-init,
+ bad-builtin,
+ bad-indentation,
+ broad-except,
+ dangerous-default-value,
+ deprecated-lambda,
+ duplicate-key,
+ expression-not-assigned,
+ fixme,
+ global-statement,
+ global-variable-not-assigned,
+ logging-not-lazy,
+ no-init,
+ non-parent-init-called,
+ protected-access,
+ redefined-builtin,
+ redefined-outer-name,
+ redefine-in-handler,
+ signature-differs,
+ star-args,
+ super-init-not-called,
+ unnecessary-lambda,
+ unnecessary-pass,
+ unpacking-non-sequence,
+ unreachable,
+ unused-argument,
+ unused-import,
+ unused-variable,
+# "C" Coding convention violations
+ bad-continuation,
+ invalid-name,
+ missing-docstring,
+ old-style-class,
+ superfluous-parens,
+# "R" Refactor recommendations
+ abstract-class-little-used,
+ abstract-class-not-used,
+ duplicate-code,
+ interface-not-implemented,
+ no-self-use,
+ too-few-public-methods,
+ too-many-ancestors,
+ too-many-arguments,
+ too-many-branches,
+ too-many-instance-attributes,
+ too-many-lines,
+ too-many-locals,
+ too-many-public-methods,
+ too-many-return-statements,
+ too-many-statements
+
+[BASIC]
+# Variable names can be 1 to 31 characters long, with lowercase and underscores
+variable-rgx=[a-z_][a-z0-9_]{0,30}$
+
+# Argument names can be 2 to 31 characters long, with lowercase and underscores
+argument-rgx=[a-z_][a-z0-9_]{1,30}$
+
+# Method names should be at least 3 characters long
+# and be lowercased with underscores
+method-rgx=([a-z_][a-z0-9_]{2,}|setUp|tearDown)$
+
+# Module names matching neutron-* are ok (files in bin/)
+module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+)|(neutron-[a-z0-9_-]+))$
+
+# Don't require docstrings on tests.
+no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$
+
+[FORMAT]
+# Maximum number of characters on a single line.
+max-line-length=79
+
+[VARIABLES]
+# List of additional names supposed to be defined in builtins. Remember that
+# you should avoid to define new builtins when possible.
+# _ is used by our localization
+additional-builtins=_
+
+[CLASSES]
+# List of interface methods to ignore, separated by a comma.
+ignore-iface-methods=
+
+[IMPORTS]
+# Deprecated modules which should not be used, separated by a comma
+deprecated-modules=
+# should use openstack.common.jsonutils
+ json
+
+[TYPECHECK]
+# List of module names for which member attributes should not be checked
+ignored-modules=six.moves,_MovedItems
+
+[REPORTS]
+# Tells whether to display a full report or only the messages
+reports=no
diff --git a/networking-odl/.testr.conf b/networking-odl/.testr.conf
new file mode 100644 (file)
index 0000000..4b24f61
--- /dev/null
@@ -0,0 +1,8 @@
+[DEFAULT]
+test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
+             OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \
+             OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \
+             OS_LOG_CAPTURE=1 \
+             ${PYTHON:-python} -m subunit.run discover -t ./ . $LISTOPT $IDOPTION
+test_id_option=--load-list $IDFILE
+test_list_option=--list
diff --git a/networking-odl/CONTRIBUTING.rst b/networking-odl/CONTRIBUTING.rst
new file mode 100644 (file)
index 0000000..8620367
--- /dev/null
@@ -0,0 +1,16 @@
+If you would like to contribute to the development of OpenStack,
+you must follow the steps documented at:
+
+   http://docs.openstack.org/infra/manual/developers.html
+
+Once those steps have been completed, changes to OpenStack
+should be submitted for review via the Gerrit tool, following
+the workflow documented at:
+
+   http://docs.openstack.org/infra/manual/developers.html#development-workflow
+
+Pull requests submitted through GitHub will be ignored.
+
+Bugs should be filed on Launchpad, not GitHub:
+
+   https://bugs.launchpad.net/networking-odl
diff --git a/networking-odl/HACKING.rst b/networking-odl/HACKING.rst
new file mode 100644 (file)
index 0000000..e08eb54
--- /dev/null
@@ -0,0 +1,33 @@
+Neutron Style Commandments
+==========================
+
+- Step 1: Read the OpenStack Style Commandments
+  http://docs.openstack.org/developer/hacking/
+- Step 2: Read on
+
+Neutron Specific Commandments
+-----------------------------
+
+- [N319] Validate that debug level logs are not translated
+- [N320] Validate that LOG messages, except debug ones, have translations
+- [N321] Validate that jsonutils module is used instead of json
+- [N322] We do not use @authors tags in source files. We have git to track
+  authorship.
+- [N323] Detect common errors with assert_called_once_with
+
+Creating Unit Tests
+-------------------
+For every new feature, unit tests should be created that both test and
+(implicitly) document the usage of said feature. If submitting a patch for a
+bug that had no unit test, a new passing unit test should be added. If a
+submitted bug fix does have a unit test, be sure to add a new one that fails
+without the patch and passes with the patch.
+
+All unittest classes must ultimately inherit from testtools.TestCase. In the
+Neutron test suite, this should be done by inheriting from
+neutron.tests.base.BaseTestCase.
+
+All setUp and tearDown methods must upcall using the super() method.
+tearDown methods should be avoided and addCleanup calls should be preferred.
+Never manually create tempfiles. Always use the tempfile fixtures from
+the fixture library to ensure that they are cleaned up.
diff --git a/networking-odl/LICENSE b/networking-odl/LICENSE
new file mode 100644 (file)
index 0000000..68c771a
--- /dev/null
@@ -0,0 +1,176 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
diff --git a/networking-odl/MANIFEST.in b/networking-odl/MANIFEST.in
new file mode 100644 (file)
index 0000000..4e527c7
--- /dev/null
@@ -0,0 +1,14 @@
+include AUTHORS
+include README.rst
+include ChangeLog
+include LICENSE
+include neutron/db/migration/README
+include neutron/db/migration/alembic.ini
+include neutron/db/migration/alembic_migrations/script.py.mako
+include neutron/db/migration/alembic_migrations/versions/README
+recursive-include neutron/locale *
+
+exclude .gitignore
+exclude .gitreview
+
+global-exclude *.pyc
diff --git a/networking-odl/README.rst b/networking-odl/README.rst
new file mode 100644 (file)
index 0000000..9554ee2
--- /dev/null
@@ -0,0 +1,21 @@
+Welcome!
+========
+
+This is the external, decomposed plugin library for the OpenDaylight ML2
+MechanismDriver. This is the backend code which handles communication with
+OpenDaylight.
+
+External Resources:
+===================
+
+The OpenDaylight homepage:
+   http://www.opendaylight.org/
+
+The Launchpad page for openstack/networking-odl:
+   https://launchpad.net/networking-odl
+
+File bugs for this project here:
+   https://bugs.launchpad.net/networking-odl
+
+The Neutron homepage:
+   https://launchpad.net/neutron
diff --git a/networking-odl/TESTING.rst b/networking-odl/TESTING.rst
new file mode 100644 (file)
index 0000000..d181286
--- /dev/null
@@ -0,0 +1,173 @@
+Testing Networking-odl + neutron
+================================
+
+Overview
+--------
+
+The unit tests (networking_odl/tests/unit/) are meant to cover as much code as
+possible and should be executed without the service running. They are
+designed to test the various pieces of the neutron tree to make sure
+any new changes don't break existing functionality.
+
+# TODO (Manjeet): Update functional testing doc.
+
+Development process
+-------------------
+
+It is expected that any new changes that are proposed for merge
+come with tests for that feature or code area. Ideally any bugs
+fixes that are submitted also have tests to prove that they stay
+fixed!  In addition, before proposing for merge, all of the
+current tests should be passing.
+
+Virtual environments
+~~~~~~~~~~~~~~~~~~~~
+
+Testing OpenStack projects, including Neutron, is made easier with `DevStack <https://git.openstack.org/cgit/openstack-dev/devstack>`_.
+
+Create a machine (such as a VM or Vagrant box) running a distribution supported
+by DevStack and install DevStack there. For example, there is a Vagrant script
+for DevStack at https://github.com/bcwaldon/vagrant_devstack.
+
+ .. note::
+
+    If you prefer not to use DevStack, you can still check out source code on your local
+    machine and develop from there.
+
+
+Running unit tests
+------------------
+
+There are two mechanisms for running tests: tox, and nose. Before submitting
+a patch for review you should always ensure all tests pass; a tox run is
+triggered by the jenkins gate executed on gerrit for each patch pushed for
+review.
+
+With these mechanisms you can either run the tests in the standard
+environment or create a virtual environment to run them in.
+
+By default after running all of the tests, any pep8 errors
+found in the tree will be reported.
+
+
+With `nose`
+~~~~~~~~~~~
+
+You can use `nose`_ to run individual tests, as well as use for debugging
+portions of your code::
+
+    source .venv/bin/activate
+    pip install nose
+    nosetests
+
+There are disadvantages to running Nose - the tests are run sequentially, so
+race condition bugs will not be triggered, and the full test suite will
+take significantly longer than tox & testr. The upside is that testr has
+some rough edges when it comes to diagnosing errors and failures, and there is
+no easy way to set a breakpoint in the Neutron code, and enter an
+interactive debugging session while using testr.
+
+.. _nose: https://nose.readthedocs.org/en/latest/index.html
+
+With `tox`
+~~~~~~~~~~
+
+Networking-odl, like other OpenStack projects, uses `tox`_ for managing the virtual
+environments for running test cases. It uses `Testr`_ for managing the running
+of the test cases.
+
+Tox handles the creation of a series of `virtualenvs`_ that target specific
+versions of Python (2.6, 2.7, 3.3, etc.).
+
+Testr handles the parallel execution of series of test cases as well as
+the tracking of long-running tests and other things.
+
+Running unit tests is as easy as executing this in the root directory of the
+Neutron source code::
+
+    tox
+
+Running tests for syntax and style check for written code::
+
+    tox -e pep8
+
+For more information on the standard Tox-based test infrastructure used by
+OpenStack and how to do some common test/debugging procedures with Testr,
+see this wiki page:
+
+  https://wiki.openstack.org/wiki/Testr
+
+.. _Testr: https://wiki.openstack.org/wiki/Testr
+.. _tox: http://tox.readthedocs.org/en/latest/
+.. _virtualenvs: https://pypi.python.org/pypi/virtualenv
+
+Tests written can also be debugged by adding pdb break points. Normally if you add
+a break point and just run the tests with normal flags they will end up failing.
+There is a debug flag you can use to run the tests after adding pdb break points.
+
+Set break points in your test code and run::
+
+    tox -e debug networking_odl.tests.unit.db.test_db.DbTestCase.test_validate_updates_same_object_uuid
+
+The package oslotest was used to enable debugging in the tests. For more
+information see the link:
+
+  http://docs.openstack.org/developer/oslotest/features.html
+
+
+Running individual tests
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+For running individual test modules or cases, you just need to pass
+the dot-separated path to the module you want as an argument to it.
+
+For executing a specific test case, specify the name of the test case
+class separating it from the module path with a colon.
+
+For example, the following would run only the Testodll3 tests from
+networking_odl/tests/unit/l3/test_l3_odl.py ::
+
+      $ tox -e py27 networking_odl.tests.unit.l3.test_l3_odl.Testodll3
+
+Adding more tests
+~~~~~~~~~~~~~~~~~
+
+There might not be full coverage yet. New patches for adding tests
+which are not there are always welcome.
+
+To get a grasp of the areas where tests are needed, you can check
+current coverage by running::
+
+    $ tox -e cover
+
+Debugging
+---------
+
+It's possible to debug tests in a tox environment::
+
+    $ tox -e venv -- python -m testtools.run [test module path]
+
+Tox-created virtual environments (venv's) can also be activated
+after a tox run and reused for debugging::
+
+    $ tox -e venv
+    $ . .tox/venv/bin/activate
+    $ python -m testtools.run [test module path]
+
+Tox packages and installs the neutron source tree in a given venv
+on every invocation, but if modifications need to be made between
+invocation (e.g. adding more pdb statements), it is recommended
+that the source tree be installed in the venv in editable mode::
+
+    # run this only after activating the venv
+    $ pip install --editable .
+
+Editable mode ensures that changes made to the source tree are
+automatically reflected in the venv, and that such changes are not
+overwritten during the next tox run.
+
+References
+==========
+
+.. [#pudb] PUDB debugger:
+   https://pypi.python.org/pypi/pudb
diff --git a/networking-odl/babel.cfg b/networking-odl/babel.cfg
new file mode 100644 (file)
index 0000000..15cd6cb
--- /dev/null
@@ -0,0 +1,2 @@
+[python: **.py]
+
diff --git a/networking-odl/devstack/README.rst b/networking-odl/devstack/README.rst
new file mode 100644 (file)
index 0000000..2b4bd1c
--- /dev/null
@@ -0,0 +1,80 @@
+======================
+ Enabling in Devstack
+======================
+
+1. Download DevStack
+
+2. Copy the sample local.conf over::
+
+     cp devstack/local.conf.example local.conf
+
+3. Optionally, to manually configure this:
+
+   Add this repo as an external repository::
+
+     > cat local.conf
+     [[local|localrc]]
+     enable_plugin networking-odl http://git.openstack.org/openstack/networking-odl
+
+4. Optionally, to enable support for OpenDaylight L3 router functionality,
+   add the below.
+   Note: This is only relevant when using old netvirt (ovsdb based, default)::
+
+     > cat local.conf
+     [[local|localrc]]
+     ODL_L3=True
+
+5. If you need to route the traffic out of the box (e.g. br-ex), set
+   ODL_PROVIDER_MAPPINGS to map the physical provider network to device
+   mapping, as shown below::
+
+     > cat local.conf
+     [[local|localrc]]
+     ODL_L3=True
+     ODL_PROVIDER_MAPPINGS=${ODL_PROVIDER_MAPPINGS:-br-ex:eth2}    # for old netvirt (ovsdb based)
+     ODL_PROVIDER_MAPPINGS=${ODL_PROVIDER_MAPPINGS:-physnet1:eth2} # for new netvirt (vpnservice based)
+
+6. Optionally, to enable support for OpenDaylight with LBaaS V2, add this::
+
+     > cat local.conf
+     [[local|localrc]]
+     enable_plugin neutron-lbaas http://git.openstack.org/openstack/neutron-lbaas
+     enable_service q-lbaasv2
+     NEUTRON_LBAAS_SERVICE_PROVIDERV2="LOADBALANCERV2:opendaylight:networking_odl.lbaas.driver_v2.OpenDaylightLbaasDriverV2:default"
+
+7. run ``stack.sh``
+
+8. Note: In a multi-node devstack environment, for each compute node you will
+   want to add this to the local.conf file::
+
+     > cat local.conf
+     [[local|localrc]]
+     enable_plugin networking-odl http://git.openstack.org/openstack/networking-odl
+     ODL_MODE=compute
+
+9. Note: In a node using a release of Open vSwitch provided from another source
+   than your Linux distribution you have to enable in your local.conf skipping
+   of OVS installation step by setting *SKIP_OVS_INSTALL=True*. For example
+   when stacking together with `networking-ovs-dpdk
+   <https://github.com/openstack/networking-ovs-dpdk/>`_ Neutron plug-in to
+   avoid conflicts between openvswitch and ovs-dpdk you have to add this to
+   the local.conf file::
+
+     > cat local.conf
+     [[local|localrc]]
+     enable_plugin networking-ovs-dpdk http://git.openstack.org/openstack/networking-ovs-dpdk
+     enable_plugin networking-odl http://git.openstack.org/openstack/networking-odl
+     SKIP_OVS_INSTALL=True
+     Q_ML2_PLUGIN_MECHANISM_DRIVERS=opendaylight
+
+10. Note: Optionally, to use the new netvirt implementation
+    (netvirt-vpnservice-openstack), add the following to the local.conf file
+    (only allinone topology is currently supported by devstack, since tunnel
+    endpoints are not automatically configured). For tunnel configurations
+    after loading devstack, please refer to this guide
+    https://wiki.opendaylight.org/view/Netvirt:_L2Gateway_HowTo#Configuring_Tunnels::
+
+      > cat local.conf
+      [[local|localrc]]
+      ODL_NETVIRT_KARAF_FEATURE=odl-restconf-all,odl-aaa-authn,odl-dlux-core,odl-mdsal-apidocs,odl-netvirt-vpnservice-openstack
+      ODL_BOOT_WAIT_URL=restconf/operational/network-topology:network-topology/ # Workaround since netvirt:1 no longer exists in DS!
diff --git a/networking-odl/devstack/devstackgaterc b/networking-odl/devstack/devstackgaterc
new file mode 100644 (file)
index 0000000..474cdc0
--- /dev/null
@@ -0,0 +1,62 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+#
+# This script is executed in the OpenStack CI job that runs DevStack + tempest.
+# You can find the CI job configuration here:
+#
+# http://git.openstack.org/cgit/openstack-infra/project-config/tree/jenkins/jobs/networking-odl.yaml
+#
+
+export OVERRIDE_ENABLED_SERVICES=q-svc,q-dhcp,q-meta,quantum,tempest,n-api,n-cond,n-cpu,n-crt,n-obj,n-sch,g-api,g-reg,mysql,rabbit,key,dstat
+
+# By default this variable is set to br100, and it is used to determine the value of PUBLIC_INTERFACE.
+# PUBLIC_INTERFACE is wired to PUBLIC_BRIDGE to route traffic externally. Since, we don't currently
+# need to route external traffic in the gate (no multi-node testing), this can be unset.
+unset PUBLIC_INTERFACE
+
+# Begin list of exclusions.
+r="^(?!.*"
+
+# exclude the slow tag (part of the default for 'full')
+r="$r(?:.*\[.*\bslow\b.*\])"
+
+# exclude things that just aren't enabled:
+r="$r|(?:tempest\.api\.network\.admin\.test_quotas\.QuotasTest\.test_lbaas_quotas.*)"
+r="$r|(?:tempest\.api\.network\.test_load_balancer.*)"
+r="$r|(?:tempest\.scenario\.test_load_balancer.*)"
+r="$r|(?:tempest\.scenario\.test_network_basic_ops.*)"
+r="$r|(?:tempest\.scenario\.test_security_groups_basic_ops.*)"
+r="$r|(?:tempest\.api\.network\.admin\.test_load_balancer.*)"
+r="$r|(?:tempest\.api\.network\.admin\.test_lbaas.*)"
+r="$r|(?:tempest\.api\.network\.test_fwaas_extensions.*)"
+r="$r|(?:tempest\.api\.network\.test_vpnaas_extensions.*)"
+r="$r|(?:tempest\.api\.network\.test_metering_extensions.*)"
+r="$r|(?:tempest\.thirdparty\.boto\.test_s3.*)"
+
+# exclude stuff we're less likely to break because i'm impatient
+r="$r|(?:tempest\.api\.identity.*)"
+r="$r|(?:tempest\.api\.image.*)"
+r="$r|(?:tempest\.api\.volume.*)"
+
+# Current list of failing tests that need to be triaged, have bugs filed, and
+# fixed as appropriate.
+# (none)
+
+# End list of exclusions.
+r="$r)"
+
+# only run tempest.api/scenario/thirdparty tests (part of the default for 'full')
+r="$r(tempest\.(api|scenario|thirdparty)).*$"
+
+export DEVSTACK_GATE_TEMPEST_REGEX="$r"
+enable_service "$OVERRIDE_ENABLED_SERVICES"
diff --git a/networking-odl/devstack/entry_points b/networking-odl/devstack/entry_points
new file mode 100644 (file)
index 0000000..647960f
--- /dev/null
@@ -0,0 +1,307 @@
+#!/bin/bash
+
+
+# cleanup_opendaylight() - Remove residual data files, anything left over
+# from previous runs that a clean run would need to clean up
+function cleanup_opendaylight {
+    # Wipe out the data, journal and snapshots directories ... grumble grumble grumble
+    rm -rf $ODL_DIR/$ODL_NAME/{data,journal,snapshots}
+
+    # Remove existing logfiles
+    if [[ -n "$LOGDIR" ]]; then
+        rm -f "$LOGDIR/$ODL_KARAF_LOG_BASE*"
+    fi
+    if [[ -n "$SCREEN_LOGDIR" ]]; then
+        rm -f "$SCREEN_LOGDIR/$ODL_KARAF_LOG_BASE*"
+    fi
+    rm -f "$DEST/logs/$ODL_KARAF_LOG_BASE*"
+
+    move_interface_addresses "outof_bridge"
+
+    unbind_opendaylight_controller
+}
+
+
+# configure_opendaylight() - Set config files, create data dirs, etc
+function configure_opendaylight {
+    echo "Configuring OpenDaylight"
+
+    # The logging config file in ODL
+    local ODL_LOGGING_CONFIG=${ODL_DIR}/${ODL_NAME}/etc/org.ops4j.pax.logging.cfg
+
+    # Add netvirt feature in Karaf, if it's not already there
+    local ODLFEATUREMATCH=$(cat $ODL_DIR/$ODL_NAME/etc/org.apache.karaf.features.cfg | \
+                            grep featuresBoot= | grep $ODL_NETVIRT_KARAF_FEATURE)
+    if [ "$ODLFEATUREMATCH" == "" ]; then
+        sed -i "/^featuresBoot=/ s/$/,$ODL_NETVIRT_KARAF_FEATURE/" \
+               $ODL_DIR/$ODL_NAME/etc/org.apache.karaf.features.cfg
+    fi
+
+    if [[ "$ODL_RELEASE" =~ "helium" ]]; then
+        # Move Tomcat to $ODL_PORT
+        local _ODLPORT=$(cat $ODL_DIR/$ODL_NAME/configuration/tomcat-server.xml | \
+                         grep $ODL_PORT)
+        if [ "$_ODLPORT" == "" ]; then
+            sed -i "/\<Connector port/ s/808./$ODL_PORT/" \
+                   $ODL_DIR/$ODL_NAME/configuration/tomcat-server.xml
+        fi
+    else
+        # Move Jetty to $ODL_PORT
+        local _ODLPORT=$(cat $ODL_DIR/$ODL_NAME/etc/jetty.xml | grep $ODL_PORT)
+        if [ "$_ODLPORT" == "" ]; then
+            sed -i "/\<Property name\=\"jetty\.port/ s/808./$ODL_PORT/" \
+                   $ODL_DIR/$ODL_NAME/etc/jetty.xml
+        fi
+    fi
+
+    # Configure L3 if the user wants it for NETVIRT_OVSDB
+    # L3 is always enabled in NETVIRT_VPNSERVICE
+    if [[ ",$ODL_NETVIRT_KARAF_FEATURE," =~ ",$ODL_NETVIRT_KARAF_FEATURE_OVSDB," ]] && [ "${ODL_L3}" == "True" ]; then
+        # Configure L3 FWD if it's not there
+        local L3FWD=$(cat $ODL_DIR/$ODL_NAME/etc/custom.properties | \
+                      grep ^ovsdb.l3.fwd.enabled)
+        if [ "$L3FWD" == "" ]; then
+            echo "ovsdb.l3.fwd.enabled=yes" >> $ODL_DIR/$ODL_NAME/etc/custom.properties
+        fi
+
+        # Configure L3 GW MAC if it's not there
+        local L3GW_MAC=$(cat $ODL_DIR/$ODL_NAME/etc/custom.properties | \
+                         grep ^ovsdb.l3gateway.mac)
+        if [[ -z "$L3GW_MAC" && -n "$ODL_L3GW_MAC" ]]; then
+            echo "ovsdb.l3gateway.mac=$ODL_L3GW_MAC" >> $ODL_DIR/$ODL_NAME/etc/custom.properties
+        fi
+    fi
+
+    # Remove existing logfiles
+    local ODL_LOGDIR=$DEST/logs
+    if [[ -n "$LOGDIR" ]]; then
+        ODL_LOGDIR=$LOGDIR
+    fi
+
+    rm -f "$ODL_LOGDIR/$ODL_KARAF_LOG_BASE*"
+    # Log karaf output to a file
+    _LF=$ODL_LOGDIR/$ODL_KARAF_LOG_NAME
+    LF=$(echo $_LF | sed 's/\//\\\//g')
+    # Soft link for easy consumption
+    sudo mkdir -p "$ODL_LOGDIR"
+    ln -sf $_LF "$ODL_LOGDIR/screen-karaf.log"
+    if [[ -n $SCREEN_LOGDIR ]]; then
+        ln -sf $_LF "$SCREEN_LOGDIR/screen-karaf.log"
+    fi
+
+    # Change the karaf logfile
+    # disable log rotation by setting max file size large enough
+    sed -i -e "/^log4j\.appender\.out\.file/ s/.*/log4j\.appender\.out\.file\=$LF/" \
+           -e "/^log4j\.appender\.out\.maxFileSize/ s/.*/log4j\.appender\.out\.maxFileSize\=1024GB/" \
+    $ODL_DIR/$ODL_NAME/etc/org.ops4j.pax.logging.cfg
+
+    # Configure DEBUG logs for network virtualization in odl, if the user wants it
+    if [ "${ODL_NETVIRT_DEBUG_LOGS}" == "True" ]; then
+        local OVSDB_DEBUG_LOGS=$(cat $ODL_LOGGING_CONFIG | grep ^log4j.logger.org.opendaylight.ovsdb)
+        if [ "${OVSDB_DEBUG_LOGS}" == "" ]; then
+            echo 'log4j.logger.org.opendaylight.ovsdb = TRACE, out' >> $ODL_LOGGING_CONFIG
+            echo 'log4j.logger.org.opendaylight.ovsdb.lib = INFO, out' >> $ODL_LOGGING_CONFIG
+            echo 'log4j.logger.org.opendaylight.ovsdb.openstack.netvirt.impl.NeutronL3Adapter = DEBUG, out' >> $ODL_LOGGING_CONFIG
+            echo 'log4j.logger.org.opendaylight.ovsdb.openstack.netvirt.impl.TenantNetworkManagerImpl = DEBUG, out' >> $ODL_LOGGING_CONFIG
+            echo 'log4j.logger.org.opendaylight.ovsdb.openstack.netvirt.providers.openflow13.services.arp.GatewayMacResolverService = DEBUG, out' >> $ODL_LOGGING_CONFIG
+            echo 'log4j.logger.org.opendaylight.ovsdb.plugin.md.OvsdbInventoryManager = INFO, out' >> $ODL_LOGGING_CONFIG
+        fi
+        if [[ "$ODL_RELEASE" =~ "helium" ]]; then
+            local ODL_NEUTRON_DEBUG_LOGS=$(cat $ODL_LOGGING_CONFIG | \
+                        grep ^log4j.logger.org.opendaylight.controller.networkconfig.neutron)
+            if [ "${ODL_NEUTRON_DEBUG_LOGS}" == "" ]; then
+                echo 'log4j.logger.org.opendaylight.controller.networkconfig.neutron = TRACE, out' >> $ODL_LOGGING_CONFIG
+            fi
+        else
+            local ODL_NEUTRON_DEBUG_LOGS=$(cat $ODL_LOGGING_CONFIG | \
+                        grep ^log4j.logger.org.opendaylight.neutron)
+            if [ "${ODL_NEUTRON_DEBUG_LOGS}" == "" ]; then
+                echo 'log4j.logger.org.opendaylight.neutron = TRACE, out' >> $ODL_LOGGING_CONFIG
+            fi
+        fi
+    fi
+}
+
+# configure_neutron_opendaylight() - Set Neutron config files according to ODL settings
+function configure_neutron_odl {
+    echo "Configuring ML2 for OpenDaylight"
+    populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_odl url=$ODL_ENDPOINT
+    populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_odl username=$ODL_USERNAME
+    populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_odl password=$ODL_PASSWORD
+    populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_odl port_binding_controller=$ODL_PORT_BINDING_CONTROLLER
+    # When it's not set, the default value is set by networking-odl
+    if [[ -n "$ODL_HOSTCONF_URI" ]]; then
+        populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_odl odl_hostconf_uri=$ODL_HOSTCONF_URI
+    fi
+}
+
+function configure_neutron_odl_lightweight_testing {
+    echo "Configuring lightweight testing for OpenDaylight"
+    populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_odl enable_lightweight_testing=True
+}
+
+# init_opendaylight() - Initialize databases, etc.
+function init_opendaylight {
+    # clean up from previous (possibly aborted) runs
+    # create required data files
+    :
+}
+
+
+# install_opendaylight() - Collect source and prepare
+function install_opendaylight {
+    echo "Installing OpenDaylight and dependent packages"
+    if [[ "$ODL_USING_EXISTING_JAVA" != "True" ]]
+    then
+        if ! setup_java "${ODL_REQUIRED_JAVA_VERSION:-7}"; then
+            exit 1
+        fi
+    fi
+
+    install_opendaylight_neutron_thin_ml2_driver
+
+    # Download OpenDaylight
+    cd $ODL_DIR
+
+    if [[ "$OFFLINE" != "True" ]]; then
+        wget -N $ODL_URL/$ODL_PKG
+    fi
+    unzip -u -o $ODL_PKG
+}
+
+
+# install_opendaylight_neutron_thin_ml2_driver() - Install the ML2 driver
+function install_opendaylight_neutron_thin_ml2_driver {
+    echo "Installing the Networking-ODL driver for OpenDaylight"
+    setup_develop $NETWORKING_ODL_DIR
+}
+
+
+# install_opendaylight_compute() - Make sure OVS is installed
+function install_opendaylight_compute {
+    if [[ "$SKIP_OVS_INSTALL" = "True" ]]; then
+        echo "Skipping OVS installation."
+    else
+        # packages are the same as for Neutron OVS agent
+        _neutron_ovs_base_install_agent_packages
+    fi
+}
+
+
+# start_opendaylight() - Start running processes, including screen
+function start_opendaylight {
+    echo "Starting OpenDaylight"
+
+    # Wipe out the data and journal directories ... grumble grumble grumble
+    rm -rf $ODL_DIR/$ODL_NAME/{data,journal}
+
+    # The following variables are needed by the running karaf process.
+    # See the "bin/setenv" file in the OpenDaylight distribution for
+    # their individual meaning.
+    setup_java_env
+    export JAVA_MIN_MEM=$ODL_JAVA_MIN_MEM
+    export JAVA_MAX_MEM=$ODL_JAVA_MAX_MEM
+    export JAVA_MAX_PERM_MEM=$ODL_JAVA_MAX_PERM_MEM
+    run_process odl-server "$ODL_DIR/$ODL_NAME/bin/start"
+
+    if [ -n "$ODL_BOOT_WAIT_URL" ]; then
+        echo "Waiting for Opendaylight to start via $ODL_BOOT_WAIT_URL ..."
+        # Probe ODL restconf for netvirt until it is operational
+        local testcmd="curl -o /dev/null --fail --silent --head -u \
+              ${ODL_USERNAME}:${ODL_PASSWORD} http://${ODL_MGR_IP}:${ODL_PORT}/${ODL_BOOT_WAIT_URL}"
+        test_with_retry "$testcmd" "Opendaylight did not start after $ODL_BOOT_WAIT" \
+              $ODL_BOOT_WAIT $ODL_RETRY_SLEEP_INTERVAL
+    else
+        echo "Waiting for Opendaylight to start ..."
+        # Sleep a bit to let OpenDaylight finish starting up
+        sleep $ODL_BOOT_WAIT
+    fi
+}
+
+
+# stop_opendaylight() - Stop running processes (non-screen)
+function stop_opendaylight {
+    # Stop the karaf container
+    $ODL_DIR/$ODL_NAME/bin/stop
+    stop_process odl-server
+}
+
+
+# cleanup_opendaylight_compute() - Remove all OVS ports, bridges and disconnects
+# controller from switch
+function cleanup_opendaylight_compute {
+    # Remove the patch ports
+    for port in $(sudo ovs-vsctl show | grep Port | awk '{print $2}'  | cut -d '"' -f 2 | grep patch); do
+        sudo ovs-vsctl del-port ${port}
+    done
+
+    # remove all OVS ports that look like Neutron created ports
+    for port in $(sudo ovs-vsctl list port | grep -o -e tap[0-9a-f\-]* -e q[rg]-[0-9a-f\-]*); do
+        sudo ovs-vsctl del-port ${port}
+    done
+
+    # Remove all the vxlan ports
+    for port in $(sudo ovs-vsctl list port | grep name | grep vxlan | awk '{print $3}'  | cut -d '"' -f 2); do
+        sudo ovs-vsctl del-port ${port}
+    done
+
+    # Disconnect controller from switch
+    unbind_opendaylight_controller
+
+    # remove all OVS bridges created by ODL
+    for bridge in $(sudo ovs-vsctl list-br | grep -o -e ${OVS_BR} -e ${PUBLIC_BRIDGE}); do
+        sudo ovs-vsctl del-br ${bridge}
+    done
+}
+
+# bind_opendaylight_controller() - set control manager to OVS
+function bind_opendaylight_controller {
+    echo_summary "Initializing OpenDaylight"
+    ODL_LOCAL_IP=${ODL_LOCAL_IP:-$HOST_IP}
+    ODL_MGR_PORT=${ODL_MGR_PORT:-6640}
+    ODL_OVS_MANAGERS=${ODL_OVS_MANAGERS:-$ODL_MGR_IP}
+    read ovstbl <<< $(sudo ovs-vsctl get Open_vSwitch . _uuid)
+    local ODL_MANAGERS_PARAM=()
+    for manager in $(echo $ODL_OVS_MANAGERS | tr "," "\n"); do
+        # ovsdbd doesn't understand localhost; replace it with a numerical IP address
+        ODL_MANAGERS_PARAM=( "${ODL_MANAGERS_PARAM[@]}" "tcp:${manager/localhost/127.0.0.1}:$ODL_MGR_PORT" )
+    done
+    # don't overwrite the already existing managers
+    local ODL_MANAGERS_OLD=$(sudo ovs-vsctl get-manager)
+    local ODL_MANAGERS=$(echo $ODL_MANAGERS_OLD ${ODL_MANAGERS_PARAM[@]} | tr ' ' '\n' | sort | uniq | tr '\n' ' ')
+    sudo ovs-vsctl set-manager ${ODL_MANAGERS}
+    if [[ -n "$ODL_PROVIDER_MAPPINGS" ]]; then
+        sudo ovs-vsctl set Open_vSwitch $ovstbl \
+            other_config:provider_mappings=$ODL_PROVIDER_MAPPINGS
+    fi
+    sudo ovs-vsctl set Open_vSwitch $ovstbl other_config:local_ip=$ODL_LOCAL_IP
+}
+
+# unbind_opendaylight_controller() - disconnect controller from switch and clear bridges
+function unbind_opendaylight_controller {
+    sudo ovs-vsctl del-manager
+    BRIDGES=$(sudo ovs-vsctl list-br)
+    for bridge in $BRIDGES ; do
+        sudo ovs-vsctl del-controller $bridge
+     done
+}
+
+
+# configure_opendaylight_l3() - configure bridges for OpenDaylight L3 forwarding
+function configure_opendaylight_l3 {
+    wait_for_active_bridge $PUBLIC_BRIDGE $ODL_RETRY_SLEEP_INTERVAL $ODL_BOOT_WAIT
+
+    # Note: as of Lithium-SR3 and Beryllium releases, ODL will add the OVS port(s)
+    #       to the external bridge via the ODL_PROVIDER_MAPPINGS method. Being so,
+    #       the usage of PUBLIC_INTERFACE is no longer necessary in ODL devstack.
+
+    # DEPRECATED: Add public interface to public bridge, if needed
+    if [[ -n "$PUBLIC_INTERFACE" && "$PUBLIC_INTERFACE" != "br100" ]]; then
+        deprecated "Adding $PUBLIC_INTERFACE to $PUBLIC_BRIDGE."
+        deprecated "Use ODL_PROVIDER_MAPPINGS instead of PUBLIC_INTERFACE."
+        sudo ovs-vsctl --no-wait -- --may-exist add-port $PUBLIC_BRIDGE $PUBLIC_INTERFACE
+        sudo ip link set $PUBLIC_INTERFACE up
+    fi
+
+    move_interface_addresses "into_bridge"
+}
diff --git a/networking-odl/devstack/functions b/networking-odl/devstack/functions
new file mode 100644 (file)
index 0000000..ebd14da
--- /dev/null
@@ -0,0 +1,120 @@
+#!/bin/bash
+#
+# functions - OpenDaylight driver utility functions
+
+# Get build information
+function odl_update_maven_metadata_xml {
+    local MAVENMETAFILE=$1
+    local NEXUSPATH=$2
+    local BUNDLEVERSION=$3
+    local OFFLINE=$4
+
+    if [[ "$OFFLINE" == "True" ]]; then
+        return
+    fi
+
+    # Remove stale MAVENMETAFILE for cases where you switch releases
+    rm -f $MAVENMETAFILE
+
+    # Acquire the timestamp information from maven-metadata.xml
+    wget -O $MAVENMETAFILE ${NEXUSPATH}/${BUNDLEVERSION}/maven-metadata.xml
+}
+
+function _odl_export_snapshot_url_pkg {
+    local ODL_DIR=$1
+    local ODL_URL_PREFIX=$2
+    local BUNDLEVERSION=$3
+    local OFFLINE=$4
+    local BUNDLE_TIMESTAMP=$5
+
+    local MAVENMETAFILE=$ODL_DIR/maven-metadata.xml
+    local NEXUSPATH="${ODL_URL_PREFIX}/${ODL_URL_SNAPSHOT_REPOSITORY_PATH}/org/opendaylight/integration/distribution-karaf"
+
+    if [ "$BUNDLE_TIMESTAMP" == "latest" ]; then
+        odl_update_maven_metadata_xml $MAVENMETAFILE $NEXUSPATH $BUNDLEVERSION $OFFLINE
+        if is_ubuntu; then
+            install_package libxml-xpath-perl
+            BUNDLE_TIMESTAMP=`xpath -e "//snapshotVersion[extension='zip'][1]/value/text()" $MAVENMETAFILE 2>/dev/null`
+        else
+            yum_install perl-XML-XPath
+            BUNDLE_TIMESTAMP=`xpath $MAVENMETAFILE "//snapshotVersion[extension='zip'][1]/value/text()" 2>/dev/null`
+        fi
+    fi
+
+    export ODL_URL=${NEXUSPATH}/${BUNDLEVERSION}
+    export ODL_PKG=distribution-karaf-${BUNDLE_TIMESTAMP}.zip
+}
+
+function _odl_export_release_url_pkg {
+    local ODL_URL_PREFIX=$1
+    local BUNDLEVERSION=$2
+    local NEXUSPATH="${ODL_URL_PREFIX}/${ODL_URL_RELEASE_REPOSITORY_PATH}/org/opendaylight/integration/distribution-karaf"
+
+    export ODL_URL=${NEXUSPATH}/${BUNDLEVERSION}
+    export ODL_PKG=distribution-karaf-${BUNDLEVERSION}.zip
+}
+
+function setup_opendaylight_package {
+    if [[ -n "$ODL_SNAPSHOT_VERSION" ]]; then
+        _odl_export_snapshot_url_pkg ${ODL_DIR} ${ODL_URL_PREFIX} ${ODL_BUNDLEVERSION} ${OFFLINE} ${ODL_SNAPSHOT_VERSION}
+    else
+        _odl_export_release_url_pkg ${ODL_URL_PREFIX} ${ODL_BUNDLEVERSION}
+    fi
+}
+
+# Test if OpenDaylight is enabled
+function is_opendaylight_enabled {
+    [[ ,${ENABLED_SERVICES} =~ ,"odl-" ]] && return 0
+    return 1
+}
+
+
+# Check that the bridge is up and running
+function wait_for_active_bridge {
+    local BRIDGE=$1
+    local SLEEP_INTERVAL=$2
+    local MAX_WAIT=$3
+
+    echo "Waiting for bridge $BRIDGE to be available..."
+    local testcmd="sudo ovs-vsctl list Bridge | grep $BRIDGE"
+    test_with_retry "$testcmd" \
+        "$BRIDGE did not become available in $MAX_WAIT seconds." \
+        $MAX_WAIT $SLEEP_INTERVAL
+    echo "Bridge $BRIDGE is available."
+}
+
+# Move the public IP addresses to the OVS bridge on startup,
+# or back to the public interface on cleanup
+function move_interface_addresses {
+    local direction=$1
+
+    if [[ -n "$ODL_PROVIDER_MAPPINGS" ]]; then
+        local VETH_INTERFACE=$(echo $ODL_PROVIDER_MAPPINGS | cut -d ':' -f1)
+        local PHYSICAL_INTERFACE=$(echo $ODL_PROVIDER_MAPPINGS | cut -d ':' -f2)
+
+        if [[ "$direction" == "into_bridge" ]]; then
+            _move_neutron_addresses_route "$PHYSICAL_INTERFACE" "$VETH_INTERFACE" True False "inet"
+            if _has_public_ipv6_address "$PHYSICAL_INTERFACE"; then
+                _move_neutron_addresses_route "$PHYSICAL_INTERFACE" "$VETH_INTERFACE" False False "inet6"
+            fi
+        elif [[ "$direction" == "outof_bridge" ]]; then
+            _move_neutron_addresses_route "$VETH_INTERFACE" "$PHYSICAL_INTERFACE" False True "inet"
+            if _has_public_ipv6_address "$VETH_INTERFACE"; then
+                _move_neutron_addresses_route "$VETH_INTERFACE" "$PHYSICAL_INTERFACE" False False "inet6"
+            fi
+        fi
+    fi
+}
+
+# Check that the interface has an IP v6 address which
+# is routable on external network
+function _has_public_ipv6_address {
+    local interface=$1
+    local interface_public_ipv6_addresses=$(ip -f inet6 a s dev "$interface" | grep -c 'global')
+    echo "$interface public IPv6 address count: $interface_public_ipv6_addresses"
+    if [[ "$interface_public_ipv6_addresses" != 0 ]]; then
+        return 0
+    else
+        return 1
+    fi
+}
diff --git a/networking-odl/devstack/local.conf.example b/networking-odl/devstack/local.conf.example
new file mode 100644 (file)
index 0000000..9a6bd97
--- /dev/null
@@ -0,0 +1,42 @@
+[[local|localrc]]
+LOGFILE=stack.sh.log
+SCREEN_LOGDIR=/opt/stack/data/log
+LOG_COLOR=False
+
+disable_service swift
+disable_service cinder
+disable_service n-net
+enable_service q-svc
+enable_service q-dhcp
+enable_service q-l3
+enable_service q-meta
+enable_service q-vpn
+enable_service q-metering
+enable_service q-lbaas
+enable_service q-fwaas
+enable_service neutron
+enable_service tempest
+
+enable_plugin networking-odl http://git.openstack.org/openstack/networking-odl
+
+ENABLE_TENANT_TUNNELS=True
+
+HOST_NAME=$(hostname)
+SERVICE_HOST_NAME=${HOST_NAME}
+SERVICE_HOST=$HOST_NAME
+
+VNCSERVER_PROXYCLIENT_ADDRESS=$SERVICE_HOST
+VNCSERVER_LISTEN=0.0.0.0
+
+MYSQL_HOST=$SERVICE_HOST
+RABBIT_HOST=$SERVICE_HOST
+GLANCE_HOSTPORT=$SERVICE_HOST:9292
+KEYSTONE_AUTH_HOST=$SERVICE_HOST
+KEYSTONE_SERVICE_HOST=$SERVICE_HOST
+
+MYSQL_PASSWORD=mysql
+RABBIT_PASSWORD=rabbit
+SERVICE_TOKEN=service
+SERVICE_PASSWORD=admin
+ADMIN_PASSWORD=admin
+
diff --git a/networking-odl/devstack/odl-releases/beryllium-0.4.0 b/networking-odl/devstack/odl-releases/beryllium-0.4.0
new file mode 100644 (file)
index 0000000..4ed5b73
--- /dev/null
@@ -0,0 +1,4 @@
+export ODL_BUNDLEVERSION='0.4.0-Beryllium'
+
+# Java major version required to run OpenDaylight: 7, 8, ...
+ODL_REQUIRED_JAVA_VERSION=${ODL_REQUIRED_JAVA_VERSION:-8}
diff --git a/networking-odl/devstack/odl-releases/beryllium-0.4.1-SR1 b/networking-odl/devstack/odl-releases/beryllium-0.4.1-SR1
new file mode 100644 (file)
index 0000000..f659a38
--- /dev/null
@@ -0,0 +1,4 @@
+export ODL_BUNDLEVERSION='0.4.1-Beryllium-SR1'
+
+# Java major version required to run OpenDaylight: 7, 8, ...
+ODL_REQUIRED_JAVA_VERSION=${ODL_REQUIRED_JAVA_VERSION:-8}
diff --git a/networking-odl/devstack/odl-releases/beryllium-0.4.2-SR2 b/networking-odl/devstack/odl-releases/beryllium-0.4.2-SR2
new file mode 100644 (file)
index 0000000..230682b
--- /dev/null
@@ -0,0 +1,4 @@
+export ODL_BUNDLEVERSION='0.4.2-Beryllium-SR2'
+
+# Java major version required to run OpenDaylight: 7, 8, ...
+ODL_REQUIRED_JAVA_VERSION=${ODL_REQUIRED_JAVA_VERSION:-8}
diff --git a/networking-odl/devstack/odl-releases/beryllium-0.4.3-SR3 b/networking-odl/devstack/odl-releases/beryllium-0.4.3-SR3
new file mode 100644 (file)
index 0000000..2edebc0
--- /dev/null
@@ -0,0 +1,4 @@
+export ODL_BUNDLEVERSION='0.4.3-Beryllium-SR3'
+
+# Java major version required to run OpenDaylight: 7, 8, ...
+ODL_REQUIRED_JAVA_VERSION=${ODL_REQUIRED_JAVA_VERSION:-8}
diff --git a/networking-odl/devstack/odl-releases/beryllium-snapshot-0.4.3 b/networking-odl/devstack/odl-releases/beryllium-snapshot-0.4.3
new file mode 100644 (file)
index 0000000..a0d8ecc
--- /dev/null
@@ -0,0 +1,5 @@
+ODL_BUNDLEVERSION='0.4.3-SNAPSHOT'
+ODL_SNAPSHOT_VERSION=${ODL_SNAPSHOT_VERSION:-latest}
+
+# Java major version required to run OpenDaylight: 7, 8, ...
+ODL_REQUIRED_JAVA_VERSION=${ODL_REQUIRED_JAVA_VERSION:-8}
diff --git a/networking-odl/devstack/odl-releases/beryllium-snapshot-0.4.4 b/networking-odl/devstack/odl-releases/beryllium-snapshot-0.4.4
new file mode 100644 (file)
index 0000000..3ea7fac
--- /dev/null
@@ -0,0 +1,5 @@
+ODL_BUNDLEVERSION='0.4.4-SNAPSHOT'
+ODL_SNAPSHOT_VERSION=${ODL_SNAPSHOT_VERSION:-latest}
+
+# Java major version required to run OpenDaylight: 7, 8, ...
+ODL_REQUIRED_JAVA_VERSION=${ODL_REQUIRED_JAVA_VERSION:-8}
diff --git a/networking-odl/devstack/odl-releases/boron-snapshot-0.5.0 b/networking-odl/devstack/odl-releases/boron-snapshot-0.5.0
new file mode 100644 (file)
index 0000000..1afdb92
--- /dev/null
@@ -0,0 +1,5 @@
+ODL_BUNDLEVERSION='0.5.0-SNAPSHOT'
+ODL_SNAPSHOT_VERSION=${ODL_SNAPSHOT_VERSION:-latest}
+
+# Java major version required to run OpenDaylight: 7, 8, ...
+ODL_REQUIRED_JAVA_VERSION=${ODL_REQUIRED_JAVA_VERSION:-8}
diff --git a/networking-odl/devstack/odl-releases/carbon-snapshot-0.6.0 b/networking-odl/devstack/odl-releases/carbon-snapshot-0.6.0
new file mode 100644 (file)
index 0000000..102de66
--- /dev/null
@@ -0,0 +1,5 @@
+ODL_BUNDLEVERSION='0.6.0-SNAPSHOT'
+ODL_SNAPSHOT_VERSION=${ODL_SNAPSHOT_VERSION:-latest}
+
+# Java major version required to run OpenDaylight: 7, 8, ...
+ODL_REQUIRED_JAVA_VERSION=${ODL_REQUIRED_JAVA_VERSION:-8}
diff --git a/networking-odl/devstack/odl-releases/common b/networking-odl/devstack/odl-releases/common
new file mode 100644 (file)
index 0000000..ec57dd9
--- /dev/null
@@ -0,0 +1,14 @@
+# karaf distribution name of ODL to download
+export ODL_NAME=${ODL_NAME:-distribution-karaf-${ODL_BUNDLEVERSION}}
+
+# The older network virtualization feature name (ovsdb based)
+export ODL_NETVIRT_KARAF_FEATURE_OVSDB=${ODL_NETVIRT_KARAF_FEATURE_OVSDB:-odl-ovsdb-openstack}
+
+# The newer network virtualization feature name (vpnservice based)
+export ODL_NETVIRT_KARAF_FEATURE_VPNSERVICE=${ODL_NETVIRT_KARAF_FEATURE_VPNSERVICE:-odl-netvirt-openstack}
+
+# The network virtualization feature used by opendaylight loaded by Karaf
+export ODL_NETVIRT_KARAF_FEATURE=${ODL_NETVIRT_KARAF_FEATURE:-odl-neutron-service,odl-restconf-all,odl-aaa-authn,odl-dlux-core,odl-mdsal-apidocs,$ODL_NETVIRT_KARAF_FEATURE_OVSDB}
+
+# The url that this version of ODL netvirt can use to know ODL is fully up
+export ODL_BOOT_WAIT_URL=${ODL_BOOT_WAIT_URL:-restconf/operational/network-topology:network-topology/topology/netvirt:1}
diff --git a/networking-odl/devstack/odl-releases/helium-0.2.3-SR3 b/networking-odl/devstack/odl-releases/helium-0.2.3-SR3
new file mode 100644 (file)
index 0000000..70149e9
--- /dev/null
@@ -0,0 +1,17 @@
+# Short name of ODL package
+export ODL_NAME=distribution-karaf-0.2.3-Helium-SR3
+
+# Java major version required to run OpenDaylight: 7, 8, ...
+ODL_REQUIRED_JAVA_VERSION=${ODL_REQUIRED_JAVA_VERSION:-7}
+
+# overwrite this function
+function setup_opendaylight_package {
+    # The OpenDaylight URL
+    export ODL_URL=${ODL_URL_PREFIX}/content/repositories/public/org/opendaylight/integration/distribution-karaf/0.2.3-Helium-SR3
+
+    # The OpenDaylight Package
+    export ODL_PKG=distribution-karaf-0.2.3-Helium-SR3.zip
+}
+
+# The network virtualization feature used by opendaylight loaded by Karaf
+ODL_NETVIRT_KARAF_FEATURE=${ODL_NETVIRT_KARAF_FEATURE:-odl-base-all,odl-restconf-all,odl-aaa-authn,odl-dlux-core,odl-mdsal-apidocs,odl-adsal-northbound,odl-nsf-all,odl-ovsdb-northbound,odl-ovsdb-openstack}
diff --git a/networking-odl/devstack/odl-releases/lithium-0.3.1-SR1 b/networking-odl/devstack/odl-releases/lithium-0.3.1-SR1
new file mode 100644 (file)
index 0000000..81283a7
--- /dev/null
@@ -0,0 +1,4 @@
+export ODL_BUNDLEVERSION='0.3.1-Lithium-SR1'
+
+# Java major version required to run OpenDaylight: 7, 8, ...
+ODL_REQUIRED_JAVA_VERSION=${ODL_REQUIRED_JAVA_VERSION:-7}
diff --git a/networking-odl/devstack/odl-releases/lithium-0.3.2-SR2 b/networking-odl/devstack/odl-releases/lithium-0.3.2-SR2
new file mode 100644 (file)
index 0000000..ff52cad
--- /dev/null
@@ -0,0 +1,4 @@
+export ODL_BUNDLEVERSION='0.3.2-Lithium-SR2'
+
+# Java major version required to run OpenDaylight: 7, 8, ...
+ODL_REQUIRED_JAVA_VERSION=${ODL_REQUIRED_JAVA_VERSION:-7}
diff --git a/networking-odl/devstack/odl-releases/lithium-0.3.3-SR3 b/networking-odl/devstack/odl-releases/lithium-0.3.3-SR3
new file mode 100644 (file)
index 0000000..a1ff407
--- /dev/null
@@ -0,0 +1,4 @@
+export ODL_BUNDLEVERSION='0.3.3-Lithium-SR3'
+
+# Java major version required to run OpenDaylight: 7, 8, ...
+ODL_REQUIRED_JAVA_VERSION=${ODL_REQUIRED_JAVA_VERSION:-7}
diff --git a/networking-odl/devstack/odl-releases/lithium-0.3.4-SR4 b/networking-odl/devstack/odl-releases/lithium-0.3.4-SR4
new file mode 100644 (file)
index 0000000..da2f692
--- /dev/null
@@ -0,0 +1,4 @@
+export ODL_BUNDLEVERSION='0.3.4-Lithium-SR4'
+
+# Java major version required to run OpenDaylight: 7, 8, ...
+ODL_REQUIRED_JAVA_VERSION=${ODL_REQUIRED_JAVA_VERSION:-7}
diff --git a/networking-odl/devstack/override-defaults b/networking-odl/devstack/override-defaults
new file mode 100644 (file)
index 0000000..399a528
--- /dev/null
@@ -0,0 +1,37 @@
+# Override few things here as early as we can
+
+# We will enable the opendaylight ML2 MechanismDriver v1 version by default.
+# Note we are also enabling the logger driver, which is helpful for
+# debugging things on the Neutron side.
+if [[ "$ODL_V2DRIVER" == "True" ]]
+then
+    V2_POSTFIX="_v2"
+else
+    V2_POSTFIX=""
+fi
+
+Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-"opendaylight${V2_POSTFIX},logger"}
+
+# This triggers the provisioning of L3 resources like routers and
+# external network, if not overridden.
+Q_L3_ENABLED=${Q_L3_ENABLED:-True}
+
+# We have to disable the neutron L2 agent. OpenDaylight does not use the
+# L2 agent, it instead uses a combination of OpenFlow and OVSDB commands
+# to program OVS on each compute and network node host.
+disable_service q-agt
+
+# If ODL_L3 is enabled, then we don't need the L3 agent and OpenDaylight
+# is going to act as the ML2's L3 service plugin.
+# NETVIRT_VPNSERVICE feature enables ODL L3 by default, so ODL_L3 is disregarded.
+if [[ ",$ODL_NETVIRT_KARAF_FEATURE," =~ ",$ODL_NETVIRT_KARAF_FEATURE_VPNSERVICE," ]] || [ "$ODL_L3" == "True" ];
+then
+    disable_service q-l3
+    ML2_L3_PLUGIN="${ML2_L3_PLUGIN:-odl-router${V2_POSTFIX}}"
+fi
+
+# [networking-feature-enabled] api-extensions
+# api-extensions=all means all extensions are enabled regardless of what the plugin supports
+# ML2 plugin with ODL driver supports only the following extensions, not all
+# This list must be maintained as the ML2 plugin with ODL driver gains support for more extensions
+NETWORK_API_EXTENSIONS=${NETWORK_API_EXTENSIONS:-"dns-integration,address-scope,ext-gw-mode,binding,agent,subnet_allocation,dhcp_agent_scheduler,external-net,flavors,net-mtu,quotas,provider,multi-provider,extraroute,vlan-transparent,router,extra_dhcp_opt,security-group,rbac-policies,port-security,allowed-address-pairs,dvr"}
diff --git a/networking-odl/devstack/plugin.sh b/networking-odl/devstack/plugin.sh
new file mode 100644 (file)
index 0000000..a65840d
--- /dev/null
@@ -0,0 +1,151 @@
+#!/bin/bash
+#
+# devstack/plugin.sh
+# Functions to control the configuration and operation of the opendaylight service
+
+# Save trace setting
+_XTRACE_NETWORKING_ODL=$(set +o | grep xtrace)
+set +o xtrace
+
+# OpenDaylight directories
+NETWORKING_ODL_DIR=$DEST/networking-odl
+ODL_DIR=$DEST/opendaylight
+
+# Make sure $ODL_DIR exists
+mkdir -p $ODL_DIR
+
+# Import utility functions
+source $TOP_DIR/functions
+source $NETWORKING_ODL_DIR/devstack/functions
+
+# Import bridge data
+source $TOP_DIR/lib/neutron_plugins/ovs_base
+
+# Import ODL settings
+source $NETWORKING_ODL_DIR/devstack/settings.odl
+source $NETWORKING_ODL_DIR/devstack/odl-releases/$ODL_RELEASE
+source $NETWORKING_ODL_DIR/devstack/odl-releases/common
+
+# Utilities functions for setting up Java
+source $NETWORKING_ODL_DIR/devstack/setup_java.sh
+
+# Import Entry Points
+# -------------------
+source $NETWORKING_ODL_DIR/devstack/entry_points
+
+# Restore xtrace
+$_XTRACE_NETWORKING_ODL
+
+if [[ "$ODL_USING_EXISTING_JAVA" == "True" ]]
+then
+    echo 'Using installed java.'
+    java -version || exit 1
+fi
+
+# main loop
+if is_service_enabled odl-server; then
+    if [[ "$1" == "stack" && "$2" == "install" ]]; then
+        setup_opendaylight_package
+        install_opendaylight
+        configure_opendaylight
+        init_opendaylight
+    elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
+        configure_neutron_odl
+        # This has to start before Neutron
+        start_opendaylight
+    elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then
+        # no-op
+        :
+    fi
+
+    if [[ "$1" == "unstack" ]]; then
+        stop_opendaylight
+        cleanup_opendaylight
+    fi
+
+    if [[ "$1" == "clean" ]]; then
+        # no-op
+        :
+    fi
+fi
+
+if is_service_enabled odl-compute; then
+    if [[ "$1" == "stack" && "$2" == "install" ]]; then
+        install_opendaylight_compute
+    elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
+        if is_service_enabled nova; then
+            create_nova_conf_neutron
+        fi
+        bind_opendaylight_controller
+        wait_for_active_bridge $OVS_BR $ODL_RETRY_SLEEP_INTERVAL $ODL_BOOT_WAIT
+
+        # L3 needs to be configured only for netvirt-ovsdb - in netvirt-vpnservice L3 is configured
+        # by provider_mappings, and the provider mappings are added to br-int by default
+        if [[ ",$ODL_NETVIRT_KARAF_FEATURE," =~ ",$ODL_NETVIRT_KARAF_FEATURE_OVSDB," ]] && [ "${ODL_L3}" == "True" ]; then
+            configure_opendaylight_l3
+        fi
+    elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
+        # no-op
+        :
+    elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then
+        # no-op
+        :
+    fi
+
+    if [[ "$1" == "unstack" ]]; then
+        cleanup_opendaylight_compute
+    fi
+
+    if [[ "$1" == "clean" ]]; then
+        # no-op
+        :
+    fi
+fi
+
+if is_service_enabled odl-neutron; then
+    if [[ "$1" == "stack" && "$2" == "install" ]]; then
+        install_opendaylight_neutron_thin_ml2_driver
+    elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
+        configure_neutron_odl
+    elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then
+        # no-op
+        :
+    fi
+
+    if [[ "$1" == "unstack" ]]; then
+        # no-op
+        :
+    fi
+
+    if [[ "$1" == "clean" ]]; then
+        # no-op
+        :
+    fi
+fi
+
+if is_service_enabled odl-lightweight-testing; then
+    if [[ "$1" == "stack" && "$2" == "install" ]]; then
+        install_opendaylight_neutron_thin_ml2_driver
+    elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
+        configure_neutron_odl
+        configure_neutron_odl_lightweight_testing
+    elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then
+        # no-op
+        :
+    fi
+
+    if [[ "$1" == "unstack" ]]; then
+        # no-op
+        :
+    fi
+
+    if [[ "$1" == "clean" ]]; then
+        # no-op
+        :
+    fi
+fi
+
+# Tell emacs to use shell-script-mode
+## Local variables:
+## mode: shell-script
+## End:
diff --git a/networking-odl/devstack/post_test_hook.sh b/networking-odl/devstack/post_test_hook.sh
new file mode 100644 (file)
index 0000000..64d291e
--- /dev/null
@@ -0,0 +1,43 @@
+#!/usr/bin/env bash
+
+set -xe
+
+GATE_DEST=$BASE/new
+DEVSTACK_PATH=$GATE_DEST/devstack
+
+source $DEVSTACK_PATH/functions
+source $DEVSTACK_PATH/localrc
+
+TEMPEST_CODE_DIR="$BASE/new/tempest"
+TEMPEST_DATA_DIR="$DATA_DIR/tempest"
+NETWORKING_ODL_DIR="$BASE/new/networking-odl"
+
+IS_GATE=$(trueorfalse False IS_GATE)
+if [[ "$IS_GATE" == "True" ]]
+then
+    source $NETWORKING_ODL_DIR/devstack/devstackgaterc
+fi
+
+owner=stack
+sudo_env="TEMPEST_CONFIG_DIR=$TEMPEST_CODE_DIR/etc"
+
+cd $TEMPEST_CODE_DIR
+sudo chown -R $owner:stack $TEMPEST_CODE_DIR
+sudo mkdir -p "$TEMPEST_DATA_DIR"
+sudo chown -R $owner:stack $TEMPEST_DATA_DIR
+source $DEVSTACK_PATH/openrc admin admin
+
+echo "Some pre-process info"
+neutron net-list
+neutron port-list
+neutron subnet-list
+neutron router-list
+
+echo "Running networking-odl test suite"
+sudo -H -u $owner $sudo_env tools/pretty_tox_serial.sh "$DEVSTACK_GATE_TEMPEST_REGEX"
+
+echo "Some post-process info"
+neutron net-list
+neutron port-list
+neutron subnet-list
+neutron router-list
diff --git a/networking-odl/devstack/pre_test_hook.sh b/networking-odl/devstack/pre_test_hook.sh
new file mode 100644 (file)
index 0000000..df11e2a
--- /dev/null
@@ -0,0 +1,77 @@
+#!/usr/bin/env bash
+
+set -xe
+
+# Drop a token that marks the build as coming from openstack infra
+GATE_DEST=$BASE/new
+DEVSTACK_PATH=$GATE_DEST/devstack
+
+case "$ODL_RELEASE_BASE" in
+    carbon-snapshot)
+        ODL_RELEASE=carbon-snapshot-0.6.0
+        ;;
+    boron-snapshot)
+        ODL_RELEASE=boron-snapshot-0.5.0
+        ;;
+    beryllium-snapshot)
+        ODL_RELEASE=beryllium-snapshot-0.4.4
+        ;;
+    *)
+        echo "Unknown ODL release base: $ODL_RELEASE_BASE"
+        exit 1
+        ;;
+esac
+
+case "$ODL_GATE_V2DRIVER" in
+    v2driver)
+        ODL_V2DRIVER=True
+        ;;
+    v1driver|*)
+        ODL_V2DRIVER=False
+        ;;
+esac
+
+case "$ODL_GATE_PORT_BINDING" in
+    pseudo-agentdb-binding)
+        ODL_PORT_BINDING_CONTROLLER=pseudo-agentdb-binding
+        ;;
+    legacy-port-binding)
+        ODL_PORT_BINDING_CONTROLLER=legacy-port-binding
+        ;;
+    network-topology|*)
+        ODL_PORT_BINDING_CONTROLLER=network-topology
+        ;;
+esac
+
+case "$ODL_GATE_SERVICE_PROVIDER" in
+    vpnservice)
+        ODL_NETVIRT_KARAF_FEATURE=odl-neutron-service,odl-restconf-all,odl-aaa-authn,odl-dlux-core,odl-mdsal-apidocs,odl-netvirt-openstack
+        ;;
+    netvirt|*)
+        ODL_NETVIRT_KARAF_FEATURE=odl-neutron-service,odl-restconf-all,odl-aaa-authn,odl-dlux-core,odl-mdsal-apidocs,odl-ovsdb-openstack
+        ;;
+esac
+
+cat <<EOF >> $DEVSTACK_PATH/localrc
+
+IS_GATE=True
+
+# Set here the ODL release to use for the Gate job
+ODL_RELEASE=${ODL_RELEASE}
+
+# Set here which driver, v1 or v2 driver
+ODL_V2DRIVER=${ODL_V2DRIVER}
+
+# Set here which port binding controller
+ODL_PORT_BINDING_CONTROLLER=${ODL_PORT_BINDING_CONTROLLER}
+
+# Set here which ODL openstack service provider to use
+ODL_NETVIRT_KARAF_FEATURE=${ODL_NETVIRT_KARAF_FEATURE}
+
+# Switch to using the ODL's L3 implementation
+ODL_L3=True
+
+# Enable debug logs for odl ovsdb
+ODL_NETVIRT_DEBUG_LOGS=True
+
+EOF
diff --git a/networking-odl/devstack/settings b/networking-odl/devstack/settings
new file mode 100644 (file)
index 0000000..0a924a2
--- /dev/null
@@ -0,0 +1,72 @@
+# Devstack settings
+
+# Each service you enable has the following meaning:
+# odl-neutron - Add this config flag if Opendaylight controller and OpenStack
+#               Controller are on different nodes.
+# odl-server  - Add this config flag if Opendaylight controller and OpenStack
+#               Controller are on the same node.
+# odl-compute - Add this config flag for OpenStack Compute.
+#
+# odl-lightweight-testing - Add this config flag for testing neutron ODL ML2
+#                           driver and networking-odl without a real running
+#                           Opendaylight instance
+#
+# NOTE: odl-server includes odl-neutron.
+#
+# An example of enabling all-in-one ODL is below.
+#enable_service odl-compute odl-server
+
+# This can be overridden in the localrc file
+ODL_MODE=${ODL_MODE:-allinone}
+
+# ODL_MODE is used to configure how devstack works with OpenDaylight. You
+# can configure this three ways:
+#
+# ODL_MODE=allinone
+# Use this mode if you want to run ODL in this devstack instance. Useful
+# for a single node deployment or on the control node of a multi-node
+# devstack environment.
+#
+# ODL_MODE=compute
+# Use this for the compute nodes of a multi-node devstack install.
+#
+# ODL_MODE=externalodl
+# This installs the neutron code for ODL, but does not attempt to
+# manage ODL in devstack. This is used for development environments
+# similar to the allinone case except where you are using bleeding edge ODL
+# which is not yet released, and thus don't want it managed by
+# devstack.
+#
+# ODL_MODE=lightweight-testing
+# Use this for testing neutron ML2 driver plus networking-odl without
+# a running Opendaylight instance.
+#
+# ODL_MODE=manual
+# You're on your own here, and are enabling services outside the scope of
+# the ODL_MODE variable.
+
+case $ODL_MODE in
+    allinone)
+        enable_service odl-server odl-compute
+        ;;
+    externalodl)
+        enable_service odl-neutron odl-compute
+        ;;
+    compute)
+        enable_service odl-compute
+        ;;
+    lightweight-testing)
+        enable_service odl-lightweight-testing
+        ;;
+    manual)
+        echo "Manual mode: Enabling services explicitly."
+        ;;
+esac
+
+
+IS_GATE=$(trueorfalse False IS_GATE)
+if [[ "$IS_GATE" == "True" ]]
+then
+    NETWORKING_ODL_DIR=$DEST/networking-odl
+    source $NETWORKING_ODL_DIR/devstack/devstackgaterc
+fi
diff --git a/networking-odl/devstack/settings.odl b/networking-odl/devstack/settings.odl
new file mode 100644 (file)
index 0000000..1ce23a4
--- /dev/null
@@ -0,0 +1,116 @@
+# Add here any global default values that apply for any ODL release
+# -----------------------------------------------------------------
+
+# What release to use. Choices are:
+#
+#   carbon-snapshot-0.6.0    (master)
+#   boron-snapshot-0.5.0     (stable/boron)
+#   beryllium-snapshot-0.4.4 (stable/beryllium)
+#   beryllium-snapshot-0.4.3 (stable/beryllium)
+#   beryllium-0.4.3-SR3
+#   beryllium-0.4.2-SR2
+#   beryllium-0.4.1-SR1
+#   beryllium-0.4.0
+#   lithium-0.3.4-SR4      (SR4)
+#   lithium-0.3.3-SR3      (SR3)
+#   lithium-0.3.2-SR2      (SR2)
+#   lithium-0.3.1-SR1      (SR1)
+#   helium-0.2.3-SR3
+
+ODL_RELEASE=${ODL_RELEASE:-boron-snapshot-0.5.0}
+
+# The IP address of ODL. Set this in local.conf.
+ODL_MGR_IP=${ODL_MGR_IP:-$SERVICE_HOST}
+
+# The list of IP addresses used as OVS manager, separated by a comma.
+# In non-clustering cases, this is normally the same as ODL_MGR_IP. However,
+# for HA deployments the southbound portion to ODL is expected to
+# use the ip addresses of the ODL instances instead of a single vip. That
+# enables OVS to simultaneously connect to more than one ODL instance.
+# Example of expected format: ODL_OVS_MANAGERS=1.1.1.1,2.2.2.2,3.3.3.3
+ODL_OVS_MANAGERS=${ODL_OVS_MANAGERS:-$ODL_MGR_IP}
+
+# The default ODL port for Tomcat to use
+# NOTE: We make this configurable because by default, ODL uses port 8080 for
+# Tomcat (Helium releases) or Jetty (Lithium and later releases), and this
+# conflicts with swift which also uses port 8080.
+ODL_PORT=${ODL_PORT:-8087}
+
+# The ODL endpoint URL
+ODL_ENDPOINT=${ODL_ENDPOINT:-http://${ODL_MGR_IP}:${ODL_PORT}/controller/nb/v2/neutron}
+
+# The ODL username
+ODL_USERNAME=${ODL_USERNAME:-admin}
+
+# The ODL password
+ODL_PASSWORD=${ODL_PASSWORD:-admin}
+
+# use v2 type driver
+# this requires post mitaka
+ODL_V2DRIVER=${ODL_V2DRIVER:-False}
+
+# The OpenDaylight URL PREFIX
+ODL_URL_PREFIX=${ODL_URL_PREFIX:-https://nexus.opendaylight.org}
+
+# OpenDaylight snapshot & release repositories paths
+# Can be overridden in case you host proxy repositories which have a different directory structure than OpenDaylight's
+ODL_URL_SNAPSHOT_REPOSITORY_PATH=${ODL_URL_SNAPSHOT_REPOSITORY_PATH:-content/repositories/opendaylight.snapshot}
+ODL_URL_RELEASE_REPOSITORY_PATH=${ODL_URL_RELEASE_REPOSITORY_PATH:-content/repositories/opendaylight.release}
+
+# How long (in seconds) to pause after ODL starts to let it complete booting
+ODL_BOOT_WAIT=${ODL_BOOT_WAIT:-600}
+
+# Enable OpenDaylight l3 forwarding
+ODL_L3=${ODL_L3:-False}
+
+# If you need to route the traffic out of the box, set
+# ODL_PROVIDER_MAPPINGS to map br-ex as shown below. Note
+# This used to be accomplished via PUBLIC_BRIDGE, but that
+# is no longer necessary.
+#
+# The physical provider network to device mapping. Use this
+# to instruct ODL to map ports into specific bridges
+# Examples:
+# ODL_PROVIDER_MAPPINGS=${ODL_PROVIDER_MAPPINGS:-br-ex:eth2}
+# ODL_PROVIDER_MAPPINGS=${ODL_PROVIDER_MAPPINGS:-physnet1:eth1,br-ex:eth2}
+
+# MAC address for next hop gateway at external network
+ODL_L3GW_MAC=${ODL_L3GW_MAC:-''}
+
+# Enable debug logs for odl ovsdb
+ODL_NETVIRT_DEBUG_LOGS=${ODL_NETVIRT_DEBUG_LOGS:-False}
+
+# Karaf logfile information
+ODL_KARAF_LOG_DATE=$(date +%Y-%m-%d-%H%M%S)
+ODL_KARAF_LOG_BASE=${ODL_KARAF_LOG_BASE:-screen-karaf.log}
+ODL_KARAF_LOG_NAME=$ODL_KARAF_LOG_BASE.$ODL_KARAF_LOG_DATE
+
+# The bridge to configure
+OVS_BR=${OVS_BR:-br-int}
+
+# Use the existing ready java env
+ODL_USING_EXISTING_JAVA=${ODL_USING_EXISTING_JAVA:-False}
+
+# Allow the min/max/perm Java memory to be configurable
+ODL_JAVA_MIN_MEM=${ODL_JAVA_MIN_MEM:-256m}
+ODL_JAVA_MAX_MEM=${ODL_JAVA_MAX_MEM:-512m}
+ODL_JAVA_MAX_PERM_MEM=${ODL_JAVA_MAX_PERM_MEM:-512m}
+
+# Interval in test_with_retry calls
+ODL_RETRY_SLEEP_INTERVAL=${ODL_RETRY_SLEEP_INTERVAL:-5}
+
+# Skip installation of distribution provided Open vSwitch
+SKIP_OVS_INSTALL=$(trueorfalse False SKIP_OVS_INSTALL)
+
+# The ODL Restconf URL
+# URI to hostconfigs: empty for default value
+ODL_HOSTCONF_URI=${ODL_HOSTCONF_URI:-}
+
+# Port binding controller
+ODL_PORT_BINDING_CONTROLLER=${ODL_PORT_BINDING_CONTROLLER:-network-topology}
+
+# Snapshot version - allows using a specific version e.g. 0.5.0-20160719.101233-3643
+# latest: check the latest snapshot
+# specific version: the specific version of the snapshot
+# "": odl release
+ODL_SNAPSHOT_VERSION=${ODL_SNAPSHOT_VERSION:-}
diff --git a/networking-odl/devstack/setup_java.sh b/networking-odl/devstack/setup_java.sh
new file mode 100644 (file)
index 0000000..2b03cbc
--- /dev/null
@@ -0,0 +1,207 @@
+#!/bin/bash
+
+ORACLE_JAVA_URL="http://download.oracle.com/otn-pub/java/jdk"
+ORACLE_JAVA7_URL="${ORACLE_JAVA7_URL:-$ORACLE_JAVA_URL/7u80-b15/jdk-7u80}"
+ORACLE_JAVA7_NAME="jdk1.7.0_80"
+ORACLE_JAVA8_URL="${ORACLE_JAVA8_URL:-$ORACLE_JAVA_URL/8u74-b02/jdk-8u74}"
+ORACLE_JAVA8_NAME="jdk1.8.0_74"
+
+function setup_java {
+    # Java version 8 is the last stable one
+    local VERSION="${1:-8}"
+
+    echo "Setup Java version: $VERSION"
+    if test_java_version "$VERSION" && setup_java_env; then
+        echo "Current Java version is already $VERSION."
+    elif select_java "$VERSION"; then
+        echo "Java version $VERSION has been selected."
+    elif install_openjdk "$VERSION" && select_java "$VERSION"; then
+        echo "OpenJDK version $VERSION has been installed and selected."
+    elif install_other_java "$VERSION" && select_java "$VERSION"; then
+        echo "Some Java version $VERSION has been installed and selected."
+    else
+        echo "ERROR: Unable to setup Java version $VERSION."
+        return 1
+    fi
+
+    return 0
+}
+
+function setup_java_env() {
+    local JAVA_COMMAND="${1:-${JAVA:-java}}"
+
+    JAVA_LINK="$(which $JAVA_COMMAND)"
+    if [[ "$JAVA_LINK" == "" ]]; then
+        return 1
+    fi
+
+    export JAVA="$(readlink -f $JAVA_LINK)"
+    export JAVA_HOME=$(echo $JAVA | sed "s:/bin/java::" | sed "s:/jre::")
+    if [ "$JAVA" != "$(readlink -f $(which java))" ]; then
+        export PATH="$(dirname $JAVA):$PATH"
+        if [ "$JAVA" != "$(readlink -f $(which java))" ]; then
+            echo "Unable to set $JAVA as current."
+            return 1
+        fi
+    fi
+
+    echo "JAVA is: $JAVA"
+    echo "JAVA_HOME is: $JAVA_HOME"
+    echo "Java version is:"
+    $JAVA -version 2>&1
+}
+
+function select_java {
+    local VERSION="$1"
+    local COMMAND
+
+    for COMMAND in $(list_java_commands); do
+        if test_java_version "$VERSION" "$COMMAND"; then
+            if setup_java_env "$COMMAND"; then
+                return 0
+            fi
+        fi
+    done
+
+    echo 'Required java version not found.'
+    return 1
+}
+
+function test_java_version {
+    local EXPECTED_VERSION="'"*' version "1.'$1'.'*'"'"'"
+    local COMMAND="${2:-${JAVA:-java}}"
+    local ACTUAL_VERSION="'"$($COMMAND -version 2>&1 | head -n 1)"'"
+
+    if [[ $ACTUAL_VERSION == $EXPECTED_VERSION ]]; then
+        echo "Found matching java version: $ACTUAL_VERSION"
+        return 0
+    else
+        return 1
+    fi
+}
+
+if is_ubuntu; then
+    # --- Ubuntu -------------------------------------------------------------
+
+    function list_java_commands {
+        update-alternatives --list java
+    }
+
+    function install_openjdk {
+        local REQUIRED_VERSION="$1"
+        apt_get install "openjdk-$REQUIRED_VERSION-jre-headless"
+    }
+
+    function install_other_java {
+        local VERSION="$1"
+        local PPA_REPOSITORY="ppa:webupd8team/java"
+        local JAVA_INSTALLER="oracle-java${VERSION}-installer"
+        local JAVA_SET_DEFAULT="oracle-java${VERSION}-set-default"
+
+        # Accept installer license
+        echo "$JAVA_INSTALLER" shared/accepted-oracle-license-v1-1 select true | sudo /usr/bin/debconf-set-selections
+
+        # Remove all existing set-default versions
+        apt_get remove oracle-java*-set-default
+        if apt_get install $JAVA_INSTALLER ; then
+            if apt_get install $JAVA_SET_DEFAULT ; then
+                return 0  # Some PPA was already providing desired packages
+            fi
+        fi
+
+        # Add PPA only when package is not available
+        if apt_get install software-properties-common; then
+            # I pipe this after echo to emulate an user key-press
+            if echo | sudo -E add-apt-repository "$PPA_REPOSITORY"; then
+                if apt_get update; then
+                    if apt_get install $JAVA_INSTALLER ; then
+                        if apt_get install $JAVA_SET_DEFAULT ; then
+                            return 0
+                        fi
+                    fi
+                fi
+            fi
+        fi
+
+        # Something has gone wrong!
+        return 1
+    }
+
+else
+    # --- Red Hat -------------------------------------------------------------
+
+    function list_java_commands {
+         alternatives --display java 2>&1 | grep -v '^[[:space:]]' | awk '/[[:space:]]- priority[[:space:]]/{print $1}'
+    }
+
+    function install_openjdk {
+        local VERSION="$1"
+        yum_install java-1.$VERSION.*-openjdk-headless
+    }
+
+    function install_other_java {
+        local VERSION="$1"
+
+        if [[ "$(uname -m)" == "x86_64" ]]; then
+            local ARCH=linux-x64
+        else
+            local ARCH=linux-i586
+        fi
+
+        if [[ "$VERSION" == "7" ]]; then
+            ORIGIN=$ORACLE_JAVA7_URL
+            TARGET=$ORACLE_JAVA7_NAME
+        elif [[ "$VERSION" == "8" ]]; then
+            ORIGIN=$ORACLE_JAVA8_URL
+            TARGET=$ORACLE_JAVA8_NAME
+        else
+            echo "Unsupported Java version: $VERSION."
+            return 1
+        fi
+
+        local NEW_JAVA="/usr/java/$TARGET/jre/bin/java"
+        if test_java_version "$VERSION" "$NEW_JAVA"; then
+            if sudo alternatives --install /usr/bin/java java "$NEW_JAVA" 200000; then
+                return 0
+            fi
+        fi
+
+        local EXT
+        local WGET_OPTIONS="-c --no-check-certificate --no-cookies"
+        local HEADER="Cookie: oraclelicense=accept-securebackup-cookie"
+
+        for EXT in "rpm" "tar.gz"; do
+            local URL="$ORIGIN-$ARCH.$EXT"
+            local PACKAGE="/tmp/$(basename $URL)"
+
+            if wget $WGET_OPTIONS --header "$HEADER" "$URL" -O "$PACKAGE"; then
+                case "$EXT" in
+                    "rpm")
+                        sudo rpm -i "$PACKAGE"
+                        ;;
+                    "tar.gz")
+                        sudo mkdir -p /usr/java && sudo tar -C /usr/java -xzf "$PACKAGE"
+                        ;;
+                    *)
+                        echo "Unsupported extension: $EXT"
+                        ;;
+                esac
+
+                if test_java_version "$VERSION" "$NEW_JAVA"; then
+                    if sudo alternatives --install /usr/bin/java java "$NEW_JAVA" 200000; then
+                        return 0
+                    fi
+                fi
+
+                echo "Unable to register installed java."
+
+            else
+                echo "Unable to download java archive: $URL"
+            fi
+
+        done
+
+        return 1
+    }
+
+fi
diff --git a/networking-odl/doc/source/conf.py b/networking-odl/doc/source/conf.py
new file mode 100644 (file)
index 0000000..309859f
--- /dev/null
@@ -0,0 +1,75 @@
+# -*- coding: utf-8 -*-
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+
+sys.path.insert(0, os.path.abspath('../..'))
+# -- General configuration ----------------------------------------------------
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+extensions = [
+    'sphinx.ext.autodoc',
+    #'sphinx.ext.intersphinx',
+    'oslosphinx'
+]
+
+# autodoc generation is a bit aggressive and a nuisance when doing heavy
+# text edit cycles.
+# execute "export SPHINX_DEBUG=1" in your terminal to disable
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'networking-odl'
+copyright = u'2013, OpenStack Foundation'
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+add_module_names = True
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# -- Options for HTML output --------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages.  Major themes that come with
+# Sphinx are currently 'default' and 'sphinxdoc'.
+# html_theme_path = ["."]
+# html_theme = '_theme'
+# html_static_path = ['static']
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = '%sdoc' % project
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, documentclass
+# [howto/manual]).
+latex_documents = [
+    ('index',
+     '%s.tex' % project,
+     u'%s Documentation' % project,
+     u'OpenStack Foundation', 'manual'),
+]
+
+# Example configuration for intersphinx: refer to the Python standard library.
+#intersphinx_mapping = {'http://docs.python.org/': None}
diff --git a/networking-odl/doc/source/contributing.rst b/networking-odl/doc/source/contributing.rst
new file mode 100644 (file)
index 0000000..1728a61
--- /dev/null
@@ -0,0 +1,4 @@
+============
+Contributing
+============
+.. include:: ../../CONTRIBUTING.rst
diff --git a/networking-odl/doc/source/devref/hostconfig.rst b/networking-odl/doc/source/devref/hostconfig.rst
new file mode 100644 (file)
index 0000000..70c8607
--- /dev/null
@@ -0,0 +1,119 @@
+Host Configuration
+==================
+
+Overview
+--------
+
+ODL uses an agentless configuration model. In this scenario Host Configuration
+is used to specify the physical host type and other configurations for the
+host system. This information is populated by the Cloud Operator in OVSDB, in
+the Open_vSwitch configuration data, in the external_ids field as a key value pair.
+This information is then read by ODL and made available to networking-odl
+through REST API. Networking-odl populates this information in agent_db in
+Neutron and is then used by Neutron scheduler. This information is required
+for features like Port binding and Router scheduling.
+
+Refer to this link for detailed design for this feature.
+
+https://docs.google.com/presentation/d/1kq0elysCDEmIWs3omTi5RoXTSBbrewn11Je2d26cI4M/edit?pref=2&pli=1#slide=id.g108988d1e3_0_6
+
+Related ODL changes:
+
+https://git.opendaylight.org/gerrit/#/c/36767/
+
+https://git.opendaylight.org/gerrit/#/c/40143/
+
+Host Configuration fields
+-------------------------
+
+- host-id
+
+This represents host identification string. This string will be stored in
+external_ids field with the key as odl_os_hostconfig_hostid.
+Refer to Neutron config definition for host field for details on this field.
+
+http://docs.openstack.org/kilo/config-reference/content/section_neutron.conf.html
+
+- host-type
+
+The field is for type of the node. This value corresponds to agent_type in
+agent_db. Example values are “ODL L2” and “ODL L3” for Compute and Network node
+respectively. The same host can be configured to have multiple configurations
+and can therefore have L2, L3 and other configurations at the same time.
+This string will be populated by ODL based on the configurations available
+on the host. See example in section below.
+
+- config
+
+This is the configuration data for the host type. Since the same node can be
+configured to store multiple configurations, different external_ids key value
+pairs are used to store these configurations. The external_ids with keys of the
+form odl_os_hostconfig_config_odl_XXXXXXXX store the different configurations.
+The 8 characters after the prefix odl_os_hostconfig_config_odl are the host
+type. ODL extracts these characters and stores them as the host-type field. For
+example the odl_os_hostconfig_config_odl_l2 and odl_os_hostconfig_config_odl_l3
+keys are used to provide L2 and L3 configurations respectively. ODL will
+extract "ODL L2" and "ODL L3" as the host-type field from these keys and
+populate the host-type field.
+
+Config is a JSON string. Some examples of config:
+
+::
+
+    {“supported_vnic_types”: [{
+            “vnic_type”: “normal”,
+            “vif_type”: “ovs”,
+            “vif_details”: “{}”
+        }]
+        “allowed_network_types”: ["local", "gre", "vlan", "vxlan"]”,
+        “bridge_mappings”: {“physnet1":"br-ex”}
+   }"
+
+   {“supported_vnic_types”: [{
+            “vnic_type”: “normal”,
+            “vif_type”: “vhostuser”,
+            “vif_details”: “{“port_filter”: “False”, “vhostuser_socket”: “/var/run/openvswitch”}”
+        }]
+        “allowed_network_types”: ["local", "gre", "vlan", "vxlan"]”,
+        “bridge_mappings”: {“physnet1":"br-ex”}
+   }"
+
+**Host Config URL**
+
+Url : http://ip:odlport/restconf/operational/neutron:neutron/hostconfigs/
+
+**Commands to setup host config in OVSDB**
+::
+
+ export OVSUUID=$(ovs-vsctl get Open_vSwitch . _uuid)
+ ovs-vsctl set Open_vSwitch $OVSUUID external_ids:odl_os_hostconfig_hostid=test_host
+ ovs-vsctl set Open_vSwitch $OVSUUID external_ids:odl_os_hostconfig_config_odl_l2 =
+ "{“supported_vnic_types”: [{“vnic_type”: “normal”, “vif_type”: “ovs”, "vif_details": {} }], “allowed_network_types”: [“local”], “bridge_mappings”: {“physnet1":"br-ex”}}"
+
+Example for host configuration
+-------------------------------
+
+::
+
+  {
+  "hostconfigs": {
+    "hostconfig": [
+      {
+        "host-id": "test_host1",
+        "host-type": "ODL L2",
+        "config":
+        "{“supported_vnic_types”: [{
+            “vnic_type”: “normal”,
+            “vif_type”: “ovs”,
+            “vif_details”: {}
+        }]
+        “allowed_network_types”: ["local", "gre", "vlan", "vxlan"],
+        “bridge_mappings”: {“physnet1":"br-ex”}}"
+      },
+      {
+        "host-id": "test_host2",
+        "host-type": "ODL L3",
+        "config": {}
+      }]
+    }
+  }
diff --git a/networking-odl/doc/source/devref/index.rst b/networking-odl/doc/source/devref/index.rst
new file mode 100644 (file)
index 0000000..1bf7790
--- /dev/null
@@ -0,0 +1,36 @@
+..
+      Licensed under the Apache License, Version 2.0 (the "License"); you may
+      not use this file except in compliance with the License. You may obtain
+      a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+      License for the specific language governing permissions and limitations
+      under the License.
+
+
+Developer Guide
+===============
+
+In the Developer Guide, you will find information on networking-odl's lower
+level design and implementation details.
+
+
+Contents:
+--------------------------------
+.. toctree::
+   :maxdepth: 2
+
+   hostconfig
+
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
+
diff --git a/networking-odl/doc/source/index.rst b/networking-odl/doc/source/index.rst
new file mode 100644 (file)
index 0000000..312dbe0
--- /dev/null
@@ -0,0 +1,35 @@
+.. networking-odl documentation master file, created by
+   sphinx-quickstart on Tue Jul  9 22:26:36 2013.
+   You can adapt this file completely to your liking, but it should at least
+   contain the root `toctree` directive.
+
+Welcome to networking-odl's documentation!
+========================================================
+
+Contents:
+
+.. toctree::
+   :maxdepth: 2
+
+   readme
+   installation
+   usage
+   contributing
+   specs
+
+Developer Docs
+==============
+
+.. toctree::
+   :maxdepth: 1
+
+   devref/index
+
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
+
diff --git a/networking-odl/doc/source/installation.rst b/networking-odl/doc/source/installation.rst
new file mode 100644 (file)
index 0000000..29c6e45
--- /dev/null
@@ -0,0 +1,12 @@
+============
+Installation
+============
+
+At the command line::
+
+    $ pip install networking-odl
+
+Or, if you have virtualenvwrapper installed::
+
+    $ mkvirtualenv networking-odl
+    $ pip install networking-odl
diff --git a/networking-odl/doc/source/readme.rst b/networking-odl/doc/source/readme.rst
new file mode 100644 (file)
index 0000000..a6210d3
--- /dev/null
@@ -0,0 +1 @@
+.. include:: ../../README.rst
diff --git a/networking-odl/doc/source/specs.rst b/networking-odl/doc/source/specs.rst
new file mode 100644 (file)
index 0000000..6050296
--- /dev/null
@@ -0,0 +1,26 @@
+..
+ This work is licensed under a Creative Commons Attribution 3.0 Unported
+ License.
+
+ http://creativecommons.org/licenses/by/3.0/legalcode
+
+ neutron networking-odl specs documentation master file
+
+=============================================
+Neutron networking-odl Project Specifications
+=============================================
+
+Specs
+=====
+
+.. toctree::
+   :glob:
+   :maxdepth: 1
+
+   specs/*
+
+==================
+Indices and tables
+==================
+
+* :ref:`search`
diff --git a/networking-odl/doc/source/specs/journal-recovery.rst b/networking-odl/doc/source/specs/journal-recovery.rst
new file mode 100644 (file)
index 0000000..1132485
--- /dev/null
@@ -0,0 +1,152 @@
+..
+ This work is licensed under a Creative Commons Attribution 3.0 Unported
+ License.
+
+ http://creativecommons.org/licenses/by/3.0/legalcode
+
+================
+Journal Recovery
+================
+
+https://blueprints.launchpad.net/networking-odl/+spec/journal-recovery
+
+Journal entries in the failed state need to be handled somehow. This spec will
+try to address the issue and propose a solution.
+
+Problem Description
+===================
+
+Currently there is no handling for Journal entries that reach the failed state.
+A journal entry can reach the failed state for several reasons, some of which
+are:
+
+* Reached maximum failed attempts for retrying the operation.
+
+* Inconsistency between ODL and the Neutron DB.
+
+  * For example: An update fails because the resource doesn't exist in ODL.
+
+* Bugs that can lead to failure to sync up.
+
+These entries will be left in the journal table forever which is a bit wasteful
+since they take up some space on the DB storage and also affect the performance
+of the journal table.
+Although each entry has a negligible effect on its own, the impact of a large
+number of such entries can become quite significant.
+
+Proposed Change
+===============
+
+A "journal recovery" routine will run as part of the current journal
+maintenance process.
+This routine will scan the journal table for rows in the "failed" state and
+will try to sync the resource for that entry.
+
+The procedure can be best described by the following flow chart:
+
+asciiflow::
+
+  +-----------------+
+  | For each entry  |
+  | in failed state |
+  +-------+---------+
+          |
+  +-------v--------+
+  | Query resource |
+  | on ODL (REST)  |
+  +-----+-----+----+
+        |     |                          +-----------+
+     Resource |                          | Determine |
+     exists   +--Resource doesn't exist--> operation |
+        |                                | type      |
+  +-----v-----+                          +-----+-----+
+  | Determine |                                |
+  | operation |                                |
+  | type      |                                |
+  +-----+-----+                                |
+        |              +------------+          |
+        +--Create------> Mark entry <--Delete--+
+        |              | completed  |          |
+        |              +----------^-+       Create/
+        |                         |         Update
+        |                         |            |
+        |          +------------+ |      +-----v-----+
+        +--Delete--> Mark entry | |      | Determine |
+        |          | pending    | |      | parent    |
+        |          +---------^--+ |      | relation  |
+        |                    |    |      +-----+-----+
+  +-----v------+             |    |            |
+  | Compare to +--Different--+    |            |
+  | resource   |                  |            |
+  | in DB      +--Same------------+            |
+  +------------+                               |
+                                               |
+  +-------------------+                        |
+  | Create entry for  <-----Has no parent------+
+  | resource creation |                        |
+  +--------^----------+                  Has a parent
+           |                                   |
+           |                         +---------v-----+
+           +------Parent exists------+ Query parent  |
+                                     | on ODL (REST) |
+                                     +---------+-----+
+  +------------------+                         |
+  | Create entry for <---Parent doesn't exist--+
+  | parent creation  |
+  +------------------+
+
+For every error during the process the entry will remain in failed state but
+the error shouldn't stop processing of further entries.
+
+
+The implementation could be done in two phases where the parent handling is
+done in a second phase.
+For the first phase if we detect an entry that is in failed for a create/update
+operation and the resource doesn't exist on ODL we create a new "create
+resource" journal entry for the resource.
+
+This proposal utilises the journal mechanism for its operation, while the only
+part that deviates from the standard mode of operation is when it queries ODL
+directly. This direct query has to be done to get ODL's representation of the
+resource.
+
+Performance Impact
+------------------
+
+The maintenance thread will have another task to handle. This can lead to
+longer processing time and even cause the thread to skip an iteration.
+This is not an issue since the maintenance thread runs in parallel and doesn't
+directly impact the responsiveness of the system.
+
+Since most operations here involve I/O then CPU probably won't be impacted.
+
+Network traffic would be impacted slightly since we will attempt to fetch the
+resource each time from ODL and we might attempt to fetch its parent.
+This is however negligible as we do this only for failed entries, which are
+expected to appear rarely.
+
+
+Alternatives
+------------
+
+The partial sync process could make this process obsolete (along with full
+sync), but it's a far more complicated and problematic process.
+It's better to start with this process which is more lightweight and doable
+and consider partial sync in the future.
+
+
+Assignee(s)
+===========
+
+Primary assignee:
+  mkolesni <mkolesni@redhat.com>
+
+Other contributors:
+  None
+
+
+References
+==========
+
+https://goo.gl/IOMpzJ
+
diff --git a/networking-odl/doc/source/specs/qos-driver.rst b/networking-odl/doc/source/specs/qos-driver.rst
new file mode 100644 (file)
index 0000000..d2faad1
--- /dev/null
@@ -0,0 +1,104 @@
+==========================================
+Quality of Service Driver for OpenDaylight
+==========================================
+
+This spec describes the plan to implement quality of service driver for
+OpenDaylight Controller.
+
+Problem Statement
+=================
+The OpenStack networking project (neutron [1]) has an extension plugin which
+exposes an API for quality of service that can also be implemented by
+any backend networking service provider to support QoS. These APIs provide a
+way to integrate OpenStack Neutron QoS with any of the backend QoS providers.
+OpenDaylight will provide backend for existing functionalities in neutron-QoS.
+A notification driver is needed for integration of existing api in Openstack
+neutron for QoS with OpenDaylight backend.
+
+Proposed Change
+===============
+This change will introduce a new notification driver in networking-odl that
+will take CRUD requests data for QoS policies from OpenStack neutron and notify
+the OpenDaylight controller about the respective operation.
+
+Detailed Design
+===============
+To enable the formal end to end integration between OpenStack QoS and
+OpenDaylight requires an networking-odl QoS notification driver. QoS driver
+will act as a shim layer between OpenStack and OpenDaylight that will carry
+out the following tasks:
+
+#. After getting QoS policy request data from neutron, it will log an operation
+   request in the opendaylightjournal table.
+
+#. The operation will be picked from the opendaylightjournal table and a REST
+   call for notifying the OpenDaylight server will be prepared and sent.
+
+#. This request will be processed by neutron northbound in OpenDaylight
+   (the OpenDaylight neutron northbound project). These models will be based
+   on the existing neutron qos plugin APIs.
+
+QoS providers in OpenDaylight can listen to these OpenDaylight Neutron
+Northbound QoS models and translate it to their specific yang models for QoS.
+The following diagram shows the high level integration between OpenStack and
+the OpenDaylight QoS provider::
+
+                           +---------------------------------------------+
+                           | OpenStack Network Server (neutron qos)      |
+                           |                                             |
+                           |            +---------------------+          |
+                           |            | networking-odl      |          |
+                           |            |                     |          |
+                           |            |     +---------------|          |
+                           |            |     | Notification  |          |
+                           |            |     | driver QoS    |          |
+                           +----------------------|----------------------+
+                                                  |
+                                                  | Rest Communication
+                                                  |
+                    OpenDaylight Controller       |
+                          +-----------------------|------------+
+                          |            +----------V----+       |
+                          | ODL        | QoS Yang Model|       |
+                          | Northbound |               |       |
+                          | (neutron)  +---------------+       |
+                          |                    |               |
+                          |                    |               |
+                          | ODL           +----V----+          |
+                          | Southbound    | QoS     |          |
+                          | (neutron)     +---------+          |
+                          +-----------------|------------------+
+                                            |
+                                            |
+                          +------------------------------------+
+                          |           Network/OVS              |
+                          |                                    |
+                          +------------------------------------+
+
+In the above diagram, the OpenDaylight components are shown just to understand
+the overall architecture, but it's out of scope of this spec's work items.
+This spec will only track progress related to networking-odl notification QoS
+driver work.
+
+Dependencies
+============
+It has a dependency on OpenDaylight Neutron Northbound QoS yang models, but
+that is out of scope of this spec.
+
+Impact
+======
+None
+
+Assignee(s)
+===========
+
+The following developers will be the initial contributors to the driver, but
+we will be happy to have more contributors on board.
+
+* Manjeet Singh Bhatia (manjeet.s.bhatia@intel.com, irc: manjeets)
+
+References
+==========
+
+[1] http://docs.openstack.org/developer/neutron/devref/quality_of_service.html
+[2] https://wiki.opendaylight.org/view/NeutronNorthbound:Main
diff --git a/networking-odl/doc/source/specs/sfc-driver.rst b/networking-odl/doc/source/specs/sfc-driver.rst
new file mode 100644 (file)
index 0000000..388b2c1
--- /dev/null
@@ -0,0 +1,139 @@
+=================================================
+Service Function Chaining Driver for OpenDaylight
+=================================================
+
+This spec describes the plan to implement OpenStack networking-sfc[1] driver
+for OpenDaylight Controller.
+
+Problem Statement
+===================
+OpenStack SFC project (networking-sfc [1]) exposes generic APIs[2] for Service
+Function Chaining (SFC) that can be implemented by any backend networking
+service provider to support SFC. These APIs provide a way to integrate
+OpenStack SFC with any of the backend SFC providers. OpenDaylight SFC project
+provides a very mature implementation of SFC [3], but currently there is no
+formal integration mechanism present to consume OpenDaylight as an SFC provider
+for networking-sfc.
+
+Recently Tacker project [4] has been approved as an official project in
+OpenStack, that opens many possibilities to realize the NFV use cases (e.g SFC)
+using OpenStack as a platform. Providing a formal end to end integration
+between OpenStack and OpenDaylight for SFC use case will help NFV users
+leverage OpenStack, Tacker and OpenDaylight as a solution. A POC for this
+integration work has already been implemented [5][6] by Tim Rozet, but in
+this POC work, Tacker directly communicates to OpenDaylight SFC & classifier
+providers and not through OpenStack SFC APIs (networking-sfc).
+
+Proposed Change
+===============
+Implementation of this spec will introduce a networking-sfc[1] driver for
+OpenDaylight Controller in networking-odl project that will pass through
+the networking-sfc API's call to the OpenDaylight Controller.
+
+Detailed Design
+===============
+To enable the formal end to end integration between OpenStack SFC and
+OpenDaylight requires an SFC driver for OpenDaylight. ODL SFC driver will
+act as a shim layer between OpenStack and OpenDaylight that will carry out
+following two main tasks:
+
+* Translation of OpenStack SFC Classifier API to ODL SFC classifier yang
+  models**.
+
+* Translation of OpenStack SFC API's to OpenDaylight Neutron Northbound
+  SFC models** [8].
+
+** This work is not yet done, but the OpenDaylight neutron northbound project
+needs to come up with yang models for SFC classification/chain. These models
+will be based on the existing networking-sfc APIs. This work is out of scope
+of networking-odl work and will be collaborated in the scope of OpenDaylight
+Neutron Northbound project.
+
+SFC providers (E.g Net-Virt, GBP, SFC ) in OpenDaylight can listen to these
+OpenDaylight Neutron Northbound SFC models and translate it to their specific
+yang models for classification/sfc. The following diagram shows the high level
+integration between OpenStack and the OpenDaylight SFC provider::
+
+                         +---------------------------------------------+
+                         | OpenStack Network Server (networking-sfc)   |
+                         |            +-------------------+            |
+                         |            | networking-odl    |            |
+                         |            |   SFC Driver      |            |
+                         |            +-------------------+            |
+                         +----------------------|----------------------+
+                                                | REST Communication
+                                                |
+                                      -----------------------
+             OpenDaylight Controller |                       |
+             +-----------------------|-----------------------|---------------+
+             |            +----------v----+              +---v---+           |
+             | Neutron    | SFC Classifier|              |SFC    | Neutron   |
+             | Northbound |    Models     |              |Models | Northbound|
+             | Project    +---------------+              +-------+ Project   |
+             |               /        \                      |               |
+             |             /           \                     |               |
+             |           /               \                   |               |
+             |     +-----V--+        +---V----+          +---V---+           |
+             |     |Net-Virt|  ...   |   GBP  |          |  SFC  |  ...      |
+             |     +---------+       +--------+          +-------+           |
+             +-----------|----------------|------------------|---------------+
+                         |                |                  |
+                         |                |                  |
+             +-----------V----------------V------------------V---------------+
+             |                     Network/OVS                               |
+             |                                                               |
+             +---------------------------------------------------------------+
+
+In the above architecture, the opendaylight components are shown just to
+understand the overall architecture, but it's out of scope of this spec's
+work items. This spec will only track progress related to networking-odl
+OpenStack sfc driver work.
+
+Given that OpenStack SFC APIs are port-pair based APIs and OpenDaylight SFC
+APIs are based on IETF SFC yang models[8], there might be situations where
+translation requires API enhancements from OpenStack SFC. The Networking SFC
+team is open for these new enhancement requirements given that they are generic
+enough to be leveraged by other backend SFC providers[9]. This work will be
+leveraging the POC work done by Tim [10] to come up with the first version of
+SFC driver.
+
+Dependencies
+============
+It has a dependency on OpenDaylight Neutron Northbound SFC classifier and chain
+yang models, but that is out of scope of this spec.
+
+Impact
+======
+None
+
+Assignee(s)
+===========
+
+The following developers will be the initial contributors to the driver, but
+we will be happy to have more contributors on board.
+
+* Anil Vishnoi (vishnoianil@gmail.com, irc: vishnoianil)
+* Tim Rozet (trozet@redhat.com, irc: trozet)
+
+References
+==========
+
+[1] http://docs.openstack.org/developer/networking-sfc/
+
+[2] https://github.com/openstack/networking-sfc/blob/master/doc/source/api.rst
+
+[3] https://wiki.opendaylight.org/view/Service_Function_Chaining:Main
+
+[4] https://wiki.openstack.org/wiki/Tacker
+
+[5] https://github.com/trozet/tacker/tree/SFC_brahmaputra/tacker/sfc
+
+[6] https://github.com/trozet/tacker/tree/SFC_brahmaputra/tacker/sfc_classifier
+
+[7] https://tools.ietf.org/html/draft-ietf-netmod-acl-model-05
+
+[8] https://wiki.opendaylight.org/view/NeutronNorthbound:Main
+
+[9] http://eavesdrop.openstack.org/meetings/service_chaining/2016/service_chaining.2016-03-31-17.00.log.html
+
+[10] https://github.com/trozet/tacker/blob/SFC_brahmaputra/tacker/sfc/drivers/opendaylight.py
diff --git a/networking-odl/doc/source/usage.rst b/networking-odl/doc/source/usage.rst
new file mode 100644 (file)
index 0000000..003ed66
--- /dev/null
@@ -0,0 +1,7 @@
+========
+Usage
+========
+
+To use networking-odl in a project::
+
+    import networking_odl
diff --git a/networking-odl/etc/neutron/plugins/ml2/ml2_conf_odl.ini b/networking-odl/etc/neutron/plugins/ml2/ml2_conf_odl.ini
new file mode 100644 (file)
index 0000000..8218073
--- /dev/null
@@ -0,0 +1,61 @@
+# Configuration for the OpenDaylight MechanismDriver
+
+[ml2_odl]
+# (StrOpt) OpenDaylight REST URL
+# If this is not set then no HTTP requests will be made.
+#
+# url =
+# Example: url = http://192.168.56.1:8080/controller/nb/v2/neutron
+
+# (StrOpt) Username for HTTP basic authentication to ODL.
+#
+# username =
+# Example: username = admin
+
+# (StrOpt) Password for HTTP basic authentication to ODL.
+#
+# password =
+# Example: password = admin
+
+# (IntOpt) Timeout in seconds to wait for ODL HTTP request completion.
+# This is an optional parameter, default value is 10 seconds.
+#
+# timeout = 10
+# Example: timeout = 15
+
+# (IntOpt) Timeout in minutes to wait for a Tomcat session timeout.
+# This is an optional parameter, default value is 30 minutes.
+#
+# session_timeout = 30
+# Example: session_timeout = 60
+
+# (IntOpt) Timeout in seconds for the V2 driver thread to fire off
+# another thread run through the journal database.
+#
+# sync_timeout = 10
+# Example: sync_timeout = 10
+
+# (IntOpt) Number of times to retry a journal transaction before
+# marking it 'failed'.
+#
+# retry_count = 5
+# Example: retry_count = 5
+
+# (IntOpt) (V2 driver) Journal maintenance operations interval in seconds.
+#
+# maintenance_interval = 300
+# Example: maintenance_interval = 30
+
+# (IntOpt) (V2 driver) Time to keep completed rows in seconds.
+# Completed rows retention will be checked every maintenance_interval by the
+# cleanup thread.
+# To disable completed rows deletion value should be -1
+#
+# completed_rows_retention = 600
+# Example: completed_rows_retention = 30
+
+# (IntOpt) (V2 driver) Timeout in seconds to wait before marking a processing
+# row back to pending state.
+#
+# processing_timeout = 100
+# Example: processing_timeout = 200
diff --git a/networking-odl/etc/policy.json b/networking-odl/etc/policy.json
new file mode 100644 (file)
index 0000000..4c7f003
--- /dev/null
@@ -0,0 +1,143 @@
+{
+    "context_is_admin":  "role:admin",
+    "admin_or_owner": "rule:context_is_admin or tenant_id:%(tenant_id)s",
+    "context_is_advsvc":  "role:advsvc",
+    "admin_or_network_owner": "rule:context_is_admin or tenant_id:%(network:tenant_id)s",
+    "admin_only": "rule:context_is_admin",
+    "regular_user": "",
+    "shared": "field:networks:shared=True",
+    "shared_firewalls": "field:firewalls:shared=True",
+    "external": "field:networks:router:external=True",
+    "default": "rule:admin_or_owner",
+
+    "create_subnet": "rule:admin_or_network_owner",
+    "get_subnet": "rule:admin_or_owner or rule:shared",
+    "update_subnet": "rule:admin_or_network_owner",
+    "delete_subnet": "rule:admin_or_network_owner",
+
+    "create_network": "",
+    "get_network": "rule:admin_or_owner or rule:shared or rule:external or rule:context_is_advsvc",
+    "get_network:router:external": "rule:regular_user",
+    "get_network:segments": "rule:admin_only",
+    "get_network:provider:network_type": "rule:admin_only",
+    "get_network:provider:physical_network": "rule:admin_only",
+    "get_network:provider:segmentation_id": "rule:admin_only",
+    "get_network:queue_id": "rule:admin_only",
+    "create_network:shared": "rule:admin_only",
+    "create_network:router:external": "rule:admin_only",
+    "create_network:segments": "rule:admin_only",
+    "create_network:provider:network_type": "rule:admin_only",
+    "create_network:provider:physical_network": "rule:admin_only",
+    "create_network:provider:segmentation_id": "rule:admin_only",
+    "update_network": "rule:admin_or_owner",
+    "update_network:segments": "rule:admin_only",
+    "update_network:shared": "rule:admin_only",
+    "update_network:provider:network_type": "rule:admin_only",
+    "update_network:provider:physical_network": "rule:admin_only",
+    "update_network:provider:segmentation_id": "rule:admin_only",
+    "update_network:router:external": "rule:admin_only",
+    "delete_network": "rule:admin_or_owner",
+
+    "create_port": "",
+    "create_port:mac_address": "rule:admin_or_network_owner or rule:context_is_advsvc",
+    "create_port:fixed_ips": "rule:admin_or_network_owner or rule:context_is_advsvc",
+    "create_port:port_security_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
+    "create_port:binding:host_id": "rule:admin_only",
+    "create_port:binding:profile": "rule:admin_only",
+    "create_port:mac_learning_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
+    "get_port": "rule:admin_or_owner or rule:context_is_advsvc",
+    "get_port:queue_id": "rule:admin_only",
+    "get_port:binding:vif_type": "rule:admin_only",
+    "get_port:binding:vif_details": "rule:admin_only",
+    "get_port:binding:host_id": "rule:admin_only",
+    "get_port:binding:profile": "rule:admin_only",
+    "update_port": "rule:admin_or_owner or rule:context_is_advsvc",
+    "update_port:fixed_ips": "rule:admin_or_network_owner or rule:context_is_advsvc",
+    "update_port:port_security_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
+    "update_port:binding:host_id": "rule:admin_only",
+    "update_port:binding:profile": "rule:admin_only",
+    "update_port:mac_learning_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
+    "delete_port": "rule:admin_or_owner or rule:context_is_advsvc",
+
+    "get_router:ha": "rule:admin_only",
+    "create_router": "rule:regular_user",
+    "create_router:external_gateway_info:enable_snat": "rule:admin_only",
+    "create_router:distributed": "rule:admin_only",
+    "create_router:ha": "rule:admin_only",
+    "get_router": "rule:admin_or_owner",
+    "get_router:distributed": "rule:admin_only",
+    "update_router:external_gateway_info:enable_snat": "rule:admin_only",
+    "update_router:distributed": "rule:admin_only",
+    "update_router:ha": "rule:admin_only",
+    "delete_router": "rule:admin_or_owner",
+
+    "add_router_interface": "rule:admin_or_owner",
+    "remove_router_interface": "rule:admin_or_owner",
+
+    "create_router:external_gateway_info:external_fixed_ips": "rule:admin_only",
+    "update_router:external_gateway_info:external_fixed_ips": "rule:admin_only",
+
+    "create_firewall": "",
+    "get_firewall": "rule:admin_or_owner",
+    "create_firewall:shared": "rule:admin_only",
+    "get_firewall:shared": "rule:admin_only",
+    "update_firewall": "rule:admin_or_owner",
+    "update_firewall:shared": "rule:admin_only",
+    "delete_firewall": "rule:admin_or_owner",
+
+    "create_firewall_policy": "",
+    "get_firewall_policy": "rule:admin_or_owner or rule:shared_firewalls",
+    "create_firewall_policy:shared": "rule:admin_or_owner",
+    "update_firewall_policy": "rule:admin_or_owner",
+    "delete_firewall_policy": "rule:admin_or_owner",
+
+    "create_firewall_rule": "",
+    "get_firewall_rule": "rule:admin_or_owner or rule:shared_firewalls",
+    "update_firewall_rule": "rule:admin_or_owner",
+    "delete_firewall_rule": "rule:admin_or_owner",
+
+    "create_qos_queue": "rule:admin_only",
+    "get_qos_queue": "rule:admin_only",
+
+    "update_agent": "rule:admin_only",
+    "delete_agent": "rule:admin_only",
+    "get_agent": "rule:admin_only",
+
+    "create_dhcp-network": "rule:admin_only",
+    "delete_dhcp-network": "rule:admin_only",
+    "get_dhcp-networks": "rule:admin_only",
+    "create_l3-router": "rule:admin_only",
+    "delete_l3-router": "rule:admin_only",
+    "get_l3-routers": "rule:admin_only",
+    "get_dhcp-agents": "rule:admin_only",
+    "get_l3-agents": "rule:admin_only",
+    "get_loadbalancer-agent": "rule:admin_only",
+    "get_loadbalancer-pools": "rule:admin_only",
+
+    "create_floatingip": "rule:regular_user",
+    "create_floatingip:floating_ip_address": "rule:admin_only",
+    "update_floatingip": "rule:admin_or_owner",
+    "delete_floatingip": "rule:admin_or_owner",
+    "get_floatingip": "rule:admin_or_owner",
+
+    "create_network_profile": "rule:admin_only",
+    "update_network_profile": "rule:admin_only",
+    "delete_network_profile": "rule:admin_only",
+    "get_network_profiles": "",
+    "get_network_profile": "",
+    "update_policy_profiles": "rule:admin_only",
+    "get_policy_profiles": "",
+    "get_policy_profile": "",
+
+    "create_metering_label": "rule:admin_only",
+    "delete_metering_label": "rule:admin_only",
+    "get_metering_label": "rule:admin_only",
+
+    "create_metering_label_rule": "rule:admin_only",
+    "delete_metering_label_rule": "rule:admin_only",
+    "get_metering_label_rule": "rule:admin_only",
+
+    "get_service_provider": "rule:regular_user",
+    "get_lsn": "rule:admin_only",
+    "create_lsn": "rule:admin_only"
+}
diff --git a/networking-odl/networking_odl/__init__.py b/networking-odl/networking_odl/__init__.py
new file mode 100644 (file)
index 0000000..f2b8357
--- /dev/null
@@ -0,0 +1,23 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
import gettext
import six


# Install the gettext _() translation function into builtins for the whole
# package. On Python 2 the unicode=1 flag makes _() return unicode objects
# instead of encoded byte strings; Python 3's gettext is always text-based
# and no longer accepts that flag.
if six.PY2:
    gettext.install('networking_odl', unicode=1)
else:
    gettext.install('networking_odl')
diff --git a/networking-odl/networking_odl/_i18n.py b/networking-odl/networking_odl/_i18n.py
new file mode 100644 (file)
index 0000000..d338871
--- /dev/null
@@ -0,0 +1,50 @@
+# Copyright 2016 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""oslo.i18n integration module.
+
+See http://docs.openstack.org/developer/oslo.i18n/usage.html .
+
+"""
+
import oslo_i18n

# Translation domain: the name of the message catalog this package uses.
DOMAIN = "networking_odl"

_translators = oslo_i18n.TranslatorFactory(domain=DOMAIN)

# The primary translation function using the well-known name "_"
_ = _translators.primary

# The contextual translation function using the name "_C"
# requires oslo.i18n >=2.1.0
_C = _translators.contextual_form

# The plural translation function using the name "_P"
# requires oslo.i18n >=2.1.0
_P = _translators.plural_form

# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical


def get_available_languages():
    """Return the languages that have translations for our domain."""
    # Original body was indented 8 spaces (PEP8 E111); normalized to 4.
    return oslo_i18n.get_available_languages(DOMAIN)
diff --git a/networking-odl/networking_odl/cmd/__init__.py b/networking-odl/networking_odl/cmd/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/networking-odl/networking_odl/cmd/set_ovs_hostconfigs.py b/networking-odl/networking_odl/cmd/set_ovs_hostconfigs.py
new file mode 100644 (file)
index 0000000..8b8b1d3
--- /dev/null
@@ -0,0 +1,123 @@
+# Copyright (c) 2016 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from oslo_config import cfg
+from oslo_log import log
+from oslo_serialization import jsonutils
+
+from neutron._i18n import _
+from neutron._i18n import _LE
+from neutron._i18n import _LI
+from neutron.agent.common import utils
+from neutron.common import config
+
+LOG = log.getLogger(__name__)
+
+
class SetOvsHostconfigs(object):
    """Write ODL host configuration entries into the local OVS database.

    Each hostconfig item is stored as an external_ids entry on the single
    Open_vSwitch table row, where ODL's port-binding controller reads it.
    """

    # Refer below for ovs ext-id strings
    # https://review.openstack.org/#/c/309630/
    extid_str = 'external_ids:{}={}'
    odl_os_hconf_str = 'odl_os_hostconfig_config_{}'
    odl_os_hostid_str = 'odl_os_hostconfig_hostid'
    odl_os_hosttype_str = 'odl_os_hostconfig_hosttype'

    # TODO(mzmalick): use neutron.agent.ovsdb instead of subprocess.Popen
    ovs_cmd_get_uuid = ['ovs-vsctl', 'get', 'Open_vSwitch', '.', '_uuid']
    ovs_cmd_set_extid = ['ovs-vsctl', 'set', 'Open_vSwitch', '', '']

    # Indexes into ovs_cmd_set_extid for the row uuid and the ext-id arg.
    # NOTE(review): ovs_cmd_set_extid is a class-level list mutated in
    # place, so concurrent instances would race on it — acceptable for
    # this one-shot CLI tool.
    UUID = 3
    EXTID = 4

    def __init__(self):
        # Cache the uuid of the (single) Open_vSwitch row up front.
        self.ovs_uuid = self.get_ovs_uuid()

    def ovs_exec_cmd(self, cmd):
        """Run an ovs-vsctl command as root and return its output."""
        LOG.info(_LI("SET-HOSTCONFIGS: Executing cmd: %s"), ' '.join(cmd))
        return utils.execute(cmd, return_stderr=True, run_as_root=True)

    def get_ovs_uuid(self):
        """Return the _uuid of the Open_vSwitch table row."""
        return self.ovs_exec_cmd(self.ovs_cmd_get_uuid)[0].strip()

    def set_extid_hostname(self, hname):
        """Store the hostname under the odl_os_hostconfig_hostid ext-id."""
        self.ovs_cmd_set_extid[self.UUID] = self.ovs_uuid
        self.ovs_cmd_set_extid[self.EXTID] = self.extid_str.format(
            self.odl_os_hostid_str, hname)
        return self.ovs_exec_cmd(self.ovs_cmd_set_extid)

    def set_extid_hosttype(self, htype):
        """Store the host type under the odl_os_hostconfig_hosttype ext-id."""
        self.ovs_cmd_set_extid[self.UUID] = self.ovs_uuid
        self.ovs_cmd_set_extid[self.EXTID] = self.extid_str.format(
            self.odl_os_hosttype_str, htype)
        return self.ovs_exec_cmd(self.ovs_cmd_set_extid)

    def set_extid_hostconfig(self, htype, hconfig):
        """Store one hostconfig dict, JSON-encoded, under its typed ext-id."""
        ext_htype = self.odl_os_hconf_str.format(
            htype.lower().replace(' ', '_'))
        self.ovs_cmd_set_extid[self.UUID] = self.ovs_uuid
        self.ovs_cmd_set_extid[self.EXTID] = self.extid_str.format(
            ext_htype, jsonutils.dumps(hconfig))
        return self.ovs_exec_cmd(self.ovs_cmd_set_extid)

    def set_ovs_extid_hostconfigs(self, conf):
        """Parse conf.ovs_hostconfigs JSON and push every entry into OVS."""
        if not conf.ovs_hostconfigs:
            LOG.error(_LE("ovs_hostconfigs argument needed!"))
            return

        # Accept single-quoted pseudo-JSON from the command line.
        # BUG FIX: str.replace() returns a new string; the original code
        # discarded the result, so the normalization never took effect.
        json_str = cfg.CONF.ovs_hostconfigs.replace("\'", "\"")
        LOG.debug("SET-HOSTCONFIGS: JSON String %s", json_str)

        self.set_extid_hostname(cfg.CONF.host)
        htype_config = jsonutils.loads(json_str)

        for htype, hconfig in htype_config.items():
            self.set_extid_hostconfig(htype, hconfig)
+
+
def setup_conf():
    """setup cmdline options."""
    hostconfigs_help = _(
        "OVS hostconfiguration for OpenDaylight "
        "as a JSON string")
    cli_opts = [cfg.StrOpt('ovs_hostconfigs', help=hostconfigs_help)]

    conf = cfg.CONF
    conf.register_cli_opts(cli_opts)
    # 'host' comes from neutron's common config; make sure it is registered.
    conf.import_opt('host', 'neutron.common.config')
    # Trigger parsing of the command line.
    conf()
    return conf
+
+
def main():
    """Entry point: parse CLI options, set up logging, push hostconfigs."""

    conf = setup_conf()
    config.setup_logging()
    SetOvsHostconfigs().set_ovs_extid_hostconfigs(conf)

#
# command line example (run without line breaks):
#
# set_ovs_hostconfigs.py  --ovs_hostconfigs='{"ODL L2": {
# "supported_vnic_types":[{"vnic_type":"normal", "vif_type":"ovs",
# "vif_details":{}}], "allowed_network_types":["local","vlan",
# "vxlan","gre"], "bridge_mappings":{"physnet1":"br-ex"}},
# "ODL L3": {}}' --debug
#

if __name__ == '__main__':
    main()
diff --git a/networking-odl/networking_odl/cmd/test_setup_hostconfig.sh b/networking-odl/networking_odl/cmd/test_setup_hostconfig.sh
new file mode 100755 (executable)
index 0000000..1651d0e
--- /dev/null
@@ -0,0 +1,3 @@
#!/bin/sh

# Manual smoke test: run set_ovs_hostconfigs.py with a sample ODL L2/L3
# hostconfig JSON blob so the stored external_ids can be inspected in the
# local OVSDB afterwards (ovs-vsctl list Open_vSwitch).
python set_ovs_hostconfigs.py  --debug --ovs_hostconfigs='{"ODL L2": {"supported_vnic_types":[{"vnic_type":"normal", "vif_type":"ovs", "vif_details":{}}], "allowed_network_types":["local","vlan", "vxlan","gre"], "bridge_mappings":{"physnet1":"br-ex"}}, "ODL L3": {"some_details": "dummy_details"}}'
diff --git a/networking-odl/networking_odl/common/__init__.py b/networking-odl/networking_odl/common/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/networking-odl/networking_odl/common/cache.py b/networking-odl/networking_odl/common/cache.py
new file mode 100644 (file)
index 0000000..6c44cc3
--- /dev/null
@@ -0,0 +1,197 @@
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import collections
+import six
+import sys
+import time
+
+from oslo_log import log
+
+from networking_odl._i18n import _LW
+
+
+LOG = log.getLogger(__name__)
+
+
class CacheEntry(collections.namedtuple('CacheEntry', ['timeout', 'values'])):
    """One cache slot: an expiry clock value plus the values fetched so far.

    Identity (not field equality) is used for hashing and comparison
    because two entries with equal fields are still distinct cache slots.
    """

    # exc_info captured if fetching this entry failed; None on success.
    error = None

    @classmethod
    def create(cls, timeout, *values):
        # BUG FIX: use cls instead of the hard-coded class name so that
        # subclasses of CacheEntry get instances of their own type.
        return cls(timeout, list(values))

    def add_value(self, value):
        """Append a freshly fetched value to this entry."""
        self.values.append(value)

    def is_expired(self, current_clock):
        """Return True once the entry's deadline has been reached."""
        return self.timeout <= current_clock

    def __hash__(self):
        return id(self)

    def __eq__(self, other):
        return self is other

    def __ne__(self, other):
        return not self.__eq__(other)
+
+
class Cache(object):
    '''Generic mapping class used to cache mapping

    Example of uses:
        - host name to IP addresses mapping
        - IP addresses to ODL networking topology elements mapping
    '''

    # TODO(Federico Ressi) after Mitaka: this class should store cached data
    # in a place shared between more hosts using a caching mechanism coherent
    # with other OpenStack libraries. This is specially interesting in the
    # context of reliability when there are more Neutron instances and direct
    # connection to ODL is broken.

    create_new_entry = CacheEntry.create

    def __init__(self, fetch_all_func):
        # fetch_all_func(keys) must yield (key, value) pairs for the given
        # keys; it is invoked lazily whenever entries are missing/expired.
        if not callable(fetch_all_func):
            message = 'Expected callable as parameter, got {!r}.'.format(
                fetch_all_func)
            raise TypeError(message)
        self._fetch_all = fetch_all_func
        self.clear()

    def clear(self):
        """Drop every cached entry."""
        self._entries = collections.OrderedDict()

    def fetch(self, key, timeout):
        """Return a single cached (or freshly fetched) value for 'key'."""
        __, value = self.fetch_any([key], timeout=timeout)
        return value

    def fetch_any(self, keys, timeout):
        """Return the first available (key, value) pair among 'keys'."""
        return next(self.fetch_all(keys=keys, timeout=timeout))

    def fetch_all(self, keys, timeout):
        """Yield (key, value) pairs for 'keys', fetching entries as needed.

        Raises CacheFetchError when some key still has no value afterwards.
        """
        # Current instant on the cache clock.
        # BUG FIX: the original used time.clock(), which measures CPU time
        # on POSIX (entries could long outlive their wall-clock timeout)
        # and was removed in Python 3.8. Prefer the monotonic wall clock,
        # falling back to time.time() on interpreters without it.
        current_clock = getattr(time, 'monotonic', time.time)()
        # this is the moment in the future in which new entries will expires
        new_entries_timeout = current_clock + timeout
        # entries to be fetched because missing or expired
        new_entries = collections.OrderedDict()
        # all entries missing or expired
        missing = collections.OrderedDict()
        # captured error for the case a problem has to be reported
        cause_exc_info = None

        for key in keys:
            entry = self._entries.get(key)
            if entry is None or entry.is_expired(current_clock) or entry.error:
                # this entry has to be fetched
                new_entries[key] = missing[key] =\
                    self.create_new_entry(new_entries_timeout)
            elif entry.values:
                # Yield existing entry
                for value in entry.values:
                    yield key, value
            else:
                # This entry is not expired and there were no error where it
                # has been fetch. Therefore we accept that there are no values
                # for given key until it expires. This is going to produce a
                # KeyError if it is still missing at the end of this function.
                missing[key] = entry

        if missing:
            if new_entries:
                # Fetch some entries and update the cache
                try:
                    new_entry_keys = tuple(new_entries)
                    for key, value in self._fetch_all(new_entry_keys):
                        entry = new_entries.get(key)
                        if entry:
                            # Add fresh new value
                            entry.add_value(value)
                        else:
                            # This key was not asked, but we take it in any
                            # way. "Noli equi dentes inspicere donati."
                            new_entries[key] = entry = self.create_new_entry(
                                new_entries_timeout, value)

                # pylint: disable=broad-except
                except Exception:
                    # Something has gone wrong: update and yield what got until
                    # now before raising any error
                    cause_exc_info = sys.exc_info()
                    LOG.warning(
                        _LW('Error fetching values for keys: %r'),
                        ', '.join(repr(k) for k in new_entry_keys),
                        exc_info=cause_exc_info)

                # update the cache with new fresh entries
                self._entries.update(new_entries)

            missing_keys = []
            for key, entry in six.iteritems(missing):
                if entry.values:
                    # yield entries that was missing before
                    for value in entry.values:
                        # Yield just fetched entry
                        yield key, value
                else:
                    if cause_exc_info:
                        # mark this entry as failed
                        entry.error = cause_exc_info
                    # after all this entry is still without any value
                    missing_keys.append(key)

            if missing_keys:
                # After all some entry is still missing, probably because the
                # key was invalid. It's time to raise an error.
                missing_keys = tuple(missing_keys)
                if not cause_exc_info:
                    # Search for the error cause in missing entries
                    for key in missing_keys:
                        error = self._entries[key].error
                        if error:
                            # A cached entry for which fetch method produced an
                            # error will produce the same error if fetch method
                            # fails to fetch it again without giving any error
                            # Is this what we want?
                            break

                    else:
                        # If the cause of the problem is not known then
                        # probably keys were wrong
                        message = 'Invalid keys: {!r}'.format(
                            ', '.join(missing_keys))
                        error = KeyError(message)

                    try:
                        raise error
                    except KeyError:
                        cause_exc_info = sys.exc_info()

                raise CacheFetchError(
                    missing_keys=missing_keys, cause_exc_info=cause_exc_info)
+
+
class CacheFetchError(KeyError):
    """Raised when one or more keys could not be fetched into the cache.

    Carries the tuple of keys left without values and the sys.exc_info()
    triple of the underlying failure (if any was captured).
    """

    def __init__(self, missing_keys, cause_exc_info):
        message = str(cause_exc_info[1])
        super(CacheFetchError, self).__init__(message)
        self.cause_exc_info = cause_exc_info
        self.missing_keys = missing_keys

    def reraise_cause(self):
        """Re-raise the original failure, preserving its traceback."""
        six.reraise(*self.cause_exc_info)
diff --git a/networking-odl/networking_odl/common/callback.py b/networking-odl/networking_odl/common/callback.py
new file mode 100644 (file)
index 0000000..d9d168b
--- /dev/null
@@ -0,0 +1,73 @@
+# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import collections
+
+from oslo_log import log as logging
+
+from neutron.callbacks import events
+from neutron.callbacks import registry
+from neutron.callbacks import resources
+
+from networking_odl.common import constants as odl_const
+
+LOG = logging.getLogger(__name__)
+
# Pair of ODL resource names (singular for payload keys, plural for URLs).
ODLResource = collections.namedtuple('ODLResource', ('singular', 'plural'))
# Maps a neutron callback resource name to its ODL resource names.
_RESOURCE_MAPPING = {
    resources.SECURITY_GROUP: ODLResource(odl_const.ODL_SG, odl_const.ODL_SGS),
    resources.SECURITY_GROUP_RULE: ODLResource(odl_const.ODL_SG_RULE,
                                               odl_const.ODL_SG_RULES),
}
# Maps a neutron callback event to the corresponding ODL operation name.
_OPERATION_MAPPING = {
    events.AFTER_CREATE: odl_const.ODL_CREATE,
    events.AFTER_UPDATE: odl_const.ODL_UPDATE,
    events.AFTER_DELETE: odl_const.ODL_DELETE,
}
+
+
class OdlSecurityGroupsHandler(object):
    """Relays neutron security-group callbacks to the ODL driver."""

    def __init__(self, odl_driver):
        self.odl_driver = odl_driver
        self._subscribe()

    def _subscribe(self):
        # Create/delete events apply to both SGs and SG rules; update
        # events are only emitted for SGs themselves.
        subscriptions = [
            (resources.SECURITY_GROUP, events.AFTER_CREATE),
            (resources.SECURITY_GROUP_RULE, events.AFTER_CREATE),
            (resources.SECURITY_GROUP, events.AFTER_DELETE),
            (resources.SECURITY_GROUP_RULE, events.AFTER_DELETE),
            (resources.SECURITY_GROUP, events.AFTER_UPDATE),
        ]
        for resource, event in subscriptions:
            registry.subscribe(self.sg_callback, resource, event)

    def sg_callback(self, resource, event, trigger, **kwargs):
        """Translate a neutron callback into an ODL sync request."""
        payload = kwargs.get(resource)
        resource_id = kwargs.get("%s_id" % resource)
        odl_resource = _RESOURCE_MAPPING[resource]
        operation = _OPERATION_MAPPING[event]
        resource_dict = (None if payload is None
                         else {odl_resource.singular: payload})

        LOG.debug("Calling sync_from_callback with ODL_OPS (%(odl_ops)s) "
                  "ODL_RES_TYPE (%(odl_res_type)s) RES_ID (%(res_id)s) "
                  "ODL_RES_DICT (%(odl_res_dict)s) KWARGS (%(kwargs)s)",
                  {'odl_ops': operation, 'odl_res_type': odl_resource,
                   'res_id': resource_id, 'odl_res_dict': resource_dict,
                   'kwargs': kwargs})

        self.odl_driver.sync_from_callback(operation, odl_resource,
                                           resource_id, resource_dict)
diff --git a/networking-odl/networking_odl/common/client.py b/networking-odl/networking_odl/common/client.py
new file mode 100644 (file)
index 0000000..45349e9
--- /dev/null
@@ -0,0 +1,94 @@
+# Copyright (c) 2014 Red Hat Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from oslo_config import cfg
+from oslo_log import log
+from oslo_serialization import jsonutils
+from oslo_utils import excutils
+import requests
+
+
+LOG = log.getLogger(__name__)
+cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config')
+
+
class OpenDaylightRestClient(object):
    """Thin JSON/HTTP client for the OpenDaylight REST API."""

    @classmethod
    def create_client(cls, url=None):
        """Build a client from the [ml2_odl] config section.

        Returns an OpenDaylightLwtClient instead when lightweight testing
        is enabled.
        """
        if cfg.CONF.ml2_odl.enable_lightweight_testing:
            LOG.debug("ODL lightweight testing is enabled, "
                      "returning a OpenDaylightLwtClient instance")

            # Imported here, otherwise we create a dependency loop.
            from networking_odl.common import lightweight_testing as lwt
            cls = lwt.OpenDaylightLwtClient

        return cls(
            url or cfg.CONF.ml2_odl.url,
            cfg.CONF.ml2_odl.username,
            cfg.CONF.ml2_odl.password,
            cfg.CONF.ml2_odl.timeout)

    def __init__(self, url, username, password, timeout):
        self.url = url
        self.timeout = timeout
        self.auth = (username, password)

    def get(self, urlpath='', data=None):
        return self.request('get', urlpath, data)

    def put(self, urlpath='', data=None):
        return self.request('put', urlpath, data)

    def delete(self, urlpath='', data=None):
        return self.request('delete', urlpath, data)

    def request(self, method, urlpath='', data=None):
        """Issue an HTTP request against the configured ODL endpoint."""
        headers = {'Content-Type': 'application/json'}
        url = '/'.join([self.url, urlpath])
        LOG.debug(
            "Sending METHOD (%(method)s) URL (%(url)s) JSON (%(data)s)",
            {'method': method, 'url': url, 'data': data})
        return requests.request(
            method, url=url, headers=headers, data=data, auth=self.auth,
            timeout=self.timeout)

    def sendjson(self, method, urlpath, obj):
        """Send json to the OpenDaylight controller."""
        data = jsonutils.dumps(obj, indent=2) if obj else None
        return self._check_response(self.request(method, urlpath, data))

    def try_delete(self, urlpath):
        """Delete urlpath; treat 404 as already-deleted and return False."""
        response = self.delete(urlpath)
        if response.status_code == requests.codes.not_found:
            # The resource is already removed. ignore 404 gracefully
            LOG.debug("%(urlpath)s doesn't exist", {'urlpath': urlpath})
            return False
        else:
            self._check_response(response)
            return True

    def _check_response(self, response):
        """Raise (after logging) on HTTP errors; return the response."""
        try:
            response.raise_for_status()
        except requests.HTTPError as error:
            with excutils.save_and_reraise_exception():
                LOG.debug("Exception from ODL: %(e)s %(text)s",
                          {'e': error, 'text': response.text}, exc_info=1)
        else:
            LOG.debug("Got response:\n"
                      "(%(response)s)", {'response': response.text})
            return response

    # Backward-compatible alias: the method was originally published under
    # this misspelled name and may be referenced by subclasses/callers.
    _check_rensponse = _check_response
diff --git a/networking-odl/networking_odl/common/config.py b/networking-odl/networking_odl/common/config.py
new file mode 100644 (file)
index 0000000..c921242
--- /dev/null
@@ -0,0 +1,67 @@
+# Copyright (c) 2014 Red Hat Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from oslo_config import cfg
+
+from networking_odl._i18n import _
+
+
# Options for the [ml2_odl] section consumed by the ODL ML2 drivers.
odl_opts = [
    cfg.StrOpt('url',
               help=_("HTTP URL of OpenDaylight REST interface.")),
    cfg.StrOpt('username',
               help=_("HTTP username for authentication.")),
    cfg.StrOpt('password', secret=True,
               help=_("HTTP password for authentication.")),
    cfg.IntOpt('timeout', default=10,
               help=_("HTTP timeout in seconds.")),
    cfg.IntOpt('session_timeout', default=30,
               help=_("Tomcat session timeout in minutes.")),
    cfg.IntOpt('sync_timeout', default=10,
               help=_("(V2 driver) Sync thread timeout in seconds.")),
    cfg.IntOpt('retry_count', default=5,
               help=_("(V2 driver) Number of times to retry a row "
                      "before failing.")),
    cfg.IntOpt('maintenance_interval', default=300,
               help=_("(V2 driver) Journal maintenance operations interval "
                      "in seconds.")),
    cfg.IntOpt('completed_rows_retention', default=600,
               # Spaces added between concatenated fragments; the original
               # help text read "seconds.Completed ... thread.To disable".
               help=_("(V2 driver) Time to keep completed rows in seconds. "
                      "Completed rows retention will be checked every "
                      "maintenance_interval by the cleanup thread. "
                      "To disable completed rows deletion "
                      "value should be -1")),
    cfg.BoolOpt('enable_lightweight_testing',
                default=False,
                help=_('Test without real ODL.')),
    cfg.StrOpt('port_binding_controller',
               default='network-topology',
               help=_('Name of the controller to be used for port binding.')),
    # BUG FIX: default must be an int; the original passed the string '100'
    # to IntOpt, defeating its type validation.
    cfg.IntOpt('processing_timeout', default=100,
               help=_("(V2 driver) Time in seconds to wait before a "
                      "processing row is marked back to pending.")),
    cfg.StrOpt('odl_hostconf_uri',
               help=_("Path for ODL host configuration REST interface"),
               default="/restconf/operational/neutron:neutron/hostconfigs"),
    cfg.IntOpt('restconf_poll_interval', default=30,
               help=_("Poll interval in seconds for getting ODL hostconfig")),
]

cfg.CONF.register_opts(odl_opts, "ml2_odl")


def list_opts():
    """Entry point for oslo.config generator: expose this module's options."""
    return [('ml2_odl', odl_opts)]
diff --git a/networking-odl/networking_odl/common/constants.py b/networking-odl/networking_odl/common/constants.py
new file mode 100644 (file)
index 0000000..50c0117
--- /dev/null
@@ -0,0 +1,55 @@
+# Copyright (c) 2015 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Neutron core resources: singular forms tag journal rows and payload
+# keys, plural forms appear as collection names in ODL URL paths.
+ODL_NETWORK = 'network'
+ODL_NETWORKS = 'networks'
+ODL_SUBNET = 'subnet'
+ODL_SUBNETS = 'subnets'
+ODL_PORT = 'port'
+ODL_PORTS = 'ports'
+ODL_SG = 'security_group'
+ODL_SGS = 'security_groups'
+ODL_SG_RULE = 'security_group_rule'
+ODL_SG_RULES = 'security_group_rules'
+ODL_ROUTER = 'router'
+ODL_ROUTERS = 'routers'
+ODL_ROUTER_INTF = 'router_interface'
+ODL_FLOATINGIP = 'floatingip'
+ODL_FLOATINGIPS = 'floatingips'
+
+# LBaaS resources.
+ODL_LOADBALANCER = 'loadbalancer'
+ODL_LOADBALANCERS = 'loadbalancers'
+ODL_LISTENER = 'listener'
+ODL_LISTENERS = 'listeners'
+ODL_POOL = 'pool'
+ODL_POOLS = 'pools'
+ODL_MEMBER = 'member'
+ODL_MEMBERS = 'members'
+ODL_HEALTHMONITOR = 'healthmonitor'
+ODL_HEALTHMONITORS = 'healthmonitors'
+
+# Journal operations; ADD/REMOVE are used for router-interface events.
+ODL_CREATE = 'create'
+ODL_UPDATE = 'update'
+ODL_DELETE = 'delete'
+ODL_ADD = 'add'
+ODL_REMOVE = 'remove'
+
+# Sentinel object UUID for journal rows that carry no real UUID.
+ODL_UUID_NOT_USED = '0'
+
+# Constants for journal operation states
+PENDING = 'pending'
+PROCESSING = 'processing'
+FAILED = 'failed'
+COMPLETED = 'completed'
diff --git a/networking-odl/networking_odl/common/exceptions.py b/networking-odl/networking_odl/common/exceptions.py
new file mode 100644 (file)
index 0000000..f174c10
--- /dev/null
@@ -0,0 +1,20 @@
+# Copyright (c) 2014 Red Hat Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from neutron_lib import exceptions as exc
+
+
+class OpendaylightAuthError(exc.NeutronException):
+    """Raised when authentication against the OpenDaylight controller fails.
+
+    The full message text is supplied by the caller via the 'msg' parameter.
+    """
+
+    message = '%(msg)s'
diff --git a/networking-odl/networking_odl/common/filters.py b/networking-odl/networking_odl/common/filters.py
new file mode 100644 (file)
index 0000000..fb42a0e
--- /dev/null
@@ -0,0 +1,96 @@
+# Copyright (c) 2015 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+from networking_odl.common import constants as odl_const
+from networking_odl.common import utils as odl_utils
+
+
+def _filter_unmapped_null(resource_dict, unmapped_keys):
+    # NOTE(yamahata): bug work around
+    # https://bugs.eclipse.org/bugs/show_bug.cgi?id=475475
+    #   Null-value for an unmapped element causes next mapped
+    #   collection to contain a null value
+    #   JSON: { "unmappedField": null, "mappedCollection": [ "a" ] }
+    #
+    #   Java Object:
+    #   class Root {
+    #     Collection<String> mappedCollection = new ArrayList<String>;
+    #   }
+    #
+    #   Result:
+    #   Field B contains one element; null
+    #
+    # TODO(yamahata): update along side with neutron and ODL
+    #   add when neutron adds more extensions
+    #   delete when ODL neutron northbound supports it
+    # TODO(yamahata): do same thing for other resources
+    keys_to_del = [key for key in unmapped_keys
+                   if resource_dict.get(key) is None]
+    if keys_to_del:
+        odl_utils.try_del(resource_dict, keys_to_del)
+
+
+# Keys not mapped by the ODL northbound model; None values for these are
+# removed before sending (see _filter_unmapped_null for the bug reference).
+_NETWORK_UNMAPPED_KEYS = ['qos_policy_id']
+_PORT_UNMAPPED_KEYS = ['binding:profile', 'dns_name',
+                       'port_security_enabled', 'qos_policy_id']
+
+
+def _filter_network_create(network):
+    """Filter out network attributes not required for a create."""
+    odl_utils.try_del(network, ['status', 'subnets'])
+    _filter_unmapped_null(network, _NETWORK_UNMAPPED_KEYS)
+
+
+def _filter_network_update(network):
+    """Filter out network attributes not sent on an update."""
+    # id/tenant_id identify the resource; status/subnets are server-managed.
+    odl_utils.try_del(network, ['id', 'status', 'subnets', 'tenant_id'])
+    _filter_unmapped_null(network, _NETWORK_UNMAPPED_KEYS)
+
+
+def _filter_subnet_update(subnet):
+    """Filter out subnet attributes not sent on an update."""
+    odl_utils.try_del(subnet, ['id', 'network_id', 'ip_version', 'cidr',
+                      'allocation_pools', 'tenant_id'])
+
+
+def _filter_port_create(port):
+    """Filter out port attributes not required for a create."""
+    # 'status' is server-generated, so it is dropped from the payload.
+    odl_utils.try_del(port, ['status'])
+    _filter_unmapped_null(port, _PORT_UNMAPPED_KEYS)
+
+
+def _filter_port_update(port):
+    """Filter out port attributes for an update operation."""
+    # Identity and server-managed fields are removed before sending.
+    odl_utils.try_del(port, ['network_id', 'id', 'status', 'mac_address',
+                      'tenant_id', 'fixed_ips'])
+    _filter_unmapped_null(port, _PORT_UNMAPPED_KEYS)
+
+
+def _filter_router_update(router):
+    """Filter out attributes for an update operation."""
+    # id/tenant_id identify the resource; status is server-managed.
+    odl_utils.try_del(router, ['id', 'tenant_id', 'status'])
+
+
+# Dispatch table mapping (resource type, operation) to the in-place
+# filter applied before the payload is sent to ODL.
+_FILTER_MAP = {
+    (odl_const.ODL_NETWORK, odl_const.ODL_CREATE): _filter_network_create,
+    (odl_const.ODL_NETWORK, odl_const.ODL_UPDATE): _filter_network_update,
+    (odl_const.ODL_SUBNET, odl_const.ODL_UPDATE): _filter_subnet_update,
+    (odl_const.ODL_PORT, odl_const.ODL_CREATE): _filter_port_create,
+    (odl_const.ODL_PORT, odl_const.ODL_UPDATE): _filter_port_update,
+    (odl_const.ODL_ROUTER, odl_const.ODL_UPDATE): _filter_router_update,
+}
+
+
+def filter_for_odl(object_type, operation, data):
+    """Filter out the attributed before sending the data to ODL"""
+    filter_key = (object_type, operation)
+    if filter_key in _FILTER_MAP:
+        _FILTER_MAP[filter_key](data)
diff --git a/networking-odl/networking_odl/common/lightweight_testing.py b/networking-odl/networking_odl/common/lightweight_testing.py
new file mode 100644 (file)
index 0000000..3d0cf2e
--- /dev/null
@@ -0,0 +1,177 @@
+# Copyright (c) 2015 Intel Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from copy import deepcopy
+import requests
+import six
+
+from oslo_log import log as logging
+from oslo_serialization import jsonutils
+
+from networking_odl._i18n import _
+from networking_odl.common import client
+from networking_odl.common import constants as odl_const
+
+
+LOG = logging.getLogger(__name__)
+
+OK = requests.codes.ok
+NO_CONTENT = requests.codes.no_content
+NOT_ALLOWED = requests.codes.not_allowed
+NOT_FOUND = requests.codes.not_found
+BAD_REQUEST = requests.codes.bad_request
+
+
+class OpenDaylightLwtClient(client.OpenDaylightRestClient):
+    """Lightweight testing client.
+
+    Serves the REST verbs (get/post/put/delete) from in-memory
+    dictionaries so driver code can run without a real ODL controller
+    (enabled via the 'enable_lightweight_testing' option).
+    """
+
+    # Class-level store mapping plural resource names to {id: resource}
+    # dicts; shared by every instance in the process.
+    lwt_dict = {odl_const.ODL_NETWORKS: {},
+                odl_const.ODL_SUBNETS: {},
+                odl_const.ODL_PORTS: {},
+                odl_const.ODL_SGS: {},
+                odl_const.ODL_SG_RULES: {},
+                odl_const.ODL_LOADBALANCERS: {},
+                odl_const.ODL_LISTENERS: {},
+                odl_const.ODL_POOLS: {},
+                odl_const.ODL_MEMBERS: {},
+                odl_const.ODL_HEALTHMONITORS: {}}
+
+    @classmethod
+    def _make_response(cls, status_code=OK, content=None):
+        """Only supports 'content-type': 'application/json'"""
+        response = requests.models.Response()
+        response.status_code = status_code
+        if content:
+            # requests reads the body from 'raw'; serialize the payload
+            # the way a real JSON response body would arrive on the wire.
+            response.raw = six.BytesIO(
+                jsonutils.dumps(content).encode('utf-8'))
+
+        return response
+
+    @classmethod
+    def _get_resource_id(cls, urlpath):
+        # resource ID is the last element of urlpath
+        return str(urlpath).rsplit('/', 1)[-1]
+
+    @classmethod
+    def post(cls, resource_type, resource_dict, urlpath, resource_list):
+        """No ID in URL, elements in resource_list must have ID"""
+
+        if resource_list is None:
+            raise ValueError(_("resource_list can not be None"))
+
+        for resource in resource_list:
+            # Creating an already-existing resource is an error.
+            if resource['id'] in resource_dict:
+                LOG.debug("%s %s already exists", resource_type,
+                          resource['id'])
+                response = cls._make_response(NOT_ALLOWED)
+                raise requests.exceptions.HTTPError(response=response)
+
+            # Deep copy so later caller-side mutation can't alter the store.
+            resource_dict[resource['id']] = deepcopy(resource)
+
+        return cls._make_response(NO_CONTENT)
+
+    @classmethod
+    def put(cls, resource_type, resource_dict, urlpath, resource_list):
+        """Update stored resources; the URL may carry the ID of one."""
+
+        resource_id = cls._get_resource_id(urlpath)
+
+        if resource_list is None:
+            raise ValueError(_("resource_list can not be None"))
+
+        # When the URL addresses one resource, exactly one update is allowed.
+        if resource_id and len(resource_list) != 1:
+            LOG.debug("Updating %s with multiple resources", urlpath)
+            response = cls._make_response(BAD_REQUEST)
+            raise requests.exceptions.HTTPError(response=response)
+
+        for resource in resource_list:
+            res_id = resource_id or resource['id']
+            if res_id in resource_dict:
+                resource_dict[res_id].update(deepcopy(resource))
+            else:
+                LOG.debug("%s %s does not exist", resource_type, res_id)
+                response = cls._make_response(NOT_FOUND)
+                raise requests.exceptions.HTTPError(response=response)
+
+        return cls._make_response(NO_CONTENT)
+
+    @classmethod
+    def delete(cls, resource_type, resource_dict, urlpath, resource_list):
+        """Delete by ID from the URL, or each entry of resource_list."""
+
+        if resource_list is None:
+            resource_id = cls._get_resource_id(urlpath)
+            id_list = [resource_id]
+        else:
+            id_list = [res['id'] for res in resource_list]
+
+        for res_id in id_list:
+            removed = resource_dict.pop(res_id, None)
+            if removed is None:
+                LOG.debug("%s %s does not exist", resource_type, res_id)
+                response = cls._make_response(NOT_FOUND)
+                raise requests.exceptions.HTTPError(response=response)
+
+        return cls._make_response(NO_CONTENT)
+
+    @classmethod
+    def get(cls, resource_type, resource_dict, urlpath, resource_list=None):
+        """Fetch one resource (ID in URL) or list the whole collection."""
+
+        resource_id = cls._get_resource_id(urlpath)
+
+        if resource_id:
+            resource = resource_dict.get(resource_id)
+            if resource is None:
+                LOG.debug("%s %s does not exist", resource_type, resource_id)
+                response = cls._make_response(NOT_FOUND)
+                raise requests.exceptions.HTTPError(response=response)
+            else:
+                # When getting single resource, return value is a dict
+                # keyed by the singular form of the collection name.
+                r_list = {resource_type[:-1]: deepcopy(resource)}
+                return cls._make_response(OK, r_list)
+
+        r_list = [{resource_type[:-1]: deepcopy(res)}
+                  for res in six.itervalues(resource_dict)]
+
+        return cls._make_response(OK, r_list)
+
+    def sendjson(self, method, urlpath, obj=None):
+        """Lightweight testing without ODL"""
+
+        # Guarantee a '/' so the collection name can always be split off.
+        if '/' not in urlpath:
+            urlpath += '/'
+
+        # First URL segment names the collection, e.g. 'security-groups'.
+        resource_type = str(urlpath).split('/', 1)[0]
+        resource_type = resource_type.replace('-', '_')
+
+        resource_dict = self.lwt_dict.get(resource_type)
+
+        if resource_dict is None:
+            LOG.debug("Resource type %s is not supported", resource_type)
+            response = self._make_response(NOT_FOUND)
+            raise requests.exceptions.HTTPError(response=response)
+
+        # Dispatch to the classmethod matching the HTTP verb.
+        func = getattr(self, str(method).lower())
+
+        resource_list = None
+        if obj:
+            """If obj is not None, it can only have one entry"""
+            assert len(obj) == 1, "Obj can only have one entry"
+
+            key, resource_list = list(obj.items())[0]
+
+            if not isinstance(resource_list, list):
+                # Need to transform resource_list to a real list, i.e. [res]
+                resource_list = [resource_list]
+
+        return func(resource_type, resource_dict, urlpath, resource_list)
diff --git a/networking-odl/networking_odl/common/utils.py b/networking-odl/networking_odl/common/utils.py
new file mode 100644 (file)
index 0000000..a01a14a
--- /dev/null
@@ -0,0 +1,60 @@
+# Copyright (c) 2014 Red Hat Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import collections
+import socket
+
+from oslo_log import log
+
+from networking_odl.common import cache
+
+LOG = log.getLogger(__name__)
+
+
+def try_del(d, keys):
+    """Ignore key errors when deleting from a dictionary."""
+    for key in keys:
+        try:
+            del d[key]
+        except KeyError:
+            pass
+
+
+def _fetch_all_addresses_by_hostnames(hostnames):
+    for name in hostnames:
+        # it uses an ordered dict to avoid duplicates and keep order
+        entries = collections.OrderedDict(
+            (info[4][0], None) for info in socket.getaddrinfo(name, None))
+        for entry in entries:
+            yield name, entry
+
+
+# Module-level cache shared by all callers of get_addresses_by_name().
+_addresses_by_name_cache = cache.Cache(_fetch_all_addresses_by_hostnames)
+
+
+def get_addresses_by_name(name, time_to_live=60.0):
+    """Gets and caches addresses for given name.
+
+    This is a cached wrapper for function 'socket.getaddrinfo'.
+
+    :returns: a sequence of unique addresses binded to given hostname.
+    """
+
+    try:
+        # The loop variable 'name' in the genexp below shadows the
+        # parameter; fetch_all yields (hostname, address) pairs.
+        results = _addresses_by_name_cache.fetch_all(
+            [name], timeout=time_to_live)
+        return tuple(address for name, address in results)
+    except cache.CacheFetchError as error:
+        # NOTE(review): if reraise_cause() ever returns instead of raising,
+        # this function falls through and returns None — confirm intended.
+        error.reraise_cause()
diff --git a/networking-odl/networking_odl/db/__init__.py b/networking-odl/networking_odl/db/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/networking-odl/networking_odl/db/db.py b/networking-odl/networking_odl/db/db.py
new file mode 100644 (file)
index 0000000..31f4ce2
--- /dev/null
@@ -0,0 +1,234 @@
+# Copyright (c) 2015 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+import datetime
+
+from sqlalchemy import asc
+from sqlalchemy import func
+from sqlalchemy import or_
+
+from networking_odl.common import constants as odl_const
+from networking_odl.db import models
+
+from neutron.db import api as db_api
+
+from oslo_db import api as oslo_db_api
+
+
+def check_for_pending_or_processing_ops(session, object_uuid, operation=None):
+    q = session.query(models.OpendaylightJournal).filter(
+        or_(models.OpendaylightJournal.state == odl_const.PENDING,
+            models.OpendaylightJournal.state == odl_const.PROCESSING),
+        models.OpendaylightJournal.object_uuid == object_uuid)
+    if operation:
+        if isinstance(operation, (list, tuple)):
+            q = q.filter(models.OpendaylightJournal.operation.in_(operation))
+        else:
+            q = q.filter(models.OpendaylightJournal.operation == operation)
+    return session.query(q.exists()).scalar()
+
+
+def check_for_pending_delete_ops_with_parent(session, object_type, parent_id):
+    rows = session.query(models.OpendaylightJournal).filter(
+        or_(models.OpendaylightJournal.state == odl_const.PENDING,
+            models.OpendaylightJournal.state == odl_const.PROCESSING),
+        models.OpendaylightJournal.object_type == object_type,
+        models.OpendaylightJournal.operation == odl_const.ODL_DELETE
+    ).all()
+
+    for row in rows:
+        if parent_id in row.data:
+            return True
+
+    return False
+
+
+def check_for_pending_or_processing_add(session, router_id, subnet_id):
+    rows = session.query(models.OpendaylightJournal).filter(
+        or_(models.OpendaylightJournal.state == odl_const.PENDING,
+            models.OpendaylightJournal.state == odl_const.PROCESSING),
+        models.OpendaylightJournal.object_type == odl_const.ODL_ROUTER_INTF,
+        models.OpendaylightJournal.operation == odl_const.ODL_ADD
+    ).all()
+
+    for row in rows:
+        if router_id in row.data.values() and subnet_id in row.data.values():
+            return True
+
+    return False
+
+
+def check_for_pending_remove_ops_with_parent(session, parent_id):
+    rows = session.query(models.OpendaylightJournal).filter(
+        or_(models.OpendaylightJournal.state == odl_const.PENDING,
+            models.OpendaylightJournal.state == odl_const.PROCESSING),
+        models.OpendaylightJournal.object_type == odl_const.ODL_ROUTER_INTF,
+        models.OpendaylightJournal.operation == odl_const.ODL_REMOVE
+    ).all()
+
+    for row in rows:
+        if parent_id in row.data.values():
+            return True
+
+    return False
+
+
+def check_for_older_ops(session, row):
+    q = session.query(models.OpendaylightJournal).filter(
+        or_(models.OpendaylightJournal.state == odl_const.PENDING,
+            models.OpendaylightJournal.state == odl_const.PROCESSING),
+        models.OpendaylightJournal.operation == row.operation,
+        models.OpendaylightJournal.object_uuid == row.object_uuid,
+        models.OpendaylightJournal.created_at < row.created_at,
+        models.OpendaylightJournal.id != row.id)
+    return session.query(q.exists()).scalar()
+
+
+def get_all_db_rows(session):
+    """Return every journal row, regardless of state."""
+    return session.query(models.OpendaylightJournal).all()
+
+
+def get_all_db_rows_by_state(session, state):
+    """Return all journal rows currently in the given state."""
+    return session.query(models.OpendaylightJournal).filter_by(
+        state=state).all()
+
+
+# Retry deadlock exception for Galera DB.
+# If two (or more) different threads call this method at the same time, they
+# might both succeed in changing the same row to pending, but at least one
+# of them will get a deadlock from Galera and will have to retry the operation.
+@db_api.retry_db_errors
+def get_oldest_pending_db_row_with_lock(session):
+    """Atomically claim the oldest pending journal row.
+
+    Selects the PENDING row with the oldest 'last_retried' under
+    SELECT ... FOR UPDATE and flips it to PROCESSING within the same
+    transaction, so concurrent threads cannot claim the same row.
+    Returns the claimed row, or None when nothing is pending.
+    """
+    with session.begin():
+        row = session.query(models.OpendaylightJournal).filter_by(
+            state=odl_const.PENDING).order_by(
+            asc(models.OpendaylightJournal.last_retried)).with_for_update(
+        ).first()
+        if row:
+            update_db_row_state(session, row, odl_const.PROCESSING)
+
+    return row
+
+
+@oslo_db_api.wrap_db_retry(max_retries=db_api.MAX_RETRIES,
+                           retry_on_request=True)
+def update_db_row_state(session, row, state):
+    """Set the journal row's state and flush it to the database."""
+    row.state = state
+    session.merge(row)
+    session.flush()
+
+
+def update_pending_db_row_retry(session, row, retry_count):
+    if row.retry_count >= retry_count:
+        update_db_row_state(session, row, odl_const.FAILED)
+    else:
+        row.retry_count += 1
+        update_db_row_state(session, row, odl_const.PENDING)
+
+
+# This function is currently not used.
+# Deleted resources are marked as 'deleted' in the database.
+@oslo_db_api.wrap_db_retry(max_retries=db_api.MAX_RETRIES,
+                           retry_on_request=True)
+def delete_row(session, row=None, row_id=None):
+    """Delete a journal row, given either the row object or its id.
+
+    When both are supplied, ``row_id`` wins: the row is re-fetched by id.
+    """
+    if row_id:
+        row = session.query(models.OpendaylightJournal).filter_by(
+            id=row_id).one()
+    if row:
+        session.delete(row)
+        session.flush()
+
+
+@oslo_db_api.wrap_db_retry(max_retries=db_api.MAX_RETRIES,
+                           retry_on_request=True)
+def create_pending_row(session, object_type, object_uuid,
+                       operation, data):
+    """Queue a new journal entry in the PENDING state."""
+    # created_at uses the database server clock, not the local one.
+    row = models.OpendaylightJournal(object_type=object_type,
+                                     object_uuid=object_uuid,
+                                     operation=operation, data=data,
+                                     created_at=func.now(),
+                                     state=odl_const.PENDING)
+    session.add(row)
+    # Keep session flush for unit tests. NOOP for L2/L3 events since calls are
+    # made inside database session transaction with subtransactions=True.
+    session.flush()
+
+
+@db_api.retry_db_errors
+def delete_pending_rows(session, operations_to_delete):
+    """Bulk-delete PENDING rows whose operation is in the given list."""
+    with session.begin():
+        session.query(models.OpendaylightJournal).filter(
+            models.OpendaylightJournal.operation.in_(operations_to_delete),
+            models.OpendaylightJournal.state == odl_const.PENDING).delete(
+            synchronize_session=False)
+        # Bulk delete bypasses the session; expire cached objects so
+        # stale rows are not served from the identity map afterwards.
+        session.expire_all()
+
+
+@db_api.retry_db_errors
+def _update_maintenance_state(session, expected_state, state):
+    """Compare-and-swap the maintenance row's state.
+
+    Returns True when a row in ``expected_state`` was found (and updated
+    under a row lock), False otherwise.
+    """
+    with session.begin():
+        row = session.query(models.OpendaylightMaintenance).filter_by(
+            state=expected_state).with_for_update().one_or_none()
+        if row is None:
+            return False
+
+        row.state = state
+        return True
+
+
+def lock_maintenance(session):
+    """Try to take the maintenance lock (PENDING -> PROCESSING)."""
+    return _update_maintenance_state(session, odl_const.PENDING,
+                                     odl_const.PROCESSING)
+
+
+def unlock_maintenance(session):
+    """Release the maintenance lock (PROCESSING -> PENDING)."""
+    return _update_maintenance_state(session, odl_const.PROCESSING,
+                                     odl_const.PENDING)
+
+
+def update_maintenance_operation(session, operation=None):
+    """Update the current maintenance operation details.
+
+    The function assumes the lock is held, so it mustn't be run outside of a
+    locked context.
+    """
+    op_text = None
+    if operation:
+        op_text = operation.__name__
+
+    with session.begin():
+        # NOTE(review): one_or_none() may return None if the maintenance
+        # row has not been seeded, which would raise AttributeError on the
+        # next line — confirm a row always exists before this runs.
+        row = session.query(models.OpendaylightMaintenance).one_or_none()
+        row.processing_operation = op_text
+
+
+def delete_rows_by_state_and_time(session, state, time_delta):
+    """Purge journal rows in ``state`` older than ``time_delta``.
+
+    Age is measured on 'last_retried' against the database server's
+    clock (func.now()), not the local clock.
+    """
+    with session.begin():
+        now = session.execute(func.now()).scalar()
+        session.query(models.OpendaylightJournal).filter(
+            models.OpendaylightJournal.state == state,
+            models.OpendaylightJournal.last_retried < now - time_delta).delete(
+            synchronize_session=False)
+        # Bulk delete bypasses the session; drop cached state.
+        session.expire_all()
+
+
+def reset_processing_rows(session, max_timedelta):
+    """Return PROCESSING rows stuck longer than ``max_timedelta`` seconds
+    back to PENDING.
+
+    :returns: the number of rows updated.
+    """
+    with session.begin():
+        now = session.execute(func.now()).scalar()
+        max_timedelta = datetime.timedelta(seconds=max_timedelta)
+        rows = session.query(models.OpendaylightJournal).filter(
+            models.OpendaylightJournal.last_retried < now - max_timedelta,
+            models.OpendaylightJournal.state == odl_const.PROCESSING,
+            ).update({'state': odl_const.PENDING})
+
+    return rows
diff --git a/networking-odl/networking_odl/db/migration/__init__.py b/networking-odl/networking_odl/db/migration/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/networking-odl/networking_odl/db/migration/alembic_migrations/README b/networking-odl/networking_odl/db/migration/alembic_migrations/README
new file mode 100644 (file)
index 0000000..5d89e57
--- /dev/null
@@ -0,0 +1 @@
+This directory contains the migration scripts for the networking_odl project.
diff --git a/networking-odl/networking_odl/db/migration/alembic_migrations/__init__.py b/networking-odl/networking_odl/db/migration/alembic_migrations/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/networking-odl/networking_odl/db/migration/alembic_migrations/env.py b/networking-odl/networking_odl/db/migration/alembic_migrations/env.py
new file mode 100644 (file)
index 0000000..9405ae0
--- /dev/null
@@ -0,0 +1,99 @@
+# Copyright 2015 OpenStack Foundation
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+#
+
+from logging import config as logging_config
+
+from alembic import context
+from oslo_config import cfg
+from oslo_db.sqlalchemy import session
+import sqlalchemy as sa
+from sqlalchemy import event
+
+from neutron.db.migration.alembic_migrations import external
+from neutron.db.migration.models import head  # noqa
+from neutron.db import model_base
+
+MYSQL_ENGINE = None
+# Separate alembic version table so networking-odl migrations do not
+# collide with neutron's own alembic bookkeeping.
+ODL_VERSION_TABLE = 'odl_alembic_version'
+config = context.config
+neutron_config = config.neutron_config
+logging_config.fileConfig(config.config_file_name)
+target_metadata = model_base.BASEV2.metadata
+
+
+def set_mysql_engine():
+    try:
+        mysql_engine = neutron_config.command.mysql_engine
+    except cfg.NoSuchOptError:
+        mysql_engine = None
+
+    global MYSQL_ENGINE
+    MYSQL_ENGINE = (mysql_engine or
+                    model_base.BASEV2.__table_args__['mysql_engine'])
+
+
+def include_object(object, name, type_, reflected, compare_to):
+    if type_ == 'table' and name in external.TABLES:
+        return False
+    else:
+        return True
+
+
+def run_migrations_offline():
+    """Run migrations in 'offline' mode.
+
+    Configures the context with a URL (or a bare dialect name when no
+    connection is configured) so migration SQL is emitted without a
+    live DBAPI connection.
+    """
+    set_mysql_engine()
+
+    kwargs = dict()
+    if neutron_config.database.connection:
+        kwargs['url'] = neutron_config.database.connection
+    else:
+        kwargs['dialect_name'] = neutron_config.database.engine
+    kwargs['include_object'] = include_object
+    kwargs['version_table'] = ODL_VERSION_TABLE
+    context.configure(**kwargs)
+
+    with context.begin_transaction():
+        context.run_migrations()
+
+
+@event.listens_for(sa.Table, 'after_parent_attach')
+def set_storage_engine(target, parent):
+    """Apply the configured MySQL engine to every table as it is defined."""
+    if MYSQL_ENGINE:
+        target.kwargs['mysql_engine'] = MYSQL_ENGINE
+
+
+def run_migrations_online():
+    """Run migrations in 'online' mode against a live database connection."""
+    set_mysql_engine()
+    engine = session.create_engine(neutron_config.database.connection)
+
+    connection = engine.connect()
+    context.configure(
+        connection=connection,
+        target_metadata=target_metadata,
+        include_object=include_object,
+        version_table=ODL_VERSION_TABLE
+    )
+
+    try:
+        with context.begin_transaction():
+            context.run_migrations()
+    finally:
+        # Always release the connection and pool, even on failure.
+        connection.close()
+        engine.dispose()
+
+
+# Alembic executes this module directly; dispatch on the requested mode.
+if context.is_offline_mode():
+    run_migrations_offline()
+else:
+    run_migrations_online()
diff --git a/networking-odl/networking_odl/db/migration/alembic_migrations/script.py.mako b/networking-odl/networking_odl/db/migration/alembic_migrations/script.py.mako
new file mode 100644 (file)
index 0000000..9e0b2ce
--- /dev/null
@@ -0,0 +1,36 @@
+# Copyright ${create_date.year} <PUT YOUR NAME/COMPANY HERE>
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+#
+
+"""${message}
+
+Revision ID: ${up_revision}
+Revises: ${down_revision}
+Create Date: ${create_date}
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = ${repr(up_revision)}
+down_revision = ${repr(down_revision)}
+% if branch_labels:
+branch_labels = ${repr(branch_labels)}
+%endif
+
+from alembic import op
+import sqlalchemy as sa
+${imports if imports else ""}
+
+def upgrade():
+    ${upgrades if upgrades else "pass"}
diff --git a/networking-odl/networking_odl/db/migration/alembic_migrations/versions/CONTRACT_HEAD b/networking-odl/networking_odl/db/migration/alembic_migrations/versions/CONTRACT_HEAD
new file mode 100644 (file)
index 0000000..b7dbc31
--- /dev/null
@@ -0,0 +1 @@
+383acb0d38a0
diff --git a/networking-odl/networking_odl/db/migration/alembic_migrations/versions/EXPAND_HEAD b/networking-odl/networking_odl/db/migration/alembic_migrations/versions/EXPAND_HEAD
new file mode 100644 (file)
index 0000000..34912ba
--- /dev/null
@@ -0,0 +1 @@
+703dbf02afde
diff --git a/networking-odl/networking_odl/db/migration/alembic_migrations/versions/b89a299e19f9_initial_branchpoint.py b/networking-odl/networking_odl/db/migration/alembic_migrations/versions/b89a299e19f9_initial_branchpoint.py
new file mode 100644 (file)
index 0000000..d80815d
--- /dev/null
@@ -0,0 +1,28 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+#
+
+"""Initial odl db, branchpoint
+
+Revision ID: b89a299e19f9
+Revises: None
+Create Date: 2015-09-03 22:22:22.222222
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = 'b89a299e19f9'
+down_revision = None
+
+
+def upgrade():
+    # Initial branch point only — no schema changes.
+    pass
diff --git a/networking-odl/networking_odl/db/migration/alembic_migrations/versions/mitaka/contract/383acb0d38a0_initial_contract.py b/networking-odl/networking_odl/db/migration/alembic_migrations/versions/mitaka/contract/383acb0d38a0_initial_contract.py
new file mode 100644 (file)
index 0000000..43959c0
--- /dev/null
@@ -0,0 +1,36 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+#
+
+"""Start of odl contract branch
+
+Revision ID: 383acb0d38a0
+Revises: b89a299e19f9
+Create Date: 2015-09-03 22:27:49.306394
+
+"""
+
+from neutron.db import migration
+from neutron.db.migration import cli
+
+
+# revision identifiers, used by Alembic.
+revision = '383acb0d38a0'
+down_revision = 'b89a299e19f9'
+branch_labels = (cli.CONTRACT_BRANCH,)
+
+# milestone identifier, used by neutron-db-manage
+neutron_milestone = [migration.MITAKA]
+
+
+def upgrade():
+    pass
diff --git a/networking-odl/networking_odl/db/migration/alembic_migrations/versions/mitaka/expand/247501328046_initial_expand.py b/networking-odl/networking_odl/db/migration/alembic_migrations/versions/mitaka/expand/247501328046_initial_expand.py
new file mode 100644 (file)
index 0000000..71d24b3
--- /dev/null
@@ -0,0 +1,32 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+#
+
+"""Start of odl expand branch
+
+Revision ID: 247501328046
+Revises: b89a299e19f9
+Create Date: 2015-09-03 22:27:49.292238
+
+"""
+
+from neutron.db.migration import cli
+
+
+# revision identifiers, used by Alembic.
+revision = '247501328046'
+down_revision = 'b89a299e19f9'
+branch_labels = (cli.EXPAND_BRANCH,)
+
+
+def upgrade():
+    pass
diff --git a/networking-odl/networking_odl/db/migration/alembic_migrations/versions/mitaka/expand/37e242787ae5_opendaylight_neutron_mechanism_driver_.py b/networking-odl/networking_odl/db/migration/alembic_migrations/versions/mitaka/expand/37e242787ae5_opendaylight_neutron_mechanism_driver_.py
new file mode 100644 (file)
index 0000000..71d8273
--- /dev/null
@@ -0,0 +1,54 @@
+# Copyright (c) 2015 OpenStack Foundation
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+#
+
+"""Opendaylight Neutron mechanism driver refactor
+
+Revision ID: 37e242787ae5
+Revises: 247501328046
+Create Date: 2015-10-30 22:09:27.221767
+
+"""
+
+from alembic import op
+import sqlalchemy as sa
+
+from neutron.db import migration
+
+
+# revision identifiers, used by Alembic.
+revision = '37e242787ae5'
+down_revision = '247501328046'
+
+# milestone identifier, used by neutron-db-manage
+neutron_milestone = [migration.MITAKA]
+
+
+def upgrade():
+    op.create_table(
+        'opendaylightjournal',
+        sa.Column('id', sa.String(36), primary_key=True),
+        sa.Column('object_type', sa.String(36), nullable=False),
+        sa.Column('object_uuid', sa.String(36), nullable=False),
+        sa.Column('operation', sa.String(36), nullable=False),
+        sa.Column('data', sa.PickleType, nullable=True),
+        sa.Column('state',
+                  sa.Enum('pending', 'processing', 'failed', 'completed',
+                          name='state'),
+                  nullable=False, default='pending'),
+        sa.Column('retry_count', sa.Integer, default=0),
+        sa.Column('created_at', sa.DateTime, default=sa.func.now()),
+        sa.Column('last_retried', sa.TIMESTAMP, server_default=sa.func.now(),
+                  onupdate=sa.func.now())
+    )
diff --git a/networking-odl/networking_odl/db/migration/alembic_migrations/versions/newton/expand/703dbf02afde_add_journal_maintenance_table.py b/networking-odl/networking_odl/db/migration/alembic_migrations/versions/newton/expand/703dbf02afde_add_journal_maintenance_table.py
new file mode 100644 (file)
index 0000000..bbe0c46
--- /dev/null
@@ -0,0 +1,52 @@
+# Copyright 2016 Red Hat Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+#
+
+"""Add journal maintenance table
+
+Revision ID: 703dbf02afde
+Revises: 37e242787ae5
+Create Date: 2016-04-12 10:49:31.802663
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '703dbf02afde'
+down_revision = '37e242787ae5'
+
+from alembic import op
+from oslo_utils import uuidutils
+import sqlalchemy as sa
+
+from networking_odl.common import constants as odl_const
+
+
+def upgrade():
+    maint_table = op.create_table(
+        'opendaylight_maintenance',
+        sa.Column('id', sa.String(36), primary_key=True),
+        sa.Column('state', sa.Enum(odl_const.PENDING, odl_const.PROCESSING,
+                                   name='state'),
+                  nullable=False),
+        sa.Column('processing_operation', sa.String(70)),
+        sa.Column('lock_updated', sa.TIMESTAMP, nullable=False,
+                  server_default=sa.func.now(),
+                  onupdate=sa.func.now())
+    )
+
+    # Insert the only row here that is used to synchronize the lock between
+    # different Neutron processes.
+    op.bulk_insert(maint_table,
+                   [{'id': uuidutils.generate_uuid(),
+                     'state': odl_const.PENDING}])
diff --git a/networking-odl/networking_odl/db/models.py b/networking-odl/networking_odl/db/models.py
new file mode 100644 (file)
index 0000000..0416ed1
--- /dev/null
@@ -0,0 +1,47 @@
+# Copyright (c) 2015 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import sqlalchemy as sa
+
+from networking_odl.common import constants as odl_const
+from neutron.db import model_base
+from neutron.db.models_v2 import HasId
+
+
+class OpendaylightJournal(model_base.BASEV2, HasId):
+    __tablename__ = 'opendaylightjournal'
+
+    object_type = sa.Column(sa.String(36), nullable=False)
+    object_uuid = sa.Column(sa.String(36), nullable=False)
+    operation = sa.Column(sa.String(36), nullable=False)
+    data = sa.Column(sa.PickleType, nullable=True)
+    state = sa.Column(sa.Enum(odl_const.PENDING, odl_const.FAILED,
+                              odl_const.PROCESSING, odl_const.COMPLETED),
+                      nullable=False, default=odl_const.PENDING)
+    retry_count = sa.Column(sa.Integer, default=0)
+    created_at = sa.Column(sa.DateTime, server_default=sa.func.now())
+    last_retried = sa.Column(sa.TIMESTAMP, server_default=sa.func.now(),
+                             onupdate=sa.func.now())
+
+
+class OpendaylightMaintenance(model_base.BASEV2, HasId):
+    __tablename__ = 'opendaylight_maintenance'
+
+    state = sa.Column(sa.Enum(odl_const.PENDING, odl_const.PROCESSING),
+                      nullable=False)
+    processing_operation = sa.Column(sa.String(70))
+    lock_updated = sa.Column(sa.TIMESTAMP, nullable=False,
+                             server_default=sa.func.now(),
+                             onupdate=sa.func.now())
diff --git a/networking-odl/networking_odl/fwaas/__init__.py b/networking-odl/networking_odl/fwaas/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/networking-odl/networking_odl/fwaas/driver.py b/networking-odl/networking_odl/fwaas/driver.py
new file mode 100644 (file)
index 0000000..a9de4f2
--- /dev/null
@@ -0,0 +1,69 @@
+#
+# Copyright (C) 2013 Red Hat, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License. You may obtain
+#  a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#  License for the specific language governing permissions and limitations
+#  under the License.
+#
+
+from oslo_log import log as logging
+
+from neutron_fwaas.services.firewall.drivers import fwaas_base
+
+from networking_odl.common import client as odl_client
+from networking_odl.common import config  # noqa
+
+LOG = logging.getLogger(__name__)
+
+
+class OpenDaylightFwaasDriver(fwaas_base.FwaasDriverBase):
+
+    """OpenDaylight FWaaS Driver
+
+    This code is the backend implementation for the OpenDaylight FWaaS
+    driver for OpenStack Neutron.
+    """
+
+    def __init__(self):
+        LOG.debug("Initializing OpenDaylight FWaaS driver")
+        self.client = odl_client.OpenDaylightRestClient.create_client()
+
+    def create_firewall(self, apply_list, firewall):
+        """Create the Firewall with default (drop all) policy.
+
+        The default policy will be applied on all the interfaces of
+        trusted zone.
+        """
+        pass
+
+    def delete_firewall(self, apply_list, firewall):
+        """Delete firewall.
+
+        Removes all policies created by this instance and frees up
+        all the resources.
+        """
+        pass
+
+    def update_firewall(self, apply_list, firewall):
+        """Apply the policy on all trusted interfaces.
+
+        Remove previous policy and apply the new policy on all trusted
+        interfaces.
+        """
+        pass
+
+    def apply_default_policy(self, apply_list, firewall):
+        """Apply the default policy on all trusted interfaces.
+
+        Remove current policy and apply the default policy on all trusted
+        interfaces.
+        """
+        pass
diff --git a/networking-odl/networking_odl/journal/__init__.py b/networking-odl/networking_odl/journal/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/networking-odl/networking_odl/journal/cleanup.py b/networking-odl/networking_odl/journal/cleanup.py
new file mode 100644 (file)
index 0000000..994fb82
--- /dev/null
@@ -0,0 +1,55 @@
+#
+# Copyright (C) 2016 Red Hat, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License. You may obtain
+#  a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#  License for the specific language governing permissions and limitations
+#  under the License.
+#
+
+from datetime import timedelta
+
+from oslo_config import cfg
+from oslo_log import log as logging
+
+from networking_odl._i18n import _LI
+from networking_odl.common import constants as odl_const
+from networking_odl.db import db
+
+LOG = logging.getLogger(__name__)
+
+
+class JournalCleanup(object):
+    """Journal maintenance operation for deleting completed rows."""
+    def __init__(self):
+        # Retention period (seconds) for rows in the completed state;
+        # -1 disables deletion so completed rows are kept forever.
+        self._rows_retention = cfg.CONF.ml2_odl.completed_rows_retention
+        # Rows stuck in the processing state longer than this many seconds
+        # are considered orphaned and reset back to pending.
+        self._processing_timeout = cfg.CONF.ml2_odl.processing_timeout
+
+    def delete_completed_rows(self, session):
+        """Delete completed journal rows older than the retention period."""
+        # NOTE: use != rather than "is not" -- identity comparison with an
+        # int literal only works by accident of CPython small-int interning
+        # and raises SyntaxWarning on modern Python.
+        if self._rows_retention != -1:
+            LOG.debug("Deleting completed rows")
+            db.delete_rows_by_state_and_time(
+                session, odl_const.COMPLETED,
+                timedelta(seconds=self._rows_retention))
+
+    def cleanup_processing_rows(self, session):
+        """Reset orphaned processing rows back to the pending state."""
+        row_count = db.reset_processing_rows(session, self._processing_timeout)
+        if row_count:
+            LOG.info(_LI("Reset %(num)s orphaned rows back to pending"),
+                     {"num": row_count})
diff --git a/networking-odl/networking_odl/journal/dependency_validations.py b/networking-odl/networking_odl/journal/dependency_validations.py
new file mode 100644 (file)
index 0000000..a6f5f96
--- /dev/null
@@ -0,0 +1,267 @@
+# Copyright (c) 2015 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from networking_odl.common import constants as odl_const
+from networking_odl.db import db
+
+
+def _is_valid_update_operation(session, row):
+    # Check if there are older updates in the queue
+    if db.check_for_older_ops(session, row):
+        return False
+
+    # Check for a pending or processing create operation on this uuid
+    if db.check_for_pending_or_processing_ops(
+            session, row.object_uuid, odl_const.ODL_CREATE):
+        return False
+    return True
+
+
+def validate_network_operation(session, row):
+    """Validate the network operation based on dependencies.
+
+    Validate network operation depending on whether it's dependencies
+    are still in 'pending' or 'processing' state. e.g.
+    """
+    if row.operation == odl_const.ODL_DELETE:
+        # Check for any pending or processing create or update
+        # ops on this uuid itself
+        if db.check_for_pending_or_processing_ops(
+            session, row.object_uuid, [odl_const.ODL_UPDATE,
+                                       odl_const.ODL_CREATE]):
+            return False
+        # Check for dependent operations
+        if db.check_for_pending_delete_ops_with_parent(
+            session, odl_const.ODL_SUBNET, row.object_uuid):
+            return False
+        if db.check_for_pending_delete_ops_with_parent(
+            session, odl_const.ODL_PORT, row.object_uuid):
+            return False
+        if db.check_for_pending_delete_ops_with_parent(
+            session, odl_const.ODL_ROUTER, row.object_uuid):
+            return False
+    elif (row.operation == odl_const.ODL_UPDATE and
+            not _is_valid_update_operation(session, row)):
+        return False
+    return True
+
+
+def validate_subnet_operation(session, row):
+    """Validate the subnet operation based on dependencies.
+
+    Validate subnet operation depending on whether it's dependencies
+    are still in 'pending' or 'processing' state. e.g.
+    """
+    if row.operation in (odl_const.ODL_CREATE, odl_const.ODL_UPDATE):
+        network_id = row.data['network_id']
+        # Check for pending or processing network operations
+        if db.check_for_pending_or_processing_ops(session, network_id):
+            return False
+        if (row.operation == odl_const.ODL_UPDATE and
+                not _is_valid_update_operation(session, row)):
+            return False
+    elif row.operation == odl_const.ODL_DELETE:
+        # Check for any pending or processing create or update
+        # ops on this uuid itself
+        if db.check_for_pending_or_processing_ops(
+            session, row.object_uuid, [odl_const.ODL_UPDATE,
+                                       odl_const.ODL_CREATE]):
+            return False
+        # Check for dependent operations
+        if db.check_for_pending_delete_ops_with_parent(
+            session, odl_const.ODL_PORT, row.object_uuid):
+            return False
+
+    return True
+
+
+def validate_port_operation(session, row):
+    """Validate port operation based on dependencies.
+
+    Validate port operation depending on whether it's dependencies
+    are still in 'pending' or 'processing' state. e.g.
+    """
+    if row.operation in (odl_const.ODL_CREATE, odl_const.ODL_UPDATE):
+        network_id = row.data['network_id']
+        # Check for pending or processing network operations
+        ops = db.check_for_pending_or_processing_ops(session, network_id)
+        # Check for pending subnet operations.
+        for fixed_ip in row.data['fixed_ips']:
+            ip_ops = db.check_for_pending_or_processing_ops(
+                session, fixed_ip['subnet_id'])
+            ops = ops or ip_ops
+
+        if ops:
+            return False
+        if (row.operation == odl_const.ODL_UPDATE and
+                not _is_valid_update_operation(session, row)):
+            return False
+    elif row.operation == odl_const.ODL_DELETE:
+        # Check for any pending or processing create or update
+        # ops on this uuid itself
+        if db.check_for_pending_or_processing_ops(
+            session, row.object_uuid, [odl_const.ODL_UPDATE,
+                                       odl_const.ODL_CREATE]):
+            return False
+
+    return True
+
+
+def validate_router_operation(session, row):
+    """Validate router operation based on dependencies.
+
+    Validate router operation depending on whether it's dependencies
+    are still in 'pending' or 'processing' state.
+    """
+    if row.operation in (odl_const.ODL_CREATE, odl_const.ODL_UPDATE):
+        if row.data['gw_port_id'] is not None:
+            if db.check_for_pending_or_processing_ops(session,
+                                                      row.data['gw_port_id']):
+                return False
+        if (row.operation == odl_const.ODL_UPDATE and
+                not _is_valid_update_operation(session, row)):
+            return False
+    elif row.operation == odl_const.ODL_DELETE:
+        # Check for any pending or processing create or update
+        # operations on this uuid.
+        if db.check_for_pending_or_processing_ops(session, row.object_uuid,
+                                                  [odl_const.ODL_UPDATE,
+                                                   odl_const.ODL_CREATE]):
+            return False
+
+        # Check that dependent port delete operation has completed.
+        if db.check_for_pending_delete_ops_with_parent(
+            session, odl_const.ODL_PORT, row.object_uuid):
+            return False
+
+        # Check that dependent floatingip delete operation has completed.
+        if db.check_for_pending_delete_ops_with_parent(
+                session, odl_const.ODL_FLOATINGIP, row.object_uuid):
+            return False
+
+        # Check that dependent router interface remove operation has completed.
+        if db.check_for_pending_remove_ops_with_parent(
+                session, row.object_uuid):
+            return False
+
+    return True
+
+
+def validate_floatingip_operation(session, row):
+    """Validate floatingip operation based on dependencies.
+
+    Validate floating IP operation depending on whether it's dependencies
+    are still in 'pending' or 'processing' state.
+    """
+    if row.operation in (odl_const.ODL_CREATE, odl_const.ODL_UPDATE):
+        network_id = row.data.get('floating_network_id')
+        if network_id is not None:
+            if not db.check_for_pending_or_processing_ops(session, network_id):
+                port_id = row.data.get('port_id')
+                if port_id is not None:
+                    if db.check_for_pending_or_processing_ops(session,
+                                                              port_id):
+                        return False
+            else:
+                return False
+
+        router_id = row.data.get('router_id')
+        if router_id is not None:
+            if db.check_for_pending_or_processing_ops(session, router_id):
+                return False
+        if (row.operation == odl_const.ODL_UPDATE and
+                not _is_valid_update_operation(session, row)):
+            return False
+    elif row.operation == odl_const.ODL_DELETE:
+        # Check for any pending or processing create or update
+        # ops on this uuid itself
+        if db.check_for_pending_or_processing_ops(session, row.object_uuid,
+                                                  [odl_const.ODL_UPDATE,
+                                                   odl_const.ODL_CREATE]):
+            return False
+
+    return True
+
+
+def validate_router_interface_operation(session, row):
+    """Validate router_interface operation based on dependencies.
+
+    Validate router_interface operation depending on whether it's dependencies
+    are still in 'pending' or 'processing' state.
+    """
+    if row.operation == odl_const.ODL_ADD:
+        # Verify that router event has been completed.
+        if db.check_for_pending_or_processing_ops(session, row.data['id']):
+            return False
+
+        # TODO(rcurran): Check for port_id?
+        if db.check_for_pending_or_processing_ops(session,
+                                                  row.data['subnet_id']):
+            return False
+    elif row.operation == odl_const.ODL_REMOVE:
+        if db.check_for_pending_or_processing_add(session, row.data['id'],
+                                                  row.data['subnet_id']):
+            return False
+
+    return True
+
+
+def validate_security_group_operation(session, row):
+    """Validate security_group operation based on dependencies.
+
+    Validate security_group operation depending on whether it's dependencies
+    are still in 'pending' or 'processing' state. e.g.
+    """
+    return True
+
+
+def validate_security_group_rule_operation(session, row):
+    """Validate security_group_rule operation based on dependencies.
+
+    Validate security_group_rule operation depending on whether it's
+    dependencies are still in 'pending' or 'processing' state. e.g.
+    """
+    return True
+
+_VALIDATION_MAP = {
+    odl_const.ODL_NETWORK: validate_network_operation,
+    odl_const.ODL_SUBNET: validate_subnet_operation,
+    odl_const.ODL_PORT: validate_port_operation,
+    odl_const.ODL_ROUTER: validate_router_operation,
+    odl_const.ODL_ROUTER_INTF: validate_router_interface_operation,
+    odl_const.ODL_FLOATINGIP: validate_floatingip_operation,
+    odl_const.ODL_SG: validate_security_group_operation,
+    odl_const.ODL_SG_RULE: validate_security_group_rule_operation,
+}
+
+
+def validate(session, row):
+    """Validate resource dependency in journaled operations.
+
+    :param session: db session
+    :param row: entry in journal entry to be validated
+    """
+    return _VALIDATION_MAP[row.object_type](session, row)
+
+
+def register_validator(object_type, validator):
+    """Register validator function for given resource.
+
+    :param object_type: neutron resource type
+    :param validator: function to be registered which validates resource
+         dependencies
+    """
+    assert object_type not in _VALIDATION_MAP
+    _VALIDATION_MAP[object_type] = validator
diff --git a/networking-odl/networking_odl/journal/full_sync.py b/networking-odl/networking_odl/journal/full_sync.py
new file mode 100644 (file)
index 0000000..dad7215
--- /dev/null
@@ -0,0 +1,114 @@
+#
+# Copyright (C) 2016 Red Hat, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License. You may obtain
+#  a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#  License for the specific language governing permissions and limitations
+#  under the License.
+#
+
+import requests
+
+from neutron import context as neutron_context
+from neutron import manager
+from neutron.plugins.common import constants
+from neutron_lib import constants as l3_constants
+
+from networking_odl.common import client
+from networking_odl.common import constants as odl_const
+from networking_odl.db import db
+
+# Define which pending operation types should be deleted
+_CANARY_NETWORK_ID = "bd8db3a8-2b30-4083-a8b3-b3fd46401142"
+_CANARY_TENANT_ID = "bd8db3a8-2b30-4083-a8b3-b3fd46401142"
+_CANARY_NETWORK_DATA = {'id': _CANARY_NETWORK_ID,
+                        'tenant_id': _CANARY_TENANT_ID,
+                        'name': 'Sync Canary Network',
+                        'admin_state_up': False}
+_OPS_TO_DELETE_ON_SYNC = (odl_const.ODL_CREATE, odl_const.ODL_UPDATE)
+_L2_RESOURCES_TO_SYNC = [(odl_const.ODL_SG, odl_const.ODL_SGS),
+                         (odl_const.ODL_SG_RULE, odl_const.ODL_SG_RULES),
+                         (odl_const.ODL_NETWORK, odl_const.ODL_NETWORKS),
+                         (odl_const.ODL_SUBNET, odl_const.ODL_SUBNETS),
+                         (odl_const.ODL_PORT, odl_const.ODL_PORTS)]
+_L3_RESOURCES_TO_SYNC = [(odl_const.ODL_ROUTER, odl_const.ODL_ROUTERS),
+                         (odl_const.ODL_FLOATINGIP, odl_const.ODL_FLOATINGIPS)]
+_CLIENT = client.OpenDaylightRestClient.create_client()
+
+
+def full_sync(session):
+    if not _full_sync_needed(session):
+        return
+
+    db.delete_pending_rows(session, _OPS_TO_DELETE_ON_SYNC)
+
+    dbcontext = neutron_context.get_admin_context()
+    plugin = manager.NeutronManager.get_plugin()
+    for resource_type, collection_name in _L2_RESOURCES_TO_SYNC:
+        _sync_resources(session, plugin, dbcontext, resource_type,
+                        collection_name)
+
+    l3plugin = manager.NeutronManager.get_service_plugins().get(
+        constants.L3_ROUTER_NAT)
+    for resource_type, collection_name in _L3_RESOURCES_TO_SYNC:
+        _sync_resources(session, l3plugin, dbcontext, resource_type,
+                        collection_name)
+    _sync_router_ports(session, plugin, dbcontext)
+
+    db.create_pending_row(session, odl_const.ODL_NETWORK, _CANARY_NETWORK_ID,
+                          odl_const.ODL_CREATE, _CANARY_NETWORK_DATA)
+
+
+def _full_sync_needed(session):
+    return (_canary_network_missing_on_odl() and
+            _canary_network_not_in_journal(session))
+
+
+def _canary_network_missing_on_odl():
+    # Try to reach the ODL server, sometimes it might be up & responding to
+    # HTTP calls but inoperative..
+    response = _CLIENT.get(odl_const.ODL_NETWORKS)
+    response.raise_for_status()
+
+    response = _CLIENT.get(odl_const.ODL_NETWORKS + "/" + _CANARY_NETWORK_ID)
+    if response.status_code == requests.codes.not_found:
+        return True
+
+    # In case there was an error raise it up because we don't know how to deal
+    # with it..
+    response.raise_for_status()
+    return False
+
+
+def _canary_network_not_in_journal(session):
+    return not db.check_for_pending_or_processing_ops(session,
+                                                      _CANARY_NETWORK_ID,
+                                                      odl_const.ODL_CREATE)
+
+
+def _sync_resources(session, plugin, dbcontext, object_type, collection_name):
+    obj_getter = getattr(plugin, 'get_%s' % collection_name)
+    resources = obj_getter(dbcontext)
+
+    for resource in resources:
+        db.create_pending_row(session, object_type, resource['id'],
+                              odl_const.ODL_CREATE, resource)
+
+
+def _sync_router_ports(session, plugin, dbcontext):
+    filters = {'device_owner': [l3_constants.DEVICE_OWNER_ROUTER_INTF]}
+    router_ports = plugin.get_ports(dbcontext, filters=filters)
+    for port in router_ports:
+        resource = {'subnet_id': port['fixed_ips'][0]['subnet_id'],
+                    'port_id': port['id'],
+                    'id': port['device_id'],
+                    'tenant_id': port['tenant_id']}
+        db.create_pending_row(session, odl_const.ODL_ROUTER_INTF, port['id'],
+                              odl_const.ODL_ADD, resource)
diff --git a/networking-odl/networking_odl/journal/journal.py b/networking-odl/networking_odl/journal/journal.py
new file mode 100644 (file)
index 0000000..ca0d2c2
--- /dev/null
@@ -0,0 +1,220 @@
+# Copyright (c) 2015 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+import threading
+
+from requests import exceptions
+
+from oslo_config import cfg
+from oslo_log import log as logging
+
+from neutron import context as neutron_context
+from neutron.db import api as neutron_db_api
+from neutron import manager
+
+from networking_odl.common import client
+from networking_odl.common import constants as odl_const
+from networking_odl.common import filters
+from networking_odl._i18n import _LI, _LE
+from networking_odl.db import db
+from networking_odl.journal import dependency_validations
+
+
+LOG = logging.getLogger(__name__)
+
+
+def call_thread_on_end(func):
+    def new_func(obj, *args, **kwargs):
+        return_value = func(obj, *args, **kwargs)
+        obj.journal.set_sync_event()
+        return return_value
+    return new_func
+
+
+def _enrich_port(db_session, context, object_type, operation, data):
+    """Enrich the port with additional information needed by ODL.
+
+    Replaces the port's security group IDs with the full security group
+    dicts and, when the tenant_id is absent or empty, fills it in from the
+    port's network.  Returns a deep copy; ``data`` itself is not modified.
+
+    NOTE(review): db_session, object_type and operation are currently
+    unused by this function.
+    """
+    # Use the ML2 port context's plugin/db-context when available, else fall
+    # back to an admin context and the core plugin.
+    if context:
+        plugin = context._plugin
+        dbcontext = context._plugin_context
+    else:
+        dbcontext = neutron_context.get_admin_context()
+        plugin = manager.NeutronManager.get_plugin()
+
+    groups = [plugin.get_security_group(dbcontext, sg)
+              for sg in data['security_groups']]
+    new_data = copy.deepcopy(data)
+    new_data['security_groups'] = groups
+
+    # NOTE(yamahata): work around for port creation for router
+    # tenant_id=''(empty string) is passed when port is created
+    # by l3 plugin internally for router.
+    # On the other hand, ODL doesn't accept empty string for tenant_id.
+    # In that case, deduce tenant_id from network_id for now.
+    # Right fix: modify Neutron so that don't allow empty string
+    # for tenant_id even for port for internal use.
+    # TODO(yamahata): eliminate this work around when neutron side
+    # is fixed
+    # assert port['tenant_id'] != ''
+    if ('tenant_id' not in new_data or new_data['tenant_id'] == ''):
+        if context:
+            tenant_id = context._network_context._network['tenant_id']
+        else:
+            network = plugin.get_network(dbcontext, new_data['network_id'])
+            tenant_id = network['tenant_id']
+        new_data['tenant_id'] = tenant_id
+
+    return new_data
+
+
+def record(db_session, object_type, object_uuid, operation, data,
+           context=None):
+    if (object_type == odl_const.ODL_PORT and
+            operation in (odl_const.ODL_CREATE, odl_const.ODL_UPDATE)):
+        data = _enrich_port(db_session, context, object_type, operation, data)
+
+    db.create_pending_row(db_session, object_type, object_uuid, operation,
+                          data)
+
+
+class OpendaylightJournalThread(object):
+    """Thread worker for the Opendaylight Journal Database.
+
+    Owns a background thread that drains the journal table: each pending
+    row is validated against its dependencies and then replayed to the ODL
+    controller over REST.  A self re-arming timer wakes the thread at least
+    every sync_timeout seconds so rows left behind (e.g. after a connection
+    error) get retried.
+    """
+    def __init__(self):
+        self.client = client.OpenDaylightRestClient.create_client()
+        # Timer interval and per-row retry budget come from [ml2_odl] config.
+        self._odl_sync_timeout = cfg.CONF.ml2_odl.sync_timeout
+        self._row_retry_count = cfg.CONF.ml2_odl.retry_count
+        self.event = threading.Event()
+        self.lock = threading.Lock()
+        self._odl_sync_thread = self.start_odl_sync_thread()
+        self._start_sync_timer()
+
+    def start_odl_sync_thread(self):
+        # Start the sync thread
+        LOG.debug("Starting a new sync thread")
+        odl_sync_thread = threading.Thread(
+            name='sync',
+            target=self.run_sync_thread)
+        odl_sync_thread.start()
+        return odl_sync_thread
+
+    def set_sync_event(self):
+        # Prevent race when starting the timer
+        with self.lock:
+            LOG.debug("Resetting thread timer")
+            self._timer.cancel()
+            self._start_sync_timer()
+        self.event.set()
+
+    def _start_sync_timer(self):
+        # One-shot timer whose callback (set_sync_event) re-arms it, so the
+        # sync thread is woken at least every _odl_sync_timeout seconds.
+        self._timer = threading.Timer(self._odl_sync_timeout,
+                                      self.set_sync_event)
+        self._timer.start()
+
+    def _json_data(self, row):
+        """Map a journal row to an (HTTP method, URL path, request body)."""
+        data = copy.deepcopy(row.data)
+        filters.filter_for_odl(row.object_type, row.operation, data)
+        url_object = row.object_type.replace('_', '-')
+
+        if row.operation == odl_const.ODL_CREATE:
+            method = 'post'
+            urlpath = url_object + 's'
+            to_send = {row.object_type: data}
+        elif row.operation == odl_const.ODL_UPDATE:
+            method = 'put'
+            urlpath = url_object + 's/' + row.object_uuid
+            to_send = {row.object_type: data}
+        elif row.operation == odl_const.ODL_DELETE:
+            method = 'delete'
+            urlpath = url_object + 's/' + row.object_uuid
+            to_send = None
+        elif row.operation == odl_const.ODL_ADD:
+            method = 'put'
+            urlpath = 'routers/' + data['id'] + '/add_router_interface'
+            to_send = data
+        elif row.operation == odl_const.ODL_REMOVE:
+            method = 'put'
+            urlpath = 'routers/' + data['id'] + '/remove_router_interface'
+            to_send = data
+
+        # NOTE(review): an unrecognized row.operation would leave these names
+        # unbound and raise UnboundLocalError here -- confirm every journal
+        # operation is covered above.
+        return method, urlpath, to_send
+
+    def run_sync_thread(self, exit_after_run=False):
+        # Main loop of the sync thread: wait for a wake-up (event set by the
+        # timer or by call_thread_on_end), then drain all pending rows.
+        while True:
+            try:
+                self.event.wait()
+                self.event.clear()
+
+                session = neutron_db_api.get_session()
+                self._sync_pending_rows(session, exit_after_run)
+
+                LOG.debug("Clearing sync thread event")
+                if exit_after_run:
+                    # Permanently waiting thread model breaks unit tests
+                    # Adding this arg to exit here only for unit tests
+                    break
+            except Exception:
+                # Catch exceptions to protect the thread while running
+                LOG.exception(_LE("Error on run_sync_thread"))
+
+    def _sync_pending_rows(self, session, exit_after_run):
+        # Replay journal rows oldest-first until the table is drained or the
+        # controller becomes unreachable.
+        while True:
+            LOG.debug("Thread walking database")
+            row = db.get_oldest_pending_db_row_with_lock(session)
+            if not row:
+                LOG.debug("No rows to sync")
+                break
+
+            # Validate the operation
+            valid = dependency_validations.validate(session, row)
+            if not valid:
+                LOG.info(_LI("%(operation)s %(type)s %(uuid)s is not a "
+                             "valid operation yet, skipping for now"),
+                         {'operation': row.operation,
+                          'type': row.object_type,
+                          'uuid': row.object_uuid})
+
+                # Set row back to pending.
+                db.update_db_row_state(session, row, odl_const.PENDING)
+                if exit_after_run:
+                    break
+                continue
+
+            LOG.info(_LI("Syncing %(operation)s %(type)s %(uuid)s"),
+                     {'operation': row.operation, 'type': row.object_type,
+                      'uuid': row.object_uuid})
+
+            # Add code to sync this to ODL
+            method, urlpath, to_send = self._json_data(row)
+
+            try:
+                self.client.sendjson(method, urlpath, to_send)
+                db.update_db_row_state(session, row, odl_const.COMPLETED)
+            except exceptions.ConnectionError as e:
+                # Don't raise the retry count, just log an error
+                # NOTE(review): 'e' is bound but unused in this handler.
+                LOG.error(_LE("Cannot connect to the Opendaylight Controller"))
+                # Set row back to pending
+                db.update_db_row_state(session, row, odl_const.PENDING)
+                # Break out of the loop and retry with the next
+                # timer interval
+                break
+            except Exception as e:
+                # NOTE(review): e.message is Python 2 only; under Python 3
+                # this would raise AttributeError -- consider str(e).
+                LOG.error(_LE("Error syncing %(type)s %(operation)s,"
+                              " id %(uuid)s Error: %(error)s"),
+                          {'type': row.object_type,
+                           'uuid': row.object_uuid,
+                           'operation': row.operation,
+                           'error': e.message})
+                db.update_pending_db_row_retry(session, row,
+                                               self._row_retry_count)
diff --git a/networking-odl/networking_odl/journal/maintenance.py b/networking-odl/networking_odl/journal/maintenance.py
new file mode 100644 (file)
index 0000000..7fb82a0
--- /dev/null
@@ -0,0 +1,73 @@
+#
+# Copyright (C) 2016 Red Hat, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License. You may obtain
+#  a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#  License for the specific language governing permissions and limitations
+#  under the License.
+#
+
+from neutron.db import api as neutron_db_api
+from oslo_config import cfg
+from oslo_log import log as logging
+from oslo_service import loopingcall
+
+from networking_odl._i18n import _LI, _LE
+from networking_odl.db import db
+
+
+LOG = logging.getLogger(__name__)
+
+
+class MaintenanceThread(object):
+    def __init__(self):
+        self.timer = loopingcall.FixedIntervalLoopingCall(self.execute_ops)
+        self.maintenance_interval = cfg.CONF.ml2_odl.maintenance_interval
+        self.maintenance_ops = []
+
+    def start(self):
+        self.timer.start(self.maintenance_interval, stop_on_exception=False)
+
+    def _execute_op(self, operation, session):
+        op_details = operation.__name__
+        if operation.__doc__:
+            op_details += " (%s)" % operation.func_doc
+
+        try:
+            LOG.info(_LI("Starting maintenance operation %s."), op_details)
+            db.update_maintenance_operation(session, operation=operation)
+            operation(session=session)
+            LOG.info(_LI("Finished maintenance operation %s."), op_details)
+        except Exception:
+            LOG.exception(_LE("Failed during maintenance operation %s."),
+                          op_details)
+
+    def execute_ops(self):
+        LOG.info(_LI("Starting journal maintenance run."))
+        session = neutron_db_api.get_session()
+        if not db.lock_maintenance(session):
+            LOG.info(_LI("Maintenance already running, aborting."))
+            return
+
+        try:
+            for operation in self.maintenance_ops:
+                self._execute_op(operation, session)
+        finally:
+            db.update_maintenance_operation(session, operation=None)
+            db.unlock_maintenance(session)
+            LOG.info(_LI("Finished journal maintenance run."))
+
+    def register_operation(self, f):
+        """Register a function to be run by the maintenance thread.
+
+        :param f: Function to call when the thread runs. The function will
+        receive a DB session to use for DB operations.
+        """
+        self.maintenance_ops.append(f)
diff --git a/networking-odl/networking_odl/l2gateway/__init__.py b/networking-odl/networking_odl/l2gateway/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/networking-odl/networking_odl/l2gateway/driver.py b/networking-odl/networking_odl/l2gateway/driver.py
new file mode 100644 (file)
index 0000000..d1fd5bb
--- /dev/null
@@ -0,0 +1,121 @@
+# Copyright (c) 2016 Ericsson India Global Service Pvt Ltd.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import abc
+import copy
+
+from oslo_config import cfg
+from oslo_log import log as logging
+from oslo_utils import excutils
+import six
+
+from networking_l2gw.services.l2gateway.common import constants
+from networking_l2gw.services.l2gateway import service_drivers
+from networking_odl._i18n import _LE, _LI
+from networking_odl.common import client as odl_client
+
+cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config')
+
+LOG = logging.getLogger(__name__)
+
+L2GATEWAYS = 'l2-gateways'
+L2GATEWAY_CONNECTIONS = 'l2gateway-connections'
+
+
+@six.add_metaclass(abc.ABCMeta)
+class OpenDaylightL2gwDriver(service_drivers.L2gwDriver):
+    """Opendaylight L2Gateway Service Driver
+
+    This code is the openstack driver for exercising the OpenDaylight L2GW
+    facility: each postcommit hook mirrors the corresponding L2 gateway
+    (connection) operation to the ODL controller over REST.  REST failures
+    are logged and re-raised to the caller.
+    """
+
+    def __init__(self, service_plugin, validator=None):
+        super(OpenDaylightL2gwDriver, self).__init__(service_plugin, validator)
+        self.service_plugin = service_plugin
+        self.client = odl_client.OpenDaylightRestClient.create_client()
+        LOG.info(_LI("ODL: Started OpenDaylight L2Gateway driver"))
+
+    @property
+    def service_type(self):
+        return constants.L2GW
+
+    def create_l2_gateway_postcommit(self, context, l2_gateway):
+        # POST the new gateway to ODL after it has been committed locally.
+        LOG.info(_LI("ODL: Create L2Gateway %(l2gateway)s"),
+                 {'l2gateway': l2_gateway})
+        request = {'l2_gateway': l2_gateway}
+        try:
+            self.client.sendjson('post', L2GATEWAYS, request)
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                LOG.exception(_LE("ODL: L2Gateway create"
+                                  " failed for gateway %(l2gatewayid)s"),
+                              {'l2gatewayid': l2_gateway['id']})
+
+    def delete_l2_gateway_postcommit(self, context, l2_gateway_id):
+        LOG.info(_LI("ODL: Delete L2Gateway %(l2gatewayid)s"),
+                 {'l2gatewayid': l2_gateway_id})
+        url = L2GATEWAYS + '/' + l2_gateway_id
+        try:
+            self.client.try_delete(url)
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                LOG.exception(_LE("ODL: L2Gateway delete"
+                                  " failed for gateway_id %(l2gatewayid)s"),
+                              {'l2gatewayid': l2_gateway_id})
+
+    def update_l2_gateway_postcommit(self, context, l2_gateway):
+        LOG.info(_LI("ODL: Update L2Gateway %(l2gateway)s"),
+                 {'l2gateway': l2_gateway})
+        request = {'l2_gateway': l2_gateway}
+        url = L2GATEWAYS + '/' + l2_gateway['id']
+        try:
+            self.client.sendjson('put', url, request)
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                LOG.exception(_LE("ODL: L2Gateway update"
+                                  " failed for gateway %(l2gatewayid)s"),
+                              {'l2gatewayid': l2_gateway['id']})
+
+    def create_l2_gateway_connection_postcommit(self, context,
+                                                l2_gateway_connection):
+        LOG.info(_LI("ODL: Create L2Gateway connection %(l2gwconn)s"),
+                 {'l2gwconn': l2_gateway_connection})
+        # ODL expects the key 'gateway_id' instead of Neutron's
+        # 'l2_gateway_id'; rename it on a copy before sending.
+        odl_l2_gateway_connection = copy.deepcopy(l2_gateway_connection)
+        odl_l2_gateway_connection['gateway_id'] = (
+            l2_gateway_connection['l2_gateway_id'])
+        odl_l2_gateway_connection.pop('l2_gateway_id')
+        request = {'l2gateway_connection': odl_l2_gateway_connection}
+        try:
+            self.client.sendjson('post', L2GATEWAY_CONNECTIONS, request)
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                LOG.exception(_LE("ODL: L2Gateway connection create"
+                                  " failed for gateway %(l2gwconnid)s"),
+                              {'l2gwconnid':
+                               l2_gateway_connection['l2_gateway_id']})
+
+    def delete_l2_gateway_connection_postcommit(self, context,
+                                                l2_gateway_connection_id):
+        LOG.info(_LI("ODL: Delete L2Gateway connection %(l2gwconnid)s"),
+                 {'l2gwconnid': l2_gateway_connection_id})
+        url = L2GATEWAY_CONNECTIONS + '/' + l2_gateway_connection_id
+        try:
+            self.client.try_delete(url)
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                LOG.exception(_LE("ODL: L2Gateway connection delete"
+                                  " failed for connection %(l2gwconnid)s"),
+                              {'l2gwconnid': l2_gateway_connection_id})
diff --git a/networking-odl/networking_odl/l3/__init__.py b/networking-odl/networking_odl/l3/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/networking-odl/networking_odl/l3/l3_odl.py b/networking-odl/networking_odl/l3/l3_odl.py
new file mode 100644 (file)
index 0000000..e06e335
--- /dev/null
@@ -0,0 +1,189 @@
+#
+# Copyright (C) 2013 Red Hat, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License. You may obtain
+#  a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#  License for the specific language governing permissions and limitations
+#  under the License.
+#
+
+from oslo_config import cfg
+from oslo_log import log as logging
+
+from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api
+from neutron.api.rpc.handlers import l3_rpc
+from neutron.common import rpc as n_rpc
+from neutron.common import topics
+from neutron.db import extraroute_db
+from neutron.db import l3_agentschedulers_db
+from neutron.db import l3_dvr_db
+from neutron.db import l3_gwmode_db
+from neutron.plugins.common import constants
+from neutron_lib import constants as q_const
+
+from networking_odl.common import client as odl_client
+from networking_odl.common import utils as odl_utils
+
+try:
+    from neutron.db.db_base_plugin_v2 import common_db_mixin
+except ImportError as e:
+    # the change set of ece8cc2e9aae1610a325d0c206e38da3da9a0a1a
+    # the Change-Id of I1eac61c258541bca80e14be4b7c75519a014ffae
+    # db_base_plugin_v2.common_db_mixin was removed
+    from neutron.db import common_db_mixin
+
+
+cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config')
+LOG = logging.getLogger(__name__)
+ROUTERS = 'routers'
+FLOATINGIPS = 'floatingips'
+
+
+class OpenDaylightL3RouterPlugin(
+    common_db_mixin.CommonDbMixin,
+    extraroute_db.ExtraRoute_db_mixin,
+    l3_dvr_db.L3_NAT_with_dvr_db_mixin,
+    l3_gwmode_db.L3_NAT_db_mixin,
+    l3_agentschedulers_db.L3AgentSchedulerDbMixin):
+
+    """Implementation of the OpenDaylight L3 Router Service Plugin.
+
+    This class implements a L3 service plugin that provides
+    router and floatingip resources and manages associated
+    request/response.  Each operation first performs the Neutron DB
+    change via super(), then mirrors it to ODL over REST.
+
+    NOTE(review): the DB change and the REST call are not atomic -- if
+    the REST call fails, the DB change is not rolled back.  Confirm this
+    is the intended (v1, non-journalled) behavior.
+    """
+    supported_extension_aliases = ["dvr", "router", "ext-gw-mode",
+                                   "extraroute"]
+
+    def __init__(self):
+        self.setup_rpc()
+        self.client = odl_client.OpenDaylightRestClient.create_client()
+
+    def setup_rpc(self):
+        # Standard L3 plugin RPC wiring: consume on the L3 plugin topic and
+        # notify L3 agents of changes.
+        self.topic = topics.L3PLUGIN
+        self.conn = n_rpc.create_connection()
+        self.agent_notifiers.update(
+            {q_const.AGENT_TYPE_L3: l3_rpc_agent_api.L3AgentNotifyAPI()})
+        self.endpoints = [l3_rpc.L3RpcCallback()]
+        self.conn.create_consumer(self.topic, self.endpoints,
+                                  fanout=False)
+        self.conn.consume_in_threads()
+
+    def get_plugin_type(self):
+        return constants.L3_ROUTER_NAT
+
+    def get_plugin_description(self):
+        """returns string description of the plugin."""
+        return ("L3 Router Service Plugin for basic L3 forwarding"
+                " using OpenDaylight")
+
+    def filter_update_router_attributes(self, router):
+        """Filter out router attributes for an update operation."""
+        odl_utils.try_del(router, ['id', 'tenant_id', 'status'])
+
+    def create_router(self, context, router):
+        router_dict = super(OpenDaylightL3RouterPlugin, self).create_router(
+            context, router)
+        url = ROUTERS
+        # ROUTERS[:-1] == 'router', the singular resource key ODL expects.
+        self.client.sendjson('post', url, {ROUTERS[:-1]: router_dict})
+        return router_dict
+
+    def update_router(self, context, id, router):
+        router_dict = super(OpenDaylightL3RouterPlugin, self).update_router(
+            context, id, router)
+        url = ROUTERS + "/" + id
+        # Send a filtered copy; the unfiltered dict is returned to callers.
+        resource = router_dict.copy()
+        self.filter_update_router_attributes(resource)
+        self.client.sendjson('put', url, {ROUTERS[:-1]: resource})
+        return router_dict
+
+    def delete_router(self, context, id):
+        super(OpenDaylightL3RouterPlugin, self).delete_router(context, id)
+        url = ROUTERS + "/" + id
+        self.client.sendjson('delete', url, None)
+
+    def create_floatingip(self, context, floatingip,
+                          initial_status=q_const.FLOATINGIP_STATUS_ACTIVE):
+        fip_dict = super(OpenDaylightL3RouterPlugin, self).create_floatingip(
+            context, floatingip, initial_status)
+        url = FLOATINGIPS
+        self.client.sendjson('post', url, {FLOATINGIPS[:-1]: fip_dict})
+        return fip_dict
+
+    def update_floatingip(self, context, id, floatingip):
+        with context.session.begin(subtransactions=True):
+            fip_dict = super(OpenDaylightL3RouterPlugin,
+                             self).update_floatingip(context, id, floatingip)
+            # Update status based on association
+            if fip_dict.get('port_id') is None:
+                fip_dict['status'] = q_const.FLOATINGIP_STATUS_DOWN
+            else:
+                fip_dict['status'] = q_const.FLOATINGIP_STATUS_ACTIVE
+            self.update_floatingip_status(context, id, fip_dict['status'])
+
+        url = FLOATINGIPS + "/" + id
+        self.client.sendjson('put', url, {FLOATINGIPS[:-1]: fip_dict})
+        return fip_dict
+
+    def delete_floatingip(self, context, id):
+        super(OpenDaylightL3RouterPlugin, self).delete_floatingip(context, id)
+        url = FLOATINGIPS + "/" + id
+        self.client.sendjson('delete', url, None)
+
+    def add_router_interface(self, context, router_id, interface_info):
+        new_router = super(
+            OpenDaylightL3RouterPlugin, self).add_router_interface(
+                context, router_id, interface_info)
+        url = ROUTERS + "/" + router_id + "/add_router_interface"
+        router_dict = self._generate_router_dict(router_id, interface_info,
+                                                 new_router)
+        self.client.sendjson('put', url, router_dict)
+        return new_router
+
+    def remove_router_interface(self, context, router_id, interface_info):
+        new_router = super(
+            OpenDaylightL3RouterPlugin, self).remove_router_interface(
+                context, router_id, interface_info)
+        url = ROUTERS + "/" + router_id + "/remove_router_interface"
+        router_dict = self._generate_router_dict(router_id, interface_info,
+                                                 new_router)
+        self.client.sendjson('put', url, router_dict)
+        return new_router
+
+    def _generate_router_dict(self, router_id, interface_info, new_router):
+        # Get network info for the subnet that is being added to the router.
+        # Check if the interface information is by port-id or subnet-id
+        add_by_port, add_by_sub = self._validate_interface_info(interface_info)
+        if add_by_sub:
+            _port_id = new_router['port_id']
+            _subnet_id = interface_info['subnet_id']
+        elif add_by_port:
+            _port_id = interface_info['port_id']
+            _subnet_id = new_router['subnet_id']
+
+        # NOTE(review): if neither flag is set, _port_id/_subnet_id are
+        # unbound below -- presumably _validate_interface_info guarantees
+        # one of them; confirm.
+        router_dict = {'subnet_id': _subnet_id,
+                       'port_id': _port_id,
+                       'id': router_id,
+                       'tenant_id': new_router['tenant_id']}
+
+        return router_dict
+
+    # One-shot flag so the DVR warning below is only logged once.
+    dvr_deletens_if_no_port_warned = False
+
+    def dvr_deletens_if_no_port(self, context, port_id):
+        # TODO(yamahata): implement this method or delete this logging
+        # For now, this is defined to avoid attribute exception
+        # Since ODL L3 does not create namespaces, this is always going to
+        # be a noop. When it is confirmed, delete this comment and logging
+        if not self.dvr_deletens_if_no_port_warned:
+            LOG.debug('dvr is not suported yet. '
+                      'this method needs to be implemented')
+            self.dvr_deletens_if_no_port_warned = True
+        return []
diff --git a/networking-odl/networking_odl/l3/l3_odl_v2.py b/networking-odl/networking_odl/l3/l3_odl_v2.py
new file mode 100644 (file)
index 0000000..2732ea6
--- /dev/null
@@ -0,0 +1,206 @@
+#  Copyright (c) 2016 OpenStack Foundation
+#  All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License. You may obtain
+#  a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#  License for the specific language governing permissions and limitations
+#  under the License.
+#
+
+from oslo_log import log as logging
+
+from neutron.db import api as db_api
+from neutron.db import common_db_mixin
+from neutron.db import extraroute_db
+from neutron.db import l3_agentschedulers_db
+from neutron.db import l3_dvr_db
+from neutron.db import l3_gwmode_db
+from neutron.plugins.common import constants
+from neutron_lib import constants as q_const
+
+from networking_odl.common import config  # noqa
+from networking_odl.common import constants as odl_const
+from networking_odl.db import db
+from networking_odl.journal import journal
+
+LOG = logging.getLogger(__name__)
+
+
+class OpenDaylightL3RouterPlugin(
+    common_db_mixin.CommonDbMixin,
+    extraroute_db.ExtraRoute_db_mixin,
+    l3_dvr_db.L3_NAT_with_dvr_db_mixin,
+    l3_gwmode_db.L3_NAT_db_mixin,
+    l3_agentschedulers_db.L3AgentSchedulerDbMixin):
+
+    """Implementation of the OpenDaylight L3 Router Service Plugin.
+
+    This class implements a L3 service plugin that provides
+    router and floatingip resources and manages associated
+    request/response.  Unlike the v1 plugin, each operation records a
+    journal row inside the DB transaction; the journal thread replays
+    the rows to ODL asynchronously (@journal.call_thread_on_end wakes it
+    after each call returns).
+    """
+    supported_extension_aliases = ["dvr", "router", "ext-gw-mode",
+                                   "extraroute"]
+
+    def __init__(self):
+        super(OpenDaylightL3RouterPlugin, self).__init__()
+
+        # TODO(rcurran): Continue investigation into how many journal threads
+        # to run per neutron controller deployment.
+        self.journal = journal.OpendaylightJournalThread()
+
+    def get_plugin_type(self):
+        return constants.L3_ROUTER_NAT
+
+    def get_plugin_description(self):
+        """Returns string description of the plugin."""
+        return ("L3 Router Service Plugin for basic L3 forwarding "
+                "using OpenDaylight.")
+
+    # NOTE(review): each method below begins a transaction on
+    # db_api.get_session() but records the journal row on context.session --
+    # presumably these resolve to the same session; confirm.
+    @journal.call_thread_on_end
+    def create_router(self, context, router):
+        session = db_api.get_session()
+        with session.begin(subtransactions=True):
+            router_dict = super(
+                OpenDaylightL3RouterPlugin, self).create_router(context,
+                                                                router)
+            db.create_pending_row(context.session, odl_const.ODL_ROUTER,
+                                  router_dict['id'], odl_const.ODL_CREATE,
+                                  router_dict)
+        return router_dict
+
+    @journal.call_thread_on_end
+    def update_router(self, context, router_id, router):
+        session = db_api.get_session()
+        with session.begin(subtransactions=True):
+            router_dict = super(
+                OpenDaylightL3RouterPlugin, self).update_router(
+                    context, router_id, router)
+            db.create_pending_row(context.session, odl_const.ODL_ROUTER,
+                                  router_id, odl_const.ODL_UPDATE, router_dict)
+        return router_dict
+
+    @journal.call_thread_on_end
+    def delete_router(self, context, router_id):
+        session = db_api.get_session()
+        # Record the gateway port as a dependency so the journal can order
+        # the delete correctly.
+        router_dict = self.get_router(context, router_id)
+        dependency_list = [router_dict['gw_port_id']]
+        with session.begin(subtransactions=True):
+            super(OpenDaylightL3RouterPlugin, self).delete_router(context,
+                                                                  router_id)
+            db.create_pending_row(context.session, odl_const.ODL_ROUTER,
+                                  router_id, odl_const.ODL_DELETE,
+                                  dependency_list)
+
+    @journal.call_thread_on_end
+    def create_floatingip(self, context, floatingip,
+                          initial_status=q_const.FLOATINGIP_STATUS_ACTIVE):
+        session = db_api.get_session()
+        with session.begin(subtransactions=True):
+            fip_dict = super(
+                OpenDaylightL3RouterPlugin, self).create_floatingip(
+                    context, floatingip, initial_status)
+            db.create_pending_row(context.session, odl_const.ODL_FLOATINGIP,
+                                  fip_dict['id'], odl_const.ODL_CREATE,
+                                  fip_dict)
+        return fip_dict
+
+    @journal.call_thread_on_end
+    def update_floatingip(self, context, floatingip_id, floatingip):
+        session = db_api.get_session()
+        with session.begin(subtransactions=True):
+            fip_dict = super(
+                OpenDaylightL3RouterPlugin, self).update_floatingip(
+                    context, floatingip_id, floatingip)
+
+            # Update status based on association
+            if fip_dict.get('port_id') is None:
+                fip_dict['status'] = q_const.FLOATINGIP_STATUS_DOWN
+            else:
+                fip_dict['status'] = q_const.FLOATINGIP_STATUS_ACTIVE
+            self.update_floatingip_status(context, floatingip_id,
+                                          fip_dict['status'])
+
+            db.create_pending_row(context.session, odl_const.ODL_FLOATINGIP,
+                                  floatingip_id, odl_const.ODL_UPDATE,
+                                  fip_dict)
+        return fip_dict
+
+    @journal.call_thread_on_end
+    def delete_floatingip(self, context, floatingip_id):
+        session = db_api.get_session()
+        # Record the router and network as dependencies for journal ordering.
+        floatingip_dict = self.get_floatingip(context, floatingip_id)
+        dependency_list = [floatingip_dict['router_id']]
+        dependency_list.append(floatingip_dict['floating_network_id'])
+        with session.begin(subtransactions=True):
+            super(OpenDaylightL3RouterPlugin, self).delete_floatingip(
+                context, floatingip_id)
+            db.create_pending_row(context.session, odl_const.ODL_FLOATINGIP,
+                                  floatingip_id, odl_const.ODL_DELETE,
+                                  dependency_list)
+
+    @journal.call_thread_on_end
+    def add_router_interface(self, context, router_id, interface_info):
+        session = db_api.get_session()
+        with session.begin(subtransactions=True):
+            new_router = super(
+                OpenDaylightL3RouterPlugin, self).add_router_interface(
+                    context, router_id, interface_info)
+            router_dict = self._generate_router_dict(router_id, interface_info,
+                                                     new_router)
+            db.create_pending_row(context.session, odl_const.ODL_ROUTER_INTF,
+                                  odl_const.ODL_UUID_NOT_USED,
+                                  odl_const.ODL_ADD, router_dict)
+        return new_router
+
+    @journal.call_thread_on_end
+    def remove_router_interface(self, context, router_id, interface_info):
+        session = db_api.get_session()
+        with session.begin(subtransactions=True):
+            new_router = super(
+                OpenDaylightL3RouterPlugin, self).remove_router_interface(
+                    context, router_id, interface_info)
+            router_dict = self._generate_router_dict(router_id, interface_info,
+                                                     new_router)
+            db.create_pending_row(context.session, odl_const.ODL_ROUTER_INTF,
+                                  odl_const.ODL_UUID_NOT_USED,
+                                  odl_const.ODL_REMOVE, router_dict)
+        return new_router
+
+    def _generate_router_dict(self, router_id, interface_info, new_router):
+        # Get network info for the subnet that is being added to the router.
+        # Check if the interface information is by port-id or subnet-id.
+        add_by_port, add_by_sub = self._validate_interface_info(interface_info)
+        if add_by_sub:
+            _port_id = new_router['port_id']
+            _subnet_id = interface_info['subnet_id']
+        elif add_by_port:
+            _port_id = interface_info['port_id']
+            _subnet_id = new_router['subnet_id']
+
+        # NOTE(review): if neither flag is set, _port_id/_subnet_id are
+        # unbound below -- presumably _validate_interface_info guarantees
+        # one of them; confirm.
+        router_dict = {'subnet_id': _subnet_id,
+                       'port_id': _port_id,
+                       'id': router_id,
+                       'tenant_id': new_router['tenant_id']}
+
+        return router_dict
+
+    # One-shot flag so the DVR warning below is only logged once.
+    dvr_deletens_if_no_port_warned = False
+
+    def dvr_deletens_if_no_port(self, context, port_id):
+        # TODO(yamahata): implement this method or delete this logging
+        # For now, this is defined to avoid attribute exception
+        # Since ODL L3 does not create namespaces, this is always going to
+        # be a noop. When it is confirmed, delete this comment and logging
+        if not self.dvr_deletens_if_no_port_warned:
+            LOG.debug('dvr is not suported yet. '
+                      'this method needs to be implemented')
+            self.dvr_deletens_if_no_port_warned = True
+        return []
diff --git a/networking-odl/networking_odl/lbaas/__init__.py b/networking-odl/networking_odl/lbaas/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/networking-odl/networking_odl/lbaas/driver_v1.py b/networking-odl/networking_odl/lbaas/driver_v1.py
new file mode 100644 (file)
index 0000000..aaf3dcf
--- /dev/null
@@ -0,0 +1,125 @@
+#
+# Copyright (C) 2013 Red Hat, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License. You may obtain
+#  a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#  License for the specific language governing permissions and limitations
+#  under the License.
+#
+
+from oslo_config import cfg
+from oslo_log import log as logging
+
+from neutron_lbaas.services.loadbalancer.drivers import abstract_driver
+
+from networking_odl.common import client as odl_client
+from networking_odl.common import constants as odl_const
+
+cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config')
+LOG = logging.getLogger(__name__)
+LBAAS = "lbaas"
+POOLS_URL_PATH = LBAAS + '/' + odl_const.ODL_POOLS
+HEALTHMONITORS_URL_PATH = LBAAS + '/' + odl_const.ODL_HEALTHMONITORS
+
+
+class OpenDaylightLbaasDriverV1(abstract_driver.LoadBalancerAbstractDriver):
+
+    """OpenDaylight LBaaS Driver for the V1 API
+
+    This code is the backend implementation for the OpenDaylight
+    LBaaS V1 driver for OpenStack Neutron.
+    """
+
+    def __init__(self, plugin):
+        LOG.debug("Initializing OpenDaylight LBaaS driver")
+        self.plugin = plugin
+        # REST client towards the ODL controller; connection parameters
+        # come from the [ml2_odl] config group imported above.
+        self.client = odl_client.OpenDaylightRestClient.create_client()
+
+    def create_vip(self, context, vip):
+        """Create a vip on the OpenDaylight Controller.
+
+        No code related to vip in the OpenDayLight neutronNorthbound,
+        so pass this method.
+        """
+        pass
+
+    def update_vip(self, context, old_vip, vip):
+        """Update a vip on the OpenDaylight Controller.
+
+        No code related to vip in the OpenDayLight neutronNorthbound,
+        so pass this method.
+        """
+        pass
+
+    def delete_vip(self, context, vip):
+        """Delete a vip on the OpenDaylight Controller.
+
+        No code related to vip in the OpenDayLight neutronNorthbound,
+        so pass this method.
+        """
+        pass
+
+    def create_pool(self, context, pool):
+        """Create a pool on the OpenDaylight Controller."""
+        url = POOLS_URL_PATH
+        self.client.sendjson('post', url, {odl_const.ODL_POOL: pool})
+
+    def update_pool(self, context, old_pool, pool):
+        """Update a pool on the OpenDaylight Controller."""
+        # Address by the pre-update id; the payload carries the new state.
+        url = POOLS_URL_PATH + "/" + old_pool['id']
+        self.client.sendjson('put', url, {odl_const.ODL_POOL: pool})
+
+    def delete_pool(self, context, pool):
+        """Delete a pool on the OpenDaylight Controller."""
+        url = POOLS_URL_PATH + "/" + pool['id']
+        self.client.sendjson('delete', url, None)
+
+    def create_member(self, context, member):
+        """Create a pool member on the OpenDaylight Controller."""
+        # Members are nested under their pool: lbaas/pools/<id>/members.
+        url = (
+            POOLS_URL_PATH + '/' + member['pool_id'] +
+            '/' + odl_const.ODL_MEMBERS)
+        self.client.sendjson('post', url, {odl_const.ODL_MEMBER: member})
+
+    def update_member(self, context, old_member, member):
+        """Update a pool member on the OpenDaylight Controller."""
+        url = (
+            POOLS_URL_PATH + '/' + member['pool_id'] +
+            '/' + odl_const.ODL_MEMBERS + "/" + old_member['id'])
+        self.client.sendjson('put', url, {odl_const.ODL_MEMBER: member})
+
+    def delete_member(self, context, member):
+        """Delete a pool member on the OpenDaylight Controller."""
+        url = (
+            POOLS_URL_PATH + '/' + member['pool_id'] +
+            '/' + odl_const.ODL_MEMBERS + "/" + member['id'])
+        self.client.sendjson('delete', url, None)
+
+    def create_pool_health_monitor(self, context, health_monitor, pool_id):
+        """Create a pool health monitor on the OpenDaylight Controller."""
+        url = HEALTHMONITORS_URL_PATH
+        self.client.sendjson(
+            'post', url, {odl_const.ODL_HEALTHMONITOR: health_monitor})
+
+    def update_pool_health_monitor(self, context, old_health_monitor,
+                                   health_monitor, pool_id):
+        """Update a pool health monitor on the OpenDaylight Controller."""
+        url = HEALTHMONITORS_URL_PATH + "/" + old_health_monitor['id']
+        self.client.sendjson(
+            'put', url, {odl_const.ODL_HEALTHMONITOR: health_monitor})
+
+    def delete_pool_health_monitor(self, context, health_monitor, pool_id):
+        """Delete a pool health monitor on the OpenDaylight Controller."""
+        url = HEALTHMONITORS_URL_PATH + "/" + health_monitor['id']
+        self.client.sendjson('delete', url, None)
+
+    def stats(self, context, pool_id):
+        """Retrieve pool statistics from the OpenDaylight Controller."""
+        # NOTE(review): no stats endpoint is queried here; callers get
+        # None -- confirm whether ODL exposes pool statistics at all.
+        pass
diff --git a/networking-odl/networking_odl/lbaas/driver_v2.py b/networking-odl/networking_odl/lbaas/driver_v2.py
new file mode 100644 (file)
index 0000000..720a5c2
--- /dev/null
@@ -0,0 +1,126 @@
+#
+# Copyright (C) 2013 Red Hat, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License. You may obtain
+#  a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#  License for the specific language governing permissions and limitations
+#  under the License.
+#
+
+from oslo_config import cfg
+from oslo_log import helpers as log_helpers
+from oslo_log import log as logging
+
+from neutron_lbaas.drivers import driver_base
+
+from networking_odl.common import client as odl_client
+from networking_odl.common import constants as odl_const
+
+cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config')
+LOG = logging.getLogger(__name__)
+
+LBAAS = "lbaas"
+
+
+class OpenDaylightLbaasDriverV2(driver_base.LoadBalancerBaseDriver):
+
+    @log_helpers.log_method_call
+    def __init__(self, plugin):
+        """Wire up one ODL manager per LBaaS V2 resource type.
+
+        All managers share a single REST client towards the ODL
+        controller ([ml2_odl] config group).
+        """
+        LOG.debug("Initializing OpenDaylight LBaaS driver")
+        self.plugin = plugin
+        self.client = odl_client.OpenDaylightRestClient.create_client()
+        self._loadbalancer = ODLLoadBalancerManager(self.client)
+        self._listener = ODLListenerManager(self.client)
+        self._pool = ODLPoolManager(self.client)
+        self._member = ODLMemberManager(self.client)
+        self._healthmonitor = ODLHealthMonitorManager(self.client)
+
+
+class OpenDaylightManager(object):
+
+    out_of_sync = True
+    url_path = ""
+    obj_type = ""
+
+    """OpenDaylight LBaaS Driver for the V2 API
+
+    This code is the backend implementation for the OpenDaylight
+    LBaaS V2 driver for OpenStack Neutron.
+    """
+
+    @log_helpers.log_method_call
+    def __init__(self, client):
+        self.client = client
+        self.url_path = LBAAS + self.obj_type
+
+    @log_helpers.log_method_call
+    def create(self, context, obj):
+        self.client.sendjson('post', self.url_path, None)
+
+    @log_helpers.log_method_call
+    def update(self, context, obj):
+        self.client.sendjson('put', self.url_path + '/' + obj.id, None)
+
+    @log_helpers.log_method_call
+    def delete(self, context, obj):
+        self.client.sendjson('delete', self.url_path + '/' + obj.id, None)
+
+
+class ODLLoadBalancerManager(OpenDaylightManager,
+                             driver_base.BaseLoadBalancerManager):
+    """Load balancer CRUD proxied to ODL under the loadbalancers path."""
+
+    @log_helpers.log_method_call
+    def __init__(self, client):
+        # obj_type must be set before OpenDaylightManager.__init__
+        # builds url_path from it.
+        self.obj_type = odl_const.ODL_LOADBALANCERS
+        super(ODLLoadBalancerManager, self).__init__(client)
+
+    @log_helpers.log_method_call
+    def refresh(self, context, lb):
+        # Intentional noop: nothing to refresh on the ODL side.
+        pass
+
+    @log_helpers.log_method_call
+    def stats(self, context, lb):
+        # Intentional noop: statistics are not fetched from ODL.
+        pass
+
+
+class ODLListenerManager(OpenDaylightManager,
+                         driver_base.BaseListenerManager):
+    """Listener CRUD proxied to ODL under the listeners path."""
+
+    @log_helpers.log_method_call
+    def __init__(self, client):
+        self.obj_type = odl_const.ODL_LISTENERS
+        super(ODLListenerManager, self).__init__(client)
+
+
+class ODLPoolManager(OpenDaylightManager,
+                     driver_base.BasePoolManager):
+    """Pool CRUD proxied to ODL under the pools path."""
+
+    @log_helpers.log_method_call
+    def __init__(self, client):
+        self.obj_type = odl_const.ODL_POOLS
+        super(ODLPoolManager, self).__init__(client)
+
+
+class ODLMemberManager(OpenDaylightManager,
+                       driver_base.BaseMemberManager):
+    """Pool member CRUD proxied to ODL under the members path."""
+
+    @log_helpers.log_method_call
+    def __init__(self, client):
+        self.obj_type = odl_const.ODL_MEMBERS
+        super(ODLMemberManager, self).__init__(client)
+
+
+class ODLHealthMonitorManager(OpenDaylightManager,
+                              driver_base.BaseHealthMonitorManager):
+    """Health monitor CRUD proxied to ODL under the healthmonitors path."""
+
+    @log_helpers.log_method_call
+    def __init__(self, client):
+        self.obj_type = odl_const.ODL_HEALTHMONITORS
+        super(ODLHealthMonitorManager, self).__init__(client)
diff --git a/networking-odl/networking_odl/ml2/README.odl b/networking-odl/networking_odl/ml2/README.odl
new file mode 100644 (file)
index 0000000..eef8d44
--- /dev/null
@@ -0,0 +1,41 @@
+OpenDaylight ML2 MechanismDriver
+================================
+OpenDaylight is an Open Source SDN Controller developed by a plethora of
+companies and hosted by the Linux Foundation. The OpenDaylight website
+contains more information on the capabilities OpenDaylight provides:
+
+    http://www.opendaylight.org
+
+Theory of operation
+===================
+The OpenStack Neutron integration with OpenDaylight consists of the ML2
+MechanismDriver which acts as a REST proxy and passes all Neutron API
+calls into OpenDaylight. OpenDaylight contains a NB REST service (called
+the NeutronAPIService) which caches data from these proxied API calls and
+makes it available to other services inside of OpenDaylight. One current
+user of the SB side of the NeutronAPIService is the OVSDB code in
+OpenDaylight. OVSDB uses the neutron information to isolate tenant networks
+using GRE or VXLAN tunnels.
+
+How to use the OpenDaylight ML2 MechanismDriver
+===============================================
+To use the ML2 MechanismDriver, you need to ensure you have it configured
+as one of the "mechanism_drivers" in ML2:
+
+    mechanism_drivers=opendaylight
+
+The next step is to setup the "[ml2_odl]" section in either the ml2_conf.ini
+file or in a separate ml2_conf_odl.ini file. An example is shown below:
+
+    [ml2_odl]
+    password = admin
+    username = admin
+    url = http://192.168.100.1:8080/controller/nb/v2/neutron
+
+When starting OpenDaylight, ensure you have the SimpleForwarding application
+disabled or remove the .jar file from the plugins directory. Also ensure you
+start OpenDaylight before you start OpenStack Neutron.
+
+There is devstack support for this which will automatically pull down OpenDaylight
+and start it as part of devstack as well. The patch for this will likely merge
+around the same time as this patch merges.
diff --git a/networking-odl/networking_odl/ml2/__init__.py b/networking-odl/networking_odl/ml2/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/networking-odl/networking_odl/ml2/legacy_port_binding.py b/networking-odl/networking_odl/ml2/legacy_port_binding.py
new file mode 100644 (file)
index 0000000..7b9b918
--- /dev/null
@@ -0,0 +1,84 @@
+# Copyright (c) 2016 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+
+from oslo_log import log
+
+from neutron.extensions import portbindings
+from neutron.plugins.common import constants
+from neutron.plugins.ml2 import driver_api
+from neutron_lib import constants as n_const
+
+from networking_odl.ml2 import port_binding
+
+
+LOG = log.getLogger(__name__)
+
+
+class LegacyPortBindingManager(port_binding.PortBindingController):
+
+    def __init__(self):
+        self.vif_details = {portbindings.CAP_PORT_FILTER: True}
+        self.supported_vnic_types = [portbindings.VNIC_NORMAL]
+
+    def bind_port(self, port_context):
+        """Set binding for all valid segments
+
+        """
+        vnic_type = port_context.current.get(portbindings.VNIC_TYPE,
+                                             portbindings.VNIC_NORMAL)
+        if vnic_type not in self.supported_vnic_types:
+            LOG.debug("Refusing to bind due to unsupported vnic_type: %s",
+                      vnic_type)
+            return
+
+        valid_segment = None
+        for segment in port_context.segments_to_bind:
+            if self._check_segment(segment):
+                valid_segment = segment
+                break
+
+        if valid_segment:
+            vif_type = self._get_vif_type(port_context)
+            LOG.debug("Bind port %(port)s on network %(network)s with valid "
+                      "segment %(segment)s and VIF type %(vif_type)r.",
+                      {'port': port_context.current['id'],
+                       'network': port_context.network.current['id'],
+                       'segment': valid_segment, 'vif_type': vif_type})
+
+            port_context.set_binding(
+                segment[driver_api.ID], vif_type,
+                self.vif_details,
+                status=n_const.PORT_STATUS_ACTIVE)
+
+    def _check_segment(self, segment):
+        """Verify a segment is valid for the OpenDaylight MechanismDriver.
+
+        Verify the requested segment is supported by ODL and return True or
+        False to indicate this to callers.
+        """
+
+        network_type = segment[driver_api.NETWORK_TYPE]
+        return network_type in [constants.TYPE_LOCAL, constants.TYPE_GRE,
+                                constants.TYPE_VXLAN, constants.TYPE_VLAN]
+
+    def _get_vif_type(self, port_context):
+        """Get VIF type string for given PortContext
+
+        Dummy implementation: it always returns following constant.
+        neutron.extensions.portbindings.VIF_TYPE_OVS
+        """
+
+        return portbindings.VIF_TYPE_OVS
diff --git a/networking-odl/networking_odl/ml2/mech_driver.py b/networking-odl/networking_odl/ml2/mech_driver.py
new file mode 100644 (file)
index 0000000..adde8d9
--- /dev/null
@@ -0,0 +1,458 @@
+# Copyright (c) 2013-2014 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import abc
+import copy
+import six
+
+import netaddr
+from oslo_config import cfg
+from oslo_log import log as logging
+from oslo_utils import excutils
+import requests
+
+from neutron.common import utils
+from neutron import context as neutron_context
+from neutron.extensions import allowedaddresspairs as addr_pair
+from neutron.extensions import securitygroup as sg
+from neutron.plugins.ml2 import driver_api
+from neutron.plugins.ml2 import driver_context
+from neutron_lib import exceptions as n_exc
+
+from networking_odl._i18n import _LE
+from networking_odl.common import callback as odl_call
+from networking_odl.common import client as odl_client
+from networking_odl.common import constants as odl_const
+from networking_odl.common import utils as odl_utils
+from networking_odl.ml2 import port_binding
+
+
+cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config')
+LOG = logging.getLogger(__name__)
+
+not_found_exception_map = {odl_const.ODL_NETWORKS: n_exc.NetworkNotFound,
+                           odl_const.ODL_SUBNETS: n_exc.SubnetNotFound,
+                           odl_const.ODL_PORTS: n_exc.PortNotFound,
+                           odl_const.ODL_SGS: sg.SecurityGroupNotFound,
+                           odl_const.ODL_SG_RULES:
+                               sg.SecurityGroupRuleNotFound}
+
+
+@six.add_metaclass(abc.ABCMeta)
+class ResourceFilterBase(object):
+    """Strips Neutron resource attributes ODL cannot accept.
+
+    Subclasses implement per-resource filters used before create/update
+    requests are sent to the ODL northbound API.
+    """
+
+    @staticmethod
+    @abc.abstractmethod
+    def filter_create_attributes(resource, context):
+        pass
+
+    @staticmethod
+    @abc.abstractmethod
+    def filter_update_attributes(resource, context):
+        pass
+
+    @staticmethod
+    @abc.abstractmethod
+    def filter_create_attributes_with_plugin(resource, plugin, dbcontext):
+        pass
+
+    @staticmethod
+    def _filter_unmapped_null(resource_dict, unmapped_keys):
+        # NOTE(yamahata): bug work around
+        # https://bugs.eclipse.org/bugs/show_bug.cgi?id=475475
+        #   Null-value for an unmapped element causes next mapped
+        #   collection to contain a null value
+        #   JSON: { "unmappedField": null, "mappedCollection": [ "a" ] }
+        #
+        #   Java Object:
+        #   class Root {
+        #     Collection<String> mappedCollection = new ArrayList<String>;
+        #   }
+        #
+        #   Result:
+        #   Field B contains one element; null
+        #
+        # TODO(yamahata): update along side with neutron and ODL
+        #   add when neutron adds more extensions
+        #   delete when ODL neutron northbound supports it
+        # TODO(yamahata): do same thing for other resources
+        keys_to_del = [key for key in unmapped_keys
+                       if resource_dict.get(key) is None]
+        if keys_to_del:
+            odl_utils.try_del(resource_dict, keys_to_del)
+
+
+class NetworkFilter(ResourceFilterBase):
+    """Attribute filters for network resources sent to ODL."""
+
+    # Keys ODL's JAXB mapping cannot handle when null (see base class).
+    _UNMAPPED_KEYS = ['qos_policy_id']
+
+    @classmethod
+    def filter_create_attributes(cls, network, context):
+        """Filter out network attributes not required for a create."""
+        odl_utils.try_del(network, ['status', 'subnets'])
+        cls._filter_unmapped_null(network, cls._UNMAPPED_KEYS)
+
+    @classmethod
+    def filter_update_attributes(cls, network, context):
+        """Filter out network attributes for an update operation."""
+        odl_utils.try_del(network, ['id', 'status', 'subnets', 'tenant_id'])
+        cls._filter_unmapped_null(network, cls._UNMAPPED_KEYS)
+
+    @classmethod
+    def filter_create_attributes_with_plugin(cls, network, plugin, dbcontext):
+        # Build a NetworkContext so the plain create filter can be reused
+        # during full sync, where no ML2 context exists yet.
+        context = driver_context.NetworkContext(plugin, dbcontext, network)
+        cls.filter_create_attributes(network, context)
+
+
+class SubnetFilter(ResourceFilterBase):
+    """Attribute filters for subnet resources sent to ODL."""
+
+    @staticmethod
+    def filter_create_attributes(subnet, context):
+        """Filter out subnet attributes not required for a create."""
+        pass
+
+    @staticmethod
+    def filter_update_attributes(subnet, context):
+        """Filter out subnet attributes for an update operation."""
+        odl_utils.try_del(subnet, ['id', 'network_id', 'ip_version', 'cidr',
+                          'allocation_pools', 'tenant_id'])
+
+    @classmethod
+    def filter_create_attributes_with_plugin(cls, subnet, plugin, dbcontext):
+        # Fetch the owning network so a SubnetContext can be constructed
+        # for the reusable create filter (used during full sync).
+        network = plugin.get_network(dbcontext, subnet['network_id'])
+        context = driver_context.SubnetContext(plugin, dbcontext, subnet,
+                                               network)
+        cls.filter_create_attributes(subnet, context)
+
+
+class PortFilter(ResourceFilterBase):
+    """Attribute filters for port resources sent to ODL."""
+
+    # Keys ODL's JAXB mapping cannot handle when null (see base class).
+    _UNMAPPED_KEYS = ['binding:profile', 'dns_name',
+                      'port_security_enabled', 'qos_policy_id']
+
+    @staticmethod
+    def _add_security_groups(port, context):
+        """Populate the 'security_groups' field with entire records."""
+        dbcontext = context._plugin_context
+        groups = [context._plugin.get_security_group(dbcontext, sg)
+                  for sg in port['security_groups']]
+        port['security_groups'] = groups
+
+    @classmethod
+    def _fixup_allowed_ipaddress_pairs(cls, allowed_address_pairs):
+        """unify (ip address or network address) into network address"""
+        for address_pair in allowed_address_pairs:
+            ip_address = address_pair['ip_address']
+            network_address = str(netaddr.IPNetwork(ip_address))
+            address_pair['ip_address'] = network_address
+
+    @classmethod
+    def filter_create_attributes(cls, port, context):
+        """Filter out port attributes not required for a create."""
+        cls._add_security_groups(port, context)
+        cls._fixup_allowed_ipaddress_pairs(port[addr_pair.ADDRESS_PAIRS])
+        cls._filter_unmapped_null(port, cls._UNMAPPED_KEYS)
+        odl_utils.try_del(port, ['status'])
+
+        # NOTE(yamahata): work around for port creation for router
+        # tenant_id=''(empty string) is passed when port is created
+        # by l3 plugin internally for router.
+        # On the other hand, ODL doesn't accept empty string for tenant_id.
+        # In that case, deduce tenant_id from network_id for now.
+        # Right fix: modify Neutron so that don't allow empty string
+        # for tenant_id even for port for internal use.
+        # TODO(yamahata): eliminate this work around when neutron side
+        # is fixed
+        # assert port['tenant_id'] != ''
+        if port['tenant_id'] == '':
+            LOG.debug('empty string was passed for tenant_id: %s(port)', port)
+            port['tenant_id'] = context._network_context._network['tenant_id']
+
+    @classmethod
+    def filter_update_attributes(cls, port, context):
+        """Filter out port attributes for an update operation."""
+        cls._add_security_groups(port, context)
+        cls._fixup_allowed_ipaddress_pairs(port[addr_pair.ADDRESS_PAIRS])
+        cls._filter_unmapped_null(port, cls._UNMAPPED_KEYS)
+        odl_utils.try_del(port, ['network_id', 'id', 'status', 'tenant_id'])
+
+    @classmethod
+    def filter_create_attributes_with_plugin(cls, port, plugin, dbcontext):
+        network = plugin.get_network(dbcontext, port['network_id'])
+        # TODO(yamahata): port binding
+        # NOTE(review): an empty binding dict is passed here -- presumably
+        # acceptable for the create filter's needs; confirm none of the
+        # filters dereference binding details.
+        binding = {}
+        context = driver_context.PortContext(
+            plugin, dbcontext, port, network, binding, None)
+        cls.filter_create_attributes(port, context)
+
+
+class SecurityGroupFilter(ResourceFilterBase):
+    """Attribute filters for security groups: currently all noops."""
+
+    @staticmethod
+    def filter_create_attributes(sg, context):
+        """Filter out security-group attributes not required for a create."""
+        pass
+
+    @staticmethod
+    def filter_update_attributes(sg, context):
+        """Filter out security-group attributes for an update operation."""
+        pass
+
+    @staticmethod
+    def filter_create_attributes_with_plugin(sg, plugin, dbcontext):
+        pass
+
+
+class SecurityGroupRuleFilter(ResourceFilterBase):
+    """Attribute filters for security group rules: currently all noops."""
+
+    @staticmethod
+    def filter_create_attributes(sg_rule, context):
+        """Filter out sg-rule attributes not required for a create."""
+        pass
+
+    @staticmethod
+    def filter_update_attributes(sg_rule, context):
+        """Filter out sg-rule attributes for an update operation."""
+        pass
+
+    @staticmethod
+    def filter_create_attributes_with_plugin(sg_rule, plugin, dbcontext):
+        pass
+
+
+class OpenDaylightDriver(object):
+
+    """OpenDaylight Python Driver for Neutron.
+
+    This code is the backend implementation for the OpenDaylight ML2
+    MechanismDriver for OpenStack Neutron.
+    """
+    # Maps ODL collection names to their attribute-filter classes.
+    FILTER_MAP = {
+        odl_const.ODL_NETWORKS: NetworkFilter,
+        odl_const.ODL_SUBNETS: SubnetFilter,
+        odl_const.ODL_PORTS: PortFilter,
+        odl_const.ODL_SGS: SecurityGroupFilter,
+        odl_const.ODL_SG_RULES: SecurityGroupRuleFilter,
+    }
+    # Start out of sync so the first synchronize() triggers a full resync.
+    out_of_sync = True
+
+    def __init__(self):
+        LOG.debug("Initializing OpenDaylight ML2 driver")
+        self.client = odl_client.OpenDaylightRestClient.create_client()
+        self.sec_handler = odl_call.OdlSecurityGroupsHandler(self)
+        self.port_binding_controller = port_binding.PortBindingManager.create()
+        # TODO(rzang): Each port binding controller should have any necessary
+        # parameter passed in from configuration files.
+        # BTW, CAP_PORT_FILTER seems being obsoleted.
+        # Leave the code commented out for now for future reference.
+        #
+        # self.vif_details = {portbindings.CAP_PORT_FILTER: True}
+        # self._network_topology = network_topology.NetworkTopologyManager(
+        #     vif_details=self.vif_details)
+
+    def synchronize(self, operation, object_type, context):
+        """Synchronize ODL with Neutron following a configuration change."""
+        if self.out_of_sync:
+            self.sync_full(context._plugin)
+        else:
+            self.sync_single_resource(operation, object_type, context)
+
+    def sync_resources(self, plugin, dbcontext, collection_name):
+        """Sync objects from Neutron over to OpenDaylight.
+
+        This will handle syncing networks, subnets, and ports from Neutron to
+        OpenDaylight. It also filters out the requisite items which are not
+        valid for create API operations.
+        """
+        filter_cls = self.FILTER_MAP[collection_name]
+        to_be_synced = []
+        obj_getter = getattr(plugin, 'get_%s' % collection_name)
+        if collection_name == odl_const.ODL_SGS:
+            # default_sg=True includes each tenant's default security group.
+            resources = obj_getter(dbcontext, default_sg=True)
+        else:
+            resources = obj_getter(dbcontext)
+        for resource in resources:
+            try:
+                # Convert underscores to dashes in the URL for ODL
+                collection_name_url = collection_name.replace('_', '-')
+                urlpath = collection_name_url + '/' + resource['id']
+                self.client.sendjson('get', urlpath, None)
+            except requests.exceptions.HTTPError as e:
+                with excutils.save_and_reraise_exception() as ctx:
+                    # 404 means ODL doesn't know the resource yet: queue it
+                    # for creation instead of propagating the error.
+                    if e.response.status_code == requests.codes.not_found:
+                        filter_cls.filter_create_attributes_with_plugin(
+                            resource, plugin, dbcontext)
+                        to_be_synced.append(resource)
+                        ctx.reraise = False
+            else:
+                # TODO(yamahata): compare result with resource.
+                # If they don't match, update it below
+                pass
+
+        if to_be_synced:
+            # ODL expects the singular key for one object, plural for many.
+            key = collection_name[:-1] if len(to_be_synced) == 1 else (
+                collection_name)
+            # Convert underscores to dashes in the URL for ODL
+            collection_name_url = collection_name.replace('_', '-')
+            self.client.sendjson('post', collection_name_url,
+                                 {key: to_be_synced})
+
+        # https://bugs.launchpad.net/networking-odl/+bug/1371115
+        # TODO(yamahata): update resources with unsynced attributes
+        # TODO(yamahata): find dangling ODL resource that was deleted in
+        # neutron db
+
+    @utils.synchronized('odl-sync-full')
+    def sync_full(self, plugin):
+        """Resync the entire database to ODL.
+
+        Transition to the in-sync state on success.
+        Note: we only allow a single thread in here at a time.
+        """
+        if not self.out_of_sync:
+            return
+        dbcontext = neutron_context.get_admin_context()
+        # Order matters: networks before subnets before ports, so ODL
+        # sees parents before their dependents.
+        for collection_name in [odl_const.ODL_NETWORKS,
+                                odl_const.ODL_SUBNETS,
+                                odl_const.ODL_PORTS,
+                                odl_const.ODL_SGS,
+                                odl_const.ODL_SG_RULES]:
+            self.sync_resources(plugin, dbcontext, collection_name)
+        self.out_of_sync = False
+
+    def sync_single_resource(self, operation, object_type, context):
+        """Sync over a single resource from Neutron to OpenDaylight.
+
+        Handle syncing a single operation over to OpenDaylight, and correctly
+        filter attributes out which are not required for the requisite
+        operation (create or update) being handled.
+        """
+        # Convert underscores to dashes in the URL for ODL
+        object_type_url = object_type.replace('_', '-')
+        try:
+            # NOTE(review): if this lookup itself raises, the except block
+            # below references obj_id before assignment -- confirm
+            # context.current always carries 'id' here.
+            obj_id = context.current['id']
+            if operation == odl_const.ODL_DELETE:
+                self.out_of_sync |= not self.client.try_delete(
+                    object_type_url + '/' + obj_id)
+            else:
+                filter_cls = self.FILTER_MAP[object_type]
+                if operation == odl_const.ODL_CREATE:
+                    urlpath = object_type_url
+                    method = 'post'
+                    attr_filter = filter_cls.filter_create_attributes
+                elif operation == odl_const.ODL_UPDATE:
+                    urlpath = object_type_url + '/' + obj_id
+                    method = 'put'
+                    attr_filter = filter_cls.filter_update_attributes
+                # Deep-copy so the filters don't mutate the ML2 context.
+                resource = copy.deepcopy(context.current)
+                attr_filter(resource, context)
+                self.client.sendjson(method, urlpath,
+                                     {object_type_url[:-1]: resource})
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                LOG.error(_LE("Unable to perform %(operation)s on "
+                              "%(object_type)s %(object_id)s"),
+                          {'operation': operation,
+                           'object_type': object_type,
+                           'object_id': obj_id})
+                self.out_of_sync = True
+
+    def sync_from_callback(self, operation, res_type, res_id, resource_dict):
+        # Entry point used by the security-group callback handler; mirrors
+        # sync_single_resource but works from raw ids/dicts instead of an
+        # ML2 context.
+        object_type = res_type.plural.replace('_', '-')
+        try:
+            if operation == odl_const.ODL_DELETE:
+                self.out_of_sync |= not self.client.try_delete(
+                    object_type + '/' + res_id)
+            else:
+                if operation == odl_const.ODL_CREATE:
+                    urlpath = object_type
+                    method = 'post'
+                elif operation == odl_const.ODL_UPDATE:
+                    urlpath = object_type + '/' + res_id
+                    method = 'put'
+                self.client.sendjson(method, urlpath, resource_dict)
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                LOG.error(_LE("Unable to perform %(operation)s on "
+                              "%(object_type)s %(res_id)s "
+                              "%(resource_dict)s"),
+                          {'operation': operation,
+                           'object_type': object_type,
+                           'res_id': res_id,
+                           'resource_dict': resource_dict})
+                self.out_of_sync = True
+
+    def bind_port(self, port_context):
+        """Set binding for a valid segments
+
+        """
+        # Delegates to the configured port binding controller.
+        self.port_binding_controller.bind_port(port_context)
+
+
+class OpenDaylightMechanismDriver(driver_api.MechanismDriver):
+
+    """Mechanism Driver for OpenDaylight.
+
+    This driver was a port from the NCS MechanismDriver.  The API
+    exposed by ODL is slightly different from the API exposed by NCS,
+    but the general concepts are the same.
+    """
+
+    def initialize(self):
+        self.url = cfg.CONF.ml2_odl.url
+        self.timeout = cfg.CONF.ml2_odl.timeout
+        self.username = cfg.CONF.ml2_odl.username
+        self.password = cfg.CONF.ml2_odl.password
+        # Fail fast at startup if any mandatory [ml2_odl] option is unset.
+        required_opts = ('url', 'username', 'password')
+        for opt in required_opts:
+            if not getattr(self, opt):
+                raise cfg.RequiredOptError(opt, 'ml2_odl')
+
+        self.odl_drv = OpenDaylightDriver()
+
+    # Postcommit hooks are used to trigger synchronization.
+
+    def create_network_postcommit(self, context):
+        self.odl_drv.synchronize(odl_const.ODL_CREATE, odl_const.ODL_NETWORKS,
+                                 context)
+
+    def update_network_postcommit(self, context):
+        self.odl_drv.synchronize(odl_const.ODL_UPDATE, odl_const.ODL_NETWORKS,
+                                 context)
+
+    def delete_network_postcommit(self, context):
+        self.odl_drv.synchronize(odl_const.ODL_DELETE, odl_const.ODL_NETWORKS,
+                                 context)
+
+    def create_subnet_postcommit(self, context):
+        self.odl_drv.synchronize(odl_const.ODL_CREATE, odl_const.ODL_SUBNETS,
+                                 context)
+
+    def update_subnet_postcommit(self, context):
+        self.odl_drv.synchronize(odl_const.ODL_UPDATE, odl_const.ODL_SUBNETS,
+                                 context)
+
+    def delete_subnet_postcommit(self, context):
+        self.odl_drv.synchronize(odl_const.ODL_DELETE, odl_const.ODL_SUBNETS,
+                                 context)
+
+    def create_port_postcommit(self, context):
+        self.odl_drv.synchronize(odl_const.ODL_CREATE, odl_const.ODL_PORTS,
+                                 context)
+
+    def update_port_postcommit(self, context):
+        self.odl_drv.synchronize(odl_const.ODL_UPDATE, odl_const.ODL_PORTS,
+                                 context)
+
+    def delete_port_postcommit(self, context):
+        self.odl_drv.synchronize(odl_const.ODL_DELETE, odl_const.ODL_PORTS,
+                                 context)
+
+    def bind_port(self, context):
+        # Delegation point for ML2 port binding requests.
+        self.odl_drv.bind_port(context)
diff --git a/networking-odl/networking_odl/ml2/mech_driver_v2.py b/networking-odl/networking_odl/ml2/mech_driver_v2.py
new file mode 100644 (file)
index 0000000..dfc8df1
--- /dev/null
@@ -0,0 +1,146 @@
+# Copyright (c) 2013-2014 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from oslo_config import cfg
+from oslo_log import log as logging
+
+from neutron.db import api as db_api
+from neutron.plugins.ml2 import driver_api as api
+
+from networking_odl.common import callback
+from networking_odl.common import config as odl_conf
+from networking_odl.common import constants as odl_const
+from networking_odl.journal import cleanup
+from networking_odl.journal import full_sync
+from networking_odl.journal import journal
+from networking_odl.journal import maintenance
+from networking_odl.ml2 import port_binding
+
+LOG = logging.getLogger(__name__)
+
+
+class OpenDaylightMechanismDriver(api.MechanismDriver):
+    """OpenDaylight Python Driver for Neutron.
+
+    This code is the backend implementation for the OpenDaylight ML2
+    MechanismDriver for OpenStack Neutron.
+    """
+
+    def initialize(self):
+        LOG.debug("Initializing OpenDaylight ML2 driver")
+        cfg.CONF.register_opts(odl_conf.odl_opts, "ml2_odl")
+        self.sg_handler = callback.OdlSecurityGroupsHandler(self)
+        self.journal = journal.OpendaylightJournalThread()
+        self.port_binding_controller = port_binding.PortBindingManager.create()
+        self._start_maintenance_thread()
+
+    def _start_maintenance_thread(self):
+        # start the maintenance thread and register all the maintenance
+        # operations :
+        # (1) JournalCleanup - Delete completed rows from journal
+        # (2) CleanupProcessing - Mark orphaned processing rows to pending
+        # (3) Full sync - Re-sync when detecting an ODL "cold reboot"
+        cleanup_obj = cleanup.JournalCleanup()
+        self._maintenance_thread = maintenance.MaintenanceThread()
+        self._maintenance_thread.register_operation(
+            cleanup_obj.delete_completed_rows)
+        self._maintenance_thread.register_operation(
+            cleanup_obj.cleanup_processing_rows)
+        self._maintenance_thread.register_operation(full_sync.full_sync)
+        self._maintenance_thread.start()
+
+    @staticmethod
+    def _record_in_journal(context, object_type, operation, data=None):
+        if data is None:
+            data = context.current
+        journal.record(context._plugin_context.session, object_type,
+                       context.current['id'], operation, data)
+
+    def create_network_precommit(self, context):
+        OpenDaylightMechanismDriver._record_in_journal(
+            context, odl_const.ODL_NETWORK, odl_const.ODL_CREATE)
+
+    def create_subnet_precommit(self, context):
+        OpenDaylightMechanismDriver._record_in_journal(
+            context, odl_const.ODL_SUBNET, odl_const.ODL_CREATE)
+
+    def create_port_precommit(self, context):
+        OpenDaylightMechanismDriver._record_in_journal(
+            context, odl_const.ODL_PORT, odl_const.ODL_CREATE)
+
+    def update_network_precommit(self, context):
+        OpenDaylightMechanismDriver._record_in_journal(
+            context, odl_const.ODL_NETWORK, odl_const.ODL_UPDATE)
+
+    def update_subnet_precommit(self, context):
+        OpenDaylightMechanismDriver._record_in_journal(
+            context, odl_const.ODL_SUBNET, odl_const.ODL_UPDATE)
+
+    def update_port_precommit(self, context):
+        OpenDaylightMechanismDriver._record_in_journal(
+            context, odl_const.ODL_PORT, odl_const.ODL_UPDATE)
+
+    def delete_network_precommit(self, context):
+        OpenDaylightMechanismDriver._record_in_journal(
+            context, odl_const.ODL_NETWORK, odl_const.ODL_DELETE, data=[])
+
+    def delete_subnet_precommit(self, context):
+        # Use the journal row's data field to store parent object
+        # uuids. This information is required for validation checking
+        # when deleting parent objects.
+        new_context = [context.current['network_id']]
+        OpenDaylightMechanismDriver._record_in_journal(
+            context, odl_const.ODL_SUBNET, odl_const.ODL_DELETE,
+            data=new_context)
+
+    def delete_port_precommit(self, context):
+        # Use the journal row's data field to store parent object
+        # uuids. This information is required for validation checking
+        # when deleting parent objects.
+        new_context = [context.current['network_id']]
+        for subnet in context.current['fixed_ips']:
+            new_context.append(subnet['subnet_id'])
+        OpenDaylightMechanismDriver._record_in_journal(
+            context, odl_const.ODL_PORT, odl_const.ODL_DELETE,
+            data=new_context)
+
+    @journal.call_thread_on_end
+    def sync_from_callback(self, operation, res_type, res_id, resource_dict):
+        object_type = res_type.singular
+        object_uuid = (resource_dict[object_type]['id']
+                       if operation == 'create' else res_id)
+        if resource_dict is not None:
+            resource_dict = resource_dict[object_type]
+        journal.record(db_api.get_session(), object_type, object_uuid,
+                       operation, resource_dict)
+
+    def _postcommit(self, context):
+        self.journal.set_sync_event()
+
+    create_network_postcommit = _postcommit
+    create_subnet_postcommit = _postcommit
+    create_port_postcommit = _postcommit
+    update_network_postcommit = _postcommit
+    update_subnet_postcommit = _postcommit
+    update_port_postcommit = _postcommit
+    delete_network_postcommit = _postcommit
+    delete_subnet_postcommit = _postcommit
+    delete_port_postcommit = _postcommit
+
+    def bind_port(self, port_context):
+        """Set binding for a valid segments
+
+        """
+        return self.port_binding_controller.bind_port(port_context)
diff --git a/networking-odl/networking_odl/ml2/network_topology.py b/networking-odl/networking_odl/ml2/network_topology.py
new file mode 100644 (file)
index 0000000..b0bfae1
--- /dev/null
@@ -0,0 +1,313 @@
+# Copyright (c) 2015-2016 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import abc
+import importlib
+import logging
+
+import six
+from six.moves.urllib import parse
+
+from neutron.extensions import portbindings
+from oslo_log import log
+from oslo_serialization import jsonutils
+
+from networking_odl.common import cache
+from networking_odl.common import client
+from networking_odl.common import utils
+from networking_odl._i18n import _, _LI, _LW, _LE
+from networking_odl.ml2 import port_binding
+
+
+LOG = log.getLogger(__name__)
+
+
+class NetworkTopologyManager(port_binding.PortBindingController):
+    """Binds ports using network topology information fetched from ODL."""
+
+    # the first valid vif type will be chosen following the order
+    # on this list. This list can be modified to adapt to user preferences.
+    valid_vif_types = [
+        portbindings.VIF_TYPE_VHOST_USER, portbindings.VIF_TYPE_OVS]
+
+    # List of class names of registered implementations of interface
+    # NetworkTopologyParser
+    network_topology_parsers = [
+        'networking_odl.ml2.ovsdb_topology.OvsdbNetworkTopologyParser']
+
+    def __init__(self, vif_details=None, client=None):
+        # Details for binding port
+        self._vif_details = vif_details or {portbindings.CAP_PORT_FILTER: True}
+
+        # Rest client used for getting network topology from ODL
+        self._client = client or NetworkTopologyClient.create_client()
+
+        # Cache of NetworkTopologyElement keyed by host address; missing
+        # or expired entries are refilled by the fetch callback below.
+        self._elements_by_ip = cache.Cache(
+            self._fetch_and_parse_network_topology)
+
+        # Parsers used for processing network topology
+        self._parsers = list(self._create_parsers())
+
+    def bind_port(self, port_context):
+        """Bind the port to a valid segment of one topology element.
+
+        Fetches the topology elements describing the port's host, then
+        delegates the actual binding to the first element using the best
+        VIF type that is valid for every element found.
+        """
+        host_name = port_context.host
+        elements = list()
+        try:
+            # Extend an initially empty list so that elements yielded
+            # before a potential exception are not lost.
+            elements.extend(self._fetch_elements_by_host(host_name))
+        except Exception:
+            LOG.exception(
+                _LE('Error fetching elements for host %(host_name)r.'),
+                {'host_name': host_name}, exc_info=1)
+
+        if not elements:
+            # In case it wasn't able to find any network topology element
+            # for given host then it uses the legacy OVS one keeping the old
+            # behaviour
+            LOG.warning(
+                _LW('Using legacy OVS network topology element for port '
+                    'binding for host: %(host_name)r.'),
+                {'host_name': host_name})
+
+            # Imported here to avoid cyclic module dependencies
+            from networking_odl.ml2 import ovsdb_topology
+            elements = [ovsdb_topology.OvsdbNetworkTopologyElement()]
+
+        # TODO(Federico Ressi): in the case there are more candidate virtual
+        # switch instances for the same host it chooses one for binding the
+        # port. As there isn't any known way to perform this selection it
+        # picks a VIF type that is valid for all switches that have been
+        # found. This has to be improved.
+        for vif_type in self.valid_vif_types:
+            vif_type_is_valid_for_all = True
+            for element in elements:
+                if vif_type not in element.valid_vif_types:
+                    # it is invalid for at least one element: discard it
+                    vif_type_is_valid_for_all = False
+                    break
+
+            if vif_type_is_valid_for_all:
+                # This is the best VIF type valid for all elements
+                LOG.debug(
+                    "Found VIF type %(vif_type)r valid for all network "
+                    "topology elements for host %(host_name)r.",
+                    {'vif_type': vif_type, 'host_name': host_name})
+
+                for element in elements:
+                    # It assumes that any element could be good for the host.
+                    # In most of the cases exactly one element is expected
+                    # for every compute host.
+                    try:
+                        return element.bind_port(
+                            port_context, vif_type, self._vif_details)
+
+                    except Exception:
+                        LOG.exception(
+                            _LE('Network topology element has failed binding '
+                                'port:\n%(element)s'),
+                            {'element': element.to_json()})
+
+        LOG.error(
+            _LE('Unable to bind port element for given host and valid VIF '
+                'types:\n'
+                '\thostname: %(host_name)s\n'
+                '\tvalid VIF types: %(valid_vif_types)s'),
+            {'host_name': host_name,
+             'valid_vif_types': ', '.join(self.valid_vif_types)})
+        # TODO(Federico Ressi): should an exception be raised here?
+
+    def _create_parsers(self):
+        # Yields one parser instance per registered parser class name.
+        # Failures are logged and skipped so that one broken parser
+        # cannot disable the whole manager.
+        for parser_name in self.network_topology_parsers:
+            try:
+                yield NetworkTopologyParser.create_parser(parser_name)
+
+            except Exception:
+                LOG.exception(
+                    _LE('Error initializing topology parser: %(parser_name)r'),
+                    {'parser_name': parser_name})
+
+    def _fetch_elements_by_host(self, host_name, cache_timeout=60.0):
+        '''Yields all network topology elements referring to given host name
+
+        '''
+
+        host_addresses = [host_name]
+        try:
+            # It uses both compute host name and known IP addresses to
+            # recognize topology elements valid for given compute host
+            ip_addresses = utils.get_addresses_by_name(host_name)
+        except Exception:
+            ip_addresses = []
+            LOG.exception(
+                _LE('Unable to resolve IP addresses for host %(host_name)r'),
+                {'host_name': host_name})
+        else:
+            host_addresses.extend(ip_addresses)
+
+        yield_elements = set()
+        try:
+            for __, element in self._elements_by_ip.fetch_all(
+                    host_addresses, cache_timeout):
+                # yields every element only once
+                if element not in yield_elements:
+                    yield_elements.add(element)
+                    yield element
+
+        except cache.CacheFetchError as error:
+            # This error is expected on most of the cases because typically not
+            # all host_addresses maps to a network topology element.
+            if yield_elements:
+                # As we need only one element for every host we ignore the
+                # case in which other host addresses didn't map to any host
+                LOG.debug(
+                    'Host addresses not found in networking topology: %s',
+                    ', '.join(error.missing_keys))
+            else:
+                LOG.exception(
+                    _LE('No such network topology elements for given host '
+                        '%(host_name)r and given IPs: %(ip_addresses)s.'),
+                    {'host_name': host_name,
+                     'ip_addresses': ", ".join(ip_addresses)})
+                error.reraise_cause()
+
+    def _fetch_and_parse_network_topology(self, addresses):
+        # The cache calls this method to fetch new elements when at least one
+        # of the addresses is not in the cache or it has expired.
+
+        # pylint: disable=unused-argument
+        LOG.info(_LI('Fetch network topology from ODL.'))
+        response = self._client.get()
+        response.raise_for_status()
+
+        network_topology = response.json()
+        if LOG.isEnabledFor(logging.DEBUG):
+            topology_str = jsonutils.dumps(
+                network_topology, sort_keys=True, indent=4,
+                separators=(',', ': '))
+            LOG.debug("Got network topology:\n%s", topology_str)
+
+        at_least_one_element_for_asked_addresses = False
+        for parser in self._parsers:
+            try:
+                for element in parser.parse_network_topology(network_topology):
+                    if not isinstance(element, NetworkTopologyElement):
+                        raise TypeError(_(
+                            "Yield element doesn't implement interface "
+                            "'NetworkTopologyElement': {!r}").format(element))
+                    # the same element can be known by more host addresses
+                    for host_address in element.host_addresses:
+                        if host_address in addresses:
+                            at_least_one_element_for_asked_addresses = True
+                        yield host_address, element
+            except Exception:
+                LOG.exception(
+                    _LE("Parser %(parser)r failed to parse network topology."),
+                    {'parser': parser})
+
+        if not at_least_one_element_for_asked_addresses:
+            # this will mark entries for given addresses as failed to allow
+            # calling this method again as soon it is requested and avoid
+            # waiting for cache expiration
+            raise ValueError(
+                _('No such topology element for given host addresses: {}')
+                .format(', '.join(addresses)))
+
+
+@six.add_metaclass(abc.ABCMeta)
+class NetworkTopologyParser(object):
+
+    @classmethod
+    def create_parser(cls, parser_class_name):
+        '''Creates a 'NetworkTopologyParser' of given class name.
+
+        '''
+        module_name, class_name = parser_class_name.rsplit('.', 1)
+        module = importlib.import_module(module_name)
+        clss = getattr(module, class_name)
+        if not issubclass(clss, cls):
+            raise TypeError(_(
+                "Class {class_name!r} of module {module_name!r} doesn't "
+                "implement 'NetworkTopologyParser' interface.").format(
+                    class_name=class_name, module_name=module_name))
+        return clss()
+
+    @abc.abstractmethod
+    def parse_network_topology(self, network_topology):
+        '''Parses OpenDaylight network topology
+
+        Yields all network topology elements implementing
+        'NetworkTopologyElement' interface found in given network topology.
+        '''
+
+
+@six.add_metaclass(abc.ABCMeta)
+class NetworkTopologyElement(object):
+
+    @abc.abstractproperty
+    def host_addresses(self):
+        '''List of known host addresses of a single compute host
+
+        Either host names and ip addresses are valid.
+        Neutron host controller must know at least one of these compute host
+        names or ip addresses to find this element.
+        '''
+
+    @abc.abstractproperty
+    def valid_vif_types(self):
+        '''Returns a tuple listing VIF types supported by the compute node
+
+        '''
+
+    @abc.abstractmethod
+    def bind_port(self, port_context, vif_type, vif_details):
+        '''Bind port context using given vif type and vif details
+
+        This method is expected to search for a valid segment and then
+        call port_context.set_binding()
+        '''
+
+    def to_dict(self):
+        cls = type(self)
+        return {
+            'class': cls.__module__ + '.' + cls.__name__,
+            'host_addresses': list(self.host_addresses),
+            'valid_vif_types': list(self.valid_vif_types)}
+
+    def to_json(self):
+        return jsonutils.dumps(
+            self.to_dict(), sort_keys=True, indent=4, separators=(',', ': '))
+
+
+class NetworkTopologyClient(client.OpenDaylightRestClient):
+
+    _GET_ODL_NETWORK_TOPOLOGY_URL =\
+        'restconf/operational/network-topology:network-topology'
+
+    def __init__(self, url, username, password, timeout):
+        if url:
+            url = parse.urlparse(url)
+            port = ''
+            if url.port:
+                port = ':' + str(url.port)
+            topology_url = '{}://{}{}/{}'.format(
+                url.scheme, url.hostname, port,
+                self._GET_ODL_NETWORK_TOPOLOGY_URL)
+        else:
+            topology_url = None
+        super(NetworkTopologyClient, self).__init__(
+            topology_url, username, password, timeout)
diff --git a/networking-odl/networking_odl/ml2/ovsdb_topology.py b/networking-odl/networking_odl/ml2/ovsdb_topology.py
new file mode 100644 (file)
index 0000000..f2c8ad8
--- /dev/null
@@ -0,0 +1,218 @@
+# Copyright (c) 2016 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+
+import collections
+import os
+
+from oslo_log import log
+import six
+from six.moves.urllib import parse
+
+from neutron.extensions import portbindings
+from neutron.plugins.common import constants
+from neutron.plugins.ml2 import driver_api
+from neutron_lib import constants as n_const
+
+from networking_odl._i18n import _
+from networking_odl.ml2 import network_topology
+
+
+LOG = log.getLogger(__name__)
+
+
+class OvsdbNetworkTopologyParser(network_topology.NetworkTopologyParser):
+    """Extracts OVSDB compute-host elements from an ODL topology document."""
+
+    def new_element(self, uuid):
+        # Factory hook; kept separate so tests/subclasses can override it.
+        return OvsdbNetworkTopologyElement(uuid=uuid)
+
+    def parse_network_topology(self, network_topologies):
+        """Yield one OvsdbNetworkTopologyElement per distinct node uuid."""
+        elements_by_uuid = collections.OrderedDict()
+        for topology in network_topologies[
+                'network-topology']['topology']:
+            if topology['topology-id'].startswith('ovsdb:'):
+                for node in topology['node']:
+                    # expected url format: ovsdb://uuid/<uuid>[/<path>]]
+                    node_url = parse.urlparse(node['node-id'])
+                    if node_url.scheme == 'ovsdb'\
+                            and node_url.netloc == 'uuid':
+                        # split_res = ['', '<uuid>', '<path>']
+                        split_res = node_url.path.split('/', 2)
+
+                        # uuid is used to identify nodes referring to the same
+                        # element
+                        uuid = split_res[1]
+                        element = elements_by_uuid.get(uuid)
+                        if element is None:
+                            elements_by_uuid[uuid] = element =\
+                                self.new_element(uuid)
+
+                        # inner_path can be [] or [<path>]
+                        inner_path = split_res[2:]
+                        self._update_element_from_json_ovsdb_topology_node(
+                            node, element, uuid, *inner_path)
+
+        # There can be more OVS instances connected beside the same IP address
+        # Cache will yield more instances for the same key
+        for __, element in six.iteritems(elements_by_uuid):
+            yield element
+
+    def _update_element_from_json_ovsdb_topology_node(
+            self, node, element, uuid, path=None):
+        # Merge data from one topology node into the element identified by
+        # ``uuid``; ``path`` distinguishes the root node from bridge nodes.
+
+        if not path:
+            # global element section (root path)
+
+            # fetch remote IP address
+            element.remote_ip = node["ovsdb:connection-info"]["remote-ip"]
+
+            for vif_type_entry in node.get(
+                    "ovsdb:interface-type-entry", []):
+                # Is this a good place to add others OVS VIF types?
+                if vif_type_entry.get("interface-type") ==\
+                        "ovsdb:interface-type-dpdkvhostuser":
+                    element.support_vhost_user = True
+                    break
+            else:
+                # for/else: no dpdkvhostuser entry was found for this node
+                LOG.debug(
+                    'Interface type not found in network topology node %r.',
+                    uuid)
+
+            LOG.debug(
+                'Topology element updated:\n'
+                ' - uuid: %(uuid)r\n'
+                ' - remote_ip: %(remote_ip)r\n'
+                ' - support_vhost_user: %(support_vhost_user)r',
+                {'uuid': uuid,
+                 'remote_ip': element.remote_ip,
+                 'support_vhost_user': element.support_vhost_user})
+        elif path == 'bridge/br-int':
+            datapath_type = node.get("ovsdb:datapath-type")
+            if datapath_type == "ovsdb:datapath-type-netdev":
+                element.has_datapath_type_netdev = True
+                LOG.debug(
+                    'Topology element updated:\n'
+                    ' - uuid: %(uuid)r\n'
+                    ' - has_datapath_type_netdev: %('
+                    'has_datapath_type_netdev)r',
+                    {'uuid': uuid,
+                     'has_datapath_type_netdev':
+                     element.has_datapath_type_netdev})
+
+
+class OvsdbNetworkTopologyElement(network_topology.NetworkTopologyElement):
+    """Topology element describing one OVS instance on a compute host."""
+
+    # Class-level defaults; __setattr__ below only accepts names that are
+    # already declared here, so typos cannot create new attributes.
+    uuid = None
+    remote_ip = None  # it can be None or a string
+    has_datapath_type_netdev = False  # it can be False or True
+    support_vhost_user = False  # it can be False or True
+
+    # location for vhostuser sockets
+    vhostuser_socket_dir = '/var/run/openvswitch'
+
+    # prefix for ovs port
+    port_prefix = 'vhu'
+
+    def __init__(self, **kwargs):
+        # Any declared attribute can be supplied as a keyword argument.
+        for name, value in six.iteritems(kwargs):
+            setattr(self, name, value)
+
+    @property
+    def host_addresses(self):
+        # For now it supports only the remote IP found in connection info
+        return self.remote_ip,
+
+    @property
+    def valid_vif_types(self):
+        # vhost-user is only offered when the switch runs a userspace
+        # (netdev) datapath and exposes dpdkvhostuser interfaces.
+        if self.has_datapath_type_netdev and self.support_vhost_user:
+            return [
+                portbindings.VIF_TYPE_VHOST_USER,
+                portbindings.VIF_TYPE_OVS]
+        else:
+            return [portbindings.VIF_TYPE_OVS]
+
+    def bind_port(self, port_context, vif_type, vif_details):
+        """Bind the first valid segment of the port using ``vif_type``.
+
+        :raises ValueError: when no segment in the context is supported.
+        """
+        port_context_id = port_context.current['id']
+        network_context_id = port_context.network.current['id']
+
+        # Bind port to the first valid segment
+        for segment in port_context.segments_to_bind:
+            if self._is_valid_segment(segment):
+                # Guess best VIF details for given host and VIF type
+                vif_details = self._get_vif_details(
+                    vif_details=vif_details, port_context_id=port_context_id,
+                    vif_type=vif_type)
+                LOG.debug(
+                    'Bind port with valid segment:\n'
+                    '\tport: %(port)r\n'
+                    '\tnetwork: %(network)r\n'
+                    '\tsegment: %(segment)r\n'
+                    '\tVIF type: %(vif_type)r\n'
+                    '\tVIF details: %(vif_details)r',
+                    {'port': port_context_id,
+                     'network': network_context_id,
+                     'segment': segment, 'vif_type': vif_type,
+                     'vif_details': vif_details})
+                port_context.set_binding(
+                    segment[driver_api.ID], vif_type, vif_details,
+                    status=n_const.PORT_STATUS_ACTIVE)
+                return
+
+        raise ValueError(
+            _('Unable to find any valid segment in given context.'))
+
+    def to_dict(self):
+        # Extend the base representation with OVSDB-specific details.
+        data = super(OvsdbNetworkTopologyElement, self).to_dict()
+        data.update(
+            {'uuid': self.uuid,
+             'has_datapath_type_netdev': self.has_datapath_type_netdev,
+             'support_vhost_user': self.support_vhost_user,
+             'valid_vif_types': self.valid_vif_types})
+        if portbindings.VIF_TYPE_VHOST_USER in self.valid_vif_types:
+            data.update({'port_prefix': self.port_prefix,
+                         'vhostuser_socket_dir': self.vhostuser_socket_dir})
+        return data
+
+    def _is_valid_segment(self, segment):
+        """Verify a segment is valid for the OpenDaylight MechanismDriver.
+
+        Verify the requested segment is supported by ODL and return True or
+        False to indicate this to callers.
+        """
+
+        network_type = segment[driver_api.NETWORK_TYPE]
+        return network_type in [constants.TYPE_LOCAL, constants.TYPE_GRE,
+                                constants.TYPE_VXLAN, constants.TYPE_VLAN]
+
+    def _get_vif_details(self, vif_details, port_context_id, vif_type):
+        # Return a copy of vif_details; for vhost-user bindings add the
+        # socket mode/plug/path entries.
+        vif_details = dict(vif_details)
+        if vif_type == portbindings.VIF_TYPE_VHOST_USER:
+            # Socket name is '<prefix><port-id>' truncated to 14 chars —
+            # NOTE(review): presumably to fit interface-name length limits;
+            # confirm against the agent side.
+            socket_path = os.path.join(
+                self.vhostuser_socket_dir,
+                (self.port_prefix + port_context_id)[:14])
+
+            vif_details.update({
+                portbindings.VHOST_USER_MODE:
+                portbindings.VHOST_USER_MODE_CLIENT,
+                portbindings.VHOST_USER_OVS_PLUG: True,
+                portbindings.VHOST_USER_SOCKET: socket_path
+            })
+        return vif_details
+
+    def __setattr__(self, name, value):
+        # getattr on the class raises AttributeError for undeclared names,
+        # rejecting assignment to attributes the class doesn't define
+        getattr(type(self), name)
+        super(OvsdbNetworkTopologyElement, self).__setattr__(name, value)
diff --git a/networking-odl/networking_odl/ml2/port_binding.py b/networking-odl/networking_odl/ml2/port_binding.py
new file mode 100644 (file)
index 0000000..d34dc01
--- /dev/null
@@ -0,0 +1,121 @@
+# Copyright (c) 2016 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import abc
+import six
+import stevedore
+
+from oslo_config import cfg
+from oslo_log import log
+from oslo_utils import excutils
+
+from networking_odl._i18n import _LI, _LE
+
+
+LOG = log.getLogger(__name__)
+
+
+@six.add_metaclass(abc.ABCMeta)
+class PortBindingController(object):
+    """Interface implemented by pluggable ODL port-binding strategies."""
+
+    @abc.abstractmethod
+    def bind_port(self, port_context):
+        """Attempt to bind a port.
+
+        :param context: PortContext instance describing the port
+
+        This method is called outside any transaction to attempt to
+        establish a port binding using calling mechanism driver. Bindings
+        may be created at each of multiple levels of a hierarchical
+        network, and are established from the top level downward. At
+        each level, the mechanism driver determines whether it can
+        bind to any of the network segments in the
+        context.segments_to_bind property, based on the value of the
+        context.host property, any relevant port or network
+        attributes, and its own knowledge of the network topology. At
+        the top level, context.segments_to_bind contains the static
+        segments of the port's network. At each lower level of
+        binding, it contains static or dynamic segments supplied by
+        the driver that bound at the level above. If the driver is
+        able to complete the binding of the port to any segment in
+        context.segments_to_bind, it must call context.set_binding
+        with the binding details. If it can partially bind the port,
+        it must call context.continue_binding with the network
+        segments to be used to bind at the next lower level.
+        If the binding results are committed after bind_port returns,
+        they will be seen by all mechanism drivers as
+        update_port_precommit and update_port_postcommit calls. But if
+        some other thread or process concurrently binds or updates the
+        port, these binding results will not be committed, and
+        update_port_precommit and update_port_postcommit will not be
+        called on the mechanism drivers with these results. Because
+        binding results can be discarded rather than committed,
+        drivers should avoid making persistent state changes in
+        bind_port, or else must ensure that such state changes are
+        eventually cleaned up.
+        Implementing this method explicitly declares the mechanism
+        driver as having the intention to bind ports. This is inspected
+        by the QoS service to identify the available QoS rules you
+        can use with ports.
+        """
+
+
+class PortBindingManager(PortBindingController):
+    # At this point, there is no requirement to have multiple
+    # port binding controllers at the same time.
+    # Stay with single controller until there is a real requirement
+
+    def __init__(self, name, controller):
+        self.name = name
+        self.controller = controller
+
+    @classmethod
+    def create(
+            cls, namespace='networking_odl.ml2.port_binding_controllers',
+            name=cfg.CONF.ml2_odl.port_binding_controller):
+
+        ext_mgr = stevedore.named.NamedExtensionManager(
+            namespace, [name], invoke_on_load=True)
+
+        assert len(ext_mgr.extensions) == 1, (
+            "Wrong port binding controller is specified")
+
+        extension = ext_mgr.extensions[0]
+        if isinstance(extension.obj, PortBindingController):
+            return cls(extension.name, extension.obj)
+        else:
+            raise ValueError(
+                ("Port binding controller '%(name)s (%(controller)r)' "
+                 "doesn't implement PortBindingController interface."),
+                {'name': extension.name, 'controller': extension.obj})
+
+    def bind_port(self, port_context):
+        controller_details = {'name': self.name, 'controller': self.controller}
+        try:
+            self.controller.bind_port(port_context)
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                LOG.exception(
+                    _LE("Controller '%(name)s (%(controller)r)' had an error "
+                        "when binding port."), controller_details)
+        else:
+            if port_context._new_bound_segment:
+                LOG.info(
+                    _LI("Controller '%(name)s (%(controller)r)' has bound "
+                        "port."), controller_details)
+            else:
+                LOG.debug(
+                    "Controller %(name)s (%(controller)r) hasn't bound "
+                    "port.", controller_details)
diff --git a/networking-odl/networking_odl/ml2/pseudo_agentdb_binding.py b/networking-odl/networking_odl/ml2/pseudo_agentdb_binding.py
new file mode 100644 (file)
index 0000000..d24bd55
--- /dev/null
@@ -0,0 +1,263 @@
+# Copyright (c) 2016 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import logging
+from neutron_lib import constants as nl_const
+from requests import exceptions
+import six.moves.urllib.parse as urlparse
+from string import Template
+
+from oslo_config import cfg
+from oslo_log import log
+from oslo_serialization import jsonutils
+
+from neutron import context
+from neutron.extensions import portbindings
+from neutron import manager
+from neutron.plugins.ml2 import driver_api
+
+from networking_odl._i18n import _LE, _LI, _LW
+from networking_odl.common import client as odl_client
+from networking_odl.journal import maintenance as mt
+from networking_odl.ml2 import port_binding
+
+cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config')
+LOG = log.getLogger(__name__)
+
+
+class PseudoAgentDBBindingController(port_binding.PortBindingController):
+    """Switch agnostic Port binding controller for OpenDayLight."""
+
+    AGENTDB_BINARY = 'neutron-odlagent-portbinding'
+    L2_TYPE = "ODL L2"
+
+    # TODO(mzmalick): binary, topic and resource_versions to be provided
+    # by ODL, Pending ODL NB patches.
+    agentdb_row = {
+        'binary': AGENTDB_BINARY,
+        'host': '',
+        'topic': nl_const.L2_AGENT_TOPIC,
+        'configurations': {},
+        'resource_versions': '',
+        'agent_type': L2_TYPE,
+        'start_flag': True}
+    # We are not running host agents, so above start_flag is redundant
+
+    def __init__(self, hostconf_uri=None, db_plugin=None):
+        """Initialization."""
+        LOG.debug("Initializing ODL Port Binding Controller")
+
+        if not hostconf_uri:
+            # extract host/port from ODL URL and append hostconf_uri path
+            hostconf_uri = self._make_hostconf_uri(
+                cfg.CONF.ml2_odl.url, cfg.CONF.ml2_odl.odl_hostconf_uri)
+
+        LOG.debug("ODLPORTBINDING hostconfigs URI: %s", hostconf_uri)
+
+        # TODO(mzmalick): disable port-binding for ODL lightweight testing
+        self.odl_rest_client = odl_client.OpenDaylightRestClient.create_client(
+            url=hostconf_uri)
+
+        # Neutron DB plugin instance
+        self.agents_db = db_plugin
+
+        # Start polling ODL restconf using maintenance thread.
+        # default: 30s (should be <=  agent keep-alive poll interval)
+        self._start_maintenance_thread(cfg.CONF.ml2_odl.restconf_poll_interval)
+
+    def _make_hostconf_uri(self, odl_url=None, path=''):
+        """Make ODL hostconfigs URI with host/port extracted from odl_url."""
+        # NOTE(yamahata): for unit test.
+        odl_url = odl_url or 'http://localhost:8080/'
+
+        # extract ODL_IP and ODL_PORT from ODL_ENDPOINT and append path
+        # urlsplit and urlunparse don't throw exceptions
+        purl = urlparse.urlsplit(odl_url)
+        return urlparse.urlunparse((purl.scheme, purl.netloc,
+                                    path, '', '', ''))
+    #
+    # TODO(mzmalick):
+    # 1. implement websockets for ODL hostconfig events
+    #
+
+    def _start_maintenance_thread(self, poll_interval):
+        self._mainth = mt.MaintenanceThread()
+        self._mainth.maintenance_interval = poll_interval
+        self._mainth.register_operation(self._get_and_update_hostconfigs)
+        self._mainth.start()
+
+    def _rest_get_hostconfigs(self):
+        try:
+            response = self.odl_rest_client.get()
+            response.raise_for_status()
+            hostconfigs = response.json()['hostconfigs']['hostconfig']
+        except exceptions.ConnectionError:
+            LOG.error(_LE("Cannot connect to the Opendaylight Controller"),
+                      exc_info=True)
+            return None
+        except KeyError:
+            LOG.error(_LE("got invalid hostconfigs"),
+                      exc_info=True)
+            return None
+        except Exception:
+            LOG.warning(_LW("REST/GET odl hostconfig failed, "),
+                        exc_info=True)
+            return None
+        else:
+            if LOG.isEnabledFor(logging.DEBUG):
+                # dump the parsed hostconfigs, not the raw Response object
+                _hconfig_str = jsonutils.dumps(
+                    hostconfigs, sort_keys=True, indent=4)
+                LOG.debug("ODLPORTBINDING hostconfigs:\n%s", _hconfig_str)
+
+        return hostconfigs
+
+    def _get_and_update_hostconfigs(self, session=None):
+        LOG.info(_LI("REST/GET hostconfigs from ODL"))
+
+        hostconfigs = self._rest_get_hostconfigs()
+
+        if not hostconfigs:
+            LOG.warning(_LW("ODL hostconfigs REST/GET failed, "
+                            "will retry on next poll"))
+            return  # retry on next poll
+
+        self._update_agents_db(hostconfigs=hostconfigs)
+
+    def _get_neutron_db_plugin(self):
+        if (not self.agents_db) and manager.NeutronManager.has_instance():
+            self.agents_db = manager.NeutronManager.get_plugin()
+        return self.agents_db
+
+    def _update_agents_db(self, hostconfigs):
+        LOG.debug("ODLPORTBINDING Updating agents DB with ODL hostconfigs")
+
+        agents_db = self._get_neutron_db_plugin()
+
+        if not agents_db:  # if ML2 is still initializing
+            LOG.warning(_LW("ML2 still initializing, Will retry agentdb"
+                            " update on next poll"))
+            return  # Retry on next poll
+
+        for host_config in hostconfigs:
+            try:
+                self.agentdb_row['host'] = host_config['host-id']
+                self.agentdb_row['agent_type'] = host_config['host-type']
+                self.agentdb_row['configurations'] = host_config['config']
+
+                agents_db.create_or_update_agent(
+                    context.get_admin_context(), self.agentdb_row)
+            except Exception:
+                LOG.exception(_LE("Unable to update agentdb."))
+                continue  # try next hostconfig
+
+    def _substitute_hconfig_tmpl(self, port_context, hconfig):
+        # TODO(mzmalick): Explore options for inlines string splicing of
+        #                 port-id to 14 bytes as required by vhostuser types
+        subs_ids = {
+            # $IDENTIFER string substitution in hostconfigs JSON string
+            'PORT_ID': port_context.current['id'][:14]
+        }
+
+        # Substitute identifiers and Convert JSON string to dict
+        hconfig_conf_json = Template(hconfig['configurations'])
+        substituted_str = hconfig_conf_json.safe_substitute(subs_ids)
+        hconfig['configurations'] = jsonutils.loads(substituted_str)
+
+        return hconfig
+
+    def bind_port(self, port_context):
+        """Bind port using ODL host configuration."""
+        # Get all ODL hostconfigs for this host and type
+        agentdb = port_context.host_agents(self.L2_TYPE)
+
+        if not agentdb:
+            LOG.warning(_LW("No valid hostconfigs in agentsdb for host %s"),
+                        port_context.host)
+            return
+
+        for raw_hconfig in agentdb:
+            # do any $identifier substitution
+            hconfig = self._substitute_hconfig_tmpl(port_context, raw_hconfig)
+
+            # Found ODL hostconfig for this host in agentdb
+            LOG.debug("ODLPORTBINDING bind port with hostconfig: %s", hconfig)
+
+            if self._hconfig_bind_port(port_context, hconfig):
+                break  # Port binding succeeded!
+            else:  # Port binding failed!
+                LOG.warning(_LW("Failed to bind Port %(pid)s for host "
+                                "%(host)s on network %(network)s."), {
+                    'pid': port_context.current['id'],
+                    'host': port_context.host,
+                    'network': port_context.network.current['id']})
+        else:  # No hostconfig found for host in agentdb.
+            LOG.warning(_LW("No ODL hostconfigs for host %s found in agentdb"),
+                        port_context.host)
+
+    def _hconfig_bind_port(self, port_context, hconfig):
+        """Bind port after validating odl host configuration."""
+        valid_segment = None
+
+        for segment in port_context.segments_to_bind:
+            if self._is_valid_segment(segment, hconfig['configurations']):
+                valid_segment = segment
+                break
+        else:
+            LOG.debug("No valid segments found!")
+            return False
+
+        confs = hconfig['configurations']['supported_vnic_types']
+
+        # nova provides vnic_type in port_context to neutron.
+        # neutron provides supported vif_type for binding based on vnic_type
+        # in this case ODL hostconfigs has the vif_type to bind for vnic_type
+        vnic_type = port_context.current.get(portbindings.VNIC_TYPE)
+
+        if vnic_type != portbindings.VNIC_NORMAL:
+            LOG.error(_LE("Binding failed: unsupported VNIC %s"), vnic_type)
+            return False
+
+        vif_details = {}
+        for conf in confs:
+            if conf["vnic_type"] == vnic_type:
+                vif_type = conf.get('vif_type', portbindings.VIF_TYPE_OVS)
+                vif_details = conf.get('vif_details', {})
+                LOG.debug("Binding vnic:'%s' to vif:'%s'", vnic_type, vif_type)
+                break
+        else:
+            vif_type = portbindings.VIF_TYPE_OVS  # default: OVS
+            LOG.warning(_LW("No supported vif type found for host %s!, "
+                            "defaulting to OVS"), port_context.host)
+
+        if not vif_details:  # empty vif_details could be trouble, warn.
+            LOG.warning(_LW("hostconfig:vif_details was empty!"))
+
+        LOG.debug("Bind port %(port)s on network %(network)s with valid "
+                  "segment %(segment)s and VIF type %(vif_type)r "
+                  "VIF details %(vif_details)r.",
+                  {'port': port_context.current['id'],
+                   'network': port_context.network.current['id'],
+                   'segment': valid_segment, 'vif_type': vif_type,
+                   'vif_details': vif_details})
+
+        port_context.set_binding(valid_segment[driver_api.ID], vif_type,
+                                 vif_details,
+                                 status=nl_const.PORT_STATUS_ACTIVE)
+        return True
+
+    def _is_valid_segment(self, segment, conf):
+        """Verify a segment is supported by ODL."""
+        network_type = segment[driver_api.NETWORK_TYPE]
+        return network_type in conf['allowed_network_types']
diff --git a/networking-odl/networking_odl/tests/__init__.py b/networking-odl/networking_odl/tests/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/networking-odl/networking_odl/tests/base.py b/networking-odl/networking_odl/tests/base.py
new file mode 100644 (file)
index 0000000..d28be71
--- /dev/null
@@ -0,0 +1,28 @@
+# Copyright (c) 2015-2016 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+
+import mock
+
+from neutron.tests import base
+
+
+class DietTestCase(base.DietTestCase):
+
+    def patch(self, target, name, *args, **kwargs):
+        context = mock.patch.object(target, name, *args, **kwargs)
+        patch = context.start()
+        self.addCleanup(context.stop)
+        return patch
diff --git a/networking-odl/networking_odl/tests/unit/__init__.py b/networking-odl/networking_odl/tests/unit/__init__.py
new file mode 100644 (file)
index 0000000..faed26a
--- /dev/null
@@ -0,0 +1,19 @@
+# Copyright 2011 OpenStack Foundation.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from oslo_config import cfg
+
+
+cfg.CONF.use_stderr = False
diff --git a/networking-odl/networking_odl/tests/unit/common/__init__.py b/networking-odl/networking_odl/tests/unit/common/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/networking-odl/networking_odl/tests/unit/common/test_cache.py b/networking-odl/networking_odl/tests/unit/common/test_cache.py
new file mode 100644 (file)
index 0000000..b702455
--- /dev/null
@@ -0,0 +1,242 @@
+# Copyright (c) 2015 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import mock
+
+from neutron.tests import base
+
+from networking_odl.common import cache
+
+
+class TestCache(base.DietTestCase):
+
+    def test_init_with_callable(self):
+
+        def given_fetch_method():
+            pass
+
+        cache.Cache(given_fetch_method)
+
+    def test_init_without_callable(self):
+        self.assertRaises(TypeError, lambda: cache.Cache(object()))
+
+    def test_fecth_once(self):
+        value = 'value'
+
+        given_fetch_method = mock.Mock(return_value=iter([('key', value)]))
+        given_cache = cache.Cache(given_fetch_method)
+
+        # When value with key is fetched
+        result = given_cache.fetch('key', 60.0)
+
+        # Result is returned
+        self.assertIs(value, result)
+
+        # Then fetch method is called once
+        given_fetch_method.assert_called_once_with(('key',))
+
+    def test_fecth_with_no_result(self):
+        given_fetch_method = mock.Mock(return_value=iter([]))
+        given_cache = cache.Cache(given_fetch_method)
+
+        # When value with key is fetched
+        try:
+            given_cache.fetch('key', 60.0)
+        except cache.CacheFetchError as error:
+            given_fetch_method.assert_called_once_with(('key',))
+            self.assertRaises(KeyError, error.reraise_cause)
+        else:
+            self.fail('Expecting CacheFetchError to be raised.')
+
+    @mock.patch.object(cache, 'LOG')
+    def test_fecth_with_failure(self, logger):
+        # pylint: disable=unused-argument
+
+        given_error = RuntimeError("It doesn't work like this!")
+
+        def failing_function(keys):
+            raise given_error
+
+        given_fetch_method = mock.Mock(side_effect=failing_function)
+        given_cache = cache.Cache(given_fetch_method)
+
+        # When value with key is fetched
+        try:
+            given_cache.fetch('key', 60.0)
+        except cache.CacheFetchError as error:
+            given_fetch_method.assert_called_once_with(('key',))
+            self.assertRaises(RuntimeError, error.reraise_cause)
+        else:
+            self.fail('Expecting CacheFetchError to be raised.')
+        logger.warning.assert_called_once_with(
+            'Error fetching values for keys: %r', "'key'",
+            exc_info=(type(given_error), given_error, mock.ANY))
+
+    def test_fecth_again_after_clear(self):
+        value1 = 'value1'
+        value2 = 'value2'
+        given_fetch_method = mock.Mock(
+            side_effect=[iter([('key', value1)]),
+                         iter([('key', value2)])])
+        given_cache = cache.Cache(given_fetch_method)
+
+        # When value with key is fetched
+        result1 = given_cache.fetch('key', 60.0)
+
+        # When cache is cleared
+        given_cache.clear()
+
+        # When value with same key is fetched again
+        result2 = given_cache.fetch('key', 0.0)
+
+        # Then first result is returned
+        self.assertIs(value1, result1)
+
+        # Then fetch method is called twice
+        self.assertEqual(
+            [mock.call(('key',)), mock.call(('key',))],
+            given_fetch_method.mock_calls)
+
+        # Then second result is returned
+        self.assertIs(value2, result2)
+
+    def test_fecth_again_before_timeout(self):
+        value1 = 'value1'
+        value2 = 'value2'
+        given_fetch_method = mock.Mock(
+            side_effect=[iter([('key', value1)]),
+                         iter([('key', value2)])])
+        given_cache = cache.Cache(given_fetch_method)
+
+        # When value with key is fetched
+        result1 = given_cache.fetch('key', 1.0)
+
+        # When value with same key is fetched again and cached entry is not
+        # expired
+        result2 = given_cache.fetch('key', 0.0)
+
+        # First result is returned
+        self.assertIs(value1, result1)
+
+        # Then fetch method is called once
+        given_fetch_method.assert_called_once_with(('key',))
+
+        # Then first result is returned twice
+        self.assertIs(value1, result2)
+
+    def test_fecth_again_after_timeout(self):
+        value1 = 'value1'
+        value2 = 'value2'
+        given_fetch_method = mock.Mock(
+            side_effect=[iter([('key', value1)]),
+                         iter([('key', value2)])])
+        given_cache = cache.Cache(given_fetch_method)
+
+        # When value with key is fetched
+        result1 = given_cache.fetch('key', 0.0)
+
+        # When value with same key is fetched again and cached entry is
+        # expired
+        result2 = given_cache.fetch('key', 0.0)
+
+        # Then first result is returned
+        self.assertIs(value1, result1)
+
+        # Then fetch method is called twice
+        self.assertEqual(
+            [mock.call(('key',)), mock.call(('key',))],
+            given_fetch_method.mock_calls)
+
+        # Then second result is returned
+        self.assertIs(value2, result2)
+
+    def test_fecth_two_values_yielding_both_before_timeout(self):
+        value1 = 'value1'
+        value2 = 'value2'
+        given_fetch_method = mock.Mock(
+            return_value=iter([('key1', value1),
+                               ('key2', value2)]))
+        given_cache = cache.Cache(given_fetch_method)
+
+        # When value with key is fetched
+        result1 = given_cache.fetch('key1', 60.0)
+
+        # When value with another key is fetched and cached entry is not
+        # expired
+        result2 = given_cache.fetch('key2', 60.0)
+
+        # Then first result is returned
+        self.assertIs(value1, result1)
+
+        # Then fetch method is called once
+        given_fetch_method.assert_called_once_with(('key1',))
+
+        # Then second result is returned
+        self.assertIs(value2, result2)
+
+    def test_fecth_two_values_yielding_both_after_timeout(self):
+        value1 = 'value1'
+        value2 = 'value2'
+        given_fetch_method = mock.Mock(
+            return_value=[('key1', value1), ('key2', value2)])
+        given_cache = cache.Cache(given_fetch_method)
+
+        # When value with key is fetched
+        result1 = given_cache.fetch('key1', 0.0)
+
+        # When value with another key is fetched and cached entry is
+        # expired
+        result2 = given_cache.fetch('key2', 0.0)
+
+        # Then first result is returned
+        self.assertIs(value1, result1)
+
+        # Then fetch method is called twice
+        self.assertEqual(
+            [mock.call(('key1',)), mock.call(('key2',))],
+            given_fetch_method.mock_calls)
+
+        # Then second result is returned
+        self.assertIs(value2, result2)
+
+    def test_fecth_all_with_multiple_entries(self):
+        given_fetch_method = mock.Mock(
+            return_value=iter([('key', 'value1'),
+                               ('key', 'value2')]))
+        given_cache = cache.Cache(given_fetch_method)
+
+        # When value with key is fetched
+        results = list(given_cache.fetch_all(['key'], 0.0))
+
+        # Then fetch method is called once
+        given_fetch_method.assert_called_once_with(('key',))
+
+        # Then both results are yielded in the right order
+        self.assertEqual([('key', 'value1'), ('key', 'value2')], results)
+
+    def test_fecth_all_with_repeated_entries(self):
+        entry = ('key', 'value')
+        given_fetch_method = mock.Mock(
+            return_value=iter([entry, entry, entry]))
+        given_cache = cache.Cache(given_fetch_method)
+
+        # When value with key is fetched
+        results = list(given_cache.fetch_all(['key'], 0.0))
+
+        # Then fetch method is called once
+        given_fetch_method.assert_called_once_with(('key',))
+
+        # Then results are yielded in the right order
+        self.assertEqual([entry, entry, entry], results)
diff --git a/networking-odl/networking_odl/tests/unit/common/test_callback.py b/networking-odl/networking_odl/tests/unit/common/test_callback.py
new file mode 100644 (file)
index 0000000..f5e2ee6
--- /dev/null
@@ -0,0 +1,83 @@
+# Copyright (c) 2013-2014 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from networking_odl.common import callback
+from networking_odl.common import constants as odl_const
+from networking_odl.ml2.mech_driver import OpenDaylightDriver
+
+import mock
+import testtools
+
+from neutron.callbacks import events
+from neutron.callbacks import resources
+
+
+FAKE_ID = 'fakeid'
+
+
+class ODLCallbackTestCase(testtools.TestCase):
+    odl_driver = OpenDaylightDriver()
+    sgh = callback.OdlSecurityGroupsHandler(odl_driver)
+
+    def setUp(self):
+        super(ODLCallbackTestCase, self).setUp()
+
+    @mock.patch.object(OpenDaylightDriver, 'sync_from_callback')
+    def _test_callback_for_sg(self, event, op, sg, sg_id, sfc):
+        self.sgh.sg_callback(resources.SECURITY_GROUP,
+                             event,
+                             None,
+                             security_group=sg,
+                             security_group_id=sg_id)
+
+        expected_dict = ({resources.SECURITY_GROUP: sg}
+                         if sg is not None else None)
+        sfc.assert_called_with(
+            op, callback._RESOURCE_MAPPING[resources.SECURITY_GROUP], sg_id,
+            expected_dict)
+
+    def test_callback_sg_create(self):
+        self._test_callback_for_sg(events.AFTER_CREATE, odl_const.ODL_CREATE,
+                                   mock.Mock(), None)
+
+    def test_callback_sg_update(self):
+        self._test_callback_for_sg(events.AFTER_UPDATE, odl_const.ODL_UPDATE,
+                                   mock.Mock(), FAKE_ID)
+
+    def test_callback_sg_delete(self):
+        self._test_callback_for_sg(events.AFTER_DELETE, odl_const.ODL_DELETE,
+                                   None, FAKE_ID)
+
+    @mock.patch.object(OpenDaylightDriver, 'sync_from_callback')
+    def _test_callback_for_sg_rules(self, event, op, sg_rule, sg_rule_id, sfc):
+        self.sgh.sg_callback(resources.SECURITY_GROUP_RULE,
+                             event,
+                             None,
+                             security_group_rule=sg_rule,
+                             security_group_rule_id=sg_rule_id)
+
+        expected_dict = ({resources.SECURITY_GROUP_RULE: sg_rule}
+                         if sg_rule is not None else None)
+        sfc.assert_called_with(
+            op, callback._RESOURCE_MAPPING[resources.SECURITY_GROUP_RULE],
+            sg_rule_id, expected_dict)
+
+    def test_callback_sg_rules_create(self):
+        self._test_callback_for_sg_rules(
+            events.AFTER_CREATE, odl_const.ODL_CREATE, mock.Mock(), None)
+
+    def test_callback_sg_rules_delete(self):
+        self._test_callback_for_sg_rules(
+            events.AFTER_DELETE, odl_const.ODL_DELETE, None, FAKE_ID)
diff --git a/networking-odl/networking_odl/tests/unit/common/test_lightweight_testing.py b/networking-odl/networking_odl/tests/unit/common/test_lightweight_testing.py
new file mode 100644 (file)
index 0000000..ea3b5a8
--- /dev/null
@@ -0,0 +1,174 @@
+# Copyright (c) 2015 Intel Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import mock
+
+from networking_odl.common import lightweight_testing as lwt
+
+from neutron.tests import base
+
+
+class LightweightTestingTestCase(base.DietTestCase):
+
+    def test_create_client_with_lwt_enabled(self):
+        """Have to do the importation here, otherwise there will be a loop"""
+        from networking_odl.common import client as odl_client
+        odl_client.cfg.CONF.set_override('enable_lightweight_testing',
+                                         True, 'ml2_odl')
+        # DietTestCase does not automatically cleans configuration overrides
+        self.addCleanup(odl_client.cfg.CONF.reset)
+
+        client = odl_client.OpenDaylightRestClient.create_client()
+        self.assertIsInstance(client, lwt.OpenDaylightLwtClient)
+
+    def test_create_client_with_lwt_disabled(self):
+        """Have to do the importation here, otherwise there will be a loop"""
+        from networking_odl.common import client as odl_client
+        odl_client.cfg.CONF.set_override('enable_lightweight_testing',
+                                         False, 'ml2_odl')
+        # DietTestCase does not automatically cleans configuration overrides
+        self.addCleanup(odl_client.cfg.CONF.reset)
+
+        client = odl_client.OpenDaylightRestClient.create_client()
+        self.assertIsInstance(client, odl_client.OpenDaylightRestClient)
+
+    @mock.patch.dict(lwt.OpenDaylightLwtClient.lwt_dict,
+                     {'networks': {}}, clear=True)
+    def test_post_single_resource(self):
+        client = lwt.OpenDaylightLwtClient.create_client()
+        fake_network1 = {'id': 'fakeid1', 'name': 'fake_network1'}
+        obj = {'networks': fake_network1}
+        response = client.sendjson('post', 'networks', obj)
+        self.assertEqual(lwt.NO_CONTENT, response.status_code)
+        lwt_dict = lwt.OpenDaylightLwtClient.lwt_dict
+        self.assertEqual(lwt_dict['networks']['fakeid1'],
+                         fake_network1)
+
+    @mock.patch.dict(lwt.OpenDaylightLwtClient.lwt_dict,
+                     {'networks': {}}, clear=True)
+    def test_post_multiple_resources(self):
+        client = lwt.OpenDaylightLwtClient.create_client()
+        fake_network1 = {'id': 'fakeid1', 'name': 'fake_network1'}
+        fake_network2 = {'id': 'fakeid2', 'name': 'fake_network2'}
+        obj = {'networks': [fake_network1, fake_network2]}
+        response = client.sendjson('post', 'networks', obj)
+        self.assertEqual(lwt.NO_CONTENT, response.status_code)
+        lwt_dict = lwt.OpenDaylightLwtClient.lwt_dict
+        self.assertEqual(lwt_dict['networks']['fakeid1'],
+                         fake_network1)
+        self.assertEqual(lwt_dict['networks']['fakeid2'],
+                         fake_network2)
+
+    @mock.patch.dict(lwt.OpenDaylightLwtClient.lwt_dict,
+                     {'ports': {'fakeid1': {'id': 'fakeid1',
+                                            'name': 'fake_port1'}}},
+                     clear=True)
+    def test_get_single_resource(self):
+        client = lwt.OpenDaylightLwtClient.create_client()
+        url_path = 'ports/fakeid1'
+        response = client.sendjson('get', url_path, None)
+        self.assertEqual(lwt.OK, response.status_code)
+        res = response.json()
+        # For single resource, the return value is a dict
+        self.assertEqual(res['port']['name'], 'fake_port1')
+
+    @mock.patch.dict(lwt.OpenDaylightLwtClient.lwt_dict,
+                     {'ports': {'fakeid1': {'id': 'fakeid1',
+                                            'name': 'fake_port1'},
+                                'fakeid2': {'id': 'fakeid2',
+                                            'name': 'fake_port2'}}},
+                     clear=True)
+    def test_get_multiple_resources(self):
+        client = lwt.OpenDaylightLwtClient.create_client()
+        url_path = 'ports/'
+        response = client.sendjson('get', url_path, None)
+        self.assertEqual(lwt.OK, response.status_code)
+        res = response.json()
+        for port in res:
+            self.assertIn(port['port']['name'],
+                          ['fake_port1', 'fake_port2'])
+
+    @mock.patch.dict(lwt.OpenDaylightLwtClient.lwt_dict,
+                     {'subnets': {'fakeid1': {'id': 'fakeid1',
+                                              'name': 'fake_subnet1'}}},
+                     clear=True)
+    def test_put_single_resource(self):
+        client = lwt.OpenDaylightLwtClient.create_client()
+        changed = {'id': 'fakeid1', 'name': 'fake_subnet1_changed'}
+        obj = {'subnets': changed}
+
+        url_path = 'subnets/fakeid1'
+        response = client.sendjson('put', url_path, obj)
+        self.assertEqual(lwt.NO_CONTENT, response.status_code)
+        lwt_dict = lwt.OpenDaylightLwtClient.lwt_dict
+        self.assertEqual('fake_subnet1_changed',
+                         lwt_dict['subnets']['fakeid1']['name'])
+
+        """Check the client does not change the parameter"""
+        self.assertEqual('fakeid1', changed['id'])
+        self.assertEqual('fake_subnet1_changed', changed['name'])
+
+    @mock.patch.dict(lwt.OpenDaylightLwtClient.lwt_dict,
+                     {'subnets': {'fakeid1': {'id': 'fakeid1',
+                                              'name': 'fake_subnet1'},
+                                  'fakeid2': {'id': 'fakeid2',
+                                              'name': 'fake_subnet2'}}},
+                     clear=True)
+    def test_put_multiple_resources(self):
+        client = lwt.OpenDaylightLwtClient.create_client()
+        changed1 = {'id': 'fakeid1', 'name': 'fake_subnet1_changed'}
+        changed2 = {'id': 'fakeid2', 'name': 'fake_subnet2_changed'}
+        obj = {'subnets': [changed1, changed2]}
+
+        url_path = 'subnets/'
+        response = client.sendjson('put', url_path, obj)
+        self.assertEqual(lwt.NO_CONTENT, response.status_code)
+        lwt_dict = lwt.OpenDaylightLwtClient.lwt_dict
+        self.assertEqual('fake_subnet1_changed',
+                         lwt_dict['subnets']['fakeid1']['name'])
+        self.assertEqual('fake_subnet2_changed',
+                         lwt_dict['subnets']['fakeid2']['name'])
+
+    @mock.patch.dict(lwt.OpenDaylightLwtClient.lwt_dict,
+                     {'networks': {'fakeid1': {'id': 'fakeid1',
+                                               'name': 'fake_network1'}}},
+                     clear=True)
+    def test_delete_single_resource(self):
+        client = lwt.OpenDaylightLwtClient.create_client()
+        url_path = 'networks/fakeid1'
+        response = client.sendjson('delete', url_path, None)
+        self.assertEqual(lwt.NO_CONTENT, response.status_code)
+        lwt_dict = lwt.OpenDaylightLwtClient.lwt_dict
+        network = lwt_dict['networks'].get('fakeid1')
+        self.assertIsNone(network)
+
+    @mock.patch.dict(lwt.OpenDaylightLwtClient.lwt_dict,
+                     {'networks': {'fakeid1': {'id': 'fakeid1',
+                                               'name': 'fake_network1'},
+                                   'fakeid2': {'id': 'fakeid2',
+                                               'name': 'fake_network2'}}},
+                     clear=True)
+    def test_delete_multiple_resources(self):
+        client = lwt.OpenDaylightLwtClient.create_client()
+        network1 = {'id': 'fakeid1'}
+        network2 = {'id': 'fakeid2'}
+        obj = {'networks': [network1, network2]}
+        response = client.sendjson('delete', 'networks/', obj)
+        self.assertEqual(lwt.NO_CONTENT, response.status_code)
+        lwt_dict = lwt.OpenDaylightLwtClient.lwt_dict
+        network = lwt_dict['networks'].get('fakeid1')
+        self.assertIsNone(network)
+        network = lwt_dict['networks'].get('fakeid2')
+        self.assertIsNone(network)
diff --git a/networking-odl/networking_odl/tests/unit/common/test_utils.py b/networking-odl/networking_odl/tests/unit/common/test_utils.py
new file mode 100644 (file)
index 0000000..dcfb50e
--- /dev/null
@@ -0,0 +1,156 @@
+# Copyright (c) 2015 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import mock
+
+from neutron.tests import base
+
+from networking_odl.common import cache
+from networking_odl.common import utils
+
+
+class TestGetAddressesByName(base.DietTestCase):
+
+    # pylint: disable=protected-access, unused-argument
+
+    def setUp(self):
+        super(TestGetAddressesByName, self).setUp()
+        self.clear_cache()
+        self.addCleanup(self.clear_cache)
+        time = self.patch(
+            utils.cache, 'time', clock=mock.Mock(return_value=0.0))
+        self.clock = time.clock
+        socket = self.patch(utils, 'socket')
+        self.getaddrinfo = socket.getaddrinfo
+
+    def patch(self, target, name, *args, **kwargs):
+        context = mock.patch.object(target, name, *args, **kwargs)
+        mocked = context.start()
+        self.addCleanup(context.stop)
+        return mocked
+
+    def clear_cache(self):
+        utils._addresses_by_name_cache.clear()
+
+    def test_get_addresses_by_valid_name(self):
+        self.getaddrinfo.return_value = [
+            (2, 1, 6, '', ('127.0.0.1', 0)),
+            (2, 2, 17, '', ('127.0.0.1', 0)),
+            (2, 3, 0, '', ('127.0.0.1', 0)),
+            (2, 1, 6, '', ('10.237.214.247', 0)),
+            (2, 2, 17, '', ('10.237.214.247', 0)),
+            (2, 3, 0, '', ('10.237.214.247', 0))]
+
+        # When valid host name is requested
+        result = utils.get_addresses_by_name('some_host_name')
+
+        # Then correct addresses are returned
+        self.assertEqual(('127.0.0.1', '10.237.214.247'), result)
+
+        # Then fetched addresses are cached
+        self.assertEqual(result, utils.get_addresses_by_name('some_host_name'))
+
+        # Then addresses are fetched only once
+        self.getaddrinfo.assert_called_once_with('some_host_name', None)
+
+    def test_get_addresses_by_valid_name_when_cache_expires(self):
+        self.getaddrinfo.return_value = [
+            (2, 1, 6, '', ('127.0.0.1', 0)),
+            (2, 2, 17, '', ('127.0.0.1', 0)),
+            (2, 3, 0, '', ('127.0.0.1', 0)),
+            (2, 1, 6, '', ('10.237.214.247', 0)),
+            (2, 2, 17, '', ('10.237.214.247', 0)),
+            (2, 3, 0, '', ('10.237.214.247', 0))]
+
+        # When valid host name is requested
+        result1 = utils.get_addresses_by_name('some_host_name')
+
+        # and after a long time
+        self.clock.return_value = 1.0e6
+
+        # When valid host name is requested
+        result2 = utils.get_addresses_by_name('some_host_name')
+
+        # Then correct addresses are returned
+        self.assertEqual(('127.0.0.1', '10.237.214.247'), result1)
+        self.assertEqual(('127.0.0.1', '10.237.214.247'), result2)
+
+        # Then addresses are fetched twice
+        self.getaddrinfo.assert_has_calls(
+            [mock.call('some_host_name', None),
+             mock.call('some_host_name', None)])
+
+    @mock.patch.object(cache, 'LOG')
+    def test_get_addresses_by_invalid_name(self, cache_logger):
+
+        # Given address resolution is failing
+        given_error = RuntimeError("I don't know him!")
+
+        def failing_getaddrinfo(name, service):
+            raise given_error
+
+        self.getaddrinfo.side_effect = failing_getaddrinfo
+
+        # When invalid name is requested
+        self.assertRaises(
+            RuntimeError, utils.get_addresses_by_name, 'some_host_name')
+
+        # When invalid name is requested again
+        self.assertRaises(
+            RuntimeError, utils.get_addresses_by_name, 'some_host_name')
+
+        # Then result is fetched only once
+        self.getaddrinfo.assert_has_calls(
+            [mock.call('some_host_name', None)])
+        cache_logger.warning.assert_has_calls(
+            [mock.call(
+                'Error fetching values for keys: %r', "'some_host_name'",
+                exc_info=(RuntimeError, given_error, mock.ANY)),
+             mock.call(
+                'Error fetching values for keys: %r', "'some_host_name'",
+                exc_info=(RuntimeError, given_error, mock.ANY))])
+
+    @mock.patch.object(cache, 'LOG')
+    def test_get_addresses_failing_when_expired_in_cache(self, cache_logger):
+        self.getaddrinfo.return_value = [
+            (2, 1, 6, '', ('127.0.0.1', 0)),
+            (2, 2, 17, '', ('127.0.0.1', 0)),
+            (2, 3, 0, '', ('127.0.0.1', 0)),
+            (2, 1, 6, '', ('10.237.214.247', 0)),
+            (2, 2, 17, '', ('10.237.214.247', 0)),
+            (2, 3, 0, '', ('10.237.214.247', 0))]
+
+        # Given valid result is in cache but expired
+        utils.get_addresses_by_name('some_host_name')
+        self.clock.return_value = 1.0e6
+
+        # Given address resolution is now failing
+        given_error = RuntimeError("This is top secret.")
+
+        def failing_getaddrinfo(name, service):
+            raise given_error
+
+        self.getaddrinfo.side_effect = failing_getaddrinfo
+
+        self.assertRaises(
+            RuntimeError, utils.get_addresses_by_name, 'some_host_name')
+
+        # Then result is fetched more times
+        self.getaddrinfo.assert_has_calls(
+            [mock.call('some_host_name', None),
+             mock.call('some_host_name', None)])
+        cache_logger.warning.assert_called_once_with(
+            'Error fetching values for keys: %r', "'some_host_name'",
+            exc_info=(RuntimeError, given_error, mock.ANY))
diff --git a/networking-odl/networking_odl/tests/unit/db/__init__.py b/networking-odl/networking_odl/tests/unit/db/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/networking-odl/networking_odl/tests/unit/db/test_db.py b/networking-odl/networking_odl/tests/unit/db/test_db.py
new file mode 100644 (file)
index 0000000..72749ad
--- /dev/null
@@ -0,0 +1,243 @@
+#
+# Copyright (C) 2016 Red Hat, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License. You may obtain
+#  a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#  License for the specific language governing permissions and limitations
+#  under the License.
+#
+
+import mock
+
+from datetime import datetime
+from datetime import timedelta
+
+from networking_odl.common import constants as odl_const
+from networking_odl.db import db
+from networking_odl.db import models
+
+from neutron.db import api as neutron_db_api
+from neutron.tests.unit.testlib_api import SqlTestCaseLight
+from oslo_db.exception import DBDeadlock
+from unittest2.case import TestCase
+
+
+class DbTestCase(SqlTestCaseLight, TestCase):
+
+    UPDATE_ROW = [odl_const.ODL_NETWORK, 'id', odl_const.ODL_UPDATE,
+                  {'test': 'data'}]
+
+    def setUp(self):
+        super(DbTestCase, self).setUp()
+        self.db_session = neutron_db_api.get_session()
+        self.addCleanup(self._db_cleanup)
+
+    def _db_cleanup(self):
+        self.db_session.query(models.OpendaylightJournal).delete()
+        self.db_session.query(models.OpendaylightMaintenance).delete()
+
+    def _update_row(self, row):
+        self.db_session.merge(row)
+        self.db_session.flush()
+
+    def _test_validate_updates(self, rows, time_deltas, expected_validations):
+        for row in rows:
+            db.create_pending_row(self.db_session, *row)
+
+        # update row created_at
+        rows = db.get_all_db_rows(self.db_session)
+        now = datetime.now()
+        for row, time_delta in zip(rows, time_deltas):
+            row.created_at = now - timedelta(hours=time_delta)
+            self._update_row(row)
+
+        # validate if there are older rows
+        for row, expected_valid in zip(rows, expected_validations):
+            valid = not db.check_for_older_ops(self.db_session, row)
+            self.assertEqual(expected_valid, valid)
+
+    def _test_retry_count(self, retry_num, max_retry,
+                          expected_retry_count, expected_state):
+        # add new pending row
+        db.create_pending_row(self.db_session, *self.UPDATE_ROW)
+
+        # update the row with the requested retry_num
+        row = db.get_all_db_rows(self.db_session)[0]
+        row.retry_count = retry_num - 1
+        db.update_pending_db_row_retry(self.db_session, row, max_retry)
+
+        # validate the state and the retry_count of the row
+        row = db.get_all_db_rows(self.db_session)[0]
+        self.assertEqual(expected_state, row.state)
+        self.assertEqual(expected_retry_count, row.retry_count)
+
+    def _test_update_row_state(self, from_state, to_state):
+        # add new pending row
+        db.create_pending_row(self.db_session, *self.UPDATE_ROW)
+
+        row = db.get_all_db_rows(self.db_session)[0]
+        for state in [from_state, to_state]:
+            # update the row state
+            db.update_db_row_state(self.db_session, row, state)
+
+            # validate the new state
+            row = db.get_all_db_rows(self.db_session)[0]
+            self.assertEqual(state, row.state)
+
+    def test_validate_updates_same_object_uuid(self):
+        self._test_validate_updates(
+            [self.UPDATE_ROW, self.UPDATE_ROW], [1, 0], [True, False])
+
+    def test_validate_updates_same_created_time(self):
+        self._test_validate_updates(
+            [self.UPDATE_ROW, self.UPDATE_ROW], [0, 0], [True, True])
+
+    def test_validate_updates_different_object_uuid(self):
+        other_row = list(self.UPDATE_ROW)
+        other_row[1] += 'a'
+        self._test_validate_updates(
+            [self.UPDATE_ROW, other_row], [1, 0], [True, True])
+
+    def test_validate_updates_different_object_type(self):
+        other_row = list(self.UPDATE_ROW)
+        other_row[0] = odl_const.ODL_PORT
+        other_row[1] += 'a'
+        self._test_validate_updates(
+            [self.UPDATE_ROW, other_row], [1, 0], [True, True])
+
+    def test_get_oldest_pending_row_none_when_no_rows(self):
+        row = db.get_oldest_pending_db_row_with_lock(self.db_session)
+        self.assertIsNone(row)
+
+    def _test_get_oldest_pending_row_none(self, state):
+        db.create_pending_row(self.db_session, *self.UPDATE_ROW)
+        row = db.get_all_db_rows(self.db_session)[0]
+        row.state = state
+        self._update_row(row)
+
+        row = db.get_oldest_pending_db_row_with_lock(self.db_session)
+        self.assertIsNone(row)
+
+    def test_get_oldest_pending_row_none_when_row_processing(self):
+        self._test_get_oldest_pending_row_none(odl_const.PROCESSING)
+
+    def test_get_oldest_pending_row_none_when_row_failed(self):
+        self._test_get_oldest_pending_row_none(odl_const.FAILED)
+
+    def test_get_oldest_pending_row_none_when_row_completed(self):
+        self._test_get_oldest_pending_row_none(odl_const.COMPLETED)
+
+    def test_get_oldest_pending_row(self):
+        db.create_pending_row(self.db_session, *self.UPDATE_ROW)
+        row = db.get_oldest_pending_db_row_with_lock(self.db_session)
+        self.assertIsNotNone(row)
+        self.assertEqual(odl_const.PROCESSING, row.state)
+
+    def test_get_oldest_pending_row_order(self):
+        db.create_pending_row(self.db_session, *self.UPDATE_ROW)
+        older_row = db.get_all_db_rows(self.db_session)[0]
+        older_row.last_retried -= timedelta(minutes=1)
+        self._update_row(older_row)
+
+        db.create_pending_row(self.db_session, *self.UPDATE_ROW)
+        row = db.get_oldest_pending_db_row_with_lock(self.db_session)
+        self.assertEqual(older_row, row)
+
+    def test_get_oldest_pending_row_when_deadlock(self):
+        db.create_pending_row(self.db_session, *self.UPDATE_ROW)
+        update_mock = mock.MagicMock(side_effect=(DBDeadlock, mock.DEFAULT))
+
+        # Mocking is mandatory to achieve a deadlock regardless of the DB
+        # backend being used when running the tests
+        with mock.patch.object(db, 'update_db_row_state', new=update_mock):
+            row = db.get_oldest_pending_db_row_with_lock(self.db_session)
+            self.assertIsNotNone(row)
+
+        self.assertEqual(2, update_mock.call_count)
+
+    def _test_delete_rows_by_state_and_time(self, last_retried, row_retention,
+                                            state, expected_rows):
+        db.create_pending_row(self.db_session, *self.UPDATE_ROW)
+
+        # update state and last retried
+        row = db.get_all_db_rows(self.db_session)[0]
+        row.state = state
+        row.last_retried = row.last_retried - timedelta(seconds=last_retried)
+        self._update_row(row)
+
+        db.delete_rows_by_state_and_time(self.db_session,
+                                         odl_const.COMPLETED,
+                                         timedelta(seconds=row_retention))
+
+        # validate the number of rows in the journal
+        rows = db.get_all_db_rows(self.db_session)
+        self.assertEqual(expected_rows, len(rows))
+
+    def test_delete_completed_rows_no_new_rows(self):
+        self._test_delete_rows_by_state_and_time(0, 10, odl_const.COMPLETED, 1)
+
+    def test_delete_completed_rows_one_new_row(self):
+        self._test_delete_rows_by_state_and_time(6, 5, odl_const.COMPLETED, 0)
+
+    def test_delete_completed_rows_wrong_state(self):
+        self._test_delete_rows_by_state_and_time(10, 8, odl_const.PENDING, 1)
+
+    def test_valid_retry_count(self):
+        self._test_retry_count(1, 1, 1, odl_const.PENDING)
+
+    def test_invalid_retry_count(self):
+        self._test_retry_count(2, 1, 1, odl_const.FAILED)
+
+    def test_update_row_state_to_pending(self):
+        self._test_update_row_state(odl_const.PROCESSING, odl_const.PENDING)
+
+    def test_update_row_state_to_processing(self):
+        self._test_update_row_state(odl_const.PENDING, odl_const.PROCESSING)
+
+    def test_update_row_state_to_failed(self):
+        self._test_update_row_state(odl_const.PROCESSING, odl_const.FAILED)
+
+    def test_update_row_state_to_completed(self):
+        self._test_update_row_state(odl_const.PROCESSING, odl_const.COMPLETED)
+
+    def _test_maintenance_lock_unlock(self, db_func, existing_state,
+                                      expected_state, expected_result):
+        row = models.OpendaylightMaintenance(id='test',
+                                             state=existing_state)
+        self.db_session.add(row)
+        self.db_session.flush()
+
+        self.assertEqual(expected_result, db_func(self.db_session))
+        row = self.db_session.query(models.OpendaylightMaintenance).one()
+        self.assertEqual(expected_state, row['state'])
+
+    def test_lock_maintenance(self):
+        self._test_maintenance_lock_unlock(db.lock_maintenance,
+                                           odl_const.PENDING,
+                                           odl_const.PROCESSING,
+                                           True)
+
+    def test_lock_maintenance_fails_when_processing(self):
+        self._test_maintenance_lock_unlock(db.lock_maintenance,
+                                           odl_const.PROCESSING,
+                                           odl_const.PROCESSING,
+                                           False)
+
+    def test_unlock_maintenance(self):
+        self._test_maintenance_lock_unlock(db.unlock_maintenance,
+                                           odl_const.PROCESSING,
+                                           odl_const.PENDING,
+                                           True)
+
+    def test_unlock_maintenance_fails_when_pending(self):
+        self._test_maintenance_lock_unlock(db.unlock_maintenance,
+                                           odl_const.PENDING,
+                                           odl_const.PENDING,
+                                           False)
diff --git a/networking-odl/networking_odl/tests/unit/fwaas/__init__.py b/networking-odl/networking_odl/tests/unit/fwaas/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/networking-odl/networking_odl/tests/unit/fwaas/test_fwaas_odl.py b/networking-odl/networking_odl/tests/unit/fwaas/test_fwaas_odl.py
new file mode 100644 (file)
index 0000000..b50016c
--- /dev/null
@@ -0,0 +1,29 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+test_fwaas_odl
+----------------------------------
+
+Tests for the L3 FWaaS plugin for networking-odl.
+"""
+
+from networking_odl.fwaas import driver as fwaas_odl
+
+from neutron.tests import base
+
+
+class TestODL_FWaaS(base.BaseTestCase):
+
+    def test_init(self):
+        # just create an instance of OpenDaylightFwaasDriver
+        fwaas_odl.OpenDaylightFwaasDriver()
diff --git a/networking-odl/networking_odl/tests/unit/journal/__init__.py b/networking-odl/networking_odl/tests/unit/journal/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/networking-odl/networking_odl/tests/unit/journal/test_dependency_validations.py b/networking-odl/networking_odl/tests/unit/journal/test_dependency_validations.py
new file mode 100644 (file)
index 0000000..39a4b98
--- /dev/null
@@ -0,0 +1,44 @@
+#
+# Copyright (C) 2016 Intel Corp. Isaku Yamahata <isaku.yamahata@gmail com>
+# All Rights Reserved.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License. You may obtain
+#  a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#  License for the specific language governing permissions and limitations
+#  under the License.
+#
+
+import mock
+
+from neutron.tests import base
+
+from networking_odl.journal import dependency_validations
+
+
+class DependencyValidationsTestCase(base.DietTestCase):
+    _RESOURCE_DUMMY = 'test_type'
+
+    def setUp(self):
+        super(DependencyValidationsTestCase, self).setUp()
+        mock_validation_map = mock.patch.dict(
+            dependency_validations._VALIDATION_MAP)
+        mock_validation_map.start()
+        self.addCleanup(mock_validation_map.stop)
+
+    def test_register_validator(self):
+        mock_session = mock.Mock()
+        mock_validator = mock.Mock(return_value=False)
+        mock_row = mock.Mock()
+        mock_row.object_type = self._RESOURCE_DUMMY
+        dependency_validations.register_validator(self._RESOURCE_DUMMY,
+                                                  mock_validator)
+        valid = dependency_validations.validate(mock_session, mock_row)
+        mock_validator.assert_called_once_with(mock_session, mock_row)
+        self.assertFalse(valid)
diff --git a/networking-odl/networking_odl/tests/unit/journal/test_full_sync.py b/networking-odl/networking_odl/tests/unit/journal/test_full_sync.py
new file mode 100644 (file)
index 0000000..cedccbd
--- /dev/null
@@ -0,0 +1,152 @@
+#
+# Copyright (C) 2016 Red Hat, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License. You may obtain
+#  a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#  License for the specific language governing permissions and limitations
+#  under the License.
+#
+
+import mock
+import requests
+
+from neutron.db import api as neutron_db_api
+from neutron import manager
+from neutron.tests.unit.testlib_api import SqlTestCaseLight
+
+from networking_odl.common import constants as odl_const
+from networking_odl.db import db
+from networking_odl.db import models
+from networking_odl.journal import full_sync
+
+
+class FullSyncTestCase(SqlTestCaseLight):
+    def setUp(self):
+        super(FullSyncTestCase, self).setUp()
+        self.db_session = neutron_db_api.get_session()
+
+        full_sync._CLIENT = mock.MagicMock()
+        self.plugin_mock = mock.patch.object(manager.NeutronManager,
+                                             'get_plugin').start()
+        self.l3_plugin_mock = mock.patch.object(manager.NeutronManager,
+                                                'get_service_plugins').start()
+
+        self.addCleanup(self._db_cleanup)
+
+    def _db_cleanup(self):
+        self.db_session.query(models.OpendaylightJournal).delete()
+
+    def test_no_full_sync_when_canary_exists(self):
+        full_sync.full_sync(self.db_session)
+        self.assertEqual([], db.get_all_db_rows(self.db_session))
+
+    def _mock_l2_resources(self):
+        expected_journal = {odl_const.ODL_NETWORK: '1',
+                            odl_const.ODL_SUBNET: '2',
+                            odl_const.ODL_PORT: '3'}
+        plugin_instance = self.plugin_mock.return_value
+        plugin_instance.get_networks.return_value = [
+            {'id': expected_journal[odl_const.ODL_NETWORK]}]
+        plugin_instance.get_subnets.return_value = [
+            {'id': expected_journal[odl_const.ODL_SUBNET]}]
+        plugin_instance.get_ports.side_effect = ([
+            {'id': expected_journal[odl_const.ODL_PORT]}], [])
+        return expected_journal
+
+    def _filter_out_canary(self, rows):
+        return [row for row in rows if row['object_uuid'] !=
+                full_sync._CANARY_NETWORK_ID]
+
+    def _test_no_full_sync_when_canary_in_journal(self, state):
+        self._mock_canary_missing()
+        self._mock_l2_resources()
+        db.create_pending_row(self.db_session, odl_const.ODL_NETWORK,
+                              full_sync._CANARY_NETWORK_ID,
+                              odl_const.ODL_CREATE, {})
+        row = db.get_all_db_rows(self.db_session)[0]
+        db.update_db_row_state(self.db_session, row, state)
+
+        full_sync.full_sync(self.db_session)
+
+        rows = db.get_all_db_rows(self.db_session)
+        self.assertEqual([], self._filter_out_canary(rows))
+
+    def test_no_full_sync_when_canary_pending_creation(self):
+        self._test_no_full_sync_when_canary_in_journal(odl_const.PENDING)
+
+    def test_no_full_sync_when_canary_is_processing(self):
+        self._test_no_full_sync_when_canary_in_journal(odl_const.PROCESSING)
+
+    def test_client_error_propagates(self):
+        class TestException(Exception):
+            def __init__(self):
+                pass
+
+        full_sync._CLIENT.get.side_effect = TestException()
+        self.assertRaises(TestException, full_sync.full_sync, self.db_session)
+
+    def _mock_canary_missing(self):
+        get_return = mock.MagicMock()
+        get_return.status_code = requests.codes.not_found
+        full_sync._CLIENT.get.return_value = get_return
+
+    def _assert_canary_created(self):
+        rows = db.get_all_db_rows(self.db_session)
+        self.assertTrue(any(r['object_uuid'] == full_sync._CANARY_NETWORK_ID
+                            for r in rows))
+        return rows
+
+    def _test_full_sync_resources(self, expected_journal):
+        self._mock_canary_missing()
+
+        full_sync.full_sync(self.db_session)
+
+        rows = self._assert_canary_created()
+        rows = self._filter_out_canary(rows)
+        self.assertItemsEqual(expected_journal.keys(),
+                              [row['object_type'] for row in rows])
+        for row in rows:
+            self.assertEqual(expected_journal[row['object_type']],
+                             row['object_uuid'])
+
+    def test_full_sync_removes_pending_rows(self):
+        db.create_pending_row(self.db_session, odl_const.ODL_NETWORK, "uuid",
+                              odl_const.ODL_CREATE, {'foo': 'bar'})
+        self._test_full_sync_resources({})
+
+    def test_full_sync_no_resources(self):
+        self._test_full_sync_resources({})
+
+    def test_full_sync_l2_resources(self):
+        self._test_full_sync_resources(self._mock_l2_resources())
+
+    def _mock_router_port(self, port_id):
+        router_port = {'id': port_id,
+                       'device_id': '1',
+                       'tenant_id': '1',
+                       'fixed_ips': [{'subnet_id': '1'}]}
+        plugin_instance = self.plugin_mock.return_value
+        plugin_instance.get_ports.side_effect = ([], [router_port])
+
+    def _mock_l3_resources(self):
+        expected_journal = {odl_const.ODL_ROUTER: '1',
+                            odl_const.ODL_FLOATINGIP: '2',
+                            odl_const.ODL_ROUTER_INTF: '3'}
+        plugin_instance = self.l3_plugin_mock.return_value.get.return_value
+        plugin_instance.get_routers.return_value = [
+            {'id': expected_journal[odl_const.ODL_ROUTER]}]
+        plugin_instance.get_floatingips.return_value = [
+            {'id': expected_journal[odl_const.ODL_FLOATINGIP]}]
+        self._mock_router_port(expected_journal[odl_const.ODL_ROUTER_INTF])
+
+        return expected_journal
+
+    def test_full_sync_l3_resources(self):
+        self._test_full_sync_resources(self._mock_l3_resources())
diff --git a/networking-odl/networking_odl/tests/unit/journal/test_maintenance.py b/networking-odl/networking_odl/tests/unit/journal/test_maintenance.py
new file mode 100644 (file)
index 0000000..eb823cd
--- /dev/null
@@ -0,0 +1,93 @@
+#
+# Copyright (C) 2016 Red Hat, Inc.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License. You may obtain
+#  a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#  License for the specific language governing permissions and limitations
+#  under the License.
+#
+
+import mock
+import threading
+from unittest2.case import TestCase
+
+from neutron.db import api as neutron_db_api
+from neutron.tests.unit.testlib_api import SqlTestCaseLight
+
+from networking_odl.common import constants as odl_const
+from networking_odl.db import models
+from networking_odl.journal import maintenance
+
+
+class MaintenanceThreadTestCase(SqlTestCaseLight, TestCase):
+    def setUp(self):
+        super(MaintenanceThreadTestCase, self).setUp()
+        self.db_session = neutron_db_api.get_session()
+
+        row = models.OpendaylightMaintenance(state=odl_const.PENDING)
+        self.db_session.add(row)
+        self.db_session.flush()
+
+        self.thread = maintenance.MaintenanceThread()
+        self.thread.maintenance_interval = 0.01
+
+    def test__execute_op_no_exception(self):
+        with mock.patch.object(maintenance, 'LOG') as mock_log:
+            operation = mock.MagicMock()
+            operation.__name__ = "test"
+            self.thread._execute_op(operation, self.db_session)
+            self.assertTrue(operation.called)
+            self.assertTrue(mock_log.info.called)
+            self.assertFalse(mock_log.exception.called)
+
+    def test__execute_op_with_exception(self):
+        with mock.patch.object(maintenance, 'LOG') as mock_log:
+            operation = mock.MagicMock(side_effect=Exception())
+            operation.__name__ = "test"
+            self.thread._execute_op(operation, self.db_session)
+            self.assertTrue(mock_log.exception.called)
+
+    def test_thread_works(self):
+        callback_event = threading.Event()
+        count = [0]
+
+        def callback_op(**kwargs):
+            count[0] += 1
+
+            # The following should be true on the second call, so we're making
+            # sure that the thread runs more than once.
+            if count[0] > 1:
+                callback_event.set()
+
+        self.thread.register_operation(callback_op)
+        self.thread.start()
+
+        # Make sure the callback event was called and not timed out
+        self.assertTrue(callback_event.wait(timeout=5))
+
+    def test_thread_continues_after_exception(self):
+        exception_event = threading.Event()
+        callback_event = threading.Event()
+
+        def exception_op(**kwargs):
+            if not exception_event.is_set():
+                exception_event.set()
+                raise Exception()
+
+        def callback_op(**kwargs):
+            callback_event.set()
+
+        for op in [exception_op, callback_op]:
+            self.thread.register_operation(op)
+
+        self.thread.start()
+
+        # Make sure the callback event was called and not timed out
+        self.assertTrue(callback_event.wait(timeout=5))
diff --git a/networking-odl/networking_odl/tests/unit/l2gateway/__init__.py b/networking-odl/networking_odl/tests/unit/l2gateway/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/networking-odl/networking_odl/tests/unit/l2gateway/test_driver.py b/networking-odl/networking_odl/tests/unit/l2gateway/test_driver.py
new file mode 100644 (file)
index 0000000..2506332
--- /dev/null
@@ -0,0 +1,127 @@
+#
+# Copyright (C) 2016 Ericsson India Global Services Pvt Ltd.
+#
+#  Licensed under the Apache License, Version 2.0 (the "License"); you may
+#  not use this file except in compliance with the License. You may obtain
+#  a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#  License for the specific language governing permissions and limitations
+#  under the License.
+#
+
+import copy
+import mock
+
+from networking_odl.l2gateway import driver
+from neutron.tests import base
+
+
+class TestOpenDaylightL2gwDriver(base.DietTestCase):
+    """Unit tests for the OpenDaylight L2 gateway service driver.
+
+    The ODL REST client is replaced by a mock, so each test only asserts
+    the HTTP verb, URL and JSON payload the driver hands to the client.
+    """
+
+    def setUp(self):
+        # Patch create_client so the driver gets a mock REST client; the
+        # mock's sendjson/try_delete calls are inspected by the tests.
+        self.mocked_odlclient = mock.patch(
+            'networking_odl.common.client'
+            '.OpenDaylightRestClient.create_client').start().return_value
+        self.driver = driver.OpenDaylightL2gwDriver(service_plugin=None,
+                                                    validator=None)
+        super(TestOpenDaylightL2gwDriver, self).setUp()
+
+    def _get_fake_l2_gateway(self):
+        # Returns (gateway_id, gateway_dict); the id embedded in the dict
+        # matches the returned id so URL-building tests line up.
+        fake_l2_gateway_id = "5227c228-6bba-4bbe-bdb8-6942768ff0f1"
+        fake_l2_gateway = {
+            "tenant_id": "de0a7495-05c4-4be0-b796-1412835c6820",
+            "id": "5227c228-6bba-4bbe-bdb8-6942768ff0f1",
+            "name": "test-gateway",
+            "devices": [
+                {
+                    "device_name": "switch1",
+                    "interfaces": [
+                        {
+                            "name": "port1",
+                            "segmentation_id": [100]
+                        },
+                        {
+                            "name": "port2",
+                            "segmentation_id": [151, 152]
+                        }
+                    ]
+                },
+                {
+                    "device_name": "switch2",
+                    "interfaces": [
+                        {
+                            "name": "port5",
+                            "segmentation_id": [200]
+                        },
+                        {
+                            "name": "port6",
+                            "segmentation_id": [251, 252]
+                        }
+                    ]
+                }
+            ]
+        }
+        return fake_l2_gateway_id, fake_l2_gateway
+
+    def _get_fake_l2_gateway_connection(self):
+        # Returns (connection_id, connection_dict) for connection tests.
+        fake_l2_gateway_connection_id = "5227c228-6bba-4bbe-bdb8-6942768ff02f"
+        fake_l2_gateway_connection = {
+            "tenant_id": "de0a7495-05c4-4be0-b796-1412835c6820",
+            "id": "5227c228-6bba-4bbe-bdb8-6942768ff02f",
+            "network_id": "be0a7495-05c4-4be0-b796-1412835c6830",
+            "default_segmentation_id": 77,
+            "l2_gateway_id": "5227c228-6bba-4bbe-bdb8-6942768ff0f1"
+        }
+        return fake_l2_gateway_connection_id, fake_l2_gateway_connection
+
+    def test_create_l2_gateway_postcommit(self):
+        # Create must POST the gateway wrapped under "l2_gateway".
+        mocked_sendjson = self.mocked_odlclient.sendjson
+        fake_l2gateway_id, fake_l2gateway = self._get_fake_l2_gateway()
+        expected = {"l2_gateway": fake_l2gateway}
+        self.driver.create_l2_gateway_postcommit(mock.ANY, fake_l2gateway)
+        mocked_sendjson.assert_called_once_with('post', driver.L2GATEWAYS,
+                                                expected)
+
+    def test_delete_l2_gateway_postcommit(self):
+        # Delete must issue try_delete on the per-gateway URL.
+        mocked_trydelete = self.mocked_odlclient.try_delete
+        fake_l2gateway_id, fake_l2gateway = self._get_fake_l2_gateway()
+        self.driver.delete_l2_gateway_postcommit(mock.ANY, fake_l2gateway_id)
+        url = driver.L2GATEWAYS + '/' + fake_l2gateway_id
+        mocked_trydelete.assert_called_once_with(url)
+
+    def test_update_l2_gateway_postcommit(self):
+        # Update must PUT to the per-gateway URL; the driver is expected to
+        # take the id from the payload (the fake dict embeds the same id).
+        mocked_sendjson = self.mocked_odlclient.sendjson
+        fake_l2gateway_id, fake_l2gateway = self._get_fake_l2_gateway()
+        expected = {"l2_gateway": fake_l2gateway}
+        self.driver.update_l2_gateway_postcommit(mock.ANY, fake_l2gateway)
+        url = driver.L2GATEWAYS + '/' + fake_l2gateway_id
+        mocked_sendjson.assert_called_once_with('put', url, expected)
+
+    def test_create_l2_gateway_connection_postcommit(self):
+        # The driver must rename 'l2_gateway_id' to 'gateway_id' before
+        # POSTing the connection to ODL.
+        mocked_sendjson = self.mocked_odlclient.sendjson
+        (fake_l2gateway_conn_id,
+         fake_l2gateway_conn) = self._get_fake_l2_gateway_connection()
+        expected_l2gateway_conn = copy.deepcopy(fake_l2gateway_conn)
+        expected_l2gateway_conn['gateway_id'] = (
+            fake_l2gateway_conn['l2_gateway_id'])
+        expected_l2gateway_conn.pop('l2_gateway_id')
+        expected = {"l2gateway_connection": expected_l2gateway_conn}
+        self.driver.create_l2_gateway_connection_postcommit(
+            mock.ANY, fake_l2gateway_conn)
+        mocked_sendjson.assert_called_once_with('post',
+                                                driver.L2GATEWAY_CONNECTIONS,
+                                                expected)
+
+    def test_delete_l2_gateway_connection_postcommit(self):
+        # Delete must issue try_delete on the per-connection URL.
+        mocked_trydelete = self.mocked_odlclient.try_delete
+        (fake_l2gateway_conn_id,
+         fake_l2gateway_conn) = self._get_fake_l2_gateway_connection()
+        url = driver.L2GATEWAY_CONNECTIONS + '/' + fake_l2gateway_conn_id
+        self.driver.delete_l2_gateway_connection_postcommit(
+            mock.ANY, fake_l2gateway_conn_id)
+        mocked_trydelete.assert_called_once_with(url)
diff --git a/networking-odl/networking_odl/tests/unit/l3/__init__.py b/networking-odl/networking_odl/tests/unit/l3/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/networking-odl/networking_odl/tests/unit/l3/test_l3_odl.py b/networking-odl/networking_odl/tests/unit/l3/test_l3_odl.py
new file mode 100644 (file)
index 0000000..232864d
--- /dev/null
@@ -0,0 +1,310 @@
+# -*- coding: utf-8 -*-
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+test_l3_odl
+----------------------------------
+
+Tests for the L3 service plugin for networking-odl.
+"""
+import copy
+import mock
+
+from neutron.extensions import l3
+from neutron.extensions import l3_ext_gw_mode
+from neutron.tests.unit.api.v2 import test_base
+from neutron.tests.unit.extensions import base as test_extensions_base
+from webob import exc
+
+_get_path = test_base._get_path
+
+
+class Testodll3(test_extensions_base.ExtensionTestCase):
+    """API-level tests for the networking-odl L3 service plugin.
+
+    Exercises router, floating-IP and router-interface operations through
+    the Neutron API layer; the plugin backend is a mock supplied by
+    ExtensionTestCase, so the tests verify request routing, payload
+    pass-through and HTTP status codes only.
+    """
+
+    fmt = 'json'
+
+    def setUp(self):
+        super(Testodll3, self).setUp()
+        # support ext-gw-mode
+        # NOTE(review): this mutates the module-level RESOURCE_ATTRIBUTE_MAP
+        # in place with no cleanup, which can leak into other tests in the
+        # same process -- confirm whether the base class restores it.
+        for key in l3.RESOURCE_ATTRIBUTE_MAP.keys():
+            l3.RESOURCE_ATTRIBUTE_MAP[key].update(
+                l3_ext_gw_mode.EXTENDED_ATTRIBUTES_2_0.get(key, {}))
+        self._setUpExtension(
+            'neutron.extensions.l3.RouterPluginBase', None,
+            l3.RESOURCE_ATTRIBUTE_MAP, l3.L3, '',
+            allow_pagination=True, allow_sorting=True,
+            supported_extension_aliases=['router', 'ext-gw-mode'],
+            use_quota=True)
+
+    @staticmethod
+    def _get_mock_network_operation_context():
+        # Minimal network context; only .current is populated.
+        current = {'status': 'ACTIVE',
+                   'subnets': [],
+                   'name': 'net1',
+                   'provider:physical_network': None,
+                   'admin_state_up': True,
+                   'tenant_id': 'test-tenant',
+                   'provider:network_type': 'local',
+                   'router:external': False,
+                   'shared': False,
+                   'id': 'd897e21a-dfd6-4331-a5dd-7524fa421c3e',
+                   'provider:segmentation_id': None}
+        context = mock.Mock(current=current)
+        return context
+
+    @staticmethod
+    def _get_router_test():
+        # Returns (router_id, router_request_body).
+        router_id = "234237d4-1e7f-11e5-9bd7-080027328c3a"
+        router = {'router': {'name': 'router1', 'admin_state_up': True,
+                             'tenant_id': router_id,
+                             'external_gateway_info': None}}
+        return router_id, router
+
+    @staticmethod
+    def _get_floating_ip_test():
+        # Returns (floating_ip_id, floatingip_request_body) with all
+        # optional fields explicitly None.
+        floating_ip_id = "e4997650-6a83-4230-950a-8adab8e524b2"
+        floating_ip = {
+            "floatingip": {"fixed_ip_address": None,
+                           "floating_ip_address": None,
+                           "floating_network_id": None,
+                           "id": floating_ip_id,
+                           "router_id": "d23abc8d-2991-4a55-ba98-2aaea84cc72",
+                           "port_id": None,
+                           "status": None,
+                           "tenant_id": "test-tenant"
+                           }
+            }
+        return floating_ip_id, floating_ip
+
+    @staticmethod
+    def _get_port_test():
+        # Returns (port_id, port_dict) including fixed_ips and the subnet
+        # the port belongs to, for use as a get_port mock return value.
+        port_id = "3a44f4e5-1694-493a-a1fb-393881c673a4"
+        subnet_id = "a2f1f29d-571b-4533-907f-5803ab96ead1"
+        port = {'id': port_id,
+                'network_id': "84b126bb-f45e-4b2e-8202-7e5ce9e21fe7",
+                'fixed_ips': [{'ip_address': '19.4.4.4',
+                               'prefixlen': 24,
+                               'subnet_id': subnet_id}],
+                'subnets': [{'id': subnet_id,
+                             'cidr': '19.4.4.0/24',
+                             'gateway_ip': '19.4.4.1'}]}
+        return port_id, port
+
+    def test_create_router(self):
+        # POST /routers must reach plugin.create_router unchanged and
+        # return 201 with the plugin's enriched router body.
+        router_id, router = self._get_router_test()
+
+        return_value = copy.deepcopy(router['router'])
+        return_value.update({'status': "ACTIVE", 'id': router_id})
+
+        instance = self.plugin.return_value
+        instance.create_router.return_value = return_value
+        instance.get_routers_count.return_value = 0
+
+        res = self.api.post(_get_path('routers', fmt=self.fmt),
+                            self.serialize(router),
+                            content_type='application/%s' % self.fmt)
+
+        instance.create_router.assert_called_once_with(mock.ANY, router=router)
+        self.assertEqual(exc.HTTPCreated.code, res.status_int)
+        res = self.deserialize(res)
+        self.assertIn('router', res)
+        router = res['router']
+        self.assertEqual(router_id, router['id'])
+        self.assertEqual("ACTIVE", router['status'])
+        self.assertEqual(True, router['admin_state_up'])
+
+    def test_update_router(self):
+        # PUT /routers/<id> with a new external gateway must call
+        # plugin.update_router and echo the gateway info back.
+        router_id, router = self._get_router_test()
+
+        router_request_info = {'external_gateway_info': {
+            "network_id": "3c5bcddd-6af9-4e6b-9c3e-c153e521cab8",
+            "enable_snat": True}
+            }
+        return_value = copy.deepcopy(router['router'])
+        return_value.update(router_request_info)
+        return_value.update({'status': "ACTIVE", 'id': router_id})
+
+        instance = self.plugin.return_value
+        instance.update_router.return_value = return_value
+
+        router_request = {'router': router_request_info}
+        res = self.api.put(_get_path('routers', id=router_id, fmt=self.fmt),
+                           self.serialize(router_request))
+        instance.update_router.assert_called_once_with(mock.ANY, router_id,
+                                                       router=router_request)
+
+        self.assertEqual(exc.HTTPOk.code, res.status_int)
+        res = self.deserialize(res)
+        self.assertIn('router', res)
+        router = res['router']
+        self.assertEqual(router_id, router['id'])
+        self.assertEqual("3c5bcddd-6af9-4e6b-9c3e-c153e521cab8",
+                         router["external_gateway_info"]['network_id'])
+        self.assertEqual(True, router["external_gateway_info"]['enable_snat'])
+
+    def test_delete_router(self):
+        # DELETE /routers/<id> must call plugin.delete_router and return 204.
+        router_id, router = self._get_router_test()
+
+        instance = self.plugin.return_value
+
+        res = self.api.delete(_get_path('routers', id=router_id, fmt=self.fmt))
+        instance.delete_router.assert_called_once_with(mock.ANY, router_id)
+
+        self.assertEqual(exc.HTTPNoContent.code, res.status_int)
+
+    def test_create_floating_ip(self):
+        # POST /floatingips with a port association; get_port is mocked so
+        # the plugin can resolve the port's subnet.
+        floating_ip_id, floating_ip = self._get_floating_ip_test()
+        port_id, port = self._get_port_test()
+
+        floating_ip_request_info = {"floating_network_id":
+                                    "376da547-b977-4cfe-9cba-275c80debf57",
+                                    "tenant_id": "test-tenant",
+                                    "fixed_ip_address": "10.0.0.3",
+                                    "subnet_id": port['subnets'][0]['id'],
+                                    "port_id": port_id,
+                                    "floating_ip_address": "172.24.4.228"
+                                    }
+
+        return_value = copy.deepcopy(floating_ip['floatingip'])
+        return_value.update(floating_ip_request_info)
+        return_value.update({'status': "ACTIVE"})
+
+        instance = self.plugin.return_value
+        instance.create_floatingip.return_value = return_value
+        instance.get_floatingips_count.return_value = 0
+        instance.get_port = mock.Mock(return_value=port)
+
+        floating_ip_request = {'floatingip': floating_ip_request_info}
+
+        res = self.api.post(_get_path('floatingips', fmt=self.fmt),
+                            self.serialize(floating_ip_request))
+
+        instance.create_floatingip.\
+            assert_called_once_with(mock.ANY,
+                                    floatingip=floating_ip_request)
+
+        self.assertEqual(exc.HTTPCreated.code, res.status_int)
+        res = self.deserialize(res)
+        self.assertIn('floatingip', res)
+        floatingip = res['floatingip']
+        self.assertEqual(floating_ip_id, floatingip['id'])
+        self.assertEqual("ACTIVE", floatingip['status'])
+
+    def test_update_floating_ip(self):
+        # PUT /floatingips/<id> with port_id=None (disassociation) must
+        # return a floating IP with port and fixed address cleared.
+        floating_ip_id, floating_ip = self._get_floating_ip_test()
+
+        floating_ip_request_info = {"port_id": None}
+
+        return_value = copy.deepcopy(floating_ip['floatingip'])
+        return_value.update(floating_ip_request_info)
+        return_value.update({"status": "ACTIVE",
+                             "tenant_id": "test-tenant",
+                             "floating_network_id":
+                                 "376da547-b977-4cfe-9cba-275c80debf57",
+                             "fixed_ip_address": None,
+                             "floating_ip_address": "172.24.4.228"
+                             })
+
+        instance = self.plugin.return_value
+        instance.update_floatingip.return_value = return_value
+        port_id, port = self._get_port_test()
+        instance.get_port = mock.Mock(return_value=port)
+
+        floating_ip_request = {'floatingip': floating_ip_request_info}
+
+        res = self.api.put(_get_path('floatingips', id=floating_ip_id,
+                                     fmt=self.fmt),
+                           self.serialize(floating_ip_request))
+
+        instance.update_floatingip.\
+            assert_called_once_with(mock.ANY,
+                                    floating_ip_id,
+                                    floatingip=floating_ip_request)
+
+        self.assertEqual(exc.HTTPOk.code, res.status_int)
+        res = self.deserialize(res)
+        self.assertIn('floatingip', res)
+        floatingip = res['floatingip']
+        self.assertEqual(floating_ip_id, floatingip['id'])
+        self.assertIsNone(floatingip['port_id'])
+        self.assertIsNone(floatingip['fixed_ip_address'])
+
+    def test_delete_floating_ip(self):
+        # DELETE /floatingips/<id> must call plugin.delete_floatingip and
+        # return 204.
+        floating_ip_id, floating_ip = self._get_floating_ip_test()
+
+        instance = self.plugin.return_value
+        port_id, port = self._get_port_test()
+        instance.get_port = mock.Mock(return_value=port)
+        res = self.api.delete(_get_path('floatingips', id=floating_ip_id))
+        instance.delete_floatingip.assert_called_once_with(mock.ANY,
+                                                           floating_ip_id)
+
+        self.assertEqual(exc.HTTPNoContent.code, res.status_int)
+
+    def test_add_router_interface(self):
+        # PUT /routers/<id>/add_router_interface by subnet must pass the
+        # interface info through and echo the subnet_id in the response.
+        router_id, router = self._get_router_test()
+        interface_info = {"subnet_id": "a2f1f29d-571b-4533-907f-5803ab96ead1"}
+        return_value = {"tenant_id": "6ba032e4730d42e2ad928f430f5da33e",
+                        "port_id": "3a44f4e5-1694-493a-a1fb-393881c673a4",
+                        "id": router_id
+                        }
+        return_value.update(interface_info)
+
+        instance = self.plugin.return_value
+        instance.add_router_interface.return_value = return_value
+
+        res = self.api.put(_get_path('routers', id=router_id,
+                                     action="add_router_interface",
+                                     fmt=self.fmt),
+                           self.serialize(interface_info)
+                           )
+
+        instance.add_router_interface.assert_called_once_with(mock.ANY,
+                                                              router_id,
+                                                              interface_info)
+
+        self.assertEqual(exc.HTTPOk.code, res.status_int)
+        res = self.deserialize(res)
+        self.assertEqual(router_id, res['id'])
+        self.assertEqual("a2f1f29d-571b-4533-907f-5803ab96ead1",
+                         res['subnet_id'])
+
+    def test_remove_router_interface(self):
+        # PUT /routers/<id>/remove_router_interface with both subnet_id and
+        # port_id must pass through to plugin.remove_router_interface.
+        router_id, router = self._get_router_test()
+        interface_info = {"subnet_id": "a2f1f29d-571b-4533-907f-5803ab96ead1",
+                          "port_id": "3a44f4e5-1694-493a-a1fb-393881c673a4"
+                          }
+        return_value = {"tenant_id": "6ba032e4730d42e2ad928f430f5da33e",
+                        "id": router_id
+                        }
+        return_value.update(interface_info)
+
+        instance = self.plugin.return_value
+        instance.remove_router_interface.return_value = return_value
+        res = self.api.put(_get_path('routers', id=router_id,
+                                     action="remove_router_interface",
+                                     fmt=self.fmt),
+                           self.serialize(interface_info)
+                           )
+
+        instance.remove_router_interface.\
+            assert_called_once_with(mock.ANY,
+                                    router_id,
+                                    interface_info)
+
+        self.assertEqual(exc.HTTPOk.code, res.status_int)
+        res = self.deserialize(res)
+        self.assertEqual(router_id, res['id'])
+        self.assertEqual("a2f1f29d-571b-4533-907f-5803ab96ead1",
+                         res['subnet_id'])
diff --git a/networking-odl/networking_odl/tests/unit/l3/test_l3_odl_v2.py b/networking-odl/networking_odl/tests/unit/l3/test_l3_odl_v2.py
new file mode 100644 (file)
index 0000000..da3f644
--- /dev/null
@@ -0,0 +1,526 @@
+# Copyright (c) 2016 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from networking_odl.common import client
+from networking_odl.common import constants as odl_const
+from networking_odl.common import filters
+from networking_odl.db import db
+from networking_odl.journal import journal
+from networking_odl.l3 import l3_odl_v2
+from networking_odl.ml2 import mech_driver_v2
+
+import mock
+from oslo_serialization import jsonutils
+import requests
+
+from neutron import context
+from neutron.db import api as neutron_db_api
+from neutron.extensions import external_net as external_net
+from neutron import manager
+from neutron.plugins.ml2 import config as config
+from neutron.plugins.ml2 import plugin
+from neutron.tests import base
+from neutron.tests.unit.db import test_db_base_plugin_v2
+from neutron.tests.unit import testlib_api
+
+EMPTY_DEP = []
+FLOATINGIP_ID = 'floatingip_uuid'
+NETWORK_ID = 'network_uuid'
+ROUTER_ID = 'router_uuid'
+SUBNET_ID = 'subnet_uuid'
+PORT_ID = 'port_uuid'
+
+
+class OpenDayLightMechanismConfigTests(testlib_api.SqlTestCase):
+    """Config validation tests for the opendaylight ML2 mechanism driver.
+
+    Each required ml2_odl option (url, username, password), when unset,
+    must make Ml2Plugin construction raise RequiredOptError.
+    """
+
+    def _set_config(self, url='http://127.0.0.1:9999', username='someuser',
+                    password='somepass'):
+        # Install a complete, valid ml2/ml2_odl configuration; callers
+        # override individual options with None to simulate missing config.
+        config.cfg.CONF.set_override('mechanism_drivers',
+                                     ['logger', 'opendaylight'],
+                                     'ml2')
+        config.cfg.CONF.set_override('url', url, 'ml2_odl')
+        config.cfg.CONF.set_override('username', username, 'ml2_odl')
+        config.cfg.CONF.set_override('password', password, 'ml2_odl')
+
+    def _test_missing_config(self, **kwargs):
+        # Apply the (partially missing) config and expect plugin init to
+        # fail with RequiredOptError.
+        self._set_config(**kwargs)
+        self.assertRaises(config.cfg.RequiredOptError,
+                          plugin.Ml2Plugin)
+
+    def test_valid_config(self):
+        # With all options present, plugin construction must succeed.
+        self._set_config()
+        plugin.Ml2Plugin()
+
+    def test_missing_url_raises_exception(self):
+        self._test_missing_config(url=None)
+
+    def test_missing_username_raises_exception(self):
+        self._test_missing_config(username=None)
+
+    def test_missing_password_raises_exception(self):
+        self._test_missing_config(password=None)
+
+
+class DataMatcher(object):
+    """Equality matcher for JSON request bodies sent to ODL.
+
+    Holds the expected object dict (run through the same ODL filters the
+    driver applies) and compares equal to any JSON string that decodes to
+    it, so it can be used directly in mock assert_called_with checks.
+    """
+
+    def __init__(self, operation, object_type, object_dict):
+        # Copy so filter_for_odl's in-place mutation can't affect the
+        # caller's dict.
+        self._data = object_dict.copy()
+        self._object_type = object_type
+        filters.filter_for_odl(object_type, operation, self._data)
+
+    def __eq__(self, s):
+        data = jsonutils.loads(s)
+        # Router-interface payloads are sent bare; all other object types
+        # are wrapped under their type key.
+        if self._object_type == odl_const.ODL_ROUTER_INTF:
+            return self._data == data
+        else:
+            return self._data == data[self._object_type]
+
+    def __ne__(self, s):
+        # Python 2 does not derive __ne__ from __eq__, so define it.
+        return not self.__eq__(s)
+
+
+class OpenDaylightL3TestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
+                             base.BaseTestCase):
+    def setUp(self):
+        # Use the ML2 core plugin with the opendaylight mechanism driver;
+        # the ODL URL points at an unused local port so no real requests
+        # can succeed, and the journal sync thread is prevented from
+        # starting so tests drive it explicitly.
+        config.cfg.CONF.set_override("core_plugin",
+                                     'neutron.plugins.ml2.plugin.Ml2Plugin')
+        core_plugin = config.cfg.CONF.core_plugin
+        super(OpenDaylightL3TestCase, self).setUp(plugin=core_plugin)
+        config.cfg.CONF.set_override('mechanism_drivers',
+                                     ['logger', 'opendaylight'], 'ml2')
+        config.cfg.CONF.set_override('url', 'http://127.0.0.1:9999', 'ml2_odl')
+        config.cfg.CONF.set_override('username', 'someuser', 'ml2_odl')
+        config.cfg.CONF.set_override('password', 'somepass', 'ml2_odl')
+        mock.patch.object(journal.OpendaylightJournalThread,
+                          'start_odl_sync_thread').start()
+        self.db_session = neutron_db_api.get_session()
+        self.mech = mech_driver_v2.OpenDaylightMechanismDriver()
+        self.plugin = manager.NeutronManager.get_plugin()
+        # Treat every network as external so router gateway/floating-IP
+        # operations pass validation.
+        self.plugin._network_is_external = mock.Mock(return_value=True)
+        self.driver = l3_odl_v2.OpenDaylightL3RouterPlugin()
+        self.thread = journal.OpendaylightJournalThread()
+        self.driver.get_floatingip = mock.Mock(
+            return_value={'router_id': ROUTER_ID,
+                          'floating_network_id': NETWORK_ID})
+        # Remove any journal rows the test produced.
+        self.addCleanup(self._db_cleanup)
+
+    @staticmethod
+    def _get_mock_router_operation_info(network, subnet):
+        # Build (admin_context, router_body) referencing the given external
+        # network as the router's gateway.
+        router_context = context.get_admin_context()
+        router = {odl_const.ODL_ROUTER:
+                  {'name': 'router1',
+                   'admin_state_up': True,
+                   'tenant_id': network['network']['tenant_id'],
+                   'external_gateway_info': {'network_id':
+                                             network['network']['id']}}}
+        return router_context, router
+
+    @staticmethod
+    def _get_mock_floatingip_operation_info(network, subnet):
+        # Build (admin_context, floatingip_body) on the given network.
+        floatingip_context = context.get_admin_context()
+        floatingip = {odl_const.ODL_FLOATINGIP:
+                      {'floating_network_id': network['network']['id'],
+                       'tenant_id': network['network']['tenant_id']}}
+        return floatingip_context, floatingip
+
+    @staticmethod
+    def _get_mock_router_interface_operation_info(network, subnet):
+        # Build (admin_context, interface_info) for add/remove interface.
+        router_intf_context = context.get_admin_context()
+        router_intf_dict = {'subnet_id': subnet['subnet']['id'],
+                            'id': network['network']['id']}
+        return router_intf_context, router_intf_dict
+
+    @classmethod
+    def _get_mock_operation_info(cls, object_type, *args):
+        # Dispatch to the _get_mock_<object_type>_operation_info helper
+        # above by name.
+        getter = getattr(cls, '_get_mock_' + object_type + '_operation_info')
+        return getter(*args)
+
+    def _db_cleanup(self):
+        # Delete every journal row so tests leave no state behind.
+        rows = db.get_all_db_rows(self.db_session)
+        for row in rows:
+            db.delete_row(self.db_session, row=row)
+
+    @classmethod
+    def _get_mock_request_response(cls, status_code):
+        # Mock requests.Response: raise_for_status is a no-op for 2xx/3xx
+        # and raises HTTPError for >= 400, mirroring requests' behaviour.
+        # NOTE(review): _status_code_msgs is not defined in this class as
+        # shown here -- presumably inherited/defined elsewhere; confirm.
+        response = mock.Mock(status_code=status_code)
+        response.raise_for_status = mock.Mock() if status_code < 400 else (
+            mock.Mock(side_effect=requests.exceptions.HTTPError(
+                cls._status_code_msgs[status_code])))
+        return response
+
+    def _test_operation(self, status_code, expected_calls, *args, **kwargs):
+        """Run one journal sync pass and verify the HTTP request it makes.
+
+        requests.request is patched to return a canned response with the
+        given status code; the thread's wait event is forced to return
+        immediately so run_sync_thread processes pending rows at once.
+        """
+        request_response = self._get_mock_request_response(status_code)
+        with mock.patch('requests.request',
+                        return_value=request_response) as mock_method:
+            with mock.patch.object(self.thread.event, 'wait',
+                                   return_value=False):
+                self.thread.run_sync_thread(exit_after_run=True)
+
+        # expected_calls == 0 means "nothing should have been sent"; the
+        # call-count assertion below covers that case.
+        if expected_calls:
+            mock_method.assert_called_with(
+                headers={'Content-Type': 'application/json'},
+                auth=(config.cfg.CONF.ml2_odl.username,
+                      config.cfg.CONF.ml2_odl.password),
+                timeout=config.cfg.CONF.ml2_odl.timeout, *args, **kwargs)
+        self.assertEqual(expected_calls, mock_method.call_count)
+
+    def _call_operation_object(self, operation, object_type, object_id,
+                               network, subnet):
+        """Invoke driver.<operation>_<object_type> with mocked-up data.
+
+        Returns (context, resulting_object_dict); the call-signature shape
+        differs per operation (create takes a body, update takes id+body,
+        add/remove interface build a router dict, delete takes only id).
+        """
+        object_context, object_dict = self._get_mock_operation_info(
+            object_type, network, subnet)
+        method = getattr(self.driver, operation + '_' + object_type)
+
+        if operation == odl_const.ODL_CREATE:
+            new_object_dict = method(object_context, object_dict)
+        elif operation == odl_const.ODL_UPDATE:
+            new_object_dict = method(object_context, object_id, object_dict)
+        elif operation in [odl_const.ODL_ADD, odl_const.ODL_REMOVE]:
+            router_dict = method(object_context, object_id, object_dict)
+            new_object_dict = self.driver._generate_router_dict(
+                object_id, object_dict, router_dict)
+        else:
+            new_object_dict = method(object_context, object_id)
+
+        return object_context, new_object_dict
+
+    def _test_operation_thread_processing(self, object_type, operation,
+                                          network, subnet, object_id,
+                                          expected_calls=1):
+        """Perform an L3 operation, run the journal, and verify the REST call.
+
+        Maps the journal operation to its HTTP verb and expected status
+        code, builds the URL ODL should receive, then delegates to
+        _test_operation. Returns the object dict produced by the driver.
+        """
+        http_requests = {odl_const.ODL_CREATE: 'post',
+                         odl_const.ODL_UPDATE: 'put',
+                         odl_const.ODL_DELETE: 'delete',
+                         odl_const.ODL_ADD: 'put',
+                         odl_const.ODL_REMOVE: 'put'}
+        status_codes = {odl_const.ODL_CREATE: requests.codes.created,
+                        odl_const.ODL_UPDATE: requests.codes.ok,
+                        odl_const.ODL_DELETE: requests.codes.no_content,
+                        odl_const.ODL_ADD: requests.codes.created,
+                        odl_const.ODL_REMOVE: requests.codes.created}
+
+        http_request = http_requests[operation]
+        status_code = status_codes[operation]
+
+        # Create database entry.
+        object_context, new_object_dict = self._call_operation_object(
+            operation, object_type, object_id, network, subnet)
+
+        # Setup expected results.
+        if operation in [odl_const.ODL_UPDATE, odl_const.ODL_DELETE]:
+            url = (config.cfg.CONF.ml2_odl.url + '/' + object_type + 's/' +
+                   object_id)
+        elif operation in [odl_const.ODL_ADD, odl_const.ODL_REMOVE]:
+            # Interface changes go to .../routers/<id>/{add,remove}_router_interface.
+            url = (config.cfg.CONF.ml2_odl.url + '/' + odl_const.ODL_ROUTER +
+                   's/' + object_id + '/' + operation + '_router_interface')
+        else:
+            url = config.cfg.CONF.ml2_odl.url + '/' + object_type + 's'
+
+        if operation in [odl_const.ODL_CREATE, odl_const.ODL_UPDATE,
+                         odl_const.ODL_ADD, odl_const.ODL_REMOVE]:
+            # Body-carrying verbs are matched with DataMatcher; delete
+            # sends no body.
+            kwargs = {
+                'url': url,
+                'data': DataMatcher(operation, object_type, new_object_dict)}
+        else:
+            kwargs = {'url': url, 'data': None}
+
+        # Call threading routine to process database entry. Test results.
+        self._test_operation(status_code, expected_calls, http_request,
+                             **kwargs)
+
+        return new_object_dict
+
+    def _test_thread_processing(self, object_type):
+        """Run create/update/delete for object_type through the journal.
+
+        After each operation the journal must contain one more COMPLETED
+        row (1, then 2, then 3).
+        """
+        # Create network and subnet.
+        kwargs = {'arg_list': (external_net.EXTERNAL,),
+                  external_net.EXTERNAL: True}
+        with self.network(**kwargs) as network:
+            with self.subnet(network=network, cidr='10.0.0.0/24'):
+                # Add and process create request.
+                new_object_dict = self._test_operation_thread_processing(
+                    object_type, odl_const.ODL_CREATE, network, None, None)
+                object_id = new_object_dict['id']
+                rows = db.get_all_db_rows_by_state(self.db_session,
+                                                   odl_const.COMPLETED)
+                self.assertEqual(1, len(rows))
+
+                # Add and process 'update' request. Adds to database.
+                self._test_operation_thread_processing(
+                    object_type, odl_const.ODL_UPDATE, network, None,
+                    object_id)
+                rows = db.get_all_db_rows_by_state(self.db_session,
+                                                   odl_const.COMPLETED)
+                self.assertEqual(2, len(rows))
+
+                # Add and process 'delete' request. Adds to database.
+                self._test_operation_thread_processing(
+                    object_type, odl_const.ODL_DELETE, network, None,
+                    object_id)
+                rows = db.get_all_db_rows_by_state(self.db_session,
+                                                   odl_const.COMPLETED)
+                self.assertEqual(3, len(rows))
+
+    def _test_db_results(self, object_id, operation, object_type):
+        # Assert the journal holds exactly one row matching the given
+        # operation/type/uuid, then wipe it so the next check starts clean.
+        rows = db.get_all_db_rows(self.db_session)
+
+        self.assertEqual(1, len(rows))
+        self.assertEqual(operation, rows[0]['operation'])
+        self.assertEqual(object_type, rows[0]['object_type'])
+        self.assertEqual(object_id, rows[0]['object_uuid'])
+
+        self._db_cleanup()
+
+    def _test_object_db(self, object_type):
+        """Verify create/update/delete driver calls each add a db entry.
+
+        Looks up the driver's create_<type>/update_<type>/delete_<type>
+        methods by name, invokes each in turn, and validates the database
+        row written after every call via _test_db_results().
+        """
+        # Create network and subnet for testing.
+        kwargs = {'arg_list': (external_net.EXTERNAL,),
+                  external_net.EXTERNAL: True}
+        with self.network(**kwargs) as network:
+            with self.subnet(network=network):
+                object_context, object_dict = self._get_mock_operation_info(
+                    object_type, network, None)
+
+                # Add and test 'create' database entry.
+                method = getattr(self.driver,
+                                 odl_const.ODL_CREATE + '_' + object_type)
+                new_object_dict = method(object_context, object_dict)
+                object_id = new_object_dict['id']
+                self._test_db_results(object_id, odl_const.ODL_CREATE,
+                                      object_type)
+
+                # Add and test 'update' database entry.
+                method = getattr(self.driver,
+                                 odl_const.ODL_UPDATE + '_' + object_type)
+                method(object_context, object_id, object_dict)
+                self._test_db_results(object_id, odl_const.ODL_UPDATE,
+                                      object_type)
+
+                # Add and test 'delete' database entry.
+                method = getattr(self.driver,
+                                 odl_const.ODL_DELETE + '_' + object_type)
+                method(object_context, object_id)
+                self._test_db_results(object_id, odl_const.ODL_DELETE,
+                                      object_type)
+
+    def _test_dependency_processing(
+            self, test_operation, test_object, test_id, test_context,
+            dep_operation, dep_object, dep_id, dep_context):
+        """Verify a row under test is deferred while its dependency is busy.
+
+        Creates a dependency row pinned in the PROCESSING state, adds the
+        row under test, runs a single pass of the journal sync thread, and
+        checks that the test row is put back to PENDING (to be retried
+        later) and that nothing was sent to ODL.
+        """
+        # Mock sendjson to verify that it never gets called.
+        mock_sendjson = mock.patch.object(client.OpenDaylightRestClient,
+                                          'sendjson').start()
+
+        # Create dependency db row and mark as 'processing' so it won't
+        # be processed by the journal thread.
+        db.create_pending_row(self.db_session, dep_object,
+                              dep_id, dep_operation, dep_context)
+        row = db.get_all_db_rows_by_state(self.db_session, odl_const.PENDING)
+        db.update_db_row_state(self.db_session, row[0], odl_const.PROCESSING)
+
+        # Create test row with dependent ID.
+        db.create_pending_row(self.db_session, test_object,
+                              test_id, test_operation, test_context)
+
+        # Call journal thread.
+        with mock.patch.object(self.thread.event, 'wait',
+                               return_value=False):
+            self.thread.run_sync_thread(exit_after_run=True)
+
+        # Verify that dependency row is still set at 'processing'.
+        rows = db.get_all_db_rows_by_state(self.db_session,
+                                           odl_const.PROCESSING)
+        self.assertEqual(1, len(rows))
+
+        # Verify that the test row was processed and set back to 'pending'
+        # to be processed again.
+        rows = db.get_all_db_rows_by_state(self.db_session, odl_const.PENDING)
+        self.assertEqual(1, len(rows))
+
+        # Verify that sendjson was not called, i.e. nothing was sent to ODL.
+        self.assertFalse(mock_sendjson.call_count)
+
+    def test_router_db(self):
+        # Router create/update/delete should each record a database entry.
+        self._test_object_db(odl_const.ODL_ROUTER)
+
+    def test_floatingip_db(self):
+        # Floating IP create/update/delete should each record a db entry.
+        self._test_object_db(odl_const.ODL_FLOATINGIP)
+
+    def test_router_intf_db(self):
+        """Router interface add/remove should each record a db entry."""
+        # Create network, subnet and router for testing.
+        kwargs = {'arg_list': (external_net.EXTERNAL,),
+                  external_net.EXTERNAL: True}
+        with self.network(**kwargs) as network:
+            with self.subnet(cidr='10.0.0.0/24') as subnet:
+                router_context, router_dict = (
+                    self._get_mock_router_operation_info(network, None))
+                new_router_dict = self.driver.create_router(router_context,
+                                                            router_dict)
+                router_id = new_router_dict['id']
+
+                object_type = odl_const.ODL_ROUTER_INTF
+                router_intf_context, router_intf_dict = \
+                    self._get_mock_router_interface_operation_info(network,
+                                                                   subnet)
+
+                # Remove 'router' database entry to allow tests to pass.
+                self._db_cleanup()
+
+                # Add and test router interface 'add' database entry.
+                # Note that router interface events do not generate unique
+                # UUIDs.
+                self.driver.add_router_interface(router_intf_context,
+                                                 router_id, router_intf_dict)
+                self._test_db_results(odl_const.ODL_UUID_NOT_USED,
+                                      odl_const.ODL_ADD, object_type)
+
+                # Add and test 'remove' database entry.
+                self.driver.remove_router_interface(router_intf_context,
+                                                    router_id,
+                                                    router_intf_dict)
+                self._test_db_results(odl_const.ODL_UUID_NOT_USED,
+                                      odl_const.ODL_REMOVE, object_type)
+
+    def test_router_threading(self):
+        # Full journal-thread create/update/delete pass for routers.
+        self._test_thread_processing(odl_const.ODL_ROUTER)
+
+    def test_floatingip_threading(self):
+        # Full journal-thread create/update/delete pass for floating IPs.
+        self._test_thread_processing(odl_const.ODL_FLOATINGIP)
+
+    def test_router_intf_threading(self):
+        """Journal thread processes router interface add/remove requests."""
+        # Create network, subnet and router for testing.
+        kwargs = {'arg_list': (external_net.EXTERNAL,),
+                  external_net.EXTERNAL: True}
+        with self.network(**kwargs) as network:
+            with self.subnet(cidr='10.0.0.0/24') as subnet:
+                router_context, router_dict = (
+                    self._get_mock_router_operation_info(network, None))
+                new_router_dict = self.driver.create_router(router_context,
+                                                            router_dict)
+                router_id = new_router_dict['id']
+                object_type = odl_const.ODL_ROUTER_INTF
+
+                # Add and process router interface 'add' request. Adds to
+                # database. Expected calls = 2 because the create_router db
+                # entry is also processed.
+                self._test_operation_thread_processing(
+                    object_type, odl_const.ODL_ADD, network, subnet, router_id,
+                    expected_calls=2)
+                rows = db.get_all_db_rows_by_state(self.db_session,
+                                                   odl_const.COMPLETED)
+                self.assertEqual(2, len(rows))
+
+                # Add and process 'remove' request. Adds to database.
+                self._test_operation_thread_processing(
+                    object_type, odl_const.ODL_REMOVE, network, subnet,
+                    router_id)
+                rows = db.get_all_db_rows_by_state(self.db_session,
+                                                   odl_const.COMPLETED)
+                self.assertEqual(3, len(rows))
+
+    # The tests below exercise the journal's dependency validation: the
+    # second (dep_*) operation passed to _test_dependency_processing is
+    # pinned in the 'processing' state, and the first (test_*) operation
+    # must remain deferred ('pending') until the dependency completes.
+
+    def test_delete_network_validate_ext_delete_router_dep(self):
+        router_context = [NETWORK_ID]
+        self._test_dependency_processing(
+            odl_const.ODL_DELETE, odl_const.ODL_NETWORK, NETWORK_ID, None,
+            odl_const.ODL_DELETE, odl_const.ODL_ROUTER, ROUTER_ID,
+            router_context)
+
+    def test_create_router_validate_ext_create_port_dep(self):
+        router_context = {'gw_port_id': PORT_ID}
+        self._test_dependency_processing(
+            odl_const.ODL_CREATE, odl_const.ODL_ROUTER, ROUTER_ID,
+            router_context,
+            odl_const.ODL_CREATE, odl_const.ODL_PORT, PORT_ID, None)
+
+    def test_delete_router_validate_ext_delete_floatingip_dep(self):
+        floatingip_context = [ROUTER_ID]
+        self._test_dependency_processing(
+            odl_const.ODL_DELETE, odl_const.ODL_ROUTER, ROUTER_ID, None,
+            odl_const.ODL_DELETE, odl_const.ODL_FLOATINGIP, FLOATINGIP_ID,
+            floatingip_context)
+
+    def test_delete_router_validate_ext_remove_routerintf_dep(self):
+        router_intf_dict = {'id': ROUTER_ID}
+        self._test_dependency_processing(
+            odl_const.ODL_DELETE, odl_const.ODL_ROUTER, ROUTER_ID, None,
+            odl_const.ODL_REMOVE, odl_const.ODL_ROUTER_INTF,
+            odl_const.ODL_UUID_NOT_USED, router_intf_dict)
+
+    def test_delete_router_validate_self_create_dep(self):
+        # NOTE(review): EMPTY_DEP is defined elsewhere in this module;
+        # presumably a context carrying no references to other objects.
+        self._test_dependency_processing(
+            odl_const.ODL_DELETE, odl_const.ODL_ROUTER, ROUTER_ID, EMPTY_DEP,
+            odl_const.ODL_CREATE, odl_const.ODL_ROUTER, ROUTER_ID, None)
+
+    def test_delete_router_validate_self_update_dep(self):
+        self._test_dependency_processing(
+            odl_const.ODL_DELETE, odl_const.ODL_ROUTER, ROUTER_ID, EMPTY_DEP,
+            odl_const.ODL_UPDATE, odl_const.ODL_ROUTER, ROUTER_ID, None)
+
+    def test_update_router_validate_self_create_dep(self):
+        router_context = {'gw_port_id': None}
+        self._test_dependency_processing(
+            odl_const.ODL_UPDATE, odl_const.ODL_ROUTER, ROUTER_ID,
+            router_context,
+            odl_const.ODL_CREATE, odl_const.ODL_ROUTER, ROUTER_ID, None)
+
+    def test_create_floatingip_validate_ext_create_network_dep(self):
+        floatingip_context = {'floating_network_id': NETWORK_ID}
+        self._test_dependency_processing(
+            odl_const.ODL_CREATE, odl_const.ODL_FLOATINGIP, FLOATINGIP_ID,
+            floatingip_context,
+            odl_const.ODL_CREATE, odl_const.ODL_NETWORK, NETWORK_ID, None)
+
+    def test_update_floatingip_validate_self_create_dep(self):
+        floatingip_context = {'floating_network_id': NETWORK_ID}
+        self._test_dependency_processing(
+            odl_const.ODL_UPDATE, odl_const.ODL_FLOATINGIP, FLOATINGIP_ID,
+            floatingip_context,
+            odl_const.ODL_CREATE, odl_const.ODL_FLOATINGIP, FLOATINGIP_ID,
+            EMPTY_DEP)
+
+    def test_delete_floatingip_validate_self_create_dep(self):
+        self._test_dependency_processing(
+            odl_const.ODL_DELETE, odl_const.ODL_FLOATINGIP, FLOATINGIP_ID,
+            EMPTY_DEP,
+            odl_const.ODL_CREATE, odl_const.ODL_FLOATINGIP, FLOATINGIP_ID,
+            None)
+
+    def test_delete_floatingip_validate_self_update_dep(self):
+        self._test_dependency_processing(
+            odl_const.ODL_DELETE, odl_const.ODL_FLOATINGIP, FLOATINGIP_ID,
+            EMPTY_DEP,
+            odl_const.ODL_UPDATE, odl_const.ODL_FLOATINGIP, FLOATINGIP_ID,
+            None)
+
+    def test_add_router_intf_validate_ext_create_router_dep(self):
+        router_intf_context = {'subnet_id': SUBNET_ID,
+                               'id': ROUTER_ID}
+        self._test_dependency_processing(
+            odl_const.ODL_ADD, odl_const.ODL_ROUTER_INTF,
+            odl_const.ODL_UUID_NOT_USED, router_intf_context,
+            odl_const.ODL_CREATE, odl_const.ODL_ROUTER, ROUTER_ID, None)
+
+    def test_add_router_intf_validate_ext_create_subnet_dep(self):
+        router_intf_context = {'subnet_id': SUBNET_ID,
+                               'id': ROUTER_ID}
+        self._test_dependency_processing(
+            odl_const.ODL_ADD, odl_const.ODL_ROUTER_INTF,
+            odl_const.ODL_UUID_NOT_USED, router_intf_context,
+            odl_const.ODL_CREATE, odl_const.ODL_SUBNET, SUBNET_ID, None)
+
+    def test_remove_router_intf_validate_self_remove_router_intf_dep(self):
+        router_intf_context = {'subnet_id': SUBNET_ID,
+                               'id': ROUTER_ID}
+        self._test_dependency_processing(
+            odl_const.ODL_REMOVE, odl_const.ODL_ROUTER_INTF,
+            odl_const.ODL_UUID_NOT_USED, router_intf_context,
+            odl_const.ODL_ADD, odl_const.ODL_ROUTER_INTF,
+            odl_const.ODL_UUID_NOT_USED, router_intf_context)
diff --git a/networking-odl/networking_odl/tests/unit/lbaas/__init__.py b/networking-odl/networking_odl/tests/unit/lbaas/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/networking-odl/networking_odl/tests/unit/lbaas/test_lbaas_odl_v1.py b/networking-odl/networking_odl/tests/unit/lbaas/test_lbaas_odl_v1.py
new file mode 100644 (file)
index 0000000..bca0ccb
--- /dev/null
@@ -0,0 +1,32 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+test_lbaas_odl
+----------------------------------
+
+Tests for the LBaaS plugin for networking-odl.
+"""
+
+import mock
+
+from networking_odl.lbaas import driver_v1 as lbaas_odl
+
+from neutron.tests import base
+
+
+class TestODL_LBaaS(base.BaseTestCase):
+    """Smoke test for the OpenDaylight LBaaS v1 driver."""
+
+    def test_init(self):
+        # just create an instance of OpenDaylightLbaasDriverV1; the
+        # plugin dependency is mocked, so this only checks construction.
+        self.plugin = mock.Mock()
+        lbaas_odl.OpenDaylightLbaasDriverV1(self.plugin)
diff --git a/networking-odl/networking_odl/tests/unit/lbaas/test_lbaas_odl_v2.py b/networking-odl/networking_odl/tests/unit/lbaas/test_lbaas_odl_v2.py
new file mode 100644 (file)
index 0000000..f8292f6
--- /dev/null
@@ -0,0 +1,32 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+test_lbaas_odl
+----------------------------------
+
+Tests for the LBaaS plugin for networking-odl.
+"""
+
+import mock
+
+from networking_odl.lbaas import driver_v2 as lbaas_odl
+
+from neutron.tests import base
+
+
+class TestODL_LBaaS(base.BaseTestCase):
+    """Smoke test for the OpenDaylight LBaaS v2 driver."""
+
+    def test_init(self):
+        # just create an instance of OpenDaylightLbaasDriverV2; the
+        # plugin dependency is mocked, so this only checks construction.
+        self.plugin = mock.Mock()
+        lbaas_odl.OpenDaylightLbaasDriverV2(self.plugin)
diff --git a/networking-odl/networking_odl/tests/unit/ml2/__init__.py b/networking-odl/networking_odl/tests/unit/ml2/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/networking-odl/networking_odl/tests/unit/ml2/config-ovs-external_ids.sh b/networking-odl/networking_odl/tests/unit/ml2/config-ovs-external_ids.sh
new file mode 100755 (executable)
index 0000000..15f9b93
--- /dev/null
@@ -0,0 +1,37 @@
+#!/bin/sh
+# Copyright (c) 2016 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# Populates the local Open_vSwitch table with the odl_os_hostconfig
+# external_ids used when testing the ODL port binding end to end
+# (see odl_teststub.js in this directory).
+
+# UUID of the singleton Open_vSwitch table record.
+uuid=$(sudo ovs-vsctl get Open_vSwitch . _uuid)
+
+# Test data
+sudo ovs-vsctl set Open_vSwitch $uuid \
+    external_ids:odl_os_hostconfig_hostid="devstack"
+
+# sudo ovs-vsctl set Open_vSwitch $uuid \
+#    external_ids:odl_os_hostconfig_hosttype="ODL L2"
+
+# JSON host configuration: supported vnic/vif types, allowed network
+# types and bridge mappings advertised for this host.
+config=$(cat <<____CONFIG
+{"supported_vnic_types":[
+    {"vnic_type":"normal","vif_type":"ovs","vif_details":{}}],
+ "allowed_network_types":["local","vlan","vxlan","gre"],
+ "bridge_mappings":{"physnet1":"br-ex"}}
+____CONFIG
+)
+
+echo config: $config
+
+sudo ovs-vsctl set Open_vSwitch $uuid \
+    external_ids:odl_os_hostconfig_config_odl_l2="$config"
diff --git a/networking-odl/networking_odl/tests/unit/ml2/odl_teststub.js b/networking-odl/networking_odl/tests/unit/ml2/odl_teststub.js
new file mode 100644 (file)
index 0000000..1ee02d5
--- /dev/null
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2016 OpenStack Foundation
+ * All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License. You may obtain
+ * a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ * $nodejs odl_teststub.js
+ *
+ * local.conf or ml2_conf.ini should be set to the following:
+ *
+ * [ml2_odl]
+ * port_binding_controller = pseudo-agentdb-binding
+ * password = admin
+ * username = admin
+ * url = http://localhost:8080/controller/nb/v2/neutron
+ * restconf_uri = http://localhost:8125/ # for this stub
+ *
+ * To test with ODL *end to end* use below URL for restconf_uri and configure
+ * ovsdb external_ids using the test script: config-ovs-external_ids.sh
+ *
+ * http://localhost:8181/restconf/operational/neutron:neutron/hostconfigs
+ */
+
+var http = require('http');
+
+// TCP port the stub listens on; must match restconf_uri in ml2_conf.ini
+// (see the header comment above).
+const PORT=8125;
+
+// Canned hostconfig payload mimicking the ODL hostconfigs REST response.
+// NOTE(review): assigned without var/let/const, so this becomes an
+// implicit global (would throw in strict mode) -- consider declaring it.
+__test_odl_hconfig = {"hostconfigs": {"hostconfig": [
+            {"host-id": "devstack",
+             "host-type": "ODL L2",
+             "config": {
+                 "supported_vnic_types": [
+                     {"vnic_type": "normal",
+                      "vif_type": "ovs",
+                      "vif_details": {}}],
+                 "allowed_network_types": ["local", "vlan", "vxlan", "gre"],
+                 "bridge_mappings": {"physnet1":"br-ex"}
+                 }
+             }]
+        }}
+
+
+// Reply to every request with the canned hostconfig JSON.
+function handleRequest(req, res){
+        res.setHeader('Content-Type', 'application/json');
+        res.end(JSON.stringify(__test_odl_hconfig));
+}
+
+var server = http.createServer(handleRequest);
+
+server.listen(PORT, function(){
+                console.log("Server listening on: http://localhost:%s", PORT);
+                });
diff --git a/networking-odl/networking_odl/tests/unit/ml2/ovs_topology.json b/networking-odl/networking_odl/tests/unit/ml2/ovs_topology.json
new file mode 100644 (file)
index 0000000..f855ce7
--- /dev/null
@@ -0,0 +1,171 @@
+{
+    "network-topology": {
+        "topology": [
+            {
+                "topology-id": "flow:1"
+            },
+            {
+                "node": [
+                    {
+                        "node-id": "ovsdb://uuid/c4ad780f-8f91-4fa4-804e-dd16beb191e2/bridge/br-ex",
+                        "ovsdb:bridge-external-ids": [
+                            {
+                                "bridge-external-id-key": "bridge-id",
+                                "bridge-external-id-value": "br-ex"
+                            }
+                        ],
+                        "ovsdb:bridge-name": "br-ex",
+                        "ovsdb:bridge-other-configs": [
+                            {
+                                "bridge-other-config-key": "disable-in-band",
+                                "bridge-other-config-value": "true"
+                            }
+                        ],
+                        "ovsdb:bridge-uuid": "4ba78705-3ac2-4e36-a2e1-32f1647d97a7",
+                        "ovsdb:datapath-id": "00:00:06:87:a7:4b:36:4e",
+                        "ovsdb:datapath-type": "ovsdb:datapath-type-netdev",
+                        "ovsdb:managed-by": "/network-topology:network-topology/network-topology:topology[network-topology:topology-id='ovsdb:1']/network-topology:node[network-topology:node-id='ovsdb://uuid/c4ad780f-8f91-4fa4-804e-dd16beb191e2']",
+                        "termination-point": [
+                            {
+                                "ovsdb:interface-external-ids": [
+                                    {
+                                        "external-id-key": "iface-id",
+                                        "external-id-value": "c44000c6-f199-4609-9325-afd8c72b6777"
+                                    },
+                                    {
+                                        "external-id-key": "iface-status",
+                                        "external-id-value": "active"
+                                    },
+                                    {
+                                        "external-id-key": "attached-mac",
+                                        "external-id-value": "fa:16:3e:a0:d5:49"
+                                    }
+                                ],
+                                "ovsdb:interface-type": "ovsdb:interface-type-internal",
+                                "ovsdb:interface-uuid": "c1081aa3-607f-404e-a71e-ea1dd334b263",
+                                "ovsdb:name": "qg-c44000c6-f1",
+                                "ovsdb:ofport": 1,
+                                "ovsdb:port-uuid": "1a2ef41e-4836-420c-977f-7a662c7abe62",
+                                "tp-id": "qg-c44000c6-f1"
+                            },
+                            {
+                                "ovsdb:interface-type": "ovsdb:interface-type-internal",
+                                "ovsdb:interface-uuid": "54439f6a-7a88-4cf6-84b7-0645642618f9",
+                                "ovsdb:name": "br-ex",
+                                "ovsdb:ofport": 65534,
+                                "ovsdb:port-uuid": "9bf4c1ab-d111-479d-84ab-1874f166153b",
+                                "tp-id": "br-ex"
+                            }
+                        ]
+                    },
+                    {
+                        "node-id": "ovsdb://uuid/c4ad780f-8f91-4fa4-804e-dd16beb191e2",
+                        "ovsdb:connection-info": {
+                            "local-ip": "10.237.214.247",
+                            "local-port": 6640,
+                            "remote-ip": "10.237.214.247",
+                            "remote-port": 43247
+                        },
+                        "ovsdb:managed-node-entry": [
+                            {
+                                "bridge-ref": "/network-topology:network-topology/network-topology:topology[network-topology:topology-id='ovsdb:1']/network-topology:node[network-topology:node-id='ovsdb://uuid/c4ad780f-8f91-4fa4-804e-dd16beb191e2/bridge/br-int']"
+                            },
+                            {
+                                "bridge-ref": "/network-topology:network-topology/network-topology:topology[network-topology:topology-id='ovsdb:1']/network-topology:node[network-topology:node-id='ovsdb://uuid/c4ad780f-8f91-4fa4-804e-dd16beb191e2/bridge/br-ex']"
+                            }
+                        ],
+                        "ovsdb:openvswitch-external-ids": [
+                            {
+                                "external-id-key": "system-id",
+                                "external-id-value": "c4dcfd6c-8f0e-43a6-9cf5-d2a0c37f5c52"
+                            }
+                        ],
+                        "ovsdb:openvswitch-other-configs": [
+                            {
+                                "other-config-key": "local_ip",
+                                "other-config-value": "10.237.214.247"
+                            },
+                            {
+                                "other-config-key": "provider_mappings",
+                                "other-config-value": "default:ens786f0"
+                            }
+                        ],
+                        "ovsdb:ovs-version": "2.3.2"
+                    },
+                    {
+                        "node-id": "ovsdb://uuid/c4ad780f-8f91-4fa4-804e-dd16beb191e2/bridge/br-int",
+                        "ovsdb:bridge-external-ids": [
+                            {
+                                "bridge-external-id-key": "bridge-id",
+                                "bridge-external-id-value": "br-int"
+                            }
+                        ],
+                        "ovsdb:bridge-name": "br-int",
+                        "ovsdb:bridge-uuid": "d3acbe7f-cdab-4ef1-80b8-68e5db3b3b7b",
+                        "ovsdb:datapath-id": "00:00:7e:be:ac:d3:f1:4e",
+                        "ovsdb:datapath-type": "ovsdb:datapath-type-system",
+                        "ovsdb:managed-by": "/network-topology:network-topology/network-topology:topology[network-topology:topology-id='ovsdb:1']/network-topology:node[network-topology:node-id='ovsdb://uuid/c4ad780f-8f91-4fa4-804e-dd16beb191e2']",
+                        "termination-point": [
+                            {
+                                "ovsdb:interface-type": "ovsdb:interface-type-internal",
+                                "ovsdb:interface-uuid": "8164bb4f-2b8c-4405-b8de-4b6b776baa27",
+                                "ovsdb:name": "br-int",
+                                "ovsdb:ofport": 65534,
+                                "ovsdb:port-uuid": "c34e1347-6757-4770-a05e-66cfb4b65167",
+                                "tp-id": "br-int"
+                            },
+                            {
+                                "ovsdb:interface-external-ids": [
+                                    {
+                                        "external-id-key": "iface-id",
+                                        "external-id-value": "1d5780fc-da03-4c98-8082-089d70cb65e3"
+                                    },
+                                    {
+                                        "external-id-key": "iface-status",
+                                        "external-id-value": "active"
+                                    },
+                                    {
+                                        "external-id-key": "attached-mac",
+                                        "external-id-value": "fa:16:3e:ee:3e:36"
+                                    }
+                                ],
+                                "ovsdb:interface-type": "ovsdb:interface-type-internal",
+                                "ovsdb:interface-uuid": "00d8d482-abf9-4459-8cb1-9c8e80df4943",
+                                "ovsdb:name": "tap1d5780fc-da",
+                                "ovsdb:ofport": 1,
+                                "ovsdb:port-uuid": "743a236a-a34c-4084-a5ed-8dac56371ca8",
+                                "tp-id": "tap1d5780fc-da"
+                            },
+                            {
+                                "ovsdb:interface-external-ids": [
+                                    {
+                                        "external-id-key": "iface-id",
+                                        "external-id-value": "674fd914-74c0-4065-a88a-929919446555"
+                                    },
+                                    {
+                                        "external-id-key": "iface-status",
+                                        "external-id-value": "active"
+                                    },
+                                    {
+                                        "external-id-key": "attached-mac",
+                                        "external-id-value": "fa:16:3e:62:0c:d3"
+                                    }
+                                ],
+                                "ovsdb:interface-type": "ovsdb:interface-type-internal",
+                                "ovsdb:interface-uuid": "41bde142-61bc-4297-a39d-8b0ee86a0731",
+                                "ovsdb:name": "qr-674fd914-74",
+                                "ovsdb:ofport": 2,
+                                "ovsdb:port-uuid": "1c505a53-ccfd-4745-9526-211016d9cbb3",
+                                "tp-id": "qr-674fd914-74"
+                            }
+                        ]
+                    }
+                ],
+                "topology-id": "ovsdb:1"
+            },
+            {
+                "topology-id": "netvirt:1"
+            }
+        ]
+    }
+}
\ No newline at end of file
diff --git a/networking-odl/networking_odl/tests/unit/ml2/test_driver.py b/networking-odl/networking_odl/tests/unit/ml2/test_driver.py
new file mode 100644 (file)
index 0000000..661eb55
--- /dev/null
@@ -0,0 +1,99 @@
+# Copyright (c) 2013-2015 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import mock
+from neutron import context
+from neutron.tests.unit.plugins.ml2 import test_plugin
+
+from networking_odl.common import constants as const
+from networking_odl.ml2 import mech_driver as driver
+
+
+class TestODLShim(test_plugin.Ml2PluginV2TestCase):
+    """Verify the ML2 mech driver delegates postcommit events and binding.
+
+    The backing OpenDaylight driver is replaced with a mock, so each test
+    only asserts that the shim forwards the expected operation and object
+    type (or the port context, for binding) to the backend.
+    """
+
+    def setUp(self):
+        super(TestODLShim, self).setUp()
+        self.context = context.get_admin_context()
+        self.plugin = mock.Mock()
+        self.driver = driver.OpenDaylightMechanismDriver()
+        # Replace the backend so synchronize() calls can be asserted on.
+        self.driver.odl_drv = mock.Mock()
+
+    def test_create_network_postcommit(self):
+        self.driver.create_network_postcommit(self.context)
+        self.driver.odl_drv.synchronize.assert_called_with(const.ODL_CREATE,
+                                                           const.ODL_NETWORKS,
+                                                           self.context)
+
+    def test_update_network_postcommit(self):
+        self.driver.update_network_postcommit(self.context)
+        self.driver.odl_drv.synchronize.assert_called_with(const.ODL_UPDATE,
+                                                           const.ODL_NETWORKS,
+                                                           self.context)
+
+    def test_delete_network_postcommit(self):
+        self.driver.delete_network_postcommit(self.context)
+        self.driver.odl_drv.synchronize.assert_called_with(const.ODL_DELETE,
+                                                           const.ODL_NETWORKS,
+                                                           self.context)
+
+    def test_create_subnet_postcommit(self):
+        self.driver.create_subnet_postcommit(self.context)
+        self.driver.odl_drv.synchronize.assert_called_with(const.ODL_CREATE,
+                                                           const.ODL_SUBNETS,
+                                                           self.context)
+
+    def test_update_subnet_postcommit(self):
+        self.driver.update_subnet_postcommit(self.context)
+        self.driver.odl_drv.synchronize.assert_called_with(const.ODL_UPDATE,
+                                                           const.ODL_SUBNETS,
+                                                           self.context)
+
+    def test_delete_subnet_postcommit(self):
+        self.driver.delete_subnet_postcommit(self.context)
+        self.driver.odl_drv.synchronize.assert_called_with(const.ODL_DELETE,
+                                                           const.ODL_SUBNETS,
+                                                           self.context)
+
+    def test_create_port_postcommit(self):
+        self.driver.create_port_postcommit(self.context)
+        self.driver.odl_drv.synchronize.assert_called_with(const.ODL_CREATE,
+                                                           const.ODL_PORTS,
+                                                           self.context)
+
+    def test_update_port_postcommit(self):
+        self.driver.update_port_postcommit(self.context)
+        self.driver.odl_drv.synchronize.assert_called_with(const.ODL_UPDATE,
+                                                           const.ODL_PORTS,
+                                                           self.context)
+
+    def test_delete_port_postcommit(self):
+        self.driver.delete_port_postcommit(self.context)
+        self.driver.odl_drv.synchronize.assert_called_with(const.ODL_DELETE,
+                                                           const.ODL_PORTS,
+                                                           self.context)
+
+    def test_bind_port_delegation(self):
+        # given front-end with attached back-end
+        front_end = self.driver
+        front_end.odl_drv = back_end = mock.MagicMock(
+            spec=driver.OpenDaylightDriver)
+        # given PortContext to be forwarded to back-end without using
+        # NOTE(review): this local name shadows the imported neutron
+        # 'context' module within this test; harmless here, but worth
+        # renaming for clarity.
+        context = object()
+
+        # when binding port
+        front_end.bind_port(context)
+
+        # then port is bound by back-end
+        back_end.bind_port.assert_called_once_with(context)
diff --git a/networking-odl/networking_odl/tests/unit/ml2/test_legacy_port_binding.py b/networking-odl/networking_odl/tests/unit/ml2/test_legacy_port_binding.py
new file mode 100644 (file)
index 0000000..932c961
--- /dev/null
@@ -0,0 +1,89 @@
+# Copyright (c) 2016 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import mock
+
+from neutron.extensions import portbindings
+from neutron.plugins.common import constants
+from neutron.plugins.ml2 import driver_api as api
+from neutron.plugins.ml2 import driver_context as ctx
+from neutron_lib import constants as n_constants
+
+from networking_odl.ml2 import legacy_port_binding
+from networking_odl.tests import base
+
+
+class TestLegacyPortBindingManager(base.DietTestCase):
+    """Tests for LegacyPortBindingManager segment checks and port binding."""
+
+    # valid  and invalid segments
+    valid_segment = {
+        api.ID: 'API_ID',
+        api.NETWORK_TYPE: constants.TYPE_LOCAL,
+        api.SEGMENTATION_ID: 'API_SEGMENTATION_ID',
+        api.PHYSICAL_NETWORK: 'API_PHYSICAL_NETWORK'}
+
+    # TYPE_NONE is expected to be rejected by _check_segment (see
+    # test_check_segment below), which makes this segment unbindable.
+    invalid_segment = {
+        api.ID: 'API_ID',
+        api.NETWORK_TYPE: constants.TYPE_NONE,
+        api.SEGMENTATION_ID: 'API_SEGMENTATION_ID',
+        api.PHYSICAL_NETWORK: 'API_PHYSICAL_NETWORK'}
+
+    def test_check_segment(self):
+        """Validate the _check_segment method."""
+
+        all_network_types = [constants.TYPE_FLAT, constants.TYPE_GRE,
+                             constants.TYPE_LOCAL, constants.TYPE_VXLAN,
+                             constants.TYPE_VLAN, constants.TYPE_NONE]
+
+        mgr = legacy_port_binding.LegacyPortBindingManager()
+
+        # Collect exactly the network types _check_segment accepts; the
+        # assertion below pins FLAT and NONE as the rejected types.
+        valid_types = {
+            network_type
+            for network_type in all_network_types
+            if mgr._check_segment({api.NETWORK_TYPE: network_type})}
+
+        self.assertEqual({
+            constants.TYPE_LOCAL, constants.TYPE_GRE, constants.TYPE_VXLAN,
+            constants.TYPE_VLAN}, valid_types)
+
+    def test_bind_port(self):
+        """Binding picks the first valid segment and sets status ACTIVE."""
+
+        network = mock.MagicMock(spec=api.NetworkContext)
+
+        port_context = mock.MagicMock(
+            spec=ctx.PortContext, current={'id': 'CURRENT_CONTEXT_ID'},
+            segments_to_bind=[self.valid_segment, self.invalid_segment],
+            network=network)
+
+        mgr = legacy_port_binding.LegacyPortBindingManager()
+        vif_type = mgr._get_vif_type(port_context)
+
+        mgr.bind_port(port_context)
+
+        # The valid segment (listed first) must be the one bound.
+        port_context.set_binding.assert_called_once_with(
+            self.valid_segment[api.ID], vif_type,
+            mgr.vif_details, status=n_constants.PORT_STATUS_ACTIVE)
+
+    def test_bind_port_unsupported_vnic_type(self):
+        """VNIC_DIRECT ports must not be bound by the legacy manager."""
+        network = mock.MagicMock(spec=api.NetworkContext)
+        port_context = mock.MagicMock(
+            spec=ctx.PortContext,
+            current={'id': 'CURRENT_CONTEXT_ID',
+                     portbindings.VNIC_TYPE: portbindings.VNIC_DIRECT},
+            segments_to_bind=[self.valid_segment, self.invalid_segment],
+            network=network)
+
+        mgr = legacy_port_binding.LegacyPortBindingManager()
+        mgr.bind_port(port_context)
+        port_context.set_binding.assert_not_called()
diff --git a/networking-odl/networking_odl/tests/unit/ml2/test_mechanism_odl.py b/networking-odl/networking_odl/tests/unit/ml2/test_mechanism_odl.py
new file mode 100644 (file)
index 0000000..95de10c
--- /dev/null
@@ -0,0 +1,596 @@
+# Copyright (c) 2013-2014 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+
+import copy
+import mock
+import socket
+
+from oslo_config import cfg
+from oslo_serialization import jsonutils
+import requests
+import webob.exc
+
+from neutron.db import segments_db
+from neutron.extensions import portbindings
+from neutron.plugins.common import constants
+from neutron.plugins.ml2 import config as config
+from neutron.plugins.ml2 import driver_api as api
+from neutron.plugins.ml2 import driver_context as driver_context
+from neutron.plugins.ml2 import plugin
+from neutron.tests import base
+from neutron.tests.unit.plugins.ml2 import test_plugin
+from neutron.tests.unit import testlib_api
+from neutron_lib import constants as n_constants
+
+from networking_odl.common import client
+from networking_odl.common import constants as odl_const
+from networking_odl.ml2 import legacy_port_binding
+from networking_odl.ml2 import mech_driver
+from networking_odl.ml2 import network_topology
+
+
+cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config')
+
+
+HOST = 'fake-host'
+PLUGIN_NAME = 'neutron.plugins.ml2.plugin.Ml2Plugin'
+# Canned Neutron resource dicts used as .current payloads in the mocked
+# driver contexts below; tests copy() them before mutating.
+FAKE_NETWORK = {'status': 'ACTIVE',
+                'subnets': [],
+                'name': 'net1',
+                'provider:physical_network': None,
+                'admin_state_up': True,
+                'tenant_id': 'test-tenant',
+                'provider:network_type': 'local',
+                'router:external': False,
+                'shared': False,
+                'id': 'd897e21a-dfd6-4331-a5dd-7524fa421c3e',
+                'provider:segmentation_id': None}
+
+FAKE_SUBNET = {'ipv6_ra_mode': None,
+               'allocation_pools': [{'start': '10.0.0.2',
+                                     'end': '10.0.1.254'}],
+               'host_routes': [],
+               'ipv6_address_mode': None,
+               'cidr': '10.0.0.0/23',
+               'id': '72c56c48-e9b8-4dcf-b3a7-0813bb3bd839',
+               'name': '',
+               'enable_dhcp': True,
+               'network_id': 'd897e21a-dfd6-4331-a5dd-7524fa421c3e',
+               'tenant_id': 'test-tenant',
+               'dns_nameservers': [],
+               'gateway_ip': '10.0.0.1',
+               'ip_version': 4,
+               'shared': False}
+
+# 'tenant_id' is deliberately bogus; the empty-tenant work-around test
+# overrides it to '' to exercise the create-attribute filter.
+FAKE_PORT = {'status': 'DOWN',
+             'binding:host_id': '',
+             'allowed_address_pairs': [],
+             'device_owner': 'fake_owner',
+             'binding:profile': {},
+             'fixed_ips': [],
+             'id': '72c56c48-e9b8-4dcf-b3a7-0813bb3bd839',
+             'security_groups': [],
+             'device_id': 'fake_device',
+             'name': '',
+             'admin_state_up': True,
+             'network_id': 'c13bba05-eb07-45ba-ace2-765706b2d701',
+             'tenant_id': 'bad_tenant_id',
+             'binding:vif_details': {},
+             'binding:vnic_type': 'normal',
+             'binding:vif_type': 'unbound',
+             'mac_address': '12:34:56:78:21:b6'}
+
+FAKE_SECURITY_GROUP = {'description': 'Default security group',
+                       'id': '6875fc07-853f-4230-9ab9-23d1af894240',
+                       'name': 'default',
+                       'security_group_rules': [],
+                       'tenant_id': '04bb5f9a0fa14ad18203035c791ffae2'}
+
+FAKE_SECURITY_GROUP_RULE = {'direction': 'ingress',
+                            'ethertype': 'IPv4',
+                            'id': '399029df-cefe-4a7a-b6d6-223558627d23',
+                            'port_range_max': 0,
+                            'port_range_min': 0,
+                            'protocol': 0,
+                            'remote_group_id': '6875fc07-853f-4230-9ab9',
+                            'remote_ip_prefix': 0,
+                            'security_group_id': '6875fc07-853f-4230-9ab9',
+                            'tenant_id': '04bb5f9a0fa14ad18203035c791ffae2'}
+
+
+class OpenDaylightTestCase(test_plugin.Ml2PluginV2TestCase):
+    """Base ML2 test case wired to the 'opendaylight' mechanism driver.
+
+    All outbound HTTP is stubbed out so no test touches the network.
+    """
+    _mechanism_drivers = ['opendaylight']
+
+    def setUp(self):
+        # Set URL/user/pass so init doesn't throw a cfg required error.
+        # They are not used in these tests since sendjson is overwritten.
+        config.cfg.CONF.set_override('url', 'http://127.0.0.1:9999', 'ml2_odl')
+        config.cfg.CONF.set_override('username', 'someuser', 'ml2_odl')
+        config.cfg.CONF.set_override('password', 'somepass', 'ml2_odl')
+
+        super(OpenDaylightTestCase, self).setUp()
+        self.port_create_status = 'DOWN'
+        self.mech = mech_driver.OpenDaylightMechanismDriver()
+        # Replace the REST client's sendjson with a URL sanity check so any
+        # request made by the driver is validated instead of sent.
+        mock.patch.object(
+            client.OpenDaylightRestClient,
+            'sendjson',
+            new=self.check_sendjson).start()
+
+        # Prevent test from accidentally connecting to any web service
+        mock.patch.object(
+            network_topology, 'NetworkTopologyClient',
+            return_value=mock.Mock(
+                specs=network_topology.NetworkTopologyClient,
+                get=mock.Mock(side_effect=requests.HTTPError))).start()
+
+        # Prevent hosts resolution from changing the behaviour of tests
+        mock.patch.object(
+            network_topology.utils,
+            'get_addresses_by_name',
+            side_effect=socket.gaierror).start()
+
+    def check_sendjson(self, method, urlpath, obj):
+        # Stand-in for sendjson: the driver must pass url paths, not full
+        # absolute URLs, to the REST client.
+        self.assertFalse(urlpath.startswith("http://"))
+
+
+class OpenDayLightMechanismConfigTests(testlib_api.SqlTestCase):
+    """Verify ml2_odl config options are required at plugin init time."""
+
+    def _set_config(self, url='http://127.0.0.1:9999', username='someuser',
+                    password='somepass'):
+        # Install a complete, valid ml2_odl configuration; individual tests
+        # pass None for one option to make it "missing".
+        config.cfg.CONF.set_override('mechanism_drivers',
+                                     ['logger', 'opendaylight'],
+                                     'ml2')
+        config.cfg.CONF.set_override('url', url, 'ml2_odl')
+        config.cfg.CONF.set_override('username', username, 'ml2_odl')
+        config.cfg.CONF.set_override('password', password, 'ml2_odl')
+
+    def _test_missing_config(self, **kwargs):
+        # Instantiating the plugin with an unset required option must raise.
+        self._set_config(**kwargs)
+        self.assertRaises(config.cfg.RequiredOptError,
+                          plugin.Ml2Plugin)
+
+    def test_valid_config(self):
+        self._set_config()
+        plugin.Ml2Plugin()
+
+    def test_missing_url_raises_exception(self):
+        self._test_missing_config(url=None)
+
+    def test_missing_username_raises_exception(self):
+        self._test_missing_config(username=None)
+
+    def test_missing_password_raises_exception(self):
+        self._test_missing_config(password=None)
+
+
+# The following classes re-run the stock ML2 API test suites with the
+# opendaylight mechanism driver enabled (via OpenDaylightTestCase); no
+# extra assertions are needed beyond the inherited ones.
+class OpenDaylightMechanismTestBasicGet(test_plugin.TestMl2BasicGet,
+                                        OpenDaylightTestCase):
+    pass
+
+
+class OpenDaylightMechanismTestNetworksV2(test_plugin.TestMl2NetworksV2,
+                                          OpenDaylightTestCase):
+    pass
+
+
+class OpenDaylightMechanismTestSubnetsV2(test_plugin.TestMl2SubnetsV2,
+                                         OpenDaylightTestCase):
+    pass
+
+
+class OpenDaylightMechanismTestPortsV2(test_plugin.TestMl2PortsV2,
+                                       OpenDaylightTestCase):
+    """Stock ML2 port tests run against the opendaylight driver."""
+
+    def setUp(self):
+        # Pin out_of_sync to False so the driver never triggers a full
+        # resync during the inherited port tests.
+        mock.patch.object(
+            mech_driver.OpenDaylightDriver,
+            'out_of_sync',
+            new_callable=mock.PropertyMock(return_value=False)).start()
+        super(OpenDaylightMechanismTestPortsV2, self).setUp()
+
+    def test_update_port_mac(self):
+        # Updating the MAC of a bound port must be rejected with 409/PortBound.
+        self.check_update_port_mac(
+            host_arg={portbindings.HOST_ID: HOST},
+            arg_list=(portbindings.HOST_ID,),
+            expected_status=webob.exc.HTTPConflict.code,
+            expected_error='PortBound')
+
+
+class DataMatcher(object):
+    """Equality matcher for the JSON body the driver sends to ODL.
+
+    Applies the driver's own attribute filter for the given operation to
+    the context's current resource, then compares against the serialized
+    payload keyed by object_type.
+    """
+
+    def __init__(self, operation, object_type, context):
+        self._data = context.current.copy()
+        self._object_type = object_type
+        # FILTER_MAP is keyed by the plural form ('networks', 'ports', ...).
+        filter_cls = mech_driver.OpenDaylightDriver.FILTER_MAP[
+            '%ss' % object_type]
+        attr_filter = getattr(filter_cls, 'filter_%s_attributes' % operation)
+        attr_filter(self._data, context)
+
+    def __eq__(self, s):
+        # 's' is the JSON string passed as the request body.
+        data = jsonutils.loads(s)
+        return self._data == data[self._object_type]
+
+    def __ne__(self, s):
+        # Explicit __ne__ so inequality stays consistent on Python 2.
+        return not self.__eq__(s)
+
+
+class OpenDaylightSyncTestCase(OpenDaylightTestCase):
+    """Tests for the driver's full-sync path (sync_full)."""
+
+    def setUp(self):
+        super(OpenDaylightSyncTestCase, self).setUp()
+        self.given_back_end = mech_driver.OpenDaylightDriver()
+
+    def test_simple_sync_all_with_HTTPError_not_found(self):
+        # When every GET to ODL returns 404, sync_full must POST each
+        # resource type back to ODL in a fixed order.
+        self.given_back_end.out_of_sync = True
+        ml2_plugin = plugin.Ml2Plugin()
+
+        response = mock.Mock(status_code=requests.codes.not_found)
+        fake_exception = requests.exceptions.HTTPError('Test',
+                                                       response=response)
+
+        def side_eff(*args, **kwargs):
+            # HTTP ERROR exception with 404 status code will be raised when use
+            # sendjson to get the object in ODL DB
+            if args[0] == 'get':
+                raise fake_exception
+
+        with mock.patch.object(client.OpenDaylightRestClient, 'sendjson',
+                               side_effect=side_eff), \
+            mock.patch.object(plugin.Ml2Plugin, 'get_networks',
+                              return_value=[FAKE_NETWORK.copy()]), \
+            mock.patch.object(plugin.Ml2Plugin, 'get_network',
+                              return_value=FAKE_NETWORK.copy()), \
+            mock.patch.object(plugin.Ml2Plugin, 'get_subnets',
+                              return_value=[FAKE_SUBNET.copy()]), \
+            mock.patch.object(plugin.Ml2Plugin, 'get_ports',
+                              return_value=[FAKE_PORT.copy()]), \
+            mock.patch.object(plugin.Ml2Plugin, 'get_security_groups',
+                              return_value=[FAKE_SECURITY_GROUP.copy()]), \
+            mock.patch.object(plugin.Ml2Plugin, 'get_security_group_rules',
+                              return_value=[FAKE_SECURITY_GROUP_RULE.copy()]):
+            self.given_back_end.sync_full(ml2_plugin)
+
+            # Expected POST order: network, subnet, port, security group,
+            # security group rule.
+            sync_id_list = [FAKE_NETWORK['id'], FAKE_SUBNET['id'],
+                            FAKE_PORT['id'],
+                            FAKE_SECURITY_GROUP['id'],
+                            FAKE_SECURITY_GROUP_RULE['id']]
+
+            # Collect the ids actually POSTed, preserving call order.
+            act = []
+            for args, kwargs in \
+                client.OpenDaylightRestClient.sendjson.call_args_list:
+                if args[0] == 'post':
+                    for key in args[2]:
+                        act.append(args[2][key][0]['id'])
+            self.assertEqual(act, sync_id_list)
+
+    def test_simple_sync_all_with_all_synced(self):
+        # When every GET succeeds (resource already in ODL), sync_full must
+        # not issue any write calls.
+        self.given_back_end.out_of_sync = True
+        ml2_plugin = plugin.Ml2Plugin()
+
+        with mock.patch.object(client.OpenDaylightRestClient, 'sendjson',
+                               return_value=None), \
+            mock.patch.object(plugin.Ml2Plugin, 'get_networks',
+                              return_value=[FAKE_NETWORK.copy()]), \
+            mock.patch.object(plugin.Ml2Plugin, 'get_subnets',
+                              return_value=[FAKE_SUBNET.copy()]), \
+            mock.patch.object(plugin.Ml2Plugin, 'get_ports',
+                              return_value=[FAKE_PORT.copy()]), \
+            mock.patch.object(plugin.Ml2Plugin, 'get_security_groups',
+                              return_value=[FAKE_SECURITY_GROUP.copy()]), \
+            mock.patch.object(plugin.Ml2Plugin, 'get_security_group_rules',
+                              return_value=[FAKE_SECURITY_GROUP_RULE.copy()]):
+            self.given_back_end.sync_full(ml2_plugin)
+
+            # it's only called for GET, there is no call for PUT
+            # 5 = network, subnet, port, security_group, security_group_rule
+            self.assertEqual(5,
+                             client.OpenDaylightRestClient.sendjson.call_count)
+
+
+class OpenDaylightMechanismDriverTestCase(base.BaseTestCase):
+    """Exercise the postcommit REST calls for each resource/status code."""
+
+    def setUp(self):
+        super(OpenDaylightMechanismDriverTestCase, self).setUp()
+        config.cfg.CONF.set_override('mechanism_drivers',
+                                     ['logger', 'opendaylight'], 'ml2')
+        config.cfg.CONF.set_override('url', 'http://127.0.0.1:9999', 'ml2_odl')
+        config.cfg.CONF.set_override('username', 'someuser', 'ml2_odl')
+        config.cfg.CONF.set_override('password', 'somepass', 'ml2_odl')
+        self.mech = mech_driver.OpenDaylightMechanismDriver()
+        self.mech.initialize()
+
+    @staticmethod
+    def _get_mock_network_operation_context():
+        context = mock.Mock(current=FAKE_NETWORK.copy())
+        return context
+
+    @staticmethod
+    def _get_mock_subnet_operation_context():
+        context = mock.Mock(current=FAKE_SUBNET.copy())
+        return context
+
+    @staticmethod
+    def _get_mock_port_operation_context():
+        context = mock.Mock(current=FAKE_PORT.copy())
+        # Port create filtering looks up security groups via the plugin.
+        context._plugin.get_security_group = mock.Mock(return_value={})
+        return context
+
+    @classmethod
+    def _get_mock_operation_context(cls, object_type):
+        # Dispatch to the per-resource context factory above by name.
+        getter = getattr(cls, '_get_mock_%s_operation_context' % object_type)
+        return getter()
+
+    # Canonical reason strings for each mocked HTTP status code; used to
+    # build the HTTPError raised by the fake response below.
+    _status_code_msgs = {
+        200: '',
+        201: '',
+        204: '',
+        400: '400 Client Error: Bad Request',
+        401: '401 Client Error: Unauthorized',
+        403: '403 Client Error: Forbidden',
+        404: '404 Client Error: Not Found',
+        409: '409 Client Error: Conflict',
+        501: '501 Server Error: Not Implemented',
+        503: '503 Server Error: Service Unavailable',
+    }
+
+    @classmethod
+    def _get_mock_request_response(cls, status_code):
+        # Mimic requests.Response: raise_for_status is a no-op below 400
+        # and raises HTTPError for 4xx/5xx.
+        response = mock.Mock(status_code=status_code)
+        response.raise_for_status = mock.Mock() if status_code < 400 else (
+            mock.Mock(side_effect=requests.exceptions.HTTPError(
+                cls._status_code_msgs[status_code], response=response)))
+        return response
+
+    def _test_single_operation(self, method, context, status_code,
+                               exc_class=None, *args, **kwargs):
+        # Run one postcommit call against a mocked requests.request and
+        # verify the exact HTTP request (method/url/body/auth) it produced.
+        self.mech.odl_drv.out_of_sync = False
+        request_response = self._get_mock_request_response(status_code)
+        with mock.patch('requests.request',
+                        return_value=request_response) as mock_method:
+            if exc_class is not None:
+                self.assertRaises(exc_class, method, context)
+            else:
+                method(context)
+        mock_method.assert_called_once_with(
+            headers={'Content-Type': 'application/json'},
+            auth=(config.cfg.CONF.ml2_odl.username,
+                  config.cfg.CONF.ml2_odl.password),
+            timeout=config.cfg.CONF.ml2_odl.timeout, *args, **kwargs)
+
+    def _test_create_resource_postcommit(self, object_type, status_code,
+                                         exc_class=None):
+        # Create maps to POST on the pluralized collection URL.
+        method = getattr(self.mech, 'create_%s_postcommit' % object_type)
+        context = self._get_mock_operation_context(object_type)
+        url = '%s/%ss' % (config.cfg.CONF.ml2_odl.url, object_type)
+        kwargs = {'url': url,
+                  'data': DataMatcher(odl_const.ODL_CREATE, object_type,
+                                      context)}
+        self._test_single_operation(method, context, status_code, exc_class,
+                                    'post', **kwargs)
+
+    def _test_update_resource_postcommit(self, object_type, status_code,
+                                         exc_class=None):
+        # Update maps to PUT on the per-resource URL.
+        method = getattr(self.mech, 'update_%s_postcommit' % object_type)
+        context = self._get_mock_operation_context(object_type)
+        url = '%s/%ss/%s' % (config.cfg.CONF.ml2_odl.url, object_type,
+                             context.current['id'])
+        kwargs = {'url': url,
+                  'data': DataMatcher(odl_const.ODL_UPDATE, object_type,
+                                      context)}
+        self._test_single_operation(method, context, status_code, exc_class,
+                                    'put', **kwargs)
+
+    def _test_delete_resource_postcommit(self, object_type, status_code,
+                                         exc_class=None):
+        # Delete maps to the ODL_DELETE verb with an empty body.
+        method = getattr(self.mech, 'delete_%s_postcommit' % object_type)
+        context = self._get_mock_operation_context(object_type)
+        url = '%s/%ss/%s' % (config.cfg.CONF.ml2_odl.url, object_type,
+                             context.current['id'])
+        kwargs = {'url': url, 'data': None}
+        self._test_single_operation(method, context, status_code, exc_class,
+                                    odl_const.ODL_DELETE, **kwargs)
+
+    def test_create_network_postcommit(self):
+        self._test_create_resource_postcommit(odl_const.ODL_NETWORK,
+                                              requests.codes.created)
+        for status_code in (requests.codes.bad_request,
+                            requests.codes.unauthorized):
+            self._test_create_resource_postcommit(
+                odl_const.ODL_NETWORK, status_code,
+                requests.exceptions.HTTPError)
+
+    def test_create_subnet_postcommit(self):
+        self._test_create_resource_postcommit(odl_const.ODL_SUBNET,
+                                              requests.codes.created)
+        for status_code in (requests.codes.bad_request,
+                            requests.codes.unauthorized,
+                            requests.codes.forbidden,
+                            requests.codes.not_found,
+                            requests.codes.conflict,
+                            requests.codes.not_implemented):
+            self._test_create_resource_postcommit(
+                odl_const.ODL_SUBNET, status_code,
+                requests.exceptions.HTTPError)
+
+    def test_create_port_postcommit(self):
+        self._test_create_resource_postcommit(odl_const.ODL_PORT,
+                                              requests.codes.created)
+        for status_code in (requests.codes.bad_request,
+                            requests.codes.unauthorized,
+                            requests.codes.forbidden,
+                            requests.codes.not_found,
+                            requests.codes.conflict,
+                            requests.codes.not_implemented,
+                            requests.codes.service_unavailable):
+            self._test_create_resource_postcommit(
+                odl_const.ODL_PORT, status_code,
+                requests.exceptions.HTTPError)
+
+    def test_update_network_postcommit(self):
+        self._test_update_resource_postcommit(odl_const.ODL_NETWORK,
+                                              requests.codes.ok)
+        for status_code in (requests.codes.bad_request,
+                            requests.codes.forbidden,
+                            requests.codes.not_found):
+            self._test_update_resource_postcommit(
+                odl_const.ODL_NETWORK, status_code,
+                requests.exceptions.HTTPError)
+
+    def test_update_subnet_postcommit(self):
+        self._test_update_resource_postcommit(odl_const.ODL_SUBNET,
+                                              requests.codes.ok)
+        for status_code in (requests.codes.bad_request,
+                            requests.codes.unauthorized,
+                            requests.codes.forbidden,
+                            requests.codes.not_found,
+                            requests.codes.not_implemented):
+            self._test_update_resource_postcommit(
+                odl_const.ODL_SUBNET, status_code,
+                requests.exceptions.HTTPError)
+
+    def test_update_port_postcommit(self):
+        self._test_update_resource_postcommit(odl_const.ODL_PORT,
+                                              requests.codes.ok)
+        for status_code in (requests.codes.bad_request,
+                            requests.codes.unauthorized,
+                            requests.codes.forbidden,
+                            requests.codes.not_found,
+                            requests.codes.conflict,
+                            requests.codes.not_implemented):
+            self._test_update_resource_postcommit(
+                odl_const.ODL_PORT, status_code,
+                requests.exceptions.HTTPError)
+
+    def test_delete_network_postcommit(self):
+        # A 404 on delete is tolerated (resource already gone in ODL).
+        self._test_delete_resource_postcommit(odl_const.ODL_NETWORK,
+                                              requests.codes.no_content)
+        self._test_delete_resource_postcommit(odl_const.ODL_NETWORK,
+                                              requests.codes.not_found)
+        for status_code in (requests.codes.unauthorized,
+                            requests.codes.conflict):
+            self._test_delete_resource_postcommit(
+                odl_const.ODL_NETWORK, status_code,
+                requests.exceptions.HTTPError)
+
+    def test_delete_subnet_postcommit(self):
+        self._test_delete_resource_postcommit(odl_const.ODL_SUBNET,
+                                              requests.codes.no_content)
+        self._test_delete_resource_postcommit(odl_const.ODL_SUBNET,
+                                              requests.codes.not_found)
+        for status_code in (requests.codes.unauthorized,
+                            requests.codes.conflict,
+                            requests.codes.not_implemented):
+            self._test_delete_resource_postcommit(
+                odl_const.ODL_SUBNET, status_code,
+                requests.exceptions.HTTPError)
+
+    def test_delete_port_postcommit(self):
+        self._test_delete_resource_postcommit(odl_const.ODL_PORT,
+                                              requests.codes.no_content)
+        self._test_delete_resource_postcommit(odl_const.ODL_PORT,
+                                              requests.codes.not_found)
+        for status_code in (requests.codes.unauthorized,
+                            requests.codes.forbidden,
+                            requests.codes.not_implemented):
+            self._test_delete_resource_postcommit(
+                odl_const.ODL_PORT, status_code,
+                requests.exceptions.HTTPError)
+
+    # NOTE(review): method name typo 'emtpy' -> 'empty'; renaming would be
+    # behavior-safe (unittest discovers any test_* name) but is left as-is
+    # to keep this patch minimal.
+    def test_port_emtpy_tenant_id_work_around(self):
+        """Validate the work around code of port creation"""
+        plugin = mock.Mock()
+        plugin_context = mock.Mock()
+        network = self._get_mock_operation_context(
+            odl_const.ODL_NETWORK).current
+        port = self._get_mock_operation_context(odl_const.ODL_PORT).current
+        tenant_id = network['tenant_id']
+        port['tenant_id'] = ''
+
+        with mock.patch.object(segments_db, 'get_network_segments'):
+            context = driver_context.PortContext(
+                plugin, plugin_context, port, network, {}, 0, None)
+            # The create filter must back-fill the tenant from the network.
+            self.mech.odl_drv.FILTER_MAP[
+                odl_const.ODL_PORTS].filter_create_attributes(port, context)
+            self.assertEqual(tenant_id, port['tenant_id'])
+
+    def test_update_port_filter(self):
+        """Validate the filter code on update port operation"""
+        items_to_filter = ['network_id', 'id', 'status', 'tenant_id']
+        plugin_context = mock.Mock()
+        network = self._get_mock_operation_context(
+            odl_const.ODL_NETWORK).current
+        subnet = self._get_mock_operation_context(odl_const.ODL_SUBNET).current
+        port = self._get_mock_operation_context(odl_const.ODL_PORT).current
+        port['fixed_ips'] = [{'subnet_id': subnet['id'],
+                              'ip_address': '10.0.0.10'}]
+        port['mac_address'] = port['mac_address'].upper()
+        orig_port = copy.deepcopy(port)
+
+        with mock.patch.object(segments_db, 'get_network_segments'):
+            # NOTE(review): unlike the test above, no local 'plugin' mock is
+            # created here, so 'plugin' resolves to the imported
+            # neutron.plugins.ml2.plugin module. Confirm whether a
+            # 'plugin = mock.Mock()' line was intended.
+            context = driver_context.PortContext(
+                plugin, plugin_context, port, network, {}, 0, None)
+            self.mech.odl_drv.FILTER_MAP[
+                odl_const.ODL_PORTS].filter_update_attributes(port, context)
+            # Everything except the filtered keys must pass through unchanged.
+            for key, value in port.items():
+                if key not in items_to_filter:
+                    self.assertEqual(orig_port[key], value)
+
+
+class TestOpenDaylightMechanismDriver(base.DietTestCase):
+    """End-to-end bind_port through the front-end to the legacy binder."""
+
+    # given valid  and invalid segments
+    valid_segment = {
+        api.ID: 'API_ID',
+        api.NETWORK_TYPE: constants.TYPE_LOCAL,
+        api.SEGMENTATION_ID: 'API_SEGMENTATION_ID',
+        api.PHYSICAL_NETWORK: 'API_PHYSICAL_NETWORK'}
+
+    invalid_segment = {
+        api.ID: 'API_ID',
+        api.NETWORK_TYPE: constants.TYPE_NONE,
+        api.SEGMENTATION_ID: 'API_SEGMENTATION_ID',
+        api.PHYSICAL_NETWORK: 'API_PHYSICAL_NETWORK'}
+
+    def test_bind_port_front_end(self):
+        # Wire a real back-end driver with the legacy port binding
+        # controller behind the mechanism-driver front-end.
+        given_front_end = mech_driver.OpenDaylightMechanismDriver()
+        given_port_context = self.given_port_context()
+        given_back_end = mech_driver.OpenDaylightDriver()
+        given_front_end.odl_drv = given_back_end
+        given_back_end.port_binding_controller = \
+            legacy_port_binding.LegacyPortBindingManager()
+
+        # when port is bound
+        given_front_end.bind_port(given_port_context)
+
+        # then context binding is setup with returned vif_type and valid
+        # segment API ID
+        given_port_context.set_binding.assert_called_once_with(
+            self.valid_segment[api.ID], portbindings.VIF_TYPE_OVS,
+            given_back_end.port_binding_controller.vif_details,
+            status=n_constants.PORT_STATUS_ACTIVE)
+
+    def given_port_context(self):
+        """Build a MagicMock PortContext with one valid, one invalid segment."""
+        from neutron.plugins.ml2 import driver_context as ctx
+
+        # given NetworkContext
+        network = mock.MagicMock(spec=api.NetworkContext)
+
+        # given port context
+        return mock.MagicMock(
+            spec=ctx.PortContext, current={'id': 'CURRENT_CONTEXT_ID'},
+            segments_to_bind=[self.valid_segment, self.invalid_segment],
+            network=network,
+            _new_bound_segment=self.valid_segment)
diff --git a/networking-odl/networking_odl/tests/unit/ml2/test_mechanism_odl_v2.py b/networking-odl/networking_odl/tests/unit/ml2/test_mechanism_odl_v2.py
new file mode 100644 (file)
index 0000000..7e8c7fc
--- /dev/null
@@ -0,0 +1,577 @@
+# Copyright (c) 2015 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+import datetime
+
+from networking_odl.common import callback
+from networking_odl.common import client
+from networking_odl.common import constants as odl_const
+from networking_odl.common import filters
+from networking_odl.db import db
+from networking_odl.journal import cleanup
+from networking_odl.journal import journal
+from networking_odl.ml2 import mech_driver_v2
+
+import mock
+from oslo_config import cfg
+from oslo_serialization import jsonutils
+import requests
+
+from neutron.db import api as neutron_db_api
+from neutron import manager
+from neutron.plugins.ml2 import config as config
+from neutron.plugins.ml2 import plugin
+from neutron.tests.unit.plugins.ml2 import test_plugin
+from neutron.tests.unit import testlib_api
+
+cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config')
+
+SECURITY_GROUP = '2f9244b4-9bee-4e81-bc4a-3f3c2045b3d7'
+SG_FAKE_ID = 'sg_fake_uuid'
+SG_RULE_FAKE_ID = 'sg_rule_fake_uuid'
+
+
+class OpenDaylightConfigBase(test_plugin.Ml2PluginV2TestCase):
+    """Base test case that enables the 'opendaylight' ML2 driver.
+
+    Sets the minimum ml2_odl configuration (url, username, password)
+    required for the driver to initialize.
+    """
+    def setUp(self):
+        super(OpenDaylightConfigBase, self).setUp()
+        config.cfg.CONF.set_override('mechanism_drivers',
+                                     ['logger', 'opendaylight'], 'ml2')
+        config.cfg.CONF.set_override('url', 'http://127.0.0.1:9999', 'ml2_odl')
+        config.cfg.CONF.set_override('username', 'someuser', 'ml2_odl')
+        config.cfg.CONF.set_override('password', 'somepass', 'ml2_odl')
+
+
+class OpenDaylightTestCase(OpenDaylightConfigBase):
+    """Base test case that stubs out ODL REST traffic.
+
+    Patches the journal sync thread and the REST client's sendjson so no
+    real HTTP requests are made; sendjson calls are sanity-checked via
+    check_sendjson.
+    """
+    def setUp(self):
+        super(OpenDaylightTestCase, self).setUp()
+        self.port_create_status = 'DOWN'
+        self.mech = mech_driver_v2.OpenDaylightMechanismDriver()
+        # Prevent the background journal thread from actually starting.
+        mock.patch.object(journal.OpendaylightJournalThread,
+                          'start_odl_sync_thread').start()
+        self.mock_sendjson = mock.patch.object(client.OpenDaylightRestClient,
+                                               'sendjson').start()
+        self.mock_sendjson.side_effect = self.check_sendjson
+
+    def check_sendjson(self, method, urlpath, obj):
+        # urlpath must be relative to the configured base URL, never
+        # a fully-qualified URL.
+        self.assertFalse(urlpath.startswith("http://"))
+
+
+class OpenDayLightMechanismConfigTests(testlib_api.SqlTestCase):
+    """Checks required ml2_odl config options are enforced at plugin load."""
+
+    def _set_config(self, url='http://127.0.0.1:9999', username='someuser',
+                    password='somepass'):
+        # Override the ml2_odl options; pass None for one of them to
+        # simulate a missing required option.
+        config.cfg.CONF.set_override('mechanism_drivers',
+                                     ['logger', 'opendaylight'],
+                                     'ml2')
+        config.cfg.CONF.set_override('url', url, 'ml2_odl')
+        config.cfg.CONF.set_override('username', username, 'ml2_odl')
+        config.cfg.CONF.set_override('password', password, 'ml2_odl')
+
+    def _test_missing_config(self, **kwargs):
+        # Plugin construction must fail when a required option is unset.
+        self._set_config(**kwargs)
+        self.assertRaises(config.cfg.RequiredOptError,
+                          plugin.Ml2Plugin)
+
+    def test_valid_config(self):
+        self._set_config()
+        plugin.Ml2Plugin()
+
+    def test_missing_url_raises_exception(self):
+        self._test_missing_config(url=None)
+
+    def test_missing_username_raises_exception(self):
+        self._test_missing_config(username=None)
+
+    def test_missing_password_raises_exception(self):
+        self._test_missing_config(password=None)
+
+
+class OpenDaylightMechanismTestBasicGet(test_plugin.TestMl2BasicGet,
+                                        OpenDaylightTestCase):
+    # Runs the upstream ML2 basic-GET test suite with the ODL driver loaded.
+    pass
+
+
+class OpenDaylightMechanismTestNetworksV2(test_plugin.TestMl2NetworksV2,
+                                          OpenDaylightTestCase):
+    # Runs the upstream ML2 network tests with the ODL driver loaded.
+    pass
+
+
+class OpenDaylightMechanismTestSubnetsV2(test_plugin.TestMl2SubnetsV2,
+                                         OpenDaylightTestCase):
+    # Runs the upstream ML2 subnet tests with the ODL driver loaded.
+    pass
+
+
+class OpenDaylightMechanismTestPortsV2(test_plugin.TestMl2PortsV2,
+                                       OpenDaylightTestCase):
+    # Runs the upstream ML2 port tests with the ODL driver loaded.
+    pass
+
+
+class DataMatcher(object):
+    """Equality matcher for the JSON payload sent to ODL.
+
+    Snapshots the expected object data from the operation context (after
+    applying the same ODL filters the driver applies) and compares it to
+    the JSON string passed to the mocked request call.
+    """
+
+    def __init__(self, operation, object_type, context):
+        # SG/SG-rule contexts are plain dicts keyed by object type; all
+        # other contexts expose the data as context.current.
+        if object_type in [odl_const.ODL_SG, odl_const.ODL_SG_RULE]:
+            self._data = context[object_type].copy()
+        else:
+            self._data = context.current.copy()
+        self._object_type = object_type
+        filters.filter_for_odl(object_type, operation, self._data)
+
+    def __eq__(self, s):
+        # 's' is the serialized request body; compare the inner object.
+        data = jsonutils.loads(s)
+        return self._data == data[self._object_type]
+
+    def __ne__(self, s):
+        # Python 2 does not derive __ne__ from __eq__, so define it.
+        return not self.__eq__(s)
+
+
+class AttributeDict(dict):
+    """Dict whose keys are also readable as attributes (d.key == d['key'])."""
+    def __init__(self, *args, **kwargs):
+        super(AttributeDict, self).__init__(*args, **kwargs)
+        # Aliasing __dict__ to self makes item access and attribute
+        # access share the same storage.
+        self.__dict__ = self
+
+
+class OpenDaylightMechanismDriverTestCase(OpenDaylightConfigBase):
+    """Tests for the v2 (journal-based) ODL mechanism driver.
+
+    Exercises the precommit journaling, the journal sync thread's HTTP
+    dispatch, dependency ordering between pending rows, and the journal
+    cleanup logic, with requests.request mocked out.
+    """
+    def setUp(self):
+        super(OpenDaylightMechanismDriverTestCase, self).setUp()
+        self.db_session = neutron_db_api.get_session()
+        self.mech = mech_driver_v2.OpenDaylightMechanismDriver()
+        # Keep the background sync thread from starting; tests drive the
+        # journal synchronously via run_sync_thread(exit_after_run=True).
+        self.mock_sync_thread = mock.patch.object(
+            journal.OpendaylightJournalThread, 'start_odl_sync_thread').start()
+        self.mech.initialize()
+        self.thread = journal.OpendaylightJournalThread()
+        self.addCleanup(self._db_cleanup)
+
+    @staticmethod
+    def _get_mock_network_operation_context():
+        # Mocked ML2 NetworkContext with a representative 'current' dict.
+        current = {'status': 'ACTIVE',
+                   'subnets': [],
+                   'name': 'net1',
+                   'provider:physical_network': None,
+                   'admin_state_up': True,
+                   'tenant_id': 'test-tenant',
+                   'provider:network_type': 'local',
+                   'router:external': False,
+                   'shared': False,
+                   'id': 'd897e21a-dfd6-4331-a5dd-7524fa421c3e',
+                   'provider:segmentation_id': None}
+        context = mock.Mock(current=current)
+        context._plugin_context.session = neutron_db_api.get_session()
+        return context
+
+    @staticmethod
+    def _get_mock_subnet_operation_context():
+        # Mocked ML2 SubnetContext tied to the mock network above.
+        current = {'ipv6_ra_mode': None,
+                   'allocation_pools': [{'start': '10.0.0.2',
+                                         'end': '10.0.1.254'}],
+                   'host_routes': [],
+                   'ipv6_address_mode': None,
+                   'cidr': '10.0.0.0/23',
+                   'id': '72c56c48-e9b8-4dcf-b3a7-0813bb3bd839',
+                   'name': '',
+                   'enable_dhcp': True,
+                   'network_id': 'd897e21a-dfd6-4331-a5dd-7524fa421c3e',
+                   'tenant_id': 'test-tenant',
+                   'dns_nameservers': [],
+                   'gateway_ip': '10.0.0.1',
+                   'ip_version': 4,
+                   'shared': False}
+        context = mock.Mock(current=current)
+        context._plugin_context.session = neutron_db_api.get_session()
+        return context
+
+    @staticmethod
+    def _get_mock_port_operation_context():
+        # Mocked ML2 PortContext; also stubs plugin getters used by the
+        # driver (security group, port, network lookups).
+        current = {'status': 'DOWN',
+                   'binding:host_id': '',
+                   'allowed_address_pairs': [],
+                   'device_owner': 'fake_owner',
+                   'binding:profile': {},
+                   'fixed_ips': [{
+                       'subnet_id': '72c56c48-e9b8-4dcf-b3a7-0813bb3bd839'}],
+                   'id': '83d56c48-e9b8-4dcf-b3a7-0813bb3bd940',
+                   'security_groups': [SECURITY_GROUP],
+                   'device_id': 'fake_device',
+                   'name': '',
+                   'admin_state_up': True,
+                   'network_id': 'd897e21a-dfd6-4331-a5dd-7524fa421c3e',
+                   'tenant_id': 'test-tenant',
+                   'binding:vif_details': {},
+                   'binding:vnic_type': 'normal',
+                   'binding:vif_type': 'unbound',
+                   'mac_address': '12:34:56:78:21:b6'}
+        _network = OpenDaylightMechanismDriverTestCase.\
+            _get_mock_network_operation_context().current
+        _plugin = manager.NeutronManager.get_plugin()
+        _plugin.get_security_group = mock.Mock(return_value=SECURITY_GROUP)
+        _plugin.get_port = mock.Mock(return_value=current)
+        _plugin.get_network = mock.Mock(return_value=_network)
+        _plugin_context_mock = {'session': neutron_db_api.get_session()}
+        _network_context_mock = {'_network': _network}
+        context = {'current': AttributeDict(current),
+                   '_plugin': _plugin,
+                   '_plugin_context': AttributeDict(_plugin_context_mock),
+                   '_network_context': AttributeDict(_network_context_mock)}
+        return AttributeDict(context)
+
+    @staticmethod
+    def _get_mock_security_group_operation_context():
+        # SG callbacks receive a plain dict keyed by object type.
+        context = {odl_const.ODL_SG: {'name': 'test_sg',
+                                      'id': SG_FAKE_ID}}
+        return context
+
+    @staticmethod
+    def _get_mock_security_group_rule_operation_context():
+        context = {odl_const.ODL_SG_RULE: {'security_group_id': SG_FAKE_ID,
+                                           'id': SG_RULE_FAKE_ID}}
+        return context
+
+    @classmethod
+    def _get_mock_operation_context(cls, object_type):
+        # Dispatch to the per-object-type context builder above by name.
+        getter = getattr(cls, '_get_mock_%s_operation_context' % object_type)
+        return getter()
+
+    # HTTP reason strings used to build mock error responses.
+    _status_code_msgs = {
+        200: '',
+        201: '',
+        204: '',
+        400: '400 Client Error: Bad Request',
+        401: '401 Client Error: Unauthorized',
+        403: '403 Client Error: Forbidden',
+        404: '404 Client Error: Not Found',
+        409: '409 Client Error: Conflict',
+        501: '501 Server Error: Not Implemented',
+        503: '503 Server Error: Service Unavailable',
+    }
+
+    def _db_cleanup(self):
+        # Remove all journal rows so tests do not leak state.
+        rows = db.get_all_db_rows(self.db_session)
+        for row in rows:
+            db.delete_row(self.db_session, row=row)
+
+    @classmethod
+    def _get_mock_request_response(cls, status_code):
+        # Mock requests.Response; raise_for_status raises HTTPError for
+        # 4xx/5xx codes, mirroring the real library's behavior.
+        response = mock.Mock(status_code=status_code)
+        response.raise_for_status = mock.Mock() if status_code < 400 else (
+            mock.Mock(side_effect=requests.exceptions.HTTPError(
+                cls._status_code_msgs[status_code])))
+        return response
+
+    def _test_operation(self, method, status_code, expected_calls,
+                        *args, **kwargs):
+        # Run 'method' with requests.request mocked to return the given
+        # status, then verify the call count and (if any) call arguments.
+        request_response = self._get_mock_request_response(status_code)
+        with mock.patch('requests.request',
+                        return_value=request_response) as mock_method:
+            method(exit_after_run=True)
+
+        if expected_calls:
+            mock_method.assert_called_with(
+                headers={'Content-Type': 'application/json'},
+                auth=(config.cfg.CONF.ml2_odl.username,
+                      config.cfg.CONF.ml2_odl.password),
+                timeout=config.cfg.CONF.ml2_odl.timeout, *args, **kwargs)
+        self.assertEqual(expected_calls, mock_method.call_count)
+
+    def _call_operation_object(self, operation, object_type):
+        # Invoke the driver entry point that journals the operation:
+        # sync_from_callback for SG/SG-rule, the precommit hook otherwise.
+        context = self._get_mock_operation_context(object_type)
+
+        if object_type in [odl_const.ODL_SG, odl_const.ODL_SG_RULE]:
+            res_type = [rt for rt in callback._RESOURCE_MAPPING.values()
+                        if rt.singular == object_type][0]
+            self.mech.sync_from_callback(operation, res_type,
+                                         context[object_type]['id'], context)
+        else:
+            method = getattr(self.mech, '%s_%s_precommit' % (operation,
+                                                             object_type))
+            method(context)
+
+    def _test_operation_object(self, operation, object_type):
+        # Journal the operation and verify the pending DB row's fields.
+        self._call_operation_object(operation, object_type)
+
+        context = self._get_mock_operation_context(object_type)
+        row = db.get_oldest_pending_db_row_with_lock(self.db_session)
+        self.assertEqual(operation, row['operation'])
+        self.assertEqual(object_type, row['object_type'])
+        self.assertEqual(context.current['id'], row['object_uuid'])
+
+    def _test_thread_processing(self, operation, object_type,
+                                expected_calls=1):
+        # Journal the operation, then run one pass of the sync thread and
+        # verify the HTTP verb, URL and payload sent to ODL.
+        http_requests = {odl_const.ODL_CREATE: 'post',
+                         odl_const.ODL_UPDATE: 'put',
+                         odl_const.ODL_DELETE: 'delete'}
+        status_codes = {odl_const.ODL_CREATE: requests.codes.created,
+                        odl_const.ODL_UPDATE: requests.codes.ok,
+                        odl_const.ODL_DELETE: requests.codes.no_content}
+
+        http_request = http_requests[operation]
+        status_code = status_codes[operation]
+
+        self._call_operation_object(operation, object_type)
+
+        context = self._get_mock_operation_context(object_type)
+        url_object_type = object_type.replace('_', '-')
+        if operation in [odl_const.ODL_UPDATE, odl_const.ODL_DELETE]:
+            # Update/delete URLs include the object's UUID.
+            if object_type in [odl_const.ODL_SG, odl_const.ODL_SG_RULE]:
+                uuid = context[object_type]['id']
+            else:
+                uuid = context.current['id']
+            url = '%s/%ss/%s' % (config.cfg.CONF.ml2_odl.url, url_object_type,
+                                 uuid)
+        else:
+            url = '%s/%ss' % (config.cfg.CONF.ml2_odl.url, url_object_type)
+
+        if operation in [odl_const.ODL_CREATE, odl_const.ODL_UPDATE]:
+            kwargs = {
+                'url': url,
+                'data': DataMatcher(operation, object_type, context)}
+        else:
+            kwargs = {'url': url, 'data': None}
+        with mock.patch.object(self.thread.event, 'wait',
+                               return_value=False):
+            self._test_operation(self.thread.run_sync_thread, status_code,
+                                 expected_calls, http_request, **kwargs)
+
+    def _test_object_type(self, object_type):
+        # Add and process create request.
+        self._test_thread_processing(odl_const.ODL_CREATE, object_type)
+        rows = db.get_all_db_rows_by_state(self.db_session,
+                                           odl_const.COMPLETED)
+        self.assertEqual(1, len(rows))
+
+        # Add and process update request. Adds to database.
+        self._test_thread_processing(odl_const.ODL_UPDATE, object_type)
+        rows = db.get_all_db_rows_by_state(self.db_session,
+                                           odl_const.COMPLETED)
+        self.assertEqual(2, len(rows))
+
+        # Add and process delete request. Adds to database.
+        self._test_thread_processing(odl_const.ODL_DELETE, object_type)
+        rows = db.get_all_db_rows_by_state(self.db_session,
+                                           odl_const.COMPLETED)
+        self.assertEqual(3, len(rows))
+
+    def _test_object_type_pending_network(self, object_type):
+        # Create a network (creates db row in pending state).
+        self._call_operation_object(odl_const.ODL_CREATE,
+                                    odl_const.ODL_NETWORK)
+
+        # Create object_type database row and process. This results in both
+        # the object_type and network rows being processed.
+        self._test_thread_processing(odl_const.ODL_CREATE, object_type,
+                                     expected_calls=2)
+
+        # Verify both rows are now marked as completed.
+        rows = db.get_all_db_rows_by_state(self.db_session,
+                                           odl_const.COMPLETED)
+        self.assertEqual(2, len(rows))
+
+    def _test_object_type_processing_network(self, object_type):
+        self._test_object_operation_pending_another_object_operation(
+            object_type, odl_const.ODL_CREATE, odl_const.ODL_NETWORK,
+            odl_const.ODL_CREATE)
+
+    def _test_object_operation_pending_object_operation(
+        self, object_type, operation, pending_operation):
+        self._test_object_operation_pending_another_object_operation(
+            object_type, operation, object_type, pending_operation)
+
+    def _test_object_operation_pending_another_object_operation(
+        self, object_type, operation, pending_type, pending_operation):
+        # Create the object_type (creates db row in pending state).
+        self._call_operation_object(pending_operation,
+                                    pending_type)
+
+        # Get pending row and mark as processing so that
+        # this row will not be processed by journal thread.
+        row = db.get_all_db_rows_by_state(self.db_session, odl_const.PENDING)
+        db.update_db_row_state(self.db_session, row[0], odl_const.PROCESSING)
+
+        # Create the object_type database row and process.
+        # Verify that object request is not processed because the
+        # dependent object operation has not been marked as 'completed'.
+        self._test_thread_processing(operation,
+                                     object_type,
+                                     expected_calls=0)
+
+        # Verify that all rows are still in the database.
+        rows = db.get_all_db_rows_by_state(self.db_session,
+                                           odl_const.PROCESSING)
+        self.assertEqual(1, len(rows))
+        rows = db.get_all_db_rows_by_state(self.db_session, odl_const.PENDING)
+        self.assertEqual(1, len(rows))
+
+    def _test_parent_delete_pending_child_delete(self, parent, child):
+        # Parent delete must wait for the child's pending delete.
+        self._test_object_operation_pending_another_object_operation(
+            parent, odl_const.ODL_DELETE, child, odl_const.ODL_DELETE)
+
+    def _test_cleanup_processing_rows(self, last_retried, expected_state):
+        # Create a dummy network (creates db row in pending state).
+        self._call_operation_object(odl_const.ODL_CREATE,
+                                    odl_const.ODL_NETWORK)
+
+        # Get pending row and mark as processing and update
+        # the last_retried time
+        row = db.get_all_db_rows_by_state(self.db_session,
+                                          odl_const.PENDING)[0]
+        row.last_retried = last_retried
+        db.update_db_row_state(self.db_session, row, odl_const.PROCESSING)
+
+        # Test if the cleanup marks this in the desired state
+        # based on the last_retried timestamp
+        cleanup.JournalCleanup().cleanup_processing_rows(self.db_session)
+
+        # Verify that the Db row is in the desired state
+        rows = db.get_all_db_rows_by_state(self.db_session, expected_state)
+        self.assertEqual(1, len(rows))
+
+    def test_driver(self):
+        # Every (operation, object_type) pair must journal a pending row.
+        for operation in [odl_const.ODL_CREATE, odl_const.ODL_UPDATE,
+                          odl_const.ODL_DELETE]:
+            for object_type in [odl_const.ODL_NETWORK, odl_const.ODL_SUBNET,
+                                odl_const.ODL_PORT]:
+                self._test_operation_object(operation, object_type)
+
+    def test_port_precommit_no_tenant(self):
+        context = self._get_mock_operation_context(odl_const.ODL_PORT)
+        context.current['tenant_id'] = ''
+
+        method = getattr(self.mech, 'create_port_precommit')
+        method(context)
+
+        # Verify that the Db row has a tenant
+        rows = db.get_all_db_rows_by_state(self.db_session, odl_const.PENDING)
+        self.assertEqual(1, len(rows))
+        _network = OpenDaylightMechanismDriverTestCase.\
+            _get_mock_network_operation_context().current
+        self.assertEqual(_network['tenant_id'], rows[0]['data']['tenant_id'])
+
+    def test_network(self):
+        self._test_object_type(odl_const.ODL_NETWORK)
+
+    def test_network_update_pending_network_create(self):
+        self._test_object_operation_pending_object_operation(
+            odl_const.ODL_NETWORK, odl_const.ODL_UPDATE, odl_const.ODL_CREATE)
+
+    def test_network_delete_pending_network_create(self):
+        self._test_object_operation_pending_object_operation(
+            odl_const.ODL_NETWORK, odl_const.ODL_DELETE, odl_const.ODL_CREATE)
+
+    def test_network_delete_pending_network_update(self):
+        self._test_object_operation_pending_object_operation(
+            odl_const.ODL_NETWORK, odl_const.ODL_DELETE, odl_const.ODL_UPDATE)
+
+    def test_network_delete_pending_subnet_delete(self):
+        self._test_parent_delete_pending_child_delete(
+            odl_const.ODL_NETWORK, odl_const.ODL_SUBNET)
+
+    def test_network_delete_pending_port_delete(self):
+        self._test_parent_delete_pending_child_delete(
+            odl_const.ODL_NETWORK, odl_const.ODL_PORT)
+
+    def test_subnet(self):
+        self._test_object_type(odl_const.ODL_SUBNET)
+
+    def test_subnet_update_pending_subnet_create(self):
+        self._test_object_operation_pending_object_operation(
+            odl_const.ODL_SUBNET, odl_const.ODL_UPDATE, odl_const.ODL_CREATE)
+
+    def test_subnet_delete_pending_subnet_create(self):
+        self._test_object_operation_pending_object_operation(
+            odl_const.ODL_SUBNET, odl_const.ODL_DELETE, odl_const.ODL_CREATE)
+
+    def test_subnet_delete_pending_subnet_update(self):
+        self._test_object_operation_pending_object_operation(
+            odl_const.ODL_SUBNET, odl_const.ODL_DELETE, odl_const.ODL_UPDATE)
+
+    def test_subnet_pending_network(self):
+        self._test_object_type_pending_network(odl_const.ODL_SUBNET)
+
+    def test_subnet_processing_network(self):
+        self._test_object_type_processing_network(odl_const.ODL_SUBNET)
+
+    def test_subnet_delete_pending_port_delete(self):
+        self._test_parent_delete_pending_child_delete(
+            odl_const.ODL_SUBNET, odl_const.ODL_PORT)
+
+    def test_port(self):
+        self._test_object_type(odl_const.ODL_PORT)
+
+    def test_port_update_pending_port_create(self):
+        self._test_object_operation_pending_object_operation(
+            odl_const.ODL_PORT, odl_const.ODL_UPDATE, odl_const.ODL_CREATE)
+
+    def test_port_delete_pending_port_create(self):
+        self._test_object_operation_pending_object_operation(
+            odl_const.ODL_PORT, odl_const.ODL_DELETE, odl_const.ODL_CREATE)
+
+    def test_port_delete_pending_port_update(self):
+        self._test_object_operation_pending_object_operation(
+            odl_const.ODL_PORT, odl_const.ODL_DELETE, odl_const.ODL_UPDATE)
+
+    def test_port_pending_network(self):
+        self._test_object_type_pending_network(odl_const.ODL_PORT)
+
+    def test_port_processing_network(self):
+        self._test_object_type_processing_network(odl_const.ODL_PORT)
+
+    def test_cleanup_processing_rows_time_not_expired(self):
+        self._test_cleanup_processing_rows(datetime.datetime.utcnow(),
+                                           odl_const.PROCESSING)
+
+    def test_cleanup_processing_rows_time_expired(self):
+        old_time = datetime.datetime.utcnow() - datetime.timedelta(hours=24)
+        self._test_cleanup_processing_rows(old_time, odl_const.PENDING)
+
+    def test_thread_call(self):
+        """Verify that the sync thread method is called."""
+
+        # Create any object that would spin up the sync thread via the
+        # decorator call_thread_on_end() used by all the event handlers.
+        self._call_operation_object(odl_const.ODL_CREATE,
+                                    odl_const.ODL_NETWORK)
+
+        # Verify that the thread call was made.
+        self.assertTrue(self.mock_sync_thread.called)
+
+    def test_sg(self):
+        self._test_object_type(odl_const.ODL_SG)
+
+    def test_sg_rule(self):
+        self._test_object_type(odl_const.ODL_SG_RULE)
+
+    def _decrease_row_created_time(self, row):
+        # Back-date the row by one hour and persist the change.
+        row.created_at -= datetime.timedelta(hours=1)
+        self.db_session.merge(row)
+        self.db_session.flush()
+
+    def test_sync_multiple_updates(self):
+        # add 2 updates
+        for i in range(2):
+            self._call_operation_object(odl_const.ODL_UPDATE,
+                                        odl_const.ODL_NETWORK)
+
+        # get the last update row
+        last_row = db.get_all_db_rows(self.db_session)[-1]
+
+        # change the last update created time
+        self._decrease_row_created_time(last_row)
+
+        # create 1 more operation to trigger the sync thread
+        # verify that there are no calls to ODL controller, because the
+        # first row was not valid (exit_after_run = true)
+        self._test_thread_processing(odl_const.ODL_UPDATE,
+                                     odl_const.ODL_NETWORK, expected_calls=0)
+
+        # validate that all the rows are in 'pending' state
+        # first row should be set back to 'pending' because it was not valid
+        rows = db.get_all_db_rows_by_state(self.db_session, 'pending')
+        self.assertEqual(3, len(rows))
diff --git a/networking-odl/networking_odl/tests/unit/ml2/test_networking_topology.py b/networking-odl/networking_odl/tests/unit/ml2/test_networking_topology.py
new file mode 100644 (file)
index 0000000..fb83a7b
--- /dev/null
@@ -0,0 +1,475 @@
+# Copyright (c) 2015-2016 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+
+from os import path
+
+import mock
+from oslo_log import log
+from oslo_serialization import jsonutils
+import requests
+
+from neutron.extensions import portbindings
+from neutron.plugins.common import constants
+from neutron.plugins.ml2 import driver_api
+from neutron.plugins.ml2 import driver_context
+from neutron_lib import constants as n_constants
+
+from networking_odl.common import cache
+from networking_odl.ml2 import mech_driver
+from networking_odl.ml2 import mech_driver_v2
+from networking_odl.ml2 import network_topology
+from networking_odl.tests import base
+
+
+LOG = log.getLogger(__name__)
+
+
+class TestNetworkTopologyManager(base.DietTestCase):
+    """Tests for network_topology.NetworkTopologyManager port binding.
+
+    Topology data is served from JSON fixture files next to this module
+    (ovs_topology.json / vhostuser_topology.json) via a mocked client.
+    """
+
+    # pylint: disable=protected-access
+
+    # given valid and invalid segments
+    valid_segment = {
+        driver_api.ID: 'API_ID',
+        driver_api.NETWORK_TYPE: constants.TYPE_LOCAL,
+        driver_api.SEGMENTATION_ID: 'API_SEGMENTATION_ID',
+        driver_api.PHYSICAL_NETWORK: 'API_PHYSICAL_NETWORK'}
+
+    invalid_segment = {
+        driver_api.ID: 'API_ID',
+        driver_api.NETWORK_TYPE: constants.TYPE_NONE,
+        driver_api.SEGMENTATION_ID: 'API_SEGMENTATION_ID',
+        driver_api.PHYSICAL_NETWORK: 'API_PHYSICAL_NETWORK'}
+
+    segments_to_bind = [valid_segment, invalid_segment]
+
+    def setUp(self):
+        """Enable topology logging and patch the ml2_odl configuration."""
+        super(TestNetworkTopologyManager, self).setUp()
+        self.patch(network_topology.LOG, 'isEnabledFor', lambda level: True)
+        # patch given configuration
+        self.cfg = mocked_cfg = self.patch(network_topology.client, 'cfg')
+        mocked_cfg.CONF.ml2_odl.url =\
+            'http://localhost:8181/controller/nb/v2/neutron'
+        mocked_cfg.CONF.ml2_odl.username = 'admin'
+        mocked_cfg.CONF.ml2_odl.password = 'admin'
+        mocked_cfg.CONF.ml2_odl.timeout = 5
+
+    @mock.patch.object(cache, 'LOG')
+    @mock.patch.object(network_topology, 'LOG')
+    def test_fetch_elements_by_host_with_no_entry(
+            self, network_topology_logger, cache_logger):
+        """Unknown host: fetch raises ValueError and logs on both loggers."""
+        given_client = self.mock_client('ovs_topology.json')
+        self.mock_get_addresses_by_name(['127.0.0.1', '192.168.0.1'])
+        given_network_topology = network_topology.NetworkTopologyManager(
+            client=given_client)
+
+        try:
+            next(given_network_topology._fetch_elements_by_host(
+                 'some_host_name'))
+        except ValueError as error:
+            cache_logger.warning.assert_called_once_with(
+                'Error fetching values for keys: %r',
+                "'some_host_name', '127.0.0.1', '192.168.0.1'",
+                exc_info=(ValueError, error, mock.ANY))
+            network_topology_logger.exception.assert_called_once_with(
+                'No such network topology elements for given host '
+                '%(host_name)r and given IPs: %(ip_addresses)s.',
+                {'ip_addresses': '127.0.0.1, 192.168.0.1',
+                 'host_name': 'some_host_name'})
+        else:
+            self.fail('Expected ValueError being raised.')
+
+    def test_fetch_element_with_ovs_entry(self):
+        """An OVS-only topology entry is resolved from the host addresses."""
+        given_client = self.mock_client('ovs_topology.json')
+        self.mock_get_addresses_by_name(['127.0.0.1', '10.237.214.247'])
+        given_network_topology = network_topology.NetworkTopologyManager(
+            client=given_client)
+
+        elements = given_network_topology._fetch_elements_by_host(
+            'some_host_name.')
+
+        self.assertEqual([
+            {'class':
+             'networking_odl.ml2.ovsdb_topology.OvsdbNetworkTopologyElement',
+             'has_datapath_type_netdev': False,
+             'host_addresses': ['10.237.214.247'],
+             'support_vhost_user': False,
+             'uuid': 'c4ad780f-8f91-4fa4-804e-dd16beb191e2',
+             'valid_vif_types': [portbindings.VIF_TYPE_OVS]}],
+            [e.to_dict() for e in elements])
+
+    def test_fetch_elements_with_vhost_user_entry(self):
+        """A vhost-user capable entry yields both VHOST_USER and OVS types."""
+        given_client = self.mock_client('vhostuser_topology.json')
+        self.mock_get_addresses_by_name(['127.0.0.1', '192.168.66.1'])
+        given_network_topology = network_topology.NetworkTopologyManager(
+            client=given_client)
+
+        elements = given_network_topology._fetch_elements_by_host(
+            'some_host_name.')
+
+        self.assertEqual([
+            {'class':
+             'networking_odl.ml2.ovsdb_topology.OvsdbNetworkTopologyElement',
+             'has_datapath_type_netdev': True,
+             'host_addresses': ['192.168.66.1'],
+             'support_vhost_user': True,
+             'uuid': 'c805d82d-a5d8-419d-bc89-6e3713ff9f6c',
+             'valid_vif_types': [portbindings.VIF_TYPE_VHOST_USER,
+                                 portbindings.VIF_TYPE_OVS],
+             'port_prefix': 'vhu',
+             'vhostuser_socket_dir': '/var/run/openvswitch'}],
+            [e.to_dict() for e in elements])
+
+    def mock_get_addresses_by_name(self, ips):
+        """Patch utils.get_addresses_by_name to return the given IPs."""
+        utils = self.patch(
+            network_topology, 'utils',
+            mock.Mock(
+                get_addresses_by_name=mock.Mock(return_value=tuple(ips))))
+        return utils.get_addresses_by_name
+
+    def mock_client(self, topology_name=None):
+        """Build a mocked topology client serving the named JSON fixture.
+
+        NOTE(review): 'specs=' below is likely a typo for 'spec='; as
+        written the keyword is ignored and the mock is unspecced — confirm
+        against mock.NonCallableMock's signature.
+        """
+
+        mocked_client = mock.NonCallableMock(
+            specs=network_topology.NetworkTopologyClient)
+
+        if topology_name:
+            cached_file_path = path.join(path.dirname(__file__), topology_name)
+
+            with open(cached_file_path, 'rt') as fd:
+                topology = jsonutils.loads(str(fd.read()), encoding='utf-8')
+
+            mocked_client.get().json.return_value = topology
+
+        return mocked_client
+
+    def test_bind_port_from_mech_driver_with_ovs(self):
+        """Mech driver v1 binds an OVS port via the patched topology."""
+
+        given_client = self.mock_client('ovs_topology.json')
+        self.mock_get_addresses_by_name(['127.0.0.1', '10.237.214.247'])
+        given_network_topology = network_topology.NetworkTopologyManager(
+            vif_details={'some': 'detail'},
+            client=given_client)
+        self.patch(
+            network_topology, 'NetworkTopologyManager',
+            return_value=given_network_topology)
+
+        given_driver = mech_driver.OpenDaylightMechanismDriver()
+        given_driver.odl_drv = mech_driver.OpenDaylightDriver()
+        given_port_context = self.given_port_context()
+
+        # when port is bound
+        given_driver.bind_port(given_port_context)
+
+        # then context binding is setup with returned vif_type and valid
+        # segment api ID
+        given_port_context.set_binding.assert_called_once_with(
+            self.valid_segment[driver_api.ID], portbindings.VIF_TYPE_OVS,
+            {'some': 'detail'}, status=n_constants.PORT_STATUS_ACTIVE)
+
+    def test_bind_port_from_mech_driver_with_vhostuser(self):
+        """Mech driver v1 binds a vhost-user port with merged vif details."""
+
+        given_client = self.mock_client('vhostuser_topology.json')
+        self.mock_get_addresses_by_name(['127.0.0.1', '192.168.66.1'])
+        given_network_topology = network_topology.NetworkTopologyManager(
+            vif_details={'some': 'detail'},
+            client=given_client)
+        self.patch(
+            network_topology, 'NetworkTopologyManager',
+            return_value=given_network_topology)
+
+        given_driver = mech_driver.OpenDaylightMechanismDriver()
+        given_driver.odl_drv = mech_driver.OpenDaylightDriver()
+        given_port_context = self.given_port_context()
+
+        # when port is bound
+        given_driver.bind_port(given_port_context)
+
+        expected_vif_details = {
+            'vhostuser_socket': '/var/run/openvswitch/vhuCURRENT_CON',
+            'vhostuser_ovs_plug': True,
+            'some': 'detail',
+            'vhostuser_mode': 'client'}
+
+        # then context binding is setup with returned vif_type and valid
+        # segment api ID
+        given_port_context.set_binding.assert_called_once_with(
+            self.valid_segment[driver_api.ID],
+            portbindings.VIF_TYPE_VHOST_USER,
+            expected_vif_details, status=n_constants.PORT_STATUS_ACTIVE)
+
+    def test_bind_port_from_mech_driver_v2_with_ovs(self):
+        """Mech driver v2 binds an OVS port via the patched topology."""
+        given_client = self.mock_client('ovs_topology.json')
+        self.mock_get_addresses_by_name(['127.0.0.1', '10.237.214.247'])
+        given_network_topology = network_topology.NetworkTopologyManager(
+            vif_details={'some': 'detail'},
+            client=given_client)
+        self.patch(
+            network_topology, 'NetworkTopologyManager',
+            return_value=given_network_topology)
+
+        given_driver = mech_driver_v2.OpenDaylightMechanismDriver()
+        given_port_context = self.given_port_context()
+
+        given_driver.initialize()
+        # when port is bound
+        given_driver.bind_port(given_port_context)
+
+        # then context binding is setup with returned vif_type and valid
+        # segment api ID
+        given_port_context.set_binding.assert_called_once_with(
+            self.valid_segment[driver_api.ID], portbindings.VIF_TYPE_OVS,
+            {'some': 'detail'}, status=n_constants.PORT_STATUS_ACTIVE)
+
+    def test_bind_port_from_mech_driver_v2_with_vhostuser(self):
+        """Mech driver v2 binds a vhost-user port with merged vif details."""
+        given_client = self.mock_client('vhostuser_topology.json')
+        self.mock_get_addresses_by_name(['127.0.0.1', '192.168.66.1'])
+        given_network_topology = network_topology.NetworkTopologyManager(
+            vif_details={'some': 'detail'},
+            client=given_client)
+        self.patch(
+            network_topology, 'NetworkTopologyManager',
+            return_value=given_network_topology)
+
+        given_driver = mech_driver_v2.OpenDaylightMechanismDriver()
+        given_driver._network_topology = given_network_topology
+        given_port_context = self.given_port_context()
+
+        given_driver.initialize()
+        # when port is bound
+        given_driver.bind_port(given_port_context)
+
+        expected_vif_details = {
+            'vhostuser_socket': '/var/run/openvswitch/vhuCURRENT_CON',
+            'vhostuser_ovs_plug': True,
+            'some': 'detail',
+            'vhostuser_mode': 'client'}
+
+        # then context binding is setup with returned vif_type and valid
+        # segment api ID
+        given_port_context.set_binding.assert_called_once_with(
+            self.valid_segment[driver_api.ID],
+            portbindings.VIF_TYPE_VHOST_USER,
+            expected_vif_details, status=n_constants.PORT_STATUS_ACTIVE)
+
+    def test_bind_port_with_vif_type_ovs(self):
+        """The manager itself binds directly with the OVS VIF type."""
+        given_topology = self._mock_network_topology(
+            'ovs_topology.json', vif_details={'much': 'details'})
+        given_port_context = self.given_port_context()
+
+        # when port is bound
+        given_topology.bind_port(given_port_context)
+
+        # then context binding is setup with returned vif_type and valid
+        # segment api ID
+        given_port_context.set_binding.assert_called_once_with(
+            self.valid_segment[driver_api.ID], portbindings.VIF_TYPE_OVS,
+            {'much': 'details'}, status=n_constants.PORT_STATUS_ACTIVE)
+
+    def test_bind_port_with_vif_type_vhost_user(self):
+        """The manager itself binds directly with the vhost-user VIF type."""
+        given_topology = self._mock_network_topology(
+            'vhostuser_topology.json', vif_details={'much': 'details'})
+        given_port_context = self.given_port_context()
+
+        # when port is bound
+        given_topology.bind_port(given_port_context)
+
+        # then context binding is setup with returned vif_type and valid
+        # segment api ID
+        given_port_context.set_binding.assert_called_once_with(
+            self.valid_segment[driver_api.ID],
+            portbindings.VIF_TYPE_VHOST_USER,
+            {'vhostuser_socket': '/var/run/openvswitch/vhuCURRENT_CON',
+             'vhostuser_ovs_plug': True, 'vhostuser_mode': 'client',
+             'much': 'details'},
+            status=n_constants.PORT_STATUS_ACTIVE)
+
+    @mock.patch.object(network_topology, 'LOG')
+    def test_bind_port_without_valid_segment(self, logger):
+        """Binding fails without a valid segment and logs the failure."""
+        given_topology = self._mock_network_topology('ovs_topology.json')
+        given_port_context = self.given_port_context(
+            given_segments=[self.invalid_segment])
+
+        # when port is bound
+        given_topology.bind_port(given_port_context)
+
+        self.assertFalse(given_port_context.set_binding.called)
+        logger.exception.assert_called_once_with(
+            'Network topology element has failed binding port:\n%(element)s',
+            {'element': mock.ANY})
+        logger.error.assert_called_once_with(
+            'Unable to bind port element for given host and valid VIF types:\n'
+            '\thostname: %(host_name)s\n'
+            '\tvalid VIF types: %(valid_vif_types)s',
+            {'host_name': 'some_host', 'valid_vif_types': 'vhostuser, ovs'})
+
+    def _mock_network_topology(self, given_topology, vif_details=None):
+        """Create a manager backed by a mocked client and patched DNS."""
+        self.mock_get_addresses_by_name(
+            ['127.0.0.1', '10.237.214.247', '192.168.66.1'])
+        return network_topology.NetworkTopologyManager(
+            client=self.mock_client(given_topology),
+            vif_details=vif_details)
+
+    def given_port_context(self, given_segments=None):
+        """Build a mocked PortContext; defaults to self.segments_to_bind."""
+        # given NetworkContext
+        network = mock.MagicMock(spec=driver_api.NetworkContext)
+
+        if given_segments is None:
+            given_segments = self.segments_to_bind
+
+        # given port context
+        return mock.MagicMock(
+            spec=driver_context.PortContext,
+            current={'id': 'CURRENT_CONTEXT_ID'},
+            host='some_host',
+            segments_to_bind=given_segments,
+            network=network,
+            _new_bound_segment=self.valid_segment)
+
+    # NOTE: attribute name misspells "NETWORK"; kept as-is since it is
+    # referenced by mock_request_network_topology below.
+    NETOWORK_TOPOLOGY_URL =\
+        'http://localhost:8181/'\
+        'restconf/operational/network-topology:network-topology/'
+
+    def mock_request_network_topology(self, file_name):
+        """Patch the ODL REST request to return the named cached topology.
+
+        NOTE(review): when the cached JSON file is missing, this fetches
+        the topology from a live ODL controller and writes the cache —
+        i.e. the test may hit the network on a cache miss.
+        """
+        cached_file_path = path.join(
+            path.dirname(__file__), file_name + '.json')
+
+        if path.isfile(cached_file_path):
+            LOG.debug('Loading topology from file: %r', cached_file_path)
+            with open(cached_file_path, 'rt') as fd:
+                topology = jsonutils.loads(str(fd.read()), encoding='utf-8')
+        else:
+            LOG.debug(
+                'Getting topology from ODL: %r', self.NETOWORK_TOPOLOGY_URL)
+            request = requests.get(
+                self.NETOWORK_TOPOLOGY_URL, auth=('admin', 'admin'),
+                headers={'Content-Type': 'application/json'})
+            request.raise_for_status()
+
+            with open(cached_file_path, 'wt') as fd:
+                LOG.debug('Saving topology to file: %r', cached_file_path)
+                topology = request.json()
+                jsonutils.dump(
+                    topology, fd, sort_keys=True, indent=4,
+                    separators=(',', ': '))
+
+        mocked_request = self.patch(
+            mech_driver.odl_client.requests, 'request',
+            return_value=mock.MagicMock(
+                spec=requests.Response,
+                json=mock.MagicMock(return_value=topology)))
+
+        return mocked_request
+
+
+class TestNetworkTopologyClient(base.DietTestCase):
+    """Tests for the NetworkTopologyClient REST helper."""
+
+    given_host = 'given.host'
+    given_port = 1234
+    given_url_with_port = 'http://{}:{}/'.format(
+        given_host, given_port)
+    given_url_without_port = 'http://{}/'.format(given_host)
+    given_username = 'GIVEN_USERNAME'
+    given_password = 'GIVEN_PASSWORD'
+    given_timeout = 20
+
+    def given_client(
+            self, url=None, username=None, password=None, timeout=None):
+        """Build a client from test defaults, overridable per argument."""
+        return network_topology.NetworkTopologyClient(
+            url=url or self.given_url_with_port,
+            username=username or self.given_username,
+            password=password or self.given_password,
+            timeout=timeout or self.given_timeout)
+
+    def test_constructor(self):
+        """Restconf URL, auth tuple and timeout are stored on the client."""
+        # When client is created
+        rest_client = network_topology.NetworkTopologyClient(
+            url=self.given_url_with_port,
+            username=self.given_username,
+            password=self.given_password,
+            timeout=self.given_timeout)
+
+        self.assertEqual(
+            self.given_url_with_port +
+            'restconf/operational/network-topology:network-topology',
+            rest_client.url)
+        self.assertEqual(
+            (self.given_username, self.given_password), rest_client.auth)
+        self.assertEqual(self.given_timeout, rest_client.timeout)
+
+    def test_request_with_port(self):
+        """request() forwards method, URL (with port), auth and data."""
+        # Given rest client and used 'requests' module
+        given_client = self.given_client()
+        mocked_requests_module = self.mocked_requests()
+
+        # When a request is performed
+        result = given_client.request(
+            'GIVEN_METHOD', 'given/path', 'GIVEN_DATA')
+
+        # Then request method is called
+        mocked_requests_module.request.assert_called_once_with(
+            'GIVEN_METHOD',
+            url='http://given.host:1234/restconf/operational/' +
+            'network-topology:network-topology/given/path',
+            auth=(self.given_username, self.given_password),
+            data='GIVEN_DATA', headers={'Content-Type': 'application/json'},
+            timeout=self.given_timeout)
+
+        # Then request method result is returned
+        self.assertIs(mocked_requests_module.request.return_value, result)
+
+    def test_request_without_port(self):
+        """request() builds the URL correctly when no port is given."""
+        # Given rest client and used 'requests' module
+        given_client = self.given_client(url=self.given_url_without_port)
+        mocked_requests_module = self.mocked_requests()
+
+        # When a request is performed
+        result = given_client.request(
+            'GIVEN_METHOD', 'given/path', 'GIVEN_DATA')
+
+        # Then request method is called
+        mocked_requests_module.request.assert_called_once_with(
+            'GIVEN_METHOD',
+            url='http://given.host/restconf/operational/' +
+            'network-topology:network-topology/given/path',
+            auth=(self.given_username, self.given_password),
+            data='GIVEN_DATA', headers={'Content-Type': 'application/json'},
+            timeout=self.given_timeout)
+
+        # Then request method result is returned
+        self.assertIs(mocked_requests_module.request.return_value, result)
+
+    def test_get(self):
+        """get() delegates to request() with the 'get' HTTP method."""
+        # Given rest client and used 'requests' module
+        given_client = self.given_client()
+        mocked_requests_module = self.mocked_requests()
+
+        # When a request is performed
+        result = given_client.get('given/path', 'GIVEN_DATA')
+
+        # Then request method is called
+        mocked_requests_module.request.assert_called_once_with(
+            'get',
+            url='http://given.host:1234/restconf/operational/' +
+            'network-topology:network-topology/given/path',
+            auth=(self.given_username, self.given_password),
+            data='GIVEN_DATA', headers={'Content-Type': 'application/json'},
+            timeout=self.given_timeout)
+
+        # Then request method result is returned
+        self.assertIs(mocked_requests_module.request.return_value, result)
+
+    def mocked_requests(self):
+        """Patch the 'requests' module used by the topology client."""
+        return self.patch(network_topology.client, 'requests')
diff --git a/networking-odl/networking_odl/tests/unit/ml2/test_ovsdb_topology.py b/networking-odl/networking_odl/tests/unit/ml2/test_ovsdb_topology.py
new file mode 100644 (file)
index 0000000..228154d
--- /dev/null
@@ -0,0 +1,248 @@
+# Copyright (c) 2015 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from os import path
+
+import mock
+from oslo_log import log
+from oslo_serialization import jsonutils
+
+from neutron.extensions import portbindings
+from neutron.plugins.common import constants
+from neutron.plugins.ml2 import driver_api
+from neutron.plugins.ml2 import driver_context
+from neutron_lib import constants as n_constants
+
+from networking_odl.ml2 import ovsdb_topology
+from networking_odl.tests import base
+
+
+LOG = log.getLogger(__name__)
+
+
+class TestOvsdbTopologyParser(base.DietTestCase):
+    """Tests for OvsdbNetworkTopologyParser against JSON fixtures."""
+
+    def test_parse_network_topology_ovs(self):
+        """A plain-OVS topology yields one OVS-only element."""
+        given_parser = ovsdb_topology.OvsdbNetworkTopologyParser()
+        given_topology = self.load_network_topology('ovs_topology.json')
+
+        # when parse topology
+        elements = list(given_parser.parse_network_topology(given_topology))
+
+        # then parser yields one element supporting only OVS vif type
+        self.assertEqual(
+            [{'class':
+              'networking_odl.ml2.ovsdb_topology.OvsdbNetworkTopologyElement',
+              'has_datapath_type_netdev': False,
+              'host_addresses': ['10.237.214.247'],
+              'support_vhost_user': False,
+              'uuid': 'c4ad780f-8f91-4fa4-804e-dd16beb191e2',
+              'valid_vif_types': [portbindings.VIF_TYPE_OVS]}],
+            [e.to_dict() for e in elements])
+
+    def test_parse_network_topology_vhostuser(self):
+        """A vhost-user topology yields an element with both VIF types."""
+        given_parser = ovsdb_topology.OvsdbNetworkTopologyParser()
+        given_topology = self.load_network_topology('vhostuser_topology.json')
+
+        # when parse topology
+        elements = list(given_parser.parse_network_topology(given_topology))
+
+        # then parser yields one element supporting VHOSTUSER and OVS vif types
+        self.assertEqual(
+            [{'class':
+              'networking_odl.ml2.ovsdb_topology.OvsdbNetworkTopologyElement',
+              'has_datapath_type_netdev': True,
+              'host_addresses': ['192.168.66.1'],
+              'port_prefix': 'vhu',
+              'support_vhost_user': True,
+              'uuid': 'c805d82d-a5d8-419d-bc89-6e3713ff9f6c',
+              'valid_vif_types': [portbindings.VIF_TYPE_VHOST_USER,
+                                  portbindings.VIF_TYPE_OVS],
+              'vhostuser_socket_dir': '/var/run/openvswitch'}],
+            [e.to_dict() for e in elements])
+
+    def load_network_topology(self, file_name):
+        """Load a JSON topology fixture located next to this module."""
+        file_path = path.join(path.dirname(__file__), file_name)
+        LOG.debug('Loading topology from file: %r', file_path)
+        with open(file_path, 'rt') as fd:
+            return jsonutils.loads(str(fd.read()), encoding='utf-8')
+
+
+class TestOvsdbNetworkingTopologyElement(base.DietTestCase):
+    """Tests for OvsdbNetworkTopologyElement VIF types and port binding."""
+
+    # given valid and invalid segments
+    VALID_SEGMENT = {
+        driver_api.ID: 'API_ID',
+        driver_api.NETWORK_TYPE: constants.TYPE_LOCAL,
+        driver_api.SEGMENTATION_ID: 'API_SEGMENTATION_ID',
+        driver_api.PHYSICAL_NETWORK: 'API_PHYSICAL_NETWORK'}
+
+    INVALID_SEGMENT = {
+        driver_api.ID: 'API_ID',
+        driver_api.NETWORK_TYPE: constants.TYPE_NONE,
+        driver_api.SEGMENTATION_ID: 'API_SEGMENTATION_ID',
+        driver_api.PHYSICAL_NETWORK: 'API_PHYSICAL_NETWORK'}
+
+    segments_to_bind = [INVALID_SEGMENT, VALID_SEGMENT]
+
+    def given_element(self, uuid='some_uuid', **kwargs):
+        """Build an OvsdbNetworkTopologyElement under test."""
+        return ovsdb_topology.OvsdbNetworkTopologyElement(uuid=uuid, **kwargs)
+
+    def test_valid_vif_types_with_no_positive_value(self):
+        """Neither netdev nor vhost-user support: only OVS is valid."""
+        given_element = self.given_element(
+            has_datapath_type_netdev=False, support_vhost_user=False)
+        valid_vif_types = given_element.valid_vif_types
+        self.assertEqual([portbindings.VIF_TYPE_OVS], valid_vif_types)
+
+    def test_valid_vif_types_with_datapath_type_netdev(self):
+        """netdev alone (no vhost-user) still yields only OVS."""
+        given_element = self.given_element(
+            has_datapath_type_netdev=True, support_vhost_user=False)
+        valid_vif_types = given_element.valid_vif_types
+        self.assertEqual([portbindings.VIF_TYPE_OVS], valid_vif_types)
+
+    def test_valid_vif_types_with_support_vhost_user(self):
+        """vhost-user alone (no netdev) still yields only OVS."""
+        given_element = self.given_element(
+            has_datapath_type_netdev=False, support_vhost_user=True)
+        valid_vif_types = given_element.valid_vif_types
+        self.assertEqual([portbindings.VIF_TYPE_OVS], valid_vif_types)
+
+    def test_valid_vif_types_with_all_positive_values(self):
+        """Both netdev and vhost-user: VHOST_USER is listed before OVS."""
+        given_element = self.given_element(
+            has_datapath_type_netdev=True, support_vhost_user=True)
+        valid_vif_types = given_element.valid_vif_types
+        self.assertEqual(
+            [portbindings.VIF_TYPE_VHOST_USER, portbindings.VIF_TYPE_OVS],
+            valid_vif_types)
+
+    def test_to_json_ovs(self):
+        """to_json() of an OVS-only element omits vhost-user keys."""
+        given_element = self.given_element(
+            has_datapath_type_netdev=False, support_vhost_user=True,
+            remote_ip='192.168.99.33')
+        json = given_element.to_json()
+        self.assertEqual(
+            {'class':
+             'networking_odl.ml2.ovsdb_topology.OvsdbNetworkTopologyElement',
+             'uuid': 'some_uuid',
+             'host_addresses': ['192.168.99.33'],
+             'has_datapath_type_netdev': False,
+             'support_vhost_user': True,
+             'valid_vif_types': [portbindings.VIF_TYPE_OVS]},
+            jsonutils.loads(json))
+
+    def test_to_json_vhost_user(self):
+        """to_json() of a vhost-user element includes prefix and socket dir."""
+        given_element = self.given_element(
+            has_datapath_type_netdev=True, support_vhost_user=True,
+            remote_ip='192.168.99.66')
+        json = given_element.to_json()
+        self.assertEqual(
+            {'class':
+             'networking_odl.ml2.ovsdb_topology.OvsdbNetworkTopologyElement',
+             'uuid': 'some_uuid',
+             'host_addresses': ['192.168.99.66'],
+             'has_datapath_type_netdev': True,
+             'support_vhost_user': True,
+             'valid_vif_types':
+             [portbindings.VIF_TYPE_VHOST_USER, portbindings.VIF_TYPE_OVS],
+             'port_prefix': 'vhu',
+             'vhostuser_socket_dir': '/var/run/openvswitch'},
+            jsonutils.loads(json))
+
+    def test_set_attr_with_invalid_name(self):
+        """Setting an undeclared attribute raises AttributeError."""
+        element = self.given_element()
+        self.assertRaises(
+            AttributeError, lambda: setattr(element, 'invalid_attribute', 10))
+
+    def test_is_valid_segment(self):
+        """Validate the _check_segment method."""
+
+        # given driver and all network types
+        given_element = self.given_element(
+            has_datapath_type_netdev=True, support_vhost_user=True,
+            remote_ip='192.168.99.66')
+        all_network_types = [constants.TYPE_FLAT, constants.TYPE_GRE,
+                             constants.TYPE_LOCAL, constants.TYPE_VXLAN,
+                             constants.TYPE_VLAN, constants.TYPE_NONE]
+
+        # when checking segments network type
+        valid_types = {
+            network_type
+            for network_type in all_network_types
+            if given_element._is_valid_segment(
+                {driver_api.NETWORK_TYPE: network_type})}
+
+        # then true is returned only for valid network types
+        self.assertEqual({
+            constants.TYPE_LOCAL, constants.TYPE_GRE, constants.TYPE_VXLAN,
+            constants.TYPE_VLAN}, valid_types)
+
+    def test_bind_port_with_vif_type_ovs(self):
+        """bind_port() with OVS passes vif details through unchanged."""
+        given_port_context = self.given_port_context(
+            given_segments=[self.INVALID_SEGMENT, self.VALID_SEGMENT])
+        given_element = self.given_element('some_uuid')
+
+        # When bind port
+        given_element.bind_port(
+            port_context=given_port_context,
+            vif_type=portbindings.VIF_TYPE_OVS,
+            vif_details={'some_details': None})
+
+        given_port_context.set_binding.assert_called_once_with(
+            self.VALID_SEGMENT[driver_api.ID], portbindings.VIF_TYPE_OVS,
+            {'some_details': None}, status=n_constants.PORT_STATUS_ACTIVE)
+
+    def test_bind_port_with_vif_type_vhost_user(self):
+        """bind_port() with vhost-user injects socket/plug/mode details."""
+        given_port_context = self.given_port_context(
+            given_segments=[self.INVALID_SEGMENT, self.VALID_SEGMENT])
+        given_element = self.given_element('some_uuid')
+
+        # When bind port
+        given_element.bind_port(
+            port_context=given_port_context,
+            vif_type=portbindings.VIF_TYPE_VHOST_USER,
+            vif_details={'some_details': None})
+
+        given_port_context.set_binding.assert_called_once_with(
+            self.VALID_SEGMENT[driver_api.ID],
+            portbindings.VIF_TYPE_VHOST_USER,
+            {'vhostuser_socket': '/var/run/openvswitch/vhuCURRENT_CON',
+             'some_details': None, 'vhostuser_ovs_plug': True,
+             'vhostuser_mode': 'client'},
+            status=n_constants.PORT_STATUS_ACTIVE)
+
+    @mock.patch.object(ovsdb_topology, 'LOG')
+    def test_bind_port_without_valid_segment(self, logger):
+        """bind_port() raises ValueError when no segment is bindable."""
+        given_port_context = self.given_port_context(
+            given_segments=[self.INVALID_SEGMENT])
+        given_element = self.given_element('some_uuid')
+
+        # when port is bound
+        self.assertRaises(
+            ValueError, lambda: given_element.bind_port(
+                port_context=given_port_context,
+                vif_type=portbindings.VIF_TYPE_OVS,
+                vif_details={'some_details': None}))
+
+        self.assertFalse(given_port_context.set_binding.called)
+
+    def given_port_context(self, given_segments):
+        """Build a mocked PortContext over the given segments."""
+        # given NetworkContext
+        network = mock.MagicMock(spec=driver_api.NetworkContext)
+
+        # given port context
+        return mock.MagicMock(
+            spec=driver_context.PortContext,
+            current={'id': 'CURRENT_CONTEXT_ID'},
+            segments_to_bind=given_segments,
+            network=network)
diff --git a/networking-odl/networking_odl/tests/unit/ml2/test_port_binding.py b/networking-odl/networking_odl/tests/unit/ml2/test_port_binding.py
new file mode 100644 (file)
index 0000000..35ae9ec
--- /dev/null
@@ -0,0 +1,44 @@
+# Copyright (c) 2016 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import mock
+
+from networking_odl.ml2 import legacy_port_binding
+from networking_odl.ml2 import port_binding
+from networking_odl.tests import base
+
+
+class TestPortBindingManager(base.DietTestCase):
+    """Tests for the PortBindingManager factory and controller dispatch."""
+
+    def test_create(self):
+        """A known name yields a manager wrapping the legacy controller."""
+        mgr = port_binding.PortBindingManager.create(
+            name="legacy-port-binding")
+        self.assertEqual("legacy-port-binding", mgr.name)
+        self.assertIsInstance(mgr.controller,
+                              legacy_port_binding.LegacyPortBindingManager)
+
+    def test_create_with_nonexist_name(self):
+        """An unknown controller name is rejected with AssertionError."""
+        self.assertRaises(AssertionError,
+                          port_binding.PortBindingManager.create,
+                          name="nonexist-port-binding")
+
+    @mock.patch.object(legacy_port_binding.LegacyPortBindingManager,
+                       "bind_port")
+    def test_bind_port(self, mock_method):
+        """Controller's bind_port receives the port context.
+
+        NOTE(review): this calls ``mgr.controller.bind_port`` (the patched
+        mock) directly rather than going through the manager, so the
+        assertion is tautological; consider exercising the manager's own
+        delegation entry point instead.
+        """
+        port_context = mock.Mock()
+        mgr = port_binding.PortBindingManager.create(
+            name="legacy-port-binding")
+        mgr.controller.bind_port(port_context)
+        mock_method.assert_called_once_with(port_context)
diff --git a/networking-odl/networking_odl/tests/unit/ml2/test_pseudo_agentdb_binding.py b/networking-odl/networking_odl/tests/unit/ml2/test_pseudo_agentdb_binding.py
new file mode 100644 (file)
index 0000000..d69150c
--- /dev/null
@@ -0,0 +1,334 @@
+# Copyright (c) 2016 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from copy import deepcopy
+import mock
+from os import path as os_path
+from string import Template
+
+from neutron.extensions import portbindings
+from neutron.plugins.common import constants
+from neutron.plugins.ml2 import config
+from neutron.plugins.ml2 import driver_api as api
+from neutron.plugins.ml2 import driver_context as ctx
+from neutron_lib import constants as n_const
+
+from networking_odl.ml2 import pseudo_agentdb_binding
+from networking_odl.tests import base
+
+AGENTDB_BINARY = 'neutron-odlagent-portbinding'
+L2_TYPE = "ODL L2"
+
+
+class TestPseudoAgentDBBindingController(base.DietTestCase):
+    """Test class for AgentDBPortBinding."""
+
+    # test data hostconfig and hostconfig-dbget
+    sample_odl_hconfigs = {"hostconfigs": {"hostconfig": [
+        {"host-id": "devstack",
+         "host-type": "ODL L2",
+         "config": """{"supported_vnic_types": [
+                    {"vnic_type": "normal", "vif_type": "ovs",
+                     "vif_details": {}}],
+                    "allowed_network_types": [
+                    "local", "vlan", "vxlan", "gre"],
+                    "bridge_mappings": {"physnet1": "br-ex"}}"""}
+    ]}}
+
+    # Test data for string interpolation of substitutable identifiers
+    #   e.g. $PORT_ID identifier in the configurations JSON string below shall
+    # be substituted with portcontext.current['id'] eliminating the check
+    # for specific vif_type making port-binding truly switch agnostic.
+    # Refer: Python string templates and interpolation (string.Template)
+    sample_hconf_str_tmpl_subs_vpp = {
+        "host": "devstack",      # host-id in ODL JSON
+        "agent_type": "ODL L2",  # host-type in ODL JSON
+                                 # config in ODL JSON
+        "configurations": """{"supported_vnic_types": [
+                    {"vnic_type": "normal", "vif_type": "vhostuser",
+                     "vif_details": {
+                        "uuid": "TEST_UUID",
+                        "has_datapath_type_netdev": true,
+                        "support_vhost_user": true,
+                        "port_prefix": "socket_",
+                        "vhostuser_socket_dir": "/tmp",
+                        "vhostuser_ovs_plug": true,
+                        "vhostuser_mode": "server",
+                        "vhostuser_socket":
+                            "/tmp/socket_$PORT_ID"
+                    }}],
+                    "allowed_network_types": [
+                    "local", "vlan", "vxlan", "gre"],
+                    "bridge_mappings": {"physnet1": "br-ex"}}"""
+    }
+
+    sample_hconf_str_tmpl_subs_ovs = {
+        "host": "devstack",      # host-id in ODL JSON
+        "agent_type": "ODL L2",  # host-type in ODL JSON
+                                 # config in ODL JSON
+        "configurations": """{"supported_vnic_types": [
+                    {"vnic_type": "normal", "vif_type": "vhostuser",
+                     "vif_details": {
+                        "uuid": "TEST_UUID",
+                        "has_datapath_type_netdev": true,
+                        "support_vhost_user": true,
+                        "port_prefix": "vhu_",
+                        "vhostuser_socket_dir": "/var/run/openvswitch",
+                        "vhostuser_ovs_plug": true,
+                        "vhostuser_mode": "client",
+                        "vhostuser_socket":
+                            "/var/run/openvswitch/vhu_$PORT_ID"
+                    }}],
+                    "allowed_network_types": [
+                    "local", "vlan", "vxlan", "gre"],
+                    "bridge_mappings": {"physnet1": "br-ex"}}"""
+    }
+
+    sample_hconf_str_tmpl_nosubs = {
+        "host": "devstack",      # host-id in ODL JSON
+        "agent_type": "ODL L2",  # host-type in ODL JSON
+                                 # config in ODL JSON
+        "configurations": """{"supported_vnic_types": [
+                    {"vnic_type": "normal", "vif_type": "ovs",
+                     "vif_details": {
+                        "uuid": "TEST_UUID",
+                        "has_datapath_type_netdev": true,
+                        "support_vhost_user": true,
+                        "port_prefix": "socket_",
+                        "vhostuser_socket_dir": "/tmp",
+                        "vhostuser_ovs_plug": true,
+                        "vhostuser_mode": "server",
+                        "vhostuser_socket":
+                            "/var/run/openvswitch/PORT_NOSUBS"
+                    }}],
+                    "allowed_network_types": [
+                    "local", "vlan", "vxlan", "gre"],
+                    "bridge_mappings": {"physnet1": "br-ex"}}"""
+    }
+
+    # Test data for vanilla OVS
+    sample_hconfig_dbget_ovs = {"configurations": {"supported_vnic_types": [
+        {"vnic_type": "normal", "vif_type": portbindings.VIF_TYPE_OVS,
+         "vif_details": {
+             "some_test_details": None
+         }}],
+        "allowed_network_types": ["local", "vlan", "vxlan", "gre"],
+        "bridge_mappings": {"physnet1": "br-ex"}}}
+
+    # Test data for OVS-DPDK
+    sample_hconfig_dbget_ovs_dpdk = {"configurations": {
+        "supported_vnic_types": [{
+            "vnic_type": "normal",
+            "vif_type": portbindings.VIF_TYPE_VHOST_USER,
+            "vif_details": {
+                "uuid": "TEST_UUID",
+                "has_datapath_type_netdev": True,
+                "support_vhost_user": True,
+                "port_prefix": "vhu_",
+                # Assumption: /var/run mounted as tmpfs
+                "vhostuser_socket_dir": "/var/run/openvswitch",
+                "vhostuser_ovs_plug": True,
+                "vhostuser_mode": "client",
+                "vhostuser_socket": "/var/run/openvswitch/vhu_$PORT_ID"}}],
+        "allowed_network_types": ["local", "vlan", "vxlan", "gre"],
+        "bridge_mappings": {"physnet1": "br-ex"}}}
+
+    # Test data for VPP
+    sample_hconfig_dbget_vpp = {"configurations": {"supported_vnic_types": [
+        {"vnic_type": "normal", "vif_type": portbindings.VIF_TYPE_VHOST_USER,
+         "vif_details": {
+             "uuid": "TEST_UUID",
+             "has_datapath_type_netdev": True,
+             "support_vhost_user": True,
+             "port_prefix": "socket_",
+             "vhostuser_socket_dir": "/tmp",
+             "vhostuser_ovs_plug": True,
+             "vhostuser_mode": "server",
+             "vhostuser_socket": "/tmp/socket_$PORT_ID"
+         }}],
+        "allowed_network_types": ["local", "vlan", "vxlan", "gre"],
+        "bridge_mappings": {"physnet1": "br-ex"}}}
+
+    # Test data for valid and invalid segments
+    test_valid_segment = {
+        api.ID: 'API_ID',
+        api.NETWORK_TYPE: constants.TYPE_LOCAL,
+        api.SEGMENTATION_ID: 'API_SEGMENTATION_ID',
+        api.PHYSICAL_NETWORK: 'API_PHYSICAL_NETWORK'}
+
+    test_invalid_segment = {
+        api.ID: 'API_ID',
+        api.NETWORK_TYPE: constants.TYPE_NONE,
+        api.SEGMENTATION_ID: 'API_SEGMENTATION_ID',
+        api.PHYSICAL_NETWORK: 'API_PHYSICAL_NETWORK'}
+
+    def setUp(self):
+        """Setup test."""
+        super(TestPseudoAgentDBBindingController, self).setUp()
+
+        config.cfg.CONF.set_override('url',
+                                     'http://localhost:8080'
+                                     '/controller/nb/v2/neutron', 'ml2_odl')
+
+        fake_agents_db = mock.MagicMock()
+        fake_agents_db.create_or_update_agent = mock.MagicMock()
+
+        self.mgr = pseudo_agentdb_binding.PseudoAgentDBBindingController(
+            db_plugin=fake_agents_db)
+
+    def test_make_hostconf_uri(self):
+        """test make uri."""
+        test_path = '/restconf/neutron:neutron/hostconfigs'
+        expected = "http://localhost:8080/restconf/neutron:neutron/hostconfigs"
+        test_uri = self.mgr._make_hostconf_uri(path=test_path)
+
+        self.assertEqual(expected, test_uri)
+
+    def test_update_agents_db(self):
+        """test agent update."""
+        self.mgr._update_agents_db(
+            hostconfigs=self.sample_odl_hconfigs['hostconfigs']['hostconfig'])
+        self.mgr.agents_db.create_or_update_agent.assert_called_once()
+
+    def test_is_valid_segment(self):
+        """Validate the _check_segment method."""
+        all_network_types = [constants.TYPE_FLAT, constants.TYPE_GRE,
+                             constants.TYPE_LOCAL, constants.TYPE_VXLAN,
+                             constants.TYPE_VLAN, constants.TYPE_NONE]
+
+        valid_types = {
+            network_type
+            for network_type in all_network_types
+            if self.mgr._is_valid_segment({api.NETWORK_TYPE: network_type}, {
+                'allowed_network_types': [
+                    constants.TYPE_LOCAL, constants.TYPE_GRE,
+                    constants.TYPE_VXLAN, constants.TYPE_VLAN]})}
+
+        self.assertEqual({
+            constants.TYPE_LOCAL, constants.TYPE_GRE, constants.TYPE_VXLAN,
+            constants.TYPE_VLAN}, valid_types)
+
+    def test_bind_port_with_vif_type_ovs(self):
+        """test bind_port with vanilla ovs."""
+        port_context = self._fake_port_context(
+            fake_segments=[self.test_invalid_segment, self.test_valid_segment])
+
+        vif_type = portbindings.VIF_TYPE_OVS
+        vif_details = {'some_test_details': None}
+
+        self.mgr._hconfig_bind_port(
+            port_context, self.sample_hconfig_dbget_ovs)
+
+        port_context.set_binding.assert_called_once_with(
+            self.test_valid_segment[api.ID], vif_type,
+            vif_details, status=n_const.PORT_STATUS_ACTIVE)
+
+    def _set_pass_vif_details(self, port_context, vif_details):
+        """extract vif_details and update vif_details if needed."""
+        vhostuser_socket_dir = vif_details.get(
+            'vhostuser_socket_dir', '/var/run/openvswitch')
+        port_spec = vif_details.get(
+            'port_prefix', 'vhu_') + port_context.current['id']
+        socket_path = os_path.join(vhostuser_socket_dir, port_spec)
+        vif_details.update({portbindings.VHOST_USER_SOCKET: socket_path})
+
+        return vif_details
+
+    def test_bind_port_with_vif_type_vhost_user(self):
+        """test bind_port with ovs-dpdk."""
+        port_context = self._fake_port_context(
+            fake_segments=[self.test_invalid_segment, self.test_valid_segment],
+            host_agents=[deepcopy(self.sample_hconf_str_tmpl_subs_ovs)])
+
+        self.mgr.bind_port(port_context)
+
+        pass_vif_type = portbindings.VIF_TYPE_VHOST_USER
+        pass_vif_details = self.sample_hconfig_dbget_ovs_dpdk[
+            'configurations']['supported_vnic_types'][0]['vif_details']
+        self._set_pass_vif_details(port_context, pass_vif_details)
+
+        port_context.set_binding.assert_called_once_with(
+            self.test_valid_segment[api.ID], pass_vif_type,
+            pass_vif_details, status=n_const.PORT_STATUS_ACTIVE)
+
+    def test_bind_port_with_vif_type_vhost_user_vpp(self):
+        """test bind_port with vpp."""
+        port_context = self._fake_port_context(
+            fake_segments=[self.test_invalid_segment, self.test_valid_segment],
+            host_agents=[deepcopy(self.sample_hconf_str_tmpl_subs_vpp)])
+
+        self.mgr.bind_port(port_context)
+
+        pass_vif_type = portbindings.VIF_TYPE_VHOST_USER
+        pass_vif_details = self.sample_hconfig_dbget_vpp['configurations'][
+            'supported_vnic_types'][0]['vif_details']
+        self._set_pass_vif_details(port_context, pass_vif_details)
+
+        port_context.set_binding.assert_called_once_with(
+            self.test_valid_segment[api.ID], pass_vif_type,
+            pass_vif_details, status=n_const.PORT_STATUS_ACTIVE)
+
+    def test_bind_port_without_valid_segment(self):
+        """test bind_port without a valid segment."""
+        port_context = self._fake_port_context(
+            fake_segments=[self.test_invalid_segment])
+
+        self.mgr._hconfig_bind_port(
+            port_context, self.sample_hconfig_dbget_ovs)
+
+        port_context.set_binding.assert_not_called()
+
+    def test_no_str_template_substitution_in_configuration_string(self):
+        """Test for no identifier substitution in config JSON string."""
+        port_context = self._fake_port_context(
+            fake_segments=[self.test_invalid_segment, self.test_valid_segment])
+
+        hconf_dict = self.mgr._substitute_hconfig_tmpl(
+            port_context, self.sample_hconf_str_tmpl_nosubs)
+
+        test_string = hconf_dict['configurations'][
+            'supported_vnic_types'][0][
+                'vif_details'][portbindings.VHOST_USER_SOCKET]
+
+        expected_str = '/var/run/openvswitch/PORT_NOSUBS'
+
+        self.assertEqual(expected_str, test_string)
+
+    def test_str_template_substitution_in_configuration_string(self):
+        """Test for identifier substitution in config JSON string."""
+        port_context = self._fake_port_context(
+            fake_segments=[self.test_invalid_segment, self.test_valid_segment])
+
+        hconf_dict = self.mgr._substitute_hconfig_tmpl(
+            port_context, self.sample_hconf_str_tmpl_subs_vpp)
+
+        test_string = hconf_dict['configurations'][
+            'supported_vnic_types'][0][
+                'vif_details'][portbindings.VHOST_USER_SOCKET]
+
+        expected_str = Template('/tmp/socket_$PORT_ID')
+        expected_str = expected_str.safe_substitute({
+            'PORT_ID': port_context.current['id']})
+
+        self.assertEqual(expected_str, test_string)
+
+    def _fake_port_context(self, fake_segments, host_agents=None):
+        network = mock.MagicMock(spec=api.NetworkContext)
+        return mock.MagicMock(
+            spec=ctx.PortContext,
+            current={'id': 'CONTEXT_ID',
+                     portbindings.VNIC_TYPE: portbindings.VNIC_NORMAL},
+            segments_to_bind=fake_segments, network=network,
+            host_agents=lambda agent_type: host_agents)
diff --git a/networking-odl/networking_odl/tests/unit/ml2/vhostuser_topology.json b/networking-odl/networking_odl/tests/unit/ml2/vhostuser_topology.json
new file mode 100644 (file)
index 0000000..5d6b994
--- /dev/null
@@ -0,0 +1,182 @@
+{
+    "network-topology": {
+        "topology": [
+            {
+                "topology-id": "flow:1"
+            },
+            {
+                "node": [
+                    {
+                        "node-id": "ovsdb://uuid/c805d82d-a5d8-419d-bc89-6e3713ff9f6c/bridge/br-int",
+                        "ovsdb:bridge-external-ids": [
+                            {
+                                "bridge-external-id-key": "opendaylight-iid",
+                                "bridge-external-id-value": "/network-topology:network-topology/network-topology:topology[network-topology:topology-id='ovsdb:1']/network-topology:node[network-topology:node-id='ovsdb://uuid/c805d82d-a5d8-419d-bc89-6e3713ff9f6c/bridge/br-int']"
+                            }
+                        ],
+                        "ovsdb:bridge-name": "br-int",
+                        "ovsdb:bridge-uuid": "e92ec02d-dba8-46d8-8047-680cab5ee8b0",
+                        "ovsdb:controller-entry": [
+                            {
+                                "controller-uuid": "8521e6df-54bd-48ac-a249-3bb810fd812c",
+                                "is-connected": false,
+                                "target": "tcp:192.168.66.1:6653"
+                            }
+                        ],
+                        "ovsdb:datapath-type": "ovsdb:datapath-type-netdev",
+                        "ovsdb:fail-mode": "ovsdb:ovsdb-fail-mode-secure",
+                        "ovsdb:managed-by": "/network-topology:network-topology/network-topology:topology[network-topology:topology-id='ovsdb:1']/network-topology:node[network-topology:node-id='ovsdb://uuid/c805d82d-a5d8-419d-bc89-6e3713ff9f6c']",
+                        "ovsdb:protocol-entry": [
+                            {
+                                "protocol": "ovsdb:ovsdb-bridge-protocol-openflow-13"
+                            }
+                        ],
+                        "termination-point": [
+                            {
+                                "ovsdb:interface-type": "ovsdb:interface-type-internal",
+                                "ovsdb:interface-uuid": "d21472db-5c3c-4b38-bf18-6ed3a32edff1",
+                                "ovsdb:name": "br-int",
+                                "ovsdb:port-uuid": "30adf59e-ff0d-478f-b37a-e37ea20dddd3",
+                                "tp-id": "br-int"
+                            }
+                        ]
+                    },
+                    {
+                        "node-id": "ovsdb://uuid/c805d82d-a5d8-419d-bc89-6e3713ff9f6c/bridge/br-nian1_1",
+                        "ovsdb:bridge-name": "br-nian1_1",
+                        "ovsdb:bridge-uuid": "243e01cb-e413-4615-a044-b254141e407d",
+                        "ovsdb:datapath-id": "00:00:ca:01:3e:24:15:46",
+                        "ovsdb:datapath-type": "ovsdb:datapath-type-netdev",
+                        "ovsdb:managed-by": "/network-topology:network-topology/network-topology:topology[network-topology:topology-id='ovsdb:1']/network-topology:node[network-topology:node-id='ovsdb://uuid/c805d82d-a5d8-419d-bc89-6e3713ff9f6c']",
+                        "termination-point": [
+                            {
+                                "ovsdb:interface-type": "ovsdb:interface-type-internal",
+                                "ovsdb:interface-uuid": "45184fd2-31eb-4c87-a071-2d64a0893662",
+                                "ovsdb:name": "br-nian1_1",
+                                "ovsdb:ofport": 65534,
+                                "ovsdb:port-uuid": "f5952c1b-6b6d-4fd2-b2cd-201b8c9e0779",
+                                "tp-id": "br-nian1_1"
+                            }
+                        ]
+                    },
+                    {
+                        "node-id": "ovsdb://uuid/c805d82d-a5d8-419d-bc89-6e3713ff9f6c/bridge/br-ex",
+                        "ovsdb:bridge-external-ids": [
+                            {
+                                "bridge-external-id-key": "bridge-id",
+                                "bridge-external-id-value": "br-ex"
+                            }
+                        ],
+                        "ovsdb:bridge-name": "br-ex",
+                        "ovsdb:bridge-other-configs": [
+                            {
+                                "bridge-other-config-key": "disable-in-band",
+                                "bridge-other-config-value": "true"
+                            }
+                        ],
+                        "ovsdb:bridge-uuid": "43f7768e-c2f9-4ae7-8099-8aee5a17add7",
+                        "ovsdb:datapath-id": "00:00:8e:76:f7:43:e7:4a",
+                        "ovsdb:datapath-type": "ovsdb:datapath-type-netdev",
+                        "ovsdb:managed-by": "/network-topology:network-topology/network-topology:topology[network-topology:topology-id='ovsdb:1']/network-topology:node[network-topology:node-id='ovsdb://uuid/c805d82d-a5d8-419d-bc89-6e3713ff9f6c']",
+                        "termination-point": [
+                            {
+                                "ovsdb:interface-type": "ovsdb:interface-type-internal",
+                                "ovsdb:interface-uuid": "bdec1830-e6a5-4476-adff-569c455adb33",
+                                "ovsdb:name": "br-ex",
+                                "ovsdb:ofport": 65534,
+                                "ovsdb:port-uuid": "7ba5939b-ff13-409d-86de-67556021ddff",
+                                "tp-id": "br-ex"
+                            }
+                        ]
+                    },
+                    {
+                        "node-id": "ovsdb://uuid/c805d82d-a5d8-419d-bc89-6e3713ff9f6c",
+                        "ovsdb:connection-info": {
+                            "local-ip": "192.168.66.1",
+                            "local-port": 6640,
+                            "remote-ip": "192.168.66.1",
+                            "remote-port": 41817
+                        },
+                        "ovsdb:datapath-type-entry": [
+                            {
+                                "datapath-type": "ovsdb:datapath-type-netdev"
+                            },
+                            {
+                                "datapath-type": "ovsdb:datapath-type-system"
+                            }
+                        ],
+                        "ovsdb:interface-type-entry": [
+                            {
+                                "interface-type": "ovsdb:interface-type-ipsec-gre"
+                            },
+                            {
+                                "interface-type": "ovsdb:interface-type-gre"
+                            },
+                            {
+                                "interface-type": "ovsdb:interface-type-gre64"
+                            },
+                            {
+                                "interface-type": "ovsdb:interface-type-dpdkr"
+                            },
+                            {
+                                "interface-type": "ovsdb:interface-type-vxlan"
+                            },
+                            {
+                                "interface-type": "ovsdb:interface-type-dpdkvhostuser"
+                            },
+                            {
+                                "interface-type": "ovsdb:interface-type-tap"
+                            },
+                            {
+                                "interface-type": "ovsdb:interface-type-geneve"
+                            },
+                            {
+                                "interface-type": "ovsdb:interface-type-dpdk"
+                            },
+                            {
+                                "interface-type": "ovsdb:interface-type-internal"
+                            },
+                            {
+                                "interface-type": "ovsdb:interface-type-system"
+                            },
+                            {
+                                "interface-type": "ovsdb:interface-type-lisp"
+                            },
+                            {
+                                "interface-type": "ovsdb:interface-type-patch"
+                            },
+                            {
+                                "interface-type": "ovsdb:interface-type-ipsec-gre64"
+                            },
+                            {
+                                "interface-type": "ovsdb:interface-type-stt"
+                            }
+                        ],
+                        "ovsdb:managed-node-entry": [
+                            {
+                                "bridge-ref": "/network-topology:network-topology/network-topology:topology[network-topology:topology-id='ovsdb:1']/network-topology:node[network-topology:node-id='ovsdb://uuid/c805d82d-a5d8-419d-bc89-6e3713ff9f6c/bridge/br-ex']"
+                            },
+                            {
+                                "bridge-ref": "/network-topology:network-topology/network-topology:topology[network-topology:topology-id='ovsdb:1']/network-topology:node[network-topology:node-id='ovsdb://uuid/c805d82d-a5d8-419d-bc89-6e3713ff9f6c/bridge/br-int']"
+                            },
+                            {
+                                "bridge-ref": "/network-topology:network-topology/network-topology:topology[network-topology:topology-id='ovsdb:1']/network-topology:node[network-topology:node-id='ovsdb://uuid/c805d82d-a5d8-419d-bc89-6e3713ff9f6c/bridge/br-nian1_1']"
+                            }
+                        ],
+                        "ovsdb:openvswitch-other-configs": [
+                            {
+                                "other-config-key": "local_ip",
+                                "other-config-value": "192.168.66.1"
+                            },
+                            {
+                                "other-config-key": "pmd-cpu-mask",
+                                "other-config-value": "400004"
+                            }
+                        ]
+                    }
+                ],
+                "topology-id": "ovsdb:1"
+            }
+        ]
+    }
+}
diff --git a/networking-odl/rally-jobs/README.rst b/networking-odl/rally-jobs/README.rst
new file mode 100644 (file)
index 0000000..4b345ed
--- /dev/null
@@ -0,0 +1,31 @@
+Rally job related files
+=======================
+
+This directory contains rally tasks and plugins that are run by OpenStack CI.
+
+Structure
+---------
+
+* plugins - directory where you can add rally plugins. Almost everything in
+  Rally is a plugin. Benchmark context, Benchmark scenario, SLA checks, Generic
+  cleanup resources, ....
+
+* extra - all files from this directory will be copy pasted to gates, so you
+  are able to use absolute paths in rally tasks.
+  Files will be located in ~/.rally/extra/*
+
+* odl.yaml is a task that is run in gates against OpenStack with
+  Neutron service configured with ODL plugin
+
+Useful links
+------------
+
+* More about Rally: https://rally.readthedocs.org/en/latest/
+
+* Rally release notes: https://rally.readthedocs.org/en/latest/release_notes.html
+
+* How to add rally-gates: https://rally.readthedocs.org/en/latest/gates.html
+
+* About plugins:  https://rally.readthedocs.org/en/latest/plugins.html
+
+* Plugin samples: https://github.com/openstack/rally/tree/master/samples/plugins
diff --git a/networking-odl/rally-jobs/extra/README.rst b/networking-odl/rally-jobs/extra/README.rst
new file mode 100644 (file)
index 0000000..aab343c
--- /dev/null
@@ -0,0 +1,6 @@
+Extra files
+===========
+
+All files from this directory will be copy pasted to gates, so you are able to
+use absolute path in rally tasks. Files will be in ~/.rally/extra/*
+
diff --git a/networking-odl/rally-jobs/odl.yaml b/networking-odl/rally-jobs/odl.yaml
new file mode 100644 (file)
index 0000000..86852e1
--- /dev/null
@@ -0,0 +1,296 @@
+---
+  NeutronNetworks.create_and_list_networks:
+    -
+      runner:
+        type: "constant"
+        times: 40
+        concurrency: 20
+      context:
+        users:
+          tenants: 1
+          users_per_tenant: 1
+        quotas:
+          neutron:
+             network: -1
+      sla:
+        failure_rate:
+          max: 0
+
+  NeutronNetworks.create_and_list_subnets:
+    -
+      args:
+        subnets_per_network: 2
+      runner:
+        type: "constant"
+        times: 40
+        concurrency: 20
+      context:
+        users:
+          tenants: 1
+          users_per_tenant: 1
+        quotas:
+          neutron:
+             subnet: -1
+             network: -1
+      sla:
+        failure_rate:
+          max: 0
+
+  NeutronNetworks.create_and_list_routers:
+    -
+      args:
+        network_create_args:
+        subnet_create_args:
+        subnet_cidr_start: "1.1.0.0/30"
+        subnets_per_network: 2
+        router_create_args:
+      runner:
+        type: "constant"
+        times: 40
+        concurrency: 20
+      context:
+        users:
+          tenants: 1
+          users_per_tenant: 1
+        quotas:
+          neutron:
+             network: -1
+             subnet: -1
+             router: -1
+      sla:
+        failure_rate:
+          max: 0
+
+  NeutronNetworks.create_and_list_ports:
+    -
+      args:
+        network_create_args:
+        port_create_args:
+        ports_per_network: 2
+      runner:
+        type: "constant"
+        times: 40
+        concurrency: 20
+      context:
+        users:
+          tenants: 1
+          users_per_tenant: 1
+        quotas:
+          neutron:
+             network: -1
+             subnet: -1
+             router: -1
+             port: -1
+      sla:
+        failure_rate:
+          max: 0
+
+  NeutronNetworks.create_and_update_networks:
+    -
+      args:
+        network_create_args: {}
+        network_update_args:
+            admin_state_up: False
+            name: "_updated"
+      runner:
+        type: "constant"
+        times: 40
+        concurrency: 20
+      context:
+        users:
+          tenants: 1
+          users_per_tenant: 1
+        quotas:
+          neutron:
+            network: -1
+      sla:
+        failure_rate:
+          max: 0
+
+  NeutronNetworks.create_and_update_subnets:
+    -
+      args:
+        network_create_args: {}
+        subnet_create_args: {}
+        subnet_cidr_start: "1.4.0.0/16"
+        subnets_per_network: 2
+        subnet_update_args:
+            enable_dhcp: False
+            name: "_subnet_updated"
+      runner:
+        type: "constant"
+        times: 40
+        concurrency: 20
+      context:
+        users:
+          tenants: 5
+          users_per_tenant: 5
+        quotas:
+          neutron:
+            network: -1
+            subnet: -1
+      sla:
+        failure_rate:
+          max: 0
+
+  NeutronNetworks.create_and_update_routers:
+    -
+      args:
+        network_create_args: {}
+        subnet_create_args: {}
+        subnet_cidr_start: "1.1.0.0/30"
+        subnets_per_network: 2
+        router_create_args: {}
+        router_update_args:
+            admin_state_up: False
+            name: "_router_updated"
+      runner:
+        type: "constant"
+        times: 40
+        concurrency: 20
+      context:
+        users:
+          tenants: 1
+          users_per_tenant: 1
+        quotas:
+          neutron:
+            network: -1
+            subnet: -1
+            router: -1
+      sla:
+        failure_rate:
+          max: 0
+
+  NeutronNetworks.create_and_update_ports:
+    -
+      args:
+        network_create_args: {}
+        port_create_args: {}
+        ports_per_network: 5
+        port_update_args:
+            admin_state_up: False
+            device_id: "dummy_id"
+            device_owner: "dummy_owner"
+            name: "_port_updated"
+      runner:
+        type: "constant"
+        times: 40
+        concurrency: 20
+      context:
+        users:
+          tenants: 1
+          users_per_tenant: 1
+        quotas:
+          neutron:
+            network: -1
+            port: -1
+      sla:
+        failure_rate:
+          max: 0
+
+  NeutronNetworks.create_and_delete_networks:
+    -
+      args:
+        network_create_args: {}
+      runner:
+        type: "constant"
+        times: 40
+        concurrency: 20
+      context:
+        users:
+          tenants: 1
+          users_per_tenant: 1
+        quotas:
+          neutron:
+            network: -1
+            subnet: -1
+      sla:
+        failure_rate:
+          max: 0
+
+  NeutronNetworks.create_and_delete_subnets:
+    -
+      args:
+        network_create_args: {}
+        subnet_create_args: {}
+        subnet_cidr_start: "1.1.0.0/30"
+        subnets_per_network: 2
+      runner:
+        type: "constant"
+        times: 40
+        concurrency: 20
+      context:
+        users:
+          tenants: 1
+          users_per_tenant: 1
+        quotas:
+          neutron:
+            network: -1
+            subnet: -1
+      sla:
+        failure_rate:
+          max: 0
+
+  NeutronNetworks.create_and_delete_routers:
+    -
+      args:
+        network_create_args: {}
+        subnet_create_args: {}
+        subnet_cidr_start: "1.1.0.0/30"
+        subnets_per_network: 2
+        router_create_args: {}
+      runner:
+        type: "constant"
+        times: 40
+        concurrency: 20
+      context:
+        users:
+          tenants: 1
+          users_per_tenant: 1
+        quotas:
+          neutron:
+            network: -1
+            subnet: -1
+            router: -1
+      sla:
+          failure_rate:
+            max: 0
+
+  NeutronNetworks.create_and_delete_ports:
+    -
+      args:
+        network_create_args: {}
+        port_create_args: {}
+        ports_per_network: 5
+      runner:
+        type: "constant"
+        times: 40
+        concurrency: 20
+      context:
+        users:
+          tenants: 1
+          users_per_tenant: 1
+        quotas:
+          neutron:
+            network: -1
+            port: -1
+      sla:
+        failure_rate:
+          max: 0
+
+  Quotas.neutron_update:
+    -
+      args:
+        max_quota: 1024
+      runner:
+        type: "constant"
+        times: 40
+        concurrency: 20
+      context:
+        users:
+          tenants: 20
+          users_per_tenant: 1
+      sla:
+        failure_rate:
+          max: 0
+
diff --git a/networking-odl/rally-jobs/plugins/README.rst b/networking-odl/rally-jobs/plugins/README.rst
new file mode 100644 (file)
index 0000000..68ad548
--- /dev/null
@@ -0,0 +1,9 @@
+Rally plugins
+=============
+
+All \*.py modules from this directory will be auto-loaded by Rally and all
+plugins will be discoverable. There is no need for any extra configuration
+and there is no difference between writing them here and in rally code base.
+
+Note that it is better to push all interesting and useful benchmarks to Rally
+code base, as this simplifies administration for operators.
diff --git a/networking-odl/rally-jobs/plugins/__init__.py b/networking-odl/rally-jobs/plugins/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/networking-odl/releasenotes/notes/.placeholder b/networking-odl/releasenotes/notes/.placeholder
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/networking-odl/releasenotes/source/_static/.placeholder b/networking-odl/releasenotes/source/_static/.placeholder
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/networking-odl/releasenotes/source/_templates/.placeholder b/networking-odl/releasenotes/source/_templates/.placeholder
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/networking-odl/releasenotes/source/conf.py b/networking-odl/releasenotes/source/conf.py
new file mode 100644 (file)
index 0000000..64166dc
--- /dev/null
@@ -0,0 +1,264 @@
+# -*- coding: utf-8 -*-
+#
+# Networking OpenDaylight Release Notes documentation build configuration file, created by
+# sphinx-quickstart on Fri Jul 22 14:54:21 2016.
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys
+import os
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#sys.path.insert(0, os.path.abspath('.'))
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+    'oslosphinx',
+    'reno.sphinxext',
+]
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'Networking OpenDaylight Release Notes'
+copyright = u'2016, networking-odl developers'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+import pbr.version
+version_info = pbr.version.VersionInfo('networking-odl')
+# The full version, including alpha/beta/rc tags.
+release = version_info.canonical_version_string()
+# The short X.Y version.
+version = version_info.version_string_with_vcs()
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = []
+
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+# If true, keep warnings as "system message" paragraphs in the built documents.
+#keep_warnings = False
+
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages.  See the documentation for
+# a list of builtin themes.
+html_theme = 'default'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further.  For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents.  If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar.  Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# Add any extra paths that contain custom files (such as robots.txt or
+# .htaccess) here, relative to this directory. These files are copied
+# directly to the root of the documentation.
+#html_extra_path = []
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_domain_indices = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it.  The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = None
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'NetworkingOpenDaylightReleaseNotesdoc'
+
+
+# -- Options for LaTeX output ---------------------------------------------
+
+latex_elements = {
+# The paper size ('letterpaper' or 'a4paper').
+#'papersize': 'letterpaper',
+
+# The font size ('10pt', '11pt' or '12pt').
+#'pointsize': '10pt',
+
+# Additional stuff for the LaTeX preamble.
+#'preamble': '',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+#  author, documentclass [howto, manual, or own class]).
+latex_documents = [
+  ('index', 'NetworkingOpenDaylightReleaseNotes.tex', u'Networking OpenDaylight Release Notes Documentation',
+   u'networking-odl developers', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# If true, show page references after internal links.
+#latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_domain_indices = True
+
+
+# -- Options for manual page output ---------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+    ('index', 'networkingopendaylightreleasenotes', u'Networking OpenDaylight Release Notes Documentation',
+     [u'networking-odl developers'], 1)
+]
+
+# If true, show URL addresses after external links.
+#man_show_urls = False
+
+
+# -- Options for Texinfo output -------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+#  dir menu entry, description, category)
+texinfo_documents = [
+  ('index', 'NetworkingOpenDaylightReleaseNotes', u'Networking OpenDaylight Release Notes Documentation',
+   u'networking-odl developers', 'NetworkingOpenDaylightReleaseNotes', 'One line description of project.',
+   'Miscellaneous'),
+]
+
+# Documents to append as an appendix to all manuals.
+#texinfo_appendices = []
+
+# If false, no module index is generated.
+#texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+#texinfo_show_urls = 'footnote'
+
+# If true, do not generate a @detailmenu in the "Top" node's menu.
+#texinfo_no_detailmenu = False
diff --git a/networking-odl/releasenotes/source/index.rst b/networking-odl/releasenotes/source/index.rst
new file mode 100644 (file)
index 0000000..3cd7a99
--- /dev/null
@@ -0,0 +1,14 @@
+.. Networking OpenDaylight Release Notes documentation master file, created by
+   sphinx-quickstart on Fri Jul 22 14:54:21 2016.
+   You can adapt this file completely to your liking, but it should at least
+   contain the root `toctree` directive.
+
+Welcome to Networking OpenDaylight Release Notes's documentation!
+=================================================================
+
+Contents:
+
+.. toctree::
+   :maxdepth: 2
+
+   unreleased
diff --git a/networking-odl/releasenotes/source/unreleased.rst b/networking-odl/releasenotes/source/unreleased.rst
new file mode 100644 (file)
index 0000000..875030f
--- /dev/null
@@ -0,0 +1,5 @@
+============================
+Current Series Release Notes
+============================
+
+.. release-notes::
diff --git a/networking-odl/requirements.txt b/networking-odl/requirements.txt
new file mode 100644 (file)
index 0000000..7afa0be
--- /dev/null
@@ -0,0 +1,8 @@
+# The order of packages is significant, because pip processes them in the order
+# of appearance. Changing the order has an impact on the overall integration
+# process, which may cause wedges in the gate later.
+
+pbr>=1.6 # Apache-2.0
+Babel>=2.3.4 # BSD
+stevedore>=1.16.0 # Apache-2.0
+neutron-lib>=0.3.0 # Apache-2.0
diff --git a/networking-odl/setup.cfg b/networking-odl/setup.cfg
new file mode 100644 (file)
index 0000000..4121690
--- /dev/null
@@ -0,0 +1,100 @@
+[metadata]
+name = networking-odl
+summary = OpenStack Networking
+description-file =
+    README.rst
+author = OpenStack
+author-email = openstack-dev@lists.openstack.org
+home-page = http://www.openstack.org/
+classifier =
+    Environment :: OpenStack
+    Intended Audience :: Information Technology
+    Intended Audience :: System Administrators
+    License :: OSI Approved :: Apache Software License
+    Operating System :: POSIX :: Linux
+    Programming Language :: Python
+    Programming Language :: Python :: 2
+    Programming Language :: Python :: 2.7
+    Programming Language :: Python :: 3
+    Programming Language :: Python :: 3.4
+    Programming Language :: Python :: 3.5
+
+[files]
+packages =
+    networking_odl
+data_files =
+    etc/neutron =
+        etc/neutron/plugins/ml2/ml2_conf_odl.ini
+
+[global]
+setup-hooks =
+    pbr.hooks.setup_hook
+
+[entry_points]
+# NOTE(asomya): The V2 ML2 driver and service_plugin are experimental and only
+# for testing and evaluation purposes. Once the driver/service_plugin has been
+# proven to be reliable, the current driver/service_plugin will be replaced by
+# the V2 versions. Please take care to only specify a single version of the ML2
+# driver and service_plugin in the configuration files. Mixing and matching
+# different versions of the ML2 drivers/service_plugins will cause a lot of
+# issues in your environment.
+#
+# Allowed configuration settings:
+#
+# [ml2]
+# mechanism_drivers = opendaylight
+# [DEFAULT]
+# service_plugins = odl-router
+#
+# OR
+#
+# [ml2]
+# mechanism_drivers = opendaylight_v2
+# [DEFAULT]
+# service_plugins = odl-router_v2
+console_scripts =
+    neutron-odl-ovs-hostconfig = networking_odl.cmd.set_ovs_hostconfigs:main
+neutron.ml2.mechanism_drivers =
+    opendaylight = networking_odl.ml2.mech_driver:OpenDaylightMechanismDriver
+    opendaylight_v2 = networking_odl.ml2.mech_driver_v2:OpenDaylightMechanismDriver
+neutron.service_plugins =
+    odl-router = networking_odl.l3.l3_odl:OpenDaylightL3RouterPlugin
+    odl-router_v2 = networking_odl.l3.l3_odl_v2:OpenDaylightL3RouterPlugin
+neutron.db.alembic_migrations =
+    networking-odl = networking_odl.db.migration:alembic_migrations
+networking_odl.ml2.port_binding_controllers =
+    network-topology = networking_odl.ml2.network_topology:NetworkTopologyManager
+    legacy-port-binding = networking_odl.ml2.legacy_port_binding:LegacyPortBindingManager
+    pseudo-agentdb-binding = networking_odl.ml2.pseudo_agentdb_binding:PseudoAgentDBBindingController
+oslo.config.opts =
+    ml2_odl = networking_odl.common.config:list_opts
+
+[build_sphinx]
+all_files = 1
+build-dir = doc/build
+source-dir = doc/source
+
+[upload_sphinx]
+upload-dir = doc/build/html
+
+[build_releasenotes]
+build-dir = releasenotes/build
+source-dir = releasenotes/source
+all_files = 1
+
+[extract_messages]
+keywords = _ gettext ngettext l_ lazy_gettext
+mapping_file = babel.cfg
+output_file = networking_odl/locale/networking-odl.pot
+
+[compile_catalog]
+directory = networking_odl/locale
+domain = networking-odl
+
+[update_catalog]
+domain = networking-odl
+output_dir = networking_odl/locale
+input_file = networking_odl/locale/networking-odl.pot
+
+[wheel]
+universal = 1
diff --git a/networking-odl/setup.py b/networking-odl/setup.py
new file mode 100644 (file)
index 0000000..782bb21
--- /dev/null
@@ -0,0 +1,29 @@
+# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
+import setuptools
+
+# In python < 2.7.4, a lazy loading of package `pbr` will break
+# setuptools if some other modules registered functions in `atexit`.
+# solution from: http://bugs.python.org/issue15881#msg170215
+try:
+    import multiprocessing  # noqa
+except ImportError:
+    pass
+
+setuptools.setup(
+    setup_requires=['pbr>=1.8'],
+    pbr=True)
diff --git a/networking-odl/test-requirements.txt b/networking-odl/test-requirements.txt
new file mode 100644 (file)
index 0000000..7b35758
--- /dev/null
@@ -0,0 +1,20 @@
+# The order of packages is significant, because pip processes them in the order
+# of appearance. Changing the order has an impact on the overall integration
+# process, which may cause wedges in the gate later.
+
+hacking<0.11,>=0.10.0
+
+coverage>=3.6 # Apache-2.0
+doc8 # Apache-2.0
+python-subunit>=0.0.18 # Apache-2.0/BSD
+sphinx!=1.3b1,<1.3,>=1.2.1 # BSD
+oslosphinx!=3.4.0,>=2.5.0 # Apache-2.0
+oslotest>=1.10.0 # Apache-2.0
+testrepository>=0.0.18 # Apache-2.0/BSD
+testresources>=0.2.4 # Apache-2.0/BSD
+testscenarios>=0.4 # Apache-2.0/BSD
+WebTest>=2.0 # MIT
+testtools>=1.4.0 # MIT
+
+# releasenotes
+reno>=1.8.0 # Apache2
diff --git a/networking-odl/tools/check_bash.sh b/networking-odl/tools/check_bash.sh
new file mode 100644 (file)
index 0000000..e9d178e
--- /dev/null
@@ -0,0 +1,31 @@
+#! /bin/sh
+
+# Copyright (C) 2014 VA Linux Systems Japan K.K.
+# Copyright (C) 2014 YAMAMOTO Takashi <yamamoto at valinux co jp>
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# The purpose of this script is to avoid casual introduction of more
+# bash dependency.  Please consider alternatives before committing code
+# which uses bash specific features.
+
+# Ignore comments, but include shebangs
+OBSERVED=$(grep -E '^([^#]|#!).*bash' tox.ini tools/* | wc -l)
+EXPECTED=5
+if [ ${EXPECTED} -ne ${OBSERVED} ]; then
+    echo Unexpected number of bash usages are detected.
+    echo Please read the comment in $0
+    exit 1
+fi
+exit 0
diff --git a/networking-odl/tools/check_i18n.py b/networking-odl/tools/check_i18n.py
new file mode 100644 (file)
index 0000000..697ad18
--- /dev/null
@@ -0,0 +1,153 @@
+#    Copyright 2012 OpenStack Foundation
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+from __future__ import print_function
+
+import compiler
+import imp
+import os.path
+import sys
+
+
+def is_localized(node):
+    """Check message wrapped by _()"""
+    if isinstance(node.parent, compiler.ast.CallFunc):
+        if isinstance(node.parent.node, compiler.ast.Name):
+            if node.parent.node.name == '_':
+                return True
+    return False
+
+
+class ASTWalker(compiler.visitor.ASTVisitor):
+
+    def default(self, node, *args):
+        for child in node.getChildNodes():
+            child.parent = node
+        compiler.visitor.ASTVisitor.default(self, node, *args)
+
+
+class Visitor(object):
+
+    def __init__(self, filename, i18n_msg_predicates,
+                 msg_format_checkers, debug):
+        self.filename = filename
+        self.debug = debug
+        self.error = 0
+        self.i18n_msg_predicates = i18n_msg_predicates
+        self.msg_format_checkers = msg_format_checkers
+        with open(filename) as f:
+            self.lines = f.readlines()
+
+    def visitConst(self, node):
+        if not isinstance(node.value, str):
+            return
+
+        if is_localized(node):
+            for (checker, msg) in self.msg_format_checkers:
+                if checker(node):
+                    print('%s:%d %s: %s Error: %s' %
+                          (self.filename, node.lineno,
+                           self.lines[node.lineno - 1][:-1],
+                           checker.__name__, msg),
+                           file=sys.stderr)
+                    self.error = 1
+                    return
+            if debug:
+                print('%s:%d %s: %s' %
+                      (self.filename, node.lineno,
+                       self.lines[node.lineno - 1][:-1],
+                       "Pass"))
+        else:
+            for (predicate, action, msg) in self.i18n_msg_predicates:
+                if predicate(node):
+                    if action == 'skip':
+                        if debug:
+                            print('%s:%d %s: %s' %
+                                  (self.filename, node.lineno,
+                                  self.lines[node.lineno - 1][:-1],
+                                  "Pass"))
+                        return
+                    elif action == 'error':
+                        print('%s:%d %s: %s Error: %s' %
+                              (self.filename, node.lineno,
+                               self.lines[node.lineno - 1][:-1],
+                               predicate.__name__, msg),
+                               file=sys.stderr)
+                        self.error = 1
+                        return
+                    elif action == 'warn':
+                        print('%s:%d %s: %s' %
+                              (self.filename, node.lineno,
+                              self.lines[node.lineno - 1][:-1],
+                              "Warn: %s" % msg))
+                        return
+                    print('Predicate with wrong action!', file=sys.stderr)
+
+
+def is_file_in_black_list(black_list, f):
+    for f in black_list:
+        if os.path.abspath(input_file).startswith(
+            os.path.abspath(f)):
+            return True
+    return False
+
+
+def check_i18n(input_file, i18n_msg_predicates, msg_format_checkers, debug):
+    input_mod = compiler.parseFile(input_file)
+    v = compiler.visitor.walk(input_mod,
+                              Visitor(input_file,
+                                      i18n_msg_predicates,
+                                      msg_format_checkers,
+                                      debug),
+                              ASTWalker())
+    return v.error
+
+
+if __name__ == '__main__':
+    input_path = sys.argv[1]
+    cfg_path = sys.argv[2]
+    try:
+        cfg_mod = imp.load_source('', cfg_path)
+    except Exception:
+        print("Load cfg module failed", file=sys.stderr)
+        sys.exit(1)
+
+    i18n_msg_predicates = cfg_mod.i18n_msg_predicates
+    msg_format_checkers = cfg_mod.msg_format_checkers
+    black_list = cfg_mod.file_black_list
+
+    debug = False
+    if len(sys.argv) > 3:
+        if sys.argv[3] == '-d':
+            debug = True
+
+    if os.path.isfile(input_path):
+        sys.exit(check_i18n(input_path,
+                            i18n_msg_predicates,
+                            msg_format_checkers,
+                            debug))
+
+    error = 0
+    for dirpath, dirs, files in os.walk(input_path):
+        for f in files:
+            if not f.endswith('.py'):
+                continue
+            input_file = os.path.join(dirpath, f)
+            if is_file_in_black_list(black_list, input_file):
+                continue
+            if check_i18n(input_file,
+                          i18n_msg_predicates,
+                          msg_format_checkers,
+                          debug):
+                error = 1
+    sys.exit(error)
diff --git a/networking-odl/tools/check_i18n_test_case.txt b/networking-odl/tools/check_i18n_test_case.txt
new file mode 100644 (file)
index 0000000..3d1391d
--- /dev/null
@@ -0,0 +1,67 @@
+# test-case for check_i18n.py
+# python check_i18n.py check_i18n.txt -d
+
+# message format checking
+#  capital checking
+msg = _("hello world, error")
+msg = _("hello world_var, error")
+msg = _('file_list xyz, pass')
+msg = _("Hello world, pass")
+
+#  format specifier checking
+msg = _("Hello %s world %d, error")
+msg = _("Hello %s world, pass")
+msg = _("Hello %(var1)s world %(var2)s, pass")
+
+# message has been localized
+#  is_localized
+msg = _("Hello world, pass")
+msg = _("Hello world, pass") % var
+LOG.debug(_('Hello world, pass'))
+LOG.info(_('Hello world, pass'))
+raise x.y.Exception(_('Hello world, pass'))
+raise Exception(_('Hello world, pass'))
+
+# message need be localized
+#  is_log_callfunc
+LOG.debug('hello world, error')
+LOG.debug('hello world, error' % xyz)
+sys.append('hello world, warn')
+
+# is_log_i18n_msg_with_mod
+LOG.debug(_('Hello world, error') % xyz)
+
+# default warn
+msg = 'hello world, warn'
+msg = 'hello world, warn' % var
+
+# message needn't be localized
+#  skip only one word
+msg = ''
+msg = "hello,pass"
+
+#  skip dict
+msg = {'hello world, pass': 1}
+
+#  skip list
+msg = ["hello world, pass"]
+
+#  skip subscript
+msg['hello world, pass']
+
+#  skip xml marker
+msg = "<test><t></t></test>, pass"
+
+#  skip sql statement
+msg = "SELECT * FROM xyz WHERE hello=1, pass"
+msg = "select * from xyz, pass"
+
+#  skip add statement
+msg = 'hello world' + e + 'world hello, pass'
+
+#  skip doc string
+"""
+Hello world, pass
+"""
+class Msg:
+    pass
diff --git a/networking-odl/tools/clean.sh b/networking-odl/tools/clean.sh
new file mode 100755 (executable)
index 0000000..27bc219
--- /dev/null
@@ -0,0 +1,5 @@
+#!/bin/bash
+rm -rf ./*.deb ./*.tar.gz ./*.dsc ./*.changes
+rm -rf */*.deb
+rm -rf ./plugins/**/build/ ./plugins/**/dist
+rm -rf ./plugins/**/lib/neutron_*_plugin.egg-info ./plugins/neutron-*
diff --git a/networking-odl/tools/i18n_cfg.py b/networking-odl/tools/i18n_cfg.py
new file mode 100644 (file)
index 0000000..5ad1a51
--- /dev/null
@@ -0,0 +1,97 @@
+import compiler
+import re
+
+
+def is_log_callfunc(n):
+    """LOG.xxx('hello %s' % xyz) and LOG('hello')"""
+    if isinstance(n.parent, compiler.ast.Mod):
+        n = n.parent
+    if isinstance(n.parent, compiler.ast.CallFunc):
+        if isinstance(n.parent.node, compiler.ast.Getattr):
+            if isinstance(n.parent.node.getChildNodes()[0],
+                          compiler.ast.Name):
+                if n.parent.node.getChildNodes()[0].name == 'LOG':
+                    return True
+    return False
+
+
+def is_log_i18n_msg_with_mod(n):
+    """LOG.xxx("Hello %s" % xyz) should be LOG.xxx("Hello %s", xyz)"""
+    if not isinstance(n.parent.parent, compiler.ast.Mod):
+        return False
+    n = n.parent.parent
+    if isinstance(n.parent, compiler.ast.CallFunc):
+        if isinstance(n.parent.node, compiler.ast.Getattr):
+            if isinstance(n.parent.node.getChildNodes()[0],
+                          compiler.ast.Name):
+                if n.parent.node.getChildNodes()[0].name == 'LOG':
+                    return True
+    return False
+
+
+def is_wrong_i18n_format(n):
+    """Check _('hello %s' % xyz)"""
+    if isinstance(n.parent, compiler.ast.Mod):
+        n = n.parent
+    if isinstance(n.parent, compiler.ast.CallFunc):
+        if isinstance(n.parent.node, compiler.ast.Name):
+            if n.parent.node.name == '_':
+                return True
+    return False
+
+
+"""
+Used to check whether a message needs to be localized or not.
+(predicate_func, action, message)
+"""
+i18n_msg_predicates = [
+    # Skip ['hello world', 1]
+    (lambda n: isinstance(n.parent, compiler.ast.List), 'skip', ''),
+    # Skip {'hello world': 1}
+    (lambda n: isinstance(n.parent, compiler.ast.Dict), 'skip', ''),
+    # Skip msg['hello world']
+    (lambda n: isinstance(n.parent, compiler.ast.Subscript), 'skip', ''),
+    # Skip doc string
+    (lambda n: isinstance(n.parent, compiler.ast.Discard), 'skip', ''),
+    # Skip msg = "hello"; normally a message should contain more than one word
+    (lambda n: len(n.value.strip().split(' ')) <= 1, 'skip', ''),
+    # Skip msg = 'hello world' + vars + 'world hello'
+    (lambda n: isinstance(n.parent, compiler.ast.Add), 'skip', ''),
+    # Skip xml markers msg = "<test></test>"
+    (lambda n: len(re.compile("</.*>").findall(n.value)) > 0, 'skip', ''),
+    # Skip sql statement
+    (lambda n: len(
+        re.compile("^SELECT.*FROM", flags=re.I).findall(n.value)) > 0,
+     'skip', ''),
+    # LOG.xxx()
+    (is_log_callfunc, 'error', 'Message must be localized'),
+    # _('hello %s' % xyz) should be _('hello %s') % xyz
+    (is_wrong_i18n_format, 'error',
+     ("Message format was wrong, _('hello %s' % xyz) "
+      "should be _('hello %s') % xyz")),
+    # default
+    (lambda n: True, 'warn', 'Message might need localized')
+]
+
+
+"""
+Used for checking message format. (checker_func, message)
+"""
+msg_format_checkers = [
+    # If a message contains more than one format specifier, it should use
+    # mapping key
+    (lambda n: len(re.compile("%[bcdeEfFgGnosxX]").findall(n.value)) > 1,
+     "The message shouldn't contain more than one format specifier"),
+    # Check capital
+    (lambda n: n.value.split(' ')[0].count('_') == 0 and
+     n.value[0].isalpha() and
+     n.value[0].islower(),
+     "First letter must be capital"),
+    (is_log_i18n_msg_with_mod,
+     'LOG.xxx("Hello %s" % xyz) should be LOG.xxx("Hello %s", xyz)')
+]
+
+
+file_black_list = ["./neutron/tests/unit",
+                   "./neutron/openstack",
+                   "./neutron/plugins/bigswitch/tests"]
diff --git a/networking-odl/tools/install_venv.py b/networking-odl/tools/install_venv.py
new file mode 100644 (file)
index 0000000..f8fb8fa
--- /dev/null
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 OpenStack Foundation.
+# Copyright 2013 IBM Corp.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""
+Installation script for Neutron's development virtualenv
+"""
+from __future__ import print_function
+
+import os
+import sys
+
+import install_venv_common as install_venv
+
+
+def print_help():
+    help = """
+ Neutron development environment setup is complete.
+
+ Neutron development uses virtualenv to track and manage Python dependencies
+ while in development and testing.
+
+ To activate the Neutron virtualenv for the extent of your current shell
+ session you can run:
+
+ $ source .venv/bin/activate
+
+ Or, if you prefer, you can run commands in the virtualenv on a case by case
+ basis by running:
+
+ $ tools/with_venv.sh <your command>
+
+ Also, make test will automatically use the virtualenv.
+    """
+    print(help)
+
+
+def main(argv):
+    root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+    venv = os.path.join(root, '.venv')
+    pip_requires = os.path.join(root, 'requirements.txt')
+    test_requires = os.path.join(root, 'test-requirements.txt')
+    py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1])
+    project = 'Neutron'
+    install = install_venv.InstallVenv(root, venv, pip_requires, test_requires,
+                                       py_version, project)
+    options = install.parse_args(argv)
+    install.check_python_version()
+    install.check_dependencies()
+    install.create_virtualenv(no_site_packages=options.no_site_packages)
+    install.install_dependencies()
+    print_help()
+
+
+if __name__ == '__main__':
+    main(sys.argv)
diff --git a/networking-odl/tools/pretty_tox.sh b/networking-odl/tools/pretty_tox.sh
new file mode 100755 (executable)
index 0000000..a40f248
--- /dev/null
@@ -0,0 +1,6 @@
+#! /bin/sh
+
+TESTRARGS=$1
+
+exec 3>&1
+status=$(exec 4>&1 >&3; ( python setup.py testr --slowest --testr-args="--subunit $TESTRARGS"; echo $? >&4 ) | $(dirname $0)/subunit-trace.py -f) && exit $status
diff --git a/networking-odl/tools/subunit-trace.py b/networking-odl/tools/subunit-trace.py
new file mode 100755 (executable)
index 0000000..73f2f10
--- /dev/null
@@ -0,0 +1,307 @@
+#!/usr/bin/env python
+
+# Copyright 2014 Hewlett-Packard Development Company, L.P.
+# Copyright 2014 Samsung Electronics
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Trace a subunit stream in reasonable detail and high accuracy."""
+
+import argparse
+import functools
+import os
+import re
+import sys
+
+import mimeparse
+import subunit
+import testtools
+
# Seconds per day; used to fold timedelta.days into a total-seconds figure.
DAY_SECONDS = 60 * 60 * 24
# Every failed test dict, in stream order; consumed by print_fails().
FAILS = []
# Worker number -> list of test dicts; populated by show_outcome() and
# consumed by the summary helpers below.
RESULTS = {}
+
+
class Starts(testtools.StreamResult):
    """Stream result that echoes attachments and announces test starts."""

    def __init__(self, output):
        super(Starts, self).__init__()
        # Writable text stream the live trace is written to.
        self._output = output

    def startTestRun(self):
        # True when the last attachment write ended mid-line, so the next
        # "[start]" marker must first emit a newline.
        self._neednewline = False
        # Test ids whose start has already been announced.
        self._emitted = set()

    def status(self, test_id=None, test_status=None, test_tags=None,
               runnable=True, file_name=None, file_bytes=None, eof=False,
               mime_type=None, route_code=None, timestamp=None):
        """Handle one subunit event: echo raw attachments, announce starts."""
        super(Starts, self).status(
            test_id, test_status,
            test_tags=test_tags, runnable=runnable, file_name=file_name,
            file_bytes=file_bytes, eof=eof, mime_type=mime_type,
            route_code=route_code, timestamp=timestamp)
        if not test_id:
            if not file_bytes:
                return
            # Some producers emit a bogus "test/plain" type; normalize it.
            if not mime_type or mime_type == 'test/plain;charset=utf8':
                mime_type = 'text/plain; charset=utf-8'
            primary, sub, parameters = mimeparse.parse_mime_type(mime_type)
            content_type = testtools.content_type.ContentType(
                primary, sub, parameters)
            content = testtools.content.Content(
                content_type, lambda: [file_bytes])
            text = content.as_text()
            if text and text[-1] not in '\r\n':
                self._neednewline = True
            self._output.write(text)
        elif test_status == 'inprogress' and test_id not in self._emitted:
            if self._neednewline:
                self._neednewline = False
                self._output.write('\n')
            worker = ''
            for tag in test_tags or ():
                if tag.startswith('worker-'):
                    worker = '(' + tag[7:] + ') '
            if timestamp:
                timestr = timestamp.isoformat()
            else:
                timestr = ''
            # BUG FIX: this write was indented under the 'else' above, so the
            # start marker was only printed for events WITHOUT a timestamp,
            # defeating the purpose of this class. It must run for every
            # newly started test.
            self._output.write('%s: %s%s [start]\n' %
                               (timestr, worker, test_id))
            self._emitted.add(test_id)
+
+
def _strip_first_span(name, opener, closer):
    # Drop the first opener...closer span, but only when the opener is not
    # the leading character and the closer appears after it.
    start = name.find(opener)
    end = name.find(closer)
    if start > 0 and end > start:
        return name[:start] + name[end + 1:]
    return name


def cleanup_test_name(name, strip_tags=True, strip_scenarios=False):
    """Clean up a test name for display.

    By default the bracketed tags are stripped out, because they do not
    help in matching a test to its result.  The parenthesised
    testscenarios suffix (not to be confused with tempest scenarios) can
    optionally be stripped too, although it is often needed to identify
    generated negative tests.
    """
    if strip_tags:
        name = _strip_first_span(name, '[', ']')
    if strip_scenarios:
        name = _strip_first_span(name, '(', ')')
    return name
+
+
def get_duration(timestamps):
    """Format the elapsed time between two datetimes as '<sec>.<usec>s'.

    :param timestamps: (start, end) datetime pair; if either is missing
        an empty string is returned.
    """
    start, end = timestamps
    if not (start and end):
        return ''
    delta = end - start
    whole_seconds = delta.days * DAY_SECONDS + delta.seconds
    return '%d.%06ds' % (whole_seconds, delta.microseconds)
+
+
def find_worker(test):
    """Extract the worker number from a test's 'worker-N' tag.

    :returns: the integer N of the first worker tag, or the string 'NaN'
        when the test carries no worker tag.
    """
    prefix = 'worker-'
    tagged = (tag for tag in test['tags'] if tag.startswith(prefix))
    first = next(tagged, None)
    if first is None:
        return 'NaN'
    return int(first[len(prefix):])
+
+
# Print out stdout/stderr if it exists, always
def print_attachments(stream, test, all_channels=False):
    """Print subunit attachments that contain content.

    Runs in two modes: for ordinary results only the stdout/stderr
    channels are shown; with all_channels=True every attachment is dumped
    (used when showing failures).
    """
    wanted = ('stdout', 'stderr')
    for raw_name, detail in test['details'].items():
        # NOTE(sdague): the subunit names are a little crazy, and actually
        # look like pythonlogging:'' (with the colon and quotes) -- keep
        # only the part before the colon.
        channel = raw_name.split(':')[0]
        # Repair the bogus 'test/...' mime major type some streams emit.
        if detail.content_type.type == 'test':
            detail.content_type.type = 'text'
        if not (all_channels or channel in wanted):
            continue
        body = detail.as_text()
        if not body:
            continue
        title = "Captured %s:" % channel
        stream.write("\n%s\n%s\n" % (title, ('~' * len(title))))
        # Indent attachment lines four spaces to offset them visually.
        for line in body.split('\n'):
            stream.write("    %s\n" % line)
+
+
def show_outcome(stream, test, print_failures=False, failonly=False):
    """Write one test's result line and record it in the module tallies.

    :param stream: writable text stream for the human-readable report.
    :param test: test dict as produced by testtools.StreamToDict.
    :param print_failures: when True, inline attachment dumps for failures
        are suppressed (they are printed later by print_fails instead).
    :param failonly: when True, only failed tests produce output.
    """
    status = test['status']
    # TODO(sdague): ask lifeless why on this?
    # 'exists' events merely enumerate tests; nothing ran yet.
    if status == 'exists':
        return

    worker = find_worker(test)
    name = cleanup_test_name(test['id'])
    duration = get_duration(test['timestamps'])

    RESULTS.setdefault(worker, []).append(test)

    # The synthetic process-returncode "test" is bookkeeping, not a result.
    if name == 'process-returncode':
        return

    if status == 'fail':
        FAILS.append(test)
        stream.write('{%s} %s [%s] ... FAILED\n' % (worker, name, duration))
        if not print_failures:
            print_attachments(stream, test, all_channels=True)
    elif not failonly:
        if status == 'success':
            stream.write('{%s} %s [%s] ... ok\n' % (worker, name, duration))
            print_attachments(stream, test)
        elif status == 'skip':
            stream.write('{%s} %s ... SKIPPED: %s\n' % (
                worker, name, test['details']['reason'].as_text()))
        else:
            stream.write('{%s} %s [%s] ... %s\n' % (
                worker, name, duration, test['status']))
            if not print_failures:
                print_attachments(stream, test, all_channels=True)

    stream.flush()
+
+
def print_fails(stream):
    """Write a summary block for every recorded failure.

    Currently unused; retained while inline vs. end-of-run failure
    reporting is still being debated.
    """
    if not FAILS:
        return
    stream.write("\n==============================\n")
    stream.write("Failed %s tests - output below:" % len(FAILS))
    stream.write("\n==============================\n")
    for failed in FAILS:
        test_id = failed['id']
        stream.write("\n%s\n" % test_id)
        # Underline the test id to set each failure apart.
        stream.write("%s\n" % ('-' * len(test_id)))
        print_attachments(stream, failed, all_channels=True)
    stream.write('\n')
+
+
def count_tests(key, value):
    """Count recorded tests whose `key` field matches the regex `value`."""
    matcher = re.compile(value)
    total = 0
    for tests in RESULTS.values():
        total += sum(1 for test in tests
                     if key in test and matcher.search(test[key]))
    return total
+
+
def run_time():
    """Sum the per-test durations (in seconds) across every worker."""
    total = 0.0
    for tests in RESULTS.values():
        total += sum(float(get_duration(t['timestamps']).strip('s'))
                     for t in tests)
    return total
+
+
def worker_stats(worker):
    """Return (test count, wall-clock timedelta) for one worker's tests."""
    tests = RESULTS[worker]
    first_start = tests[0]['timestamps'][0]
    last_stop = tests[-1]['timestamps'][1]
    return len(tests), last_stop - first_start
+
+
def print_summary(stream):
    """Write run totals and a per-worker balance report to `stream`."""
    stream.write("\n======\nTotals\n======\n")
    stream.write("Run: %s in %s sec.\n" % (count_tests('status', '.*'),
                                           run_time()))
    for label, pattern in (('Passed', 'success'),
                           ('Skipped', 'skip'),
                           ('Failed', 'fail')):
        stream.write(" - %s: %s\n" % (label, count_tests('status', pattern)))

    # RESULTS can be empty (e.g. nothing but process-returncode entries
    # in a run that started no tests); skip the balance section then.
    if RESULTS:
        stream.write("\n==============\nWorker Balance\n==============\n")

        for worker in range(max(RESULTS.keys()) + 1):
            if worker not in RESULTS:
                stream.write(
                    " - WARNING: missing Worker %s! "
                    "Race in testr accounting.\n" % worker)
            else:
                count, elapsed = worker_stats(worker)
                stream.write(" - Worker %s (%s tests) => %ss\n" %
                             (worker, count, elapsed))
+
+
def parse_args():
    """Build and evaluate the command line options.

    :returns: argparse.Namespace with boolean attributes print_failures
        (suppress realtime failure debug), post_fails (dump failures after
        the stream ends) and failonly (hide successful tests).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--no-failure-debug', '-n', action='store_true',
                        dest='print_failures', help='Disable printing failure '
                        'debug information in realtime')
    # BUG FIX: help text typo "proccesed" -> "processed".
    parser.add_argument('--fails', '-f', action='store_true',
                        dest='post_fails', help='Print failure debug '
                        'information after the stream is processed')
    parser.add_argument('--failonly', action='store_true',
                        dest='failonly', help="Don't print success items",
                        # Presence of TRACE_FAILONLY (any value, even empty)
                        # enables the behaviour; equivalent to the previous
                        # "os.environ.get(...) is not False" expression.
                        default=('TRACE_FAILONLY' in os.environ))
    return parser.parse_args()
+
+
def main():
    """Drive the trace: subunit on stdin -> live report + summary on stdout.

    :returns: process exit code; 1 when no tests ran or any test failed.
    """
    args = parse_args()
    # Fan the stream out to three sinks: raw start/attachment echoing,
    # per-test outcome lines, and an aggregate pass/fail summary.
    summary = testtools.StreamSummary()
    outcome_writer = functools.partial(show_outcome, sys.stdout,
                                       print_failures=args.print_failures,
                                       failonly=args.failonly)
    sinks = [Starts(sys.stdout),
             testtools.StreamToDict(outcome_writer),
             summary]
    result = testtools.CopyStreamResult(sinks)
    source = subunit.ByteStreamToStreamResult(
        sys.stdin, non_subunit_name='stdout')
    result.startTestRun()
    try:
        source.run(result)
    finally:
        result.stopTestRun()
    if count_tests('status', '.*') == 0:
        print("The test run didn't actually run any tests")
        return 1
    if args.post_fails:
        print_fails(sys.stdout)
    print_summary(sys.stdout)
    return 0 if summary.wasSuccessful() else 1


if __name__ == '__main__':
    sys.exit(main())
diff --git a/networking-odl/tools/with_venv.sh b/networking-odl/tools/with_venv.sh
new file mode 100755 (executable)
index 0000000..dea5c5f
--- /dev/null
@@ -0,0 +1,19 @@
#!/bin/bash
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

# Run the given command inside the project's .venv virtualenv:
#   tools/with_venv.sh <command> [args...]

# BUG FIX: quote the expansions so the script survives a checkout path
# containing spaces; $(...) replaces the legacy backtick substitution.
TOOLS=$(dirname "$0")
VENV=$TOOLS/../.venv
source "$VENV/bin/activate" && "$@"
diff --git a/networking-odl/tox.ini b/networking-odl/tox.ini
new file mode 100644 (file)
index 0000000..644dbf3
--- /dev/null
@@ -0,0 +1,72 @@
# Tox configuration for networking-odl (configparser dialect, read by tox).
# Comments are kept on their own lines at column 0: indented lines inside a
# multi-line value would be treated as value continuations by configparser.
[tox]
# Environments run by a bare "tox" invocation.
envlist = docs,py35,py34,py27,pep8
minversion = 1.6
# The project is installed via "usedevelop" below, so skip building an sdist.
skipsdist = True

# Base settings inherited by every testenv below.
[testenv]
setenv = VIRTUAL_ENV={envdir}
         PYTHONWARNINGS=default::DeprecationWarning
# Let proxy settings from the invoking shell reach pip and the tests.
passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY
usedevelop = True
install_command = pip install -r requirements.txt -U {opts} {packages}
# Neutron and the service repos are installed straight from git ("-e<url>")
# alongside the test requirements.
deps = -egit+https://git.openstack.org/openstack/neutron#egg=neutron
       -egit+https://git.openstack.org/openstack/neutron-fwaas#egg=neutron-fwaas
       -egit+https://git.openstack.org/openstack/neutron-lbaas#egg=neutron-lbaas
       -egit+https://git.openstack.org/openstack/networking-l2gw#egg=networking-l2gw
       -r{toxinidir}/test-requirements.txt
whitelist_externals = bash
commands = bash tools/pretty_tox.sh '{posargs}'

# Style checks: flake8, doc8 and a sanity check of the alembic migrations.
[testenv:pep8]
commands =
  flake8
  doc8 doc/source devstack releasenotes/source rally-jobs
  neutron-db-manage --subproject networking-odl check_migration

# Translation-marker consistency check.
[testenv:i18n]
commands = python ./tools/check_i18n.py ./networking_odl ./tools/i18n_cfg.py

[testenv:venv]
# NOTE(yamahata): translation job can't use zuul-cloner or upper-constraints
install_command = pip install -U {opts} {packages}
commands = {posargs}

# Unit tests with coverage reporting.
[testenv:cover]
commands =
  python setup.py test --coverage --coverage-package-name=networking_odl --testr-args='{posargs}'
  coverage report

[testenv:docs]
commands =
  doc8 doc/source devstack releasenotes/source rally-jobs
  python setup.py build_sphinx

# Interactive debugging of a test run via oslo_debug_helper.
[testenv:debug]
commands = oslo_debug_helper -t networking_odl/tests {posargs}

[hacking]
import_exceptions = networking_odl._i18n
local-check-factory = neutron_lib.hacking.checks.factory

[doc8]
# File extensions to check
extensions = .rst

[flake8]
# H803 skipped on purpose per list discussion.
# E123, E125 skipped as they are invalid PEP-8.
# TODO(dougwig) -- uncomment this to test for remaining linkages
# N530 direct neutron imports not allowed
show-source = True
ignore = E123,E125,H803,N530
exclude=./.*,dist,doc,releasenotes,*lib/python*,*egg,build,tools

# Regenerate the sample ml2_conf_odl.ini with oslo-config-generator.
[testenv:genconfig]
deps = -r{toxinidir}/requirements.txt
commands =
    mkdir -p etc/neutron/plugins/ml2
    oslo-config-generator --namespace ml2_odl --output-file etc/neutron/plugins/ml2/ml2_conf_odl.ini.sample
whitelist_externals = mkdir

# Build the release notes with reno/sphinx.
[testenv:releasenotes]
commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html